code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import pytest
import numpy as np
import sys
if (sys.version_info > (3, 0)):
from io import StringIO
else:
from StringIO import StringIO
from keras_contrib import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Conv2D, Flatten, Activation
from keras import backend as K
# 11 output units: with one neuron dead, 1/11 (~9.1%) is just below the 10%
# reporting threshold applied when verbose=False
n_out = 11
def check_print(do_train, expected_warnings, nr_dead=None, perc_dead=None):
    """Run ``do_train`` with stdout captured and verify the dead-ReLU warnings.

    :param do_train: zero-argument callable that fits a model and may print
        warnings such as "Layer dense (#0) has 2 dead neurons (20.00%)!"
    :param expected_warnings: int, exact number of "dead neurons" warnings
        expected on stdout
    :param nr_dead: int, expected dead-neuron count in the warning text
    :param perc_dead: float, expected dead fraction; 10% should be written as 0.1
    """
    saved_stdout = sys.stdout
    out = StringIO()
    out.flush()
    sys.stdout = out  # overwrite current stdout
    try:
        do_train()
        # get prints, e.g. "Layer dense (#0) has 2 dead neurons (20.00%)!"
        stdoutput = out.getvalue().strip()
    finally:
        # restore stdout even if do_train() raises, so later output is not lost
        sys.stdout = saved_stdout
        out.close()
    str_to_count = "dead neurons"
    count = stdoutput.count(str_to_count)
    assert expected_warnings == count
    if expected_warnings and (nr_dead is not None):
        str_to_check = 'has {} dead'.format(nr_dead)
        assert str_to_check in stdoutput, '"{}" not in "{}"'.format(str_to_check, stdoutput)
    if expected_warnings and (perc_dead is not None):
        str_to_check = 'neurons ({:.2%})!'.format(perc_dead)
        assert str_to_check in stdoutput, '"{}" not in "{}"'.format(str_to_check, stdoutput)
def test_DeadDeadReluDetector():
    """DeadReluDetector on a bias-free Dense model.

    Dead neurons are produced by zeroing whole weight columns; with
    verbose=True the callback must print one warning naming the dead count
    and percentage, with verbose=False a single dead neuron (1/11, just
    below 10%) must stay silent.
    """
    n_samples = 9
    input_shape = (n_samples, 3, 4)  # 4 input features
    shape_out = (n_samples, 3, n_out)  # 11 output features
    shape_weights = (4, n_out)
    # ignore batch size
    input_shape_dense = tuple(input_shape[1:])
    def do_test(weights, expected_warnings, verbose, nr_dead=None, perc_dead=None):
        """Fit a one-layer Dense model with the given weights and check stdout."""
        def do_train():
            dataset = np.ones(input_shape)  # data to be fed as training
            model = Sequential()
            model.add(Dense(n_out, activation='relu', input_shape=input_shape_dense,
                            use_bias=False, weights=[weights], name='dense'))
            model.compile(optimizer='sgd', loss='categorical_crossentropy')
            model.fit(
                dataset,
                np.ones(shape_out),
                batch_size=1,
                epochs=1,
                callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
                verbose=False
            )
        check_print(do_train, expected_warnings, nr_dead, perc_dead)
    weights_1_dead = np.ones(shape_weights)  # weights that correspond to NN with 1/11 neurons dead
    weights_2_dead = np.ones(shape_weights)  # weights that correspond to NN with 2/11 neurons dead
    weights_all_dead = np.zeros(shape_weights)  # weights that correspond to all neurons dead
    # zeroing a weight column kills the corresponding output neuron
    weights_1_dead[:, 0] = 0
    weights_2_dead[:, 0:2] = 0
    do_test(weights_1_dead, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
    do_test(weights_1_dead, verbose=False, expected_warnings=0)
    do_test(weights_2_dead, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
    # do_test(weights_all_dead, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_bias():
    """DeadReluDetector on a Dense model that also carries a (zero) bias.

    Same scenario as test_DeadDeadReluDetector, but the layer is built with
    use_bias=True and an explicit all-zero bias vector.
    """
    n_samples = 9
    input_shape = (n_samples, 4)  # 4 input features
    shape_weights = (4, n_out)
    shape_bias = (n_out, )
    shape_out = (n_samples, n_out)  # 11 output features
    # ignore batch size
    input_shape_dense = tuple(input_shape[1:])
    def do_test(weights, bias, expected_warnings, verbose, nr_dead=None, perc_dead=None):
        """Fit a one-layer Dense model with the given weights/bias and check stdout."""
        def do_train():
            dataset = np.ones(input_shape)  # data to be fed as training
            model = Sequential()
            model.add(Dense(n_out, activation='relu', input_shape=input_shape_dense,
                            use_bias=True, weights=[weights, bias], name='dense'))
            model.compile(optimizer='sgd', loss='categorical_crossentropy')
            model.fit(
                dataset,
                np.ones(shape_out),
                batch_size=1,
                epochs=1,
                callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
                verbose=False
            )
        check_print(do_train, expected_warnings, nr_dead, perc_dead)
    weights_1_dead = np.ones(shape_weights)  # weights that correspond to NN with 1/11 neurons dead
    weights_2_dead = np.ones(shape_weights)  # weights that correspond to NN with 2/11 neurons dead
    weights_all_dead = np.zeros(shape_weights)  # weights that correspond to all neurons dead
    # zeroing a weight column kills the corresponding output neuron
    weights_1_dead[:, 0] = 0
    weights_2_dead[:, 0:2] = 0
    bias = np.zeros(shape_bias)
    do_test(weights_1_dead, bias, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
    do_test(weights_1_dead, bias, verbose=False, expected_warnings=0)
    do_test(weights_2_dead, bias, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
    # do_test(weights_all_dead, bias, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_conv():
    """DeadReluDetector on a Conv2D model.

    Dead featuremaps are produced by zeroing whole slices along the last
    (output-channel) axis of the kernel weights.
    """
    n_samples = 9
    # (5, 5) kernel, 4 input featuremaps and 11 output featuremaps
    if K.image_data_format() == 'channels_last':
        input_shape = (n_samples, 5, 5, 4)
    else:
        input_shape = (n_samples, 4, 5, 5)
    # ignore batch size
    input_shape_conv = tuple(input_shape[1:])
    shape_weights = (5, 5, 4, n_out)
    shape_out = (n_samples, n_out)
    def do_test(weights_bias, expected_warnings, verbose, nr_dead=None, perc_dead=None):
        """Fit a Conv2D+Flatten model with the given [weights, bias] and check stdout.

        :param perc_dead: as float, 10% should be written as 0.1
        """
        def do_train():
            dataset = np.ones(input_shape)  # data to be fed as training
            model = Sequential()
            model.add(Conv2D(n_out, (5, 5), activation='relu', input_shape=input_shape_conv,
                             use_bias=True, weights=weights_bias, name='conv'))
            model.add(Flatten())  # to handle Theano's categorical crossentropy
            model.compile(optimizer='sgd', loss='categorical_crossentropy')
            model.fit(
                dataset,
                np.ones(shape_out),
                batch_size=1,
                epochs=1,
                callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
                verbose=False
            )
        check_print(do_train, expected_warnings, nr_dead, perc_dead)
    weights_1_dead = np.ones(shape_weights)  # weights that correspond to NN with 1/11 neurons dead
    weights_1_dead[..., 0] = 0
    weights_2_dead = np.ones(shape_weights)  # weights that correspond to NN with 2/11 neurons dead
    weights_2_dead[..., 0:2] = 0
    weights_all_dead = np.zeros(shape_weights)  # weights that correspond to NN with all neurons dead
    bias = np.zeros((11, ))
    weights_bias_1_dead = [weights_1_dead, bias]
    weights_bias_2_dead = [weights_2_dead, bias]
    weights_bias_all_dead = [weights_all_dead, bias]
    do_test(weights_bias_1_dead, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
    do_test(weights_bias_1_dead, verbose=False, expected_warnings=0)
    do_test(weights_bias_2_dead, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
    # do_test(weights_bias_all_dead, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_activation():
    """
    Tests that using "Activation" layer does not throw error
    """
    input_data = Input(shape=(1,))
    output_data = Activation('relu')(input_data)
    model = Model(input_data, output_data)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    # single-sample fit; there are no assertions — the test only checks that
    # the callback runs without raising on a weight-less Activation layer
    model.fit(
        np.array([[1]]),
        np.array([[1]]),
        epochs=1,
        validation_data=(np.array([[1]]), np.array([[1]])),
        callbacks=[callbacks.DeadReluDetector(np.array([[1]]))]
    )
# allow running this test module directly (outside a pytest invocation)
if __name__ == '__main__':
    pytest.main([__file__])
| [
"StringIO.StringIO",
"keras.layers.Conv2D",
"keras.backend.image_data_format",
"numpy.ones",
"keras.layers.Flatten",
"keras_contrib.callbacks.DeadReluDetector",
"pytest.main",
"keras.models.Sequential",
"numpy.array",
"numpy.zeros",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.A... | [((705, 715), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (713, 715), False, 'from StringIO import StringIO\n'), ((2600, 2622), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (2607, 2622), True, 'import numpy as np\n'), ((2700, 2722), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (2707, 2722), True, 'import numpy as np\n'), ((2802, 2825), 'numpy.zeros', 'np.zeros', (['shape_weights'], {}), '(shape_weights)\n', (2810, 2825), True, 'import numpy as np\n'), ((4410, 4432), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (4417, 4432), True, 'import numpy as np\n'), ((4510, 4532), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (4517, 4532), True, 'import numpy as np\n'), ((4612, 4635), 'numpy.zeros', 'np.zeros', (['shape_weights'], {}), '(shape_weights)\n', (4620, 4635), True, 'import numpy as np\n'), ((4756, 4776), 'numpy.zeros', 'np.zeros', (['shape_bias'], {}), '(shape_bias)\n', (4764, 4776), True, 'import numpy as np\n'), ((6565, 6587), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (6572, 6587), True, 'import numpy as np\n'), ((6700, 6722), 'numpy.ones', 'np.ones', (['shape_weights'], {}), '(shape_weights)\n', (6707, 6722), True, 'import numpy as np\n'), ((6837, 6860), 'numpy.zeros', 'np.zeros', (['shape_weights'], {}), '(shape_weights)\n', (6845, 6860), True, 'import numpy as np\n'), ((6930, 6945), 'numpy.zeros', 'np.zeros', (['(11,)'], {}), '((11,))\n', (6938, 6945), True, 'import numpy as np\n'), ((7612, 7629), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (7617, 7629), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((7691, 7721), 'keras.models.Model', 'Model', (['input_data', 'output_data'], {}), '(input_data, output_data)\n', (7696, 7721), False, 'from keras.models import Sequential, Model\n'), ((8036, 8059), 'pytest.main', 'pytest.main', (['[__file__]'], 
{}), '([__file__])\n', (8047, 8059), False, 'import pytest\n'), ((5287, 5308), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5306, 5308), True, 'from keras import backend as K\n'), ((7648, 7666), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7658, 7666), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((7813, 7828), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7821, 7828), True, 'import numpy as np\n'), ((7838, 7853), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7846, 7853), True, 'import numpy as np\n'), ((1917, 1937), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (1924, 1937), True, 'import numpy as np\n'), ((1990, 2002), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2000, 2002), False, 'from keras.models import Sequential, Model\n'), ((3724, 3744), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (3731, 3744), True, 'import numpy as np\n'), ((3795, 3807), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3805, 3807), False, 'from keras.models import Sequential, Model\n'), ((5794, 5814), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (5801, 5814), True, 'import numpy as np\n'), ((5865, 5877), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5875, 5877), False, 'from keras.models import Sequential, Model\n'), ((2025, 2141), 'keras.layers.Dense', 'Dense', (['n_out'], {'activation': '"""relu"""', 'input_shape': 'input_shape_dense', 'use_bias': '(False)', 'weights': '[weights]', 'name': '"""dense"""'}), "(n_out, activation='relu', input_shape=input_shape_dense, use_bias=\n False, weights=[weights], name='dense')\n", (2030, 2141), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((2306, 2324), 'numpy.ones', 'np.ones', (['shape_out'], {}), '(shape_out)\n', (2313, 2324), True, 'import numpy as np\n'), ((3830, 3951), 
'keras.layers.Dense', 'Dense', (['n_out'], {'activation': '"""relu"""', 'input_shape': 'input_shape_dense', 'use_bias': '(True)', 'weights': '[weights, bias]', 'name': '"""dense"""'}), "(n_out, activation='relu', input_shape=input_shape_dense, use_bias=\n True, weights=[weights, bias], name='dense')\n", (3835, 3951), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((4116, 4134), 'numpy.ones', 'np.ones', (['shape_out'], {}), '(shape_out)\n', (4123, 4134), True, 'import numpy as np\n'), ((5900, 6024), 'keras.layers.Conv2D', 'Conv2D', (['n_out', '(5, 5)'], {'activation': '"""relu"""', 'input_shape': 'input_shape_conv', 'use_bias': '(True)', 'weights': 'weights_bias', 'name': '"""conv"""'}), "(n_out, (5, 5), activation='relu', input_shape=input_shape_conv,\n use_bias=True, weights=weights_bias, name='conv')\n", (5906, 6024), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((6073, 6082), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6080, 6082), False, 'from keras.layers import Input, Dense, Conv2D, Flatten, Activation\n'), ((6271, 6289), 'numpy.ones', 'np.ones', (['shape_out'], {}), '(shape_out)\n', (6278, 6289), True, 'import numpy as np\n'), ((7898, 7913), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7906, 7913), True, 'import numpy as np\n'), ((7915, 7930), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7923, 7930), True, 'import numpy as np\n'), ((7979, 7994), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7987, 7994), True, 'import numpy as np\n'), ((2409, 2461), 'keras_contrib.callbacks.DeadReluDetector', 'callbacks.DeadReluDetector', (['dataset'], {'verbose': 'verbose'}), '(dataset, verbose=verbose)\n', (2435, 2461), False, 'from keras_contrib import callbacks\n'), ((4219, 4271), 'keras_contrib.callbacks.DeadReluDetector', 'callbacks.DeadReluDetector', (['dataset'], {'verbose': 'verbose'}), '(dataset, verbose=verbose)\n', (4245, 4271), False, 'from 
keras_contrib import callbacks\n'), ((6374, 6426), 'keras_contrib.callbacks.DeadReluDetector', 'callbacks.DeadReluDetector', (['dataset'], {'verbose': 'verbose'}), '(dataset, verbose=verbose)\n', (6400, 6426), False, 'from keras_contrib import callbacks\n')] |
# -*- coding: utf-8 -*-
"""Unit tests for classifier base class functionality."""
__author__ = ["mloning", "fkiraly", "TonyBagnall", "MatthewMiddlehurst"]
import numpy as np
import pandas as pd
import pytest
from sktime.classification.base import (
BaseClassifier,
_check_classifier_input,
_internal_convert,
)
from sktime.classification.feature_based import Catch22Classifier
from sktime.utils._testing.panel import _make_classification_y, _make_panel
class _DummyClassifier(BaseClassifier):
    """Dummy classifier for testing base class fit/predict/predict_proba.

    The private hooks are no-ops that return ``self``; the tests below only
    exercise the input conversion/validation done by the base class.
    """
    def _fit(self, X, y):
        """Fit dummy (no-op)."""
        return self
    def _predict(self, X):
        """Predict dummy (no-op)."""
        return self
    def _predict_proba(self, X):
        """Predict proba dummy (no-op)."""
        return self
class _DummyComposite(_DummyClassifier):
    """Dummy classifier for testing base class fit/predict/predict_proba.

    Stores another estimator in ``foo``.
    """
    def __init__(self, foo):
        # NOTE(review): tests pass an estimator instance as ``foo``; the base
        # class then appears to warn instead of raise on capability checks —
        # confirm against BaseClassifier._check_capabilities.
        self.foo = foo
class _DummyHandlesAllInput(BaseClassifier):
    """Dummy classifier for testing base class fit/predict/predict_proba.

    Advertises every input capability via ``_tags``, so capability checks
    pass for any combination of missing/multivariate/unequal input.
    """
    _tags = {
        "capability:multivariate": True,
        "capability:unequal_length": True,
        "capability:missing_values": True,
    }
    def _fit(self, X, y):
        """Fit dummy (no-op)."""
        return self
    def _predict(self, X):
        """Predict dummy (no-op)."""
        return self
    def _predict_proba(self, X):
        """Predict proba dummy (no-op)."""
        return self
class _DummyConvertPandas(BaseClassifier):
    """Dummy classifier for testing base class fit/predict/predict_proba.

    Declares a nested-DataFrame inner mtype, so the base class converts any
    incoming panel to that format before calling the private hooks.
    """
    _tags = {
        "X_inner_mtype": "nested_univ",  # which type do _fit/_predict, support for X?
    }
    def _fit(self, X, y):
        """Fit dummy (no-op)."""
        return self
    def _predict(self, X):
        """Predict dummy (no-op)."""
        return self
    def _predict_proba(self, X):
        """Predict proba dummy (no-op)."""
        return self
# Regex fragments matched (via pytest.raises/pytest.warns ``match=``) against
# the error and warning messages emitted by the base class.
multivariate_message = r"multivariate series"
missing_message = r"missing values"
unequal_message = r"unequal length series"
incorrect_X_data_structure = r"must be a np.array or a pd.Series"
incorrect_y_data_structure = r"must be 1-dimensional"
def test_base_classifier_fit():
    """Test function for the BaseClassifier class fit.

    Test fit. It should:
    1. Work with 2D, 3D and DataFrame for X and nparray for y.
    2. Calculate the number of classes and record the fit time.
    3. have self.n_jobs set or throw an exception if the classifier can
    multithread.
    4. Set the class dictionary correctly.
    5. Set is_fitted after a call to _fit.
    6. Return self.
    """
    dummy = _DummyClassifier()
    cases = 5
    length = 10
    # 2D univariate, 3D multivariate, and nested-DataFrame inputs
    test_X1 = np.random.uniform(-1, 1, size=(cases, length))
    test_X2 = np.random.uniform(-1, 1, size=(cases, 2, length))
    test_X3 = _create_example_dataframe(cases=cases, dimensions=1, length=length)
    test_X4 = _create_example_dataframe(cases=cases, dimensions=3, length=length)
    test_y1 = np.random.randint(0, 2, size=cases)
    result = dummy.fit(test_X1, test_y1)
    assert result is dummy
    # multivariate X must be rejected by a classifier without that capability
    with pytest.raises(ValueError, match=multivariate_message):
        dummy.fit(test_X2, test_y1)
    result = dummy.fit(test_X3, test_y1)
    assert result is dummy
    with pytest.raises(ValueError, match=multivariate_message):
        dummy.fit(test_X4, test_y1)
    # y passed as a 2D matrix of shape (cases, 1) must raise a specific error
    test_y2 = np.array([test_y1]).transpose()
    with pytest.raises(ValueError, match=incorrect_y_data_structure):
        dummy.fit(test_X1, test_y2)
    # passing a DataFrame as y must raise a specific error
    with pytest.raises(ValueError, match=incorrect_X_data_structure):
        dummy.fit(test_X1, test_X3)
TF = [True, False]  # boolean grid used to parametrize the capability flags
@pytest.mark.parametrize("missing", TF)
@pytest.mark.parametrize("multivariate", TF)
@pytest.mark.parametrize("unequal", TF)
def test_check_capabilities(missing, multivariate, unequal):
    """Test the checking of capabilities.

    There are eight different combinations to be tested with a classifier that can
    handle it and that cannot. Obvs could loop, but I think its clearer to just
    explicitly test;
    """
    handles_none = _DummyClassifier()
    handles_none_composite = _DummyComposite(_DummyClassifier())
    # checks that errors are raised for a classifier without the capability
    if missing:
        with pytest.raises(ValueError, match=missing_message):
            handles_none._check_capabilities(missing, multivariate, unequal)
    if multivariate:
        with pytest.raises(ValueError, match=multivariate_message):
            handles_none._check_capabilities(missing, multivariate, unequal)
    if unequal:
        with pytest.raises(ValueError, match=unequal_message):
            handles_none._check_capabilities(missing, multivariate, unequal)
    if not missing and not multivariate and not unequal:
        handles_none._check_capabilities(missing, multivariate, unequal)
    # the composite variant must only warn instead of raising
    if missing:
        with pytest.warns(UserWarning, match=missing_message):
            handles_none_composite._check_capabilities(missing, multivariate, unequal)
    if multivariate:
        with pytest.warns(UserWarning, match=multivariate_message):
            handles_none_composite._check_capabilities(missing, multivariate, unequal)
    if unequal:
        with pytest.warns(UserWarning, match=unequal_message):
            handles_none_composite._check_capabilities(missing, multivariate, unequal)
    if not missing and not multivariate and not unequal:
        handles_none_composite._check_capabilities(missing, multivariate, unequal)
    # a classifier advertising every capability must never raise or warn
    handles_all = _DummyHandlesAllInput()
    handles_all._check_capabilities(missing, multivariate, unequal)
def test_convert_input():
    """Test the conversions from dataframe to numpy.

    1. Pass a 2D numpy X, get a 3D numpy X
    2. Pass a 3D numpy X, get a 3D numpy X
    3. Pass a pandas numpy X, equal length, get a 3D numpy X
    4. Pass a pd.Series y, get a pd.Series back
    5. Pass a np.ndarray y, get a pd.Series back
    """
    cases = 5
    length = 10
    test_X1 = np.random.uniform(-1, 1, size=(cases, length))
    test_X2 = np.random.uniform(-1, 1, size=(cases, 2, length))
    tester = _DummyClassifier()
    tempX = tester._convert_X(test_X2)
    assert tempX.shape[0] == cases and tempX.shape[1] == 2 and tempX.shape[2] == length
    test_X3 = _create_example_dataframe(cases=cases, dimensions=1, length=length)
    test_X4 = _create_example_dataframe(cases=cases, dimensions=3, length=length)
    tempX = tester._convert_X(test_X3)
    assert tempX.shape[0] == cases and tempX.shape[1] == 1 and tempX.shape[2] == length
    tempX = tester._convert_X(test_X4)
    assert tempX.shape[0] == cases and tempX.shape[1] == 3 and tempX.shape[2] == length
    # a classifier whose inner mtype is nested_univ converts to a DataFrame
    tester = _DummyConvertPandas()
    tempX = tester._convert_X(test_X2)
    assert isinstance(tempX, pd.DataFrame)
    assert tempX.shape[0] == cases
    assert tempX.shape[1] == 2
    test_y1 = np.random.randint(0, 1, size=cases)
    test_y1 = pd.Series(test_y1)
    tempX, tempY = _internal_convert(test_X1, test_y1)
    assert isinstance(tempY, np.ndarray)
    assert isinstance(tempX, np.ndarray)
    assert tempX.ndim == 3
def test__check_classifier_input():
    """Test for valid estimator format.

    1. Test correct: X: np.array of 2 and 3 dimensions vs y:np.array and np.Series
    2. Test correct: X: pd.DataFrame with 1 and 3 cols vs y:np.array and np.Series
    3. Test incorrect: X with fewer cases than y
    4. Test incorrect: y as a list
    5. Test incorrect: too few cases or too short a series
    """
    # 1. Test correct: X: np.array of 2 and 3 dimensions vs y:np.array and np.Series
    test_X1 = np.random.uniform(-1, 1, size=(5, 10))
    test_X2 = np.random.uniform(-1, 1, size=(5, 2, 10))
    test_y1 = np.random.randint(0, 1, size=5)
    test_y2 = pd.Series(np.random.randn(5))
    _check_classifier_input(test_X2)
    _check_classifier_input(test_X2, test_y1)
    _check_classifier_input(test_X2, test_y2)
    # 2. Test correct: X: pd.DataFrame with 1 (univariate) and 3 cols(multivariate) vs
    # y:np.array and np.Series
    test_X3 = _create_nested_dataframe(5, 1, 10)
    test_X4 = _create_nested_dataframe(5, 3, 10)
    _check_classifier_input(test_X3, test_y1)
    _check_classifier_input(test_X4, test_y1)
    _check_classifier_input(test_X3, test_y2)
    _check_classifier_input(test_X4, test_y2)
    # 3. Test incorrect: X with fewer cases (3) than y (5)
    test_X5 = np.random.uniform(-1, 1, size=(3, 4, 10))
    with pytest.raises(ValueError, match=r".*Mismatch in number of cases*."):
        _check_classifier_input(test_X5, test_y1)
    # 4. Test incorrect data type: y is a List
    test_y3 = [1, 2, 3, 4, 5]
    with pytest.raises(
        TypeError, match=r".*X is not of a supported input data " r"type.*"
    ):
        _check_classifier_input(test_X1, test_y3)
    # 5. Test incorrect: too few cases (5 given, 6 required)
    with pytest.raises(ValueError, match=r".*Minimum number of cases required*."):
        _check_classifier_input(test_X2, test_y1, enforce_min_instances=6)
def _create_example_dataframe(cases=5, dimensions=1, length=10):
"""Create a simple data frame set of time series (X) for testing."""
test_X = pd.DataFrame(dtype=np.float32)
for i in range(0, dimensions):
instance_list = []
for _ in range(0, cases):
instance_list.append(pd.Series(np.random.randn(length)))
test_X["dimension_" + str(i)] = instance_list
return test_X
def _create_nested_dataframe(cases=5, dimensions=1, length=10):
testy = pd.DataFrame(dtype=np.float32)
for i in range(0, dimensions):
instance_list = []
for _ in range(0, cases):
instance_list.append(pd.Series(np.random.randn(length)))
testy["dimension_" + str(i + 1)] = instance_list
return testy
def _create_unequal_length_nested_dataframe(cases=5, dimensions=1, length=10):
testy = pd.DataFrame(dtype=np.float32)
for i in range(0, dimensions):
instance_list = []
for _ in range(0, cases - 1):
instance_list.append(pd.Series(np.random.randn(length)))
instance_list.append(pd.Series(np.random.randn(length - 1)))
testy["dimension_" + str(i + 1)] = instance_list
return testy
# Panel mtypes that the base class must accept unchanged in fit/predict
MTYPES = ["numpy3D", "pd-multiindex", "df-list", "numpyflat", "nested_univ"]
@pytest.mark.parametrize("mtype", MTYPES)
def test_input_conversion_fit_predict(mtype):
    """Test that base class lets all Panel mtypes through."""
    y = _make_classification_y()
    X = _make_panel(return_mtype=mtype)
    # a real classifier must fit/predict on every mtype without error
    clf = Catch22Classifier()
    clf.fit(X, y)
    clf.predict(X)
    # same for a classifier that declares a nested-DataFrame inner mtype
    clf = _DummyConvertPandas()
    clf.fit(X, y)
    clf.predict(X)
| [
"pandas.Series",
"sktime.classification.feature_based.Catch22Classifier",
"pandas.DataFrame",
"numpy.array",
"pytest.mark.parametrize",
"numpy.random.randint",
"sktime.utils._testing.panel._make_classification_y",
"pytest.raises",
"numpy.random.uniform",
"sktime.classification.base._internal_conve... | [((3943, 3981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""missing"""', 'TF'], {}), "('missing', TF)\n", (3966, 3981), False, 'import pytest\n'), ((3983, 4026), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multivariate"""', 'TF'], {}), "('multivariate', TF)\n", (4006, 4026), False, 'import pytest\n'), ((4028, 4066), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""unequal"""', 'TF'], {}), "('unequal', TF)\n", (4051, 4066), False, 'import pytest\n'), ((10668, 10708), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mtype"""', 'MTYPES'], {}), "('mtype', MTYPES)\n", (10691, 10708), False, 'import pytest\n'), ((2744, 2790), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(cases, length)'}), '(-1, 1, size=(cases, length))\n', (2761, 2790), True, 'import numpy as np\n'), ((2805, 2854), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(cases, 2, length)'}), '(-1, 1, size=(cases, 2, length))\n', (2822, 2854), True, 'import numpy as np\n'), ((3033, 3068), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'cases'}), '(0, 2, size=cases)\n', (3050, 3068), True, 'import numpy as np\n'), ((3555, 3574), 'numpy.array', 'np.array', (['[test_y1]'], {}), '([test_y1])\n', (3563, 3574), True, 'import numpy as np\n'), ((6249, 6295), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(cases, length)'}), '(-1, 1, size=(cases, length))\n', (6266, 6295), True, 'import numpy as np\n'), ((6310, 6359), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(cases, 2, length)'}), '(-1, 1, size=(cases, 2, length))\n', (6327, 6359), True, 'import numpy as np\n'), ((7248, 7283), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': 'cases'}), '(0, 1, size=cases)\n', (7265, 7283), True, 'import numpy as np\n'), ((7300, 7318), 'pandas.Series', 'pd.Series', (['test_y1'], 
{}), '(test_y1)\n', (7309, 7318), True, 'import pandas as pd\n'), ((7338, 7373), 'sktime.classification.base._internal_convert', '_internal_convert', (['test_X1', 'test_y1'], {}), '(test_X1, test_y1)\n', (7355, 7373), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((7978, 8016), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(5, 10)'}), '(-1, 1, size=(5, 10))\n', (7995, 8016), True, 'import numpy as np\n'), ((8031, 8072), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(5, 2, 10)'}), '(-1, 1, size=(5, 2, 10))\n', (8048, 8072), True, 'import numpy as np\n'), ((8087, 8118), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '(5)'}), '(0, 1, size=5)\n', (8104, 8118), True, 'import numpy as np\n'), ((8167, 8199), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X2'], {}), '(test_X2)\n', (8190, 8199), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8204, 8245), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X2', 'test_y1'], {}), '(test_X2, test_y1)\n', (8227, 8245), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8250, 8291), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X2', 'test_y2'], {}), '(test_X2, test_y2)\n', (8273, 8291), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8512, 8553), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X3', 'test_y1'], {}), '(test_X3, test_y1)\n', (8535, 8553), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8558, 8599), 'sktime.classification.base._check_classifier_input', 
'_check_classifier_input', (['test_X4', 'test_y1'], {}), '(test_X4, test_y1)\n', (8581, 8599), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8604, 8645), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X3', 'test_y2'], {}), '(test_X3, test_y2)\n', (8627, 8645), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8650, 8691), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X4', 'test_y2'], {}), '(test_X4, test_y2)\n', (8673, 8691), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((8757, 8798), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3, 4, 10)'}), '(-1, 1, size=(3, 4, 10))\n', (8774, 8798), True, 'import numpy as np\n'), ((9533, 9563), 'pandas.DataFrame', 'pd.DataFrame', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (9545, 9563), True, 'import pandas as pd\n'), ((9879, 9909), 'pandas.DataFrame', 'pd.DataFrame', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (9891, 9909), True, 'import pandas as pd\n'), ((10242, 10272), 'pandas.DataFrame', 'pd.DataFrame', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (10254, 10272), True, 'import pandas as pd\n'), ((10825, 10849), 'sktime.utils._testing.panel._make_classification_y', '_make_classification_y', ([], {}), '()\n', (10847, 10849), False, 'from sktime.utils._testing.panel import _make_classification_y, _make_panel\n'), ((10858, 10889), 'sktime.utils._testing.panel._make_panel', '_make_panel', ([], {'return_mtype': 'mtype'}), '(return_mtype=mtype)\n', (10869, 10889), False, 'from sktime.utils._testing.panel import _make_classification_y, _make_panel\n'), ((10901, 10920), 'sktime.classification.feature_based.Catch22Classifier', 'Catch22Classifier', ([], {}), '()\n', (10918, 10920), False, 'from 
sktime.classification.feature_based import Catch22Classifier\n'), ((3148, 3201), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'multivariate_message'}), '(ValueError, match=multivariate_message)\n', (3161, 3201), False, 'import pytest\n'), ((3352, 3405), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'multivariate_message'}), '(ValueError, match=multivariate_message)\n', (3365, 3405), False, 'import pytest\n'), ((3675, 3734), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'incorrect_y_data_structure'}), '(ValueError, match=incorrect_y_data_structure)\n', (3688, 3734), False, 'import pytest\n'), ((3813, 3872), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'incorrect_X_data_structure'}), '(ValueError, match=incorrect_X_data_structure)\n', (3826, 3872), False, 'import pytest\n'), ((8143, 8161), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (8158, 8161), True, 'import numpy as np\n'), ((8808, 8874), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*Mismatch in number of cases*."""'}), "(ValueError, match='.*Mismatch in number of cases*.')\n", (8821, 8874), False, 'import pytest\n'), ((8885, 8926), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X5', 'test_y1'], {}), '(test_X5, test_y1)\n', (8908, 8926), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((9013, 9090), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '""".*X is not of a supported input data type.*"""'}), "(TypeError, match='.*X is not of a supported input data type.*')\n", (9026, 9090), False, 'import pytest\n'), ((9119, 9160), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X1', 'test_y3'], {}), '(test_X1, test_y3)\n', (9142, 9160), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((9231, 9302), 
'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*Minimum number of cases required*."""'}), "(ValueError, match='.*Minimum number of cases required*.')\n", (9244, 9302), False, 'import pytest\n'), ((9313, 9379), 'sktime.classification.base._check_classifier_input', '_check_classifier_input', (['test_X2', 'test_y1'], {'enforce_min_instances': '(6)'}), '(test_X2, test_y1, enforce_min_instances=6)\n', (9336, 9379), False, 'from sktime.classification.base import BaseClassifier, _check_classifier_input, _internal_convert\n'), ((3634, 3653), 'numpy.array', 'np.array', (['[test_y1]'], {}), '([test_y1])\n', (3642, 3653), True, 'import numpy as np\n'), ((4532, 4580), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'missing_message'}), '(ValueError, match=missing_message)\n', (4545, 4580), False, 'import pytest\n'), ((4693, 4746), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'multivariate_message'}), '(ValueError, match=multivariate_message)\n', (4706, 4746), False, 'import pytest\n'), ((4854, 4902), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'unequal_message'}), '(ValueError, match=unequal_message)\n', (4867, 4902), False, 'import pytest\n'), ((5141, 5189), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'missing_message'}), '(UserWarning, match=missing_message)\n', (5153, 5189), False, 'import pytest\n'), ((5312, 5365), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'multivariate_message'}), '(UserWarning, match=multivariate_message)\n', (5324, 5365), False, 'import pytest\n'), ((5483, 5531), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'unequal_message'}), '(UserWarning, match=unequal_message)\n', (5495, 5531), False, 'import pytest\n'), ((6611, 6630), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (6626, 6630), True, 'import numpy as np\n'), ((10481, 10508), 'numpy.random.randn', 'np.random.randn', (['(length - 1)'], {}), '(length - 1)\n', (10496, 
10508), True, 'import numpy as np\n'), ((9703, 9726), 'numpy.random.randn', 'np.random.randn', (['length'], {}), '(length)\n', (9718, 9726), True, 'import numpy as np\n'), ((10049, 10072), 'numpy.random.randn', 'np.random.randn', (['length'], {}), '(length)\n', (10064, 10072), True, 'import numpy as np\n'), ((10416, 10439), 'numpy.random.randn', 'np.random.randn', (['length'], {}), '(length)\n', (10431, 10439), True, 'import numpy as np\n')] |
import subprocess
import os
import numpy as np
def main():
    """Generate one sbatch script per temperature slice and submit them all.

    Splits `num_T` temperatures (from np.linspace) across `num_scripts`
    SLURM jobs, writes a shell script per job, then launches `sbatch`
    for each script.
    """
    header_lines = ['#!/bin/bash']
    out_file = '#SBATCH --output=wolff-{0:0.1f}-{1:0.1f}.out'
    job_name = '#SBATCH --job-name="{0:0.1f}-{1:0.1f}"'
    script_file = 'wolff-{0:0.1f}-{1:0.1f}.sh'
    run_command = './wolff {0} {1} {2} {3}'
    filename = 'wolff-{0:0.1f}-{1:0.1f}.txt'
    num_scripts = 12
    num_T = 125
    Ts = np.linspace(0.01, 5, num_T)
    scripts = []
    for i in range(num_scripts):
        # Use integer division: on Python 3, `/` yields a float, which
        # raises TypeError when used as a list index below.
        low_idx = i * num_T // num_scripts
        if i != 0:
            low_idx += 1
        T_low = Ts[low_idx]
        high_idx = (i + 1) * num_T // num_scripts
        if i == num_scripts - 1:
            high_idx = -1  # last job takes everything to the end
        T_high = Ts[high_idx]
        script_lines = list(header_lines)
        script_lines.append(out_file.format(T_low, T_high))
        script_lines.append(job_name.format(T_low, T_high))
        # Number of temperatures this job simulates (inclusive of high_idx).
        if high_idx != -1:
            n_temps = len(Ts[low_idx:high_idx + 1])
        else:
            n_temps = len(Ts[low_idx:])
        script_lines.append(run_command.format(T_low, T_high, n_temps,
                                               filename.format(T_low, T_high)))
        scripts.append(script_file.format(T_low, T_high))
        # Context manager guarantees the script is flushed and closed
        # before sbatch tries to read it.
        with open(scripts[i], 'w') as out:
            for l in script_lines:
                out.write(l + '\n')
    procs = []
    for script in scripts:
        procs.append(subprocess.Popen(['sbatch', script]))


if __name__ == "__main__":
    main()
| [
"subprocess.Popen",
"numpy.linspace"
] | [((401, 428), 'numpy.linspace', 'np.linspace', (['(0.01)', '(5)', 'num_T'], {}), '(0.01, 5, num_T)\n', (412, 428), True, 'import numpy as np\n'), ((1547, 1583), 'subprocess.Popen', 'subprocess.Popen', (["['sbatch', script]"], {}), "(['sbatch', script])\n", (1563, 1583), False, 'import subprocess\n')] |
import os
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchviz import make_dot
import torch.nn.functional as F
from timeit import default_timer as timer
from utils import load_data, DEVICE, human_time
class Net(nn.Module):
    """CNN mapping a 3x36x120 captcha image to 4 independent 19-way
    character distributions, returned flattened as (N, 4 * 19)."""

    def __init__(self, gpu=False):
        """Build the layers and optionally move the model to DEVICE.

        :param gpu: when True, transfer the module to the global DEVICE.
        """
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 18, 5)   # -> 18 x 32 x 116
        self.pool1 = nn.MaxPool2d(2)       # -> 18 x 16 x 58
        self.conv2 = nn.Conv2d(18, 48, 5)  # -> 48 x 12 x 54
        self.pool2 = nn.MaxPool2d(2)       # -> 48 x 6 x 27
        self.drop = nn.Dropout(0.5)
        self.fc1 = nn.Linear(48 * 6 * 27, 360)
        self.fc2 = nn.Linear(360, 19 * 4)
        # Always record the device so load() works even when gpu=False
        # (previously self.device was left undefined in that case).
        self.device = 'cpu'
        if gpu:
            self.to(DEVICE)
            if str(DEVICE) != 'cpu':
                self.device = torch.cuda.get_device_name(0)

    def forward(self, x):
        """Return per-slot softmax probabilities flattened to (N, 76)."""
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = x.view(-1, 48 * 6 * 27)
        x = self.drop(x)
        x = F.relu(self.fc1(x))
        x = self.fc2(x).view(-1, 4, 19)
        x = F.softmax(x, dim=2)  # normalize each character slot separately
        x = x.view(-1, 4 * 19)
        return x

    def save(self, name, folder='./models'):
        """Persist the state dict under folder/name, creating the folder."""
        # exist_ok avoids a race between the existence check and creation.
        os.makedirs(folder, exist_ok=True)
        path = os.path.join(folder, name)
        torch.save(self.state_dict(), path)

    def load(self, name, folder='./models'):
        """Load a state dict written by save() and switch to eval mode."""
        path = os.path.join(folder, name)
        # 'cuda' (not 'gpu') is the valid torch.load map_location string.
        map_location = 'cpu' if self.device == 'cpu' else 'cuda'
        static_dict = torch.load(path, map_location)
        self.load_state_dict(static_dict)
        self.eval()

    def graph(self):
        """Render the computation graph for a dummy input via torchviz."""
        x = torch.rand(1, 3, 36, 120)
        y = self(x)
        return make_dot(y, params=dict(self.named_parameters()))
def loss_batch(model, loss_func, data, opt=None):
    """Process one batch: compute loss and, in eval mode, accuracy counts.

    :param model: callable mapping the image batch to (N, 76) outputs
    :param loss_func: loss callable taking (output, target)
    :param data: dict with 'image' and 'label' tensors
    :param opt: optimizer; when given, a training step is performed,
                otherwise per-character / whole-captcha accuracy is counted
    :return: (loss_value, single_correct, whole_correct, batch_size)
    """
    inputs, targets = data['image'], data['label']
    n_samples = len(inputs)
    predictions = model(inputs)
    loss = loss_func(predictions, targets)
    single_correct = 0
    whole_correct = 0
    if opt is not None:
        # Training step: backprop and update weights.
        opt.zero_grad()
        loss.backward()
        opt.step()
    else:
        # Evaluation: reshape to (N, 4 slots, 19 classes) and compare argmaxes.
        target_matrix = targets.view(-1, 4, 19)
        pred_matrix = predictions.view(-1, 4, 19)
        ans = torch.max(target_matrix, 2)[1]
        guess = torch.max(pred_matrix, 2)[1]
        matches = guess == ans
        single_correct = int(matches.sum().item())
        # A captcha counts as fully correct only when all 4 slots match.
        whole_correct = sum(1 for k in range(n_samples)
                            if matches[k].sum().item() == 4)
        del pred_matrix
    loss_value = loss.item()
    # Drop references early to help free GPU memory.
    del predictions
    del loss
    return loss_value, single_correct, whole_correct, n_samples
def fit(epochs, model, loss_func, opt, train_dl, valid_dl, verbose=None):
    """Train for up to `epochs` epochs with validation and early stopping.

    Saves the model ('pretrained') whenever the per-character validation
    accuracy improves; stops after 5 epochs without improvement.

    :param verbose: when an int, print running training loss every
                    `verbose` batches
    """
    best_single_acc = 0
    patience_limit = 5
    patience = 0
    for epoch in range(epochs):
        patience += 1
        accumulated_loss = 0.0
        seen = 0
        model.train()
        for batch_idx, batch in enumerate(train_dl):
            batch_loss, _, _, n = loss_batch(model, loss_func, batch, opt)
            if isinstance(verbose, int):
                accumulated_loss += batch_loss * n
                seen += n
                if batch_idx % verbose == verbose - 1:
                    print('[Epoch {}][Batch {}] got training loss: {:.6f}'
                          .format(epoch + 1, batch_idx + 1, accumulated_loss / seen))
                    seen = 0
                    accumulated_loss = 0.0
        model.eval()  # validation pass; also disables the dropout layer
        with torch.no_grad():
            batch_results = [loss_batch(model, loss_func, batch)
                             for batch in valid_dl]
        losses, single, whole, batch_sizes = zip(*batch_results)
        total_size = np.sum(batch_sizes)
        val_loss = np.sum(np.multiply(losses, batch_sizes)) / total_size
        single_rate = 100 * np.sum(single) / (total_size * 4)
        whole_rate = 100 * np.sum(whole) / total_size
        if single_rate > best_single_acc:
            # Improvement: reset patience and checkpoint the model.
            patience = 0
            best_single_acc = single_rate
            model.save('pretrained')
        print('After epoch {}: \n'
              '\tLoss: {:.6f}\n'
              '\tSingle Acc: {:.2f}%\n'
              '\tWhole Acc: {:.2f}%'
              .format(epoch + 1, val_loss, single_rate, whole_rate))
        if patience > patience_limit:
            print('Early stop at epoch {}'.format(epoch + 1))
            break
def train(use_gpu=True, epochs=40, verbose=500):
    """Load the data, build the model, train it, and report wall-clock time.

    :param use_gpu: forwarded to both the data loader and the network
    :param epochs: maximum number of training epochs
    :param verbose: batch interval for training-loss prints (see fit)
    """
    train_dl, valid_dl = load_data(batch_size=4, split_rate=0.2, gpu=use_gpu)
    network = Net(use_gpu)
    optimizer = optim.Adadelta(network.parameters())
    loss_fn = nn.BCELoss()
    started = timer()
    fit(epochs, network, loss_fn, optimizer, train_dl, valid_dl, verbose)
    finished = timer()
    elapsed = human_time(started, finished)
    print('Total training time using {}: {}'.format(network.device, elapsed))
if __name__ == '__main__':
    # CLI entry point: epochs/verbosity are optional ints, GPU is on by
    # default and can be disabled with --gpu-false.
    parser = argparse.ArgumentParser(
        description='Neural Network to break captchas')
    parser.add_argument('--epochs', type=int,
                        help='Number of epochs to be executed')
    parser.add_argument('--verbose', type=int,
                        help='Show data at each N value')
    parser.add_argument('--gpu', action='store_true', dest='gpu')
    parser.add_argument('--gpu-false', action='store_false', dest='gpu',
                        help='forces model to run on CPU')
    parser.set_defaults(gpu=True)
    args = parser.parse_args()
    train(use_gpu=args.gpu, epochs=args.epochs, verbose=args.verbose)
| [
"torch.nn.Dropout",
"utils.load_data",
"torch.max",
"torch.nn.functional.softmax",
"os.path.exists",
"numpy.multiply",
"argparse.ArgumentParser",
"utils.human_time",
"torch.cuda.get_device_name",
"os.makedirs",
"timeit.default_timer",
"torch.load",
"os.path.join",
"torch.nn.Conv2d",
"num... | [((4503, 4555), 'utils.load_data', 'load_data', ([], {'batch_size': '(4)', 'split_rate': '(0.2)', 'gpu': 'use_gpu'}), '(batch_size=4, split_rate=0.2, gpu=use_gpu)\n', (4512, 4555), False, 'from utils import load_data, DEVICE, human_time\n'), ((4642, 4654), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4652, 4654), True, 'import torch.nn as nn\n'), ((4667, 4674), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4672, 4674), True, 'from timeit import default_timer as timer\n'), ((4753, 4760), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4758, 4760), True, 'from timeit import default_timer as timer\n'), ((4769, 4791), 'utils.human_time', 'human_time', (['start', 'end'], {}), '(start, end)\n', (4779, 4791), False, 'from utils import load_data, DEVICE, human_time\n'), ((4904, 4975), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Neural Network to break captchas"""'}), "(description='Neural Network to break captchas')\n", (4927, 4975), False, 'import argparse\n'), ((376, 395), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(18)', '(5)'], {}), '(3, 18, 5)\n', (385, 395), True, 'import torch.nn as nn\n'), ((434, 449), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (446, 449), True, 'import torch.nn as nn\n'), ((487, 507), 'torch.nn.Conv2d', 'nn.Conv2d', (['(18)', '(48)', '(5)'], {}), '(18, 48, 5)\n', (496, 507), True, 'import torch.nn as nn\n'), ((545, 560), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (557, 560), True, 'import torch.nn as nn\n'), ((596, 611), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (606, 611), True, 'import torch.nn as nn\n'), ((631, 658), 'torch.nn.Linear', 'nn.Linear', (['(48 * 6 * 27)', '(360)'], {}), '(48 * 6 * 27, 360)\n', (640, 658), True, 'import torch.nn as nn\n'), ((678, 700), 'torch.nn.Linear', 'nn.Linear', (['(360)', '(19 * 4)'], {}), '(360, 19 * 4)\n', (687, 700), True, 'import torch.nn as nn\n'), ((1189, 1208), 
'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (1198, 1208), True, 'import torch.nn.functional as F\n'), ((1389, 1415), 'os.path.join', 'os.path.join', (['folder', 'name'], {}), '(folder, name)\n', (1401, 1415), False, 'import os\n'), ((1521, 1547), 'os.path.join', 'os.path.join', (['folder', 'name'], {}), '(folder, name)\n', (1533, 1547), False, 'import os\n'), ((1634, 1664), 'torch.load', 'torch.load', (['path', 'map_location'], {}), '(path, map_location)\n', (1644, 1664), False, 'import torch\n'), ((1761, 1786), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(36)', '(120)'], {}), '(1, 3, 36, 120)\n', (1771, 1786), False, 'import torch\n'), ((2291, 2307), 'torch.max', 'torch.max', (['yb', '(2)'], {}), '(yb, 2)\n', (2300, 2307), False, 'import torch\n'), ((2331, 2355), 'torch.max', 'torch.max', (['out_matrix', '(2)'], {}), '(out_matrix, 2)\n', (2340, 2355), False, 'import torch\n'), ((3757, 3775), 'numpy.sum', 'np.sum', (['batch_size'], {}), '(batch_size)\n', (3763, 3775), True, 'import numpy as np\n'), ((1318, 1340), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1332, 1340), False, 'import os\n'), ((1354, 1373), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1365, 1373), False, 'import os\n'), ((3577, 3592), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3590, 3592), False, 'import torch\n'), ((867, 896), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (893, 896), False, 'import torch\n'), ((3802, 3833), 'numpy.multiply', 'np.multiply', (['losses', 'batch_size'], {}), '(losses, batch_size)\n', (3813, 3833), True, 'import numpy as np\n'), ((3876, 3890), 'numpy.sum', 'np.sum', (['single'], {}), '(single)\n', (3882, 3890), True, 'import numpy as np\n'), ((3937, 3950), 'numpy.sum', 'np.sum', (['whole'], {}), '(whole)\n', (3943, 3950), True, 'import numpy as np\n')] |
from pommerman.constants import Action
import numpy as np
class DataAugmentor():
    """
    Produces valid state transitions derived from an input transition.
    """

    def __init__(self) -> None:
        pass

    def augment(self, obs: dict, action: Action, reward: float, nobs: dict, done: bool) -> list:
        """
        Derive one or more transitions from the given one.

        This base implementation is the identity: the incoming transition
        is returned unchanged, wrapped in a tuple inside a one-element list.

        :param obs: the initial state
        :param action: the action chosen by the model
        :param reward: the reward given for the transition
        :param nobs: the new state after the action was taken
        :param done: True if the episode is finished, otherwise False
        :return: a list of (obs, action, reward, nobs, done) tuples
        """
        transition = (obs, action, reward, nobs, done)
        return [transition]
class DataAugmentor_v1(DataAugmentor):
    """
    Creates new valid state transitions from an input transition using
    rotations (90/180/270 degrees), horizontal/vertical/diagonal mirrors
    and a point reflection of the board.
    """

    def __init__(self) -> None:
        # Fixed: the constructor was misspelled `_init_` and never ran
        # as a constructor.
        pass

    def augment(self, obs: dict, action: int, reward: float, nobs: dict, done: bool) -> list:
        """
        Take a state transition and create rotated, mirrored and point
        reflected versions from it.

        :param obs: The initial state
        :param action: The action chosen by the model
        :param reward: The reward given for the transition
        :param nobs: The new state after the action was taken
        :param done: True if the episode is finished, otherwise false
        :return: A list of 8 state transitions (3 rotations, 2 axis
                 mirrors, 1 point reflection, 2 diagonal mirrors), each
                 represented by a tuple
        """
        transitions = []
        # Rotate transitions by 90, 180 and 270 degrees counterclockwise.
        obs_90 = self.rotate_obs(obs)
        nobs_90 = self.rotate_obs(nobs)
        action_90 = self.rotate_action(action)
        obs_180 = self.rotate_obs(obs_90)
        nobs_180 = self.rotate_obs(nobs_90)
        action_180 = self.rotate_action(action_90)
        obs_270 = self.rotate_obs(obs_180)
        nobs_270 = self.rotate_obs(nobs_180)
        action_270 = self.rotate_action(action_180)
        transitions.append((obs_90, action_90, reward, nobs_90, done))
        transitions.append((obs_180, action_180, reward, nobs_180, done))
        transitions.append((obs_270, action_270, reward, nobs_270, done))
        # Mirror transitions horizontally and vertically.
        obs_mirrored_hor = self.mirror_horizontal_obs(obs)
        nobs_mirrored_hor = self.mirror_horizontal_obs(nobs)
        action_mirrored_hor = self.mirror_horizontal_action(action)
        obs_mirrored_ver = self.mirror_vertical_obs(obs)
        nobs_mirrored_ver = self.mirror_vertical_obs(nobs)
        action_mirrored_ver = self.mirror_vertical_action(action)
        transitions.append((obs_mirrored_hor, action_mirrored_hor, reward, nobs_mirrored_hor, done))
        transitions.append((obs_mirrored_ver, action_mirrored_ver, reward, nobs_mirrored_ver, done))
        # Point reflection = vertical mirror followed by horizontal mirror.
        obs_point_refl = self.mirror_horizontal_obs(obs_mirrored_ver)
        nobs_point_refl = self.mirror_horizontal_obs(nobs_mirrored_ver)
        action_point_refl = self.mirror_horizontal_action(action_mirrored_ver)
        transitions.append((obs_point_refl, action_point_refl, reward, nobs_point_refl, done))
        # Diagonal mirrors = axis mirror followed by a 90-degree rotation.
        obs_mirrored_dia_1 = self.rotate_obs(obs_mirrored_ver)
        nobs_mirrored_dia_1 = self.rotate_obs(nobs_mirrored_ver)
        action_mirrored_dia_1 = self.rotate_action(action_mirrored_ver)
        obs_mirrored_dia_2 = self.rotate_obs(obs_mirrored_hor)
        nobs_mirrored_dia_2 = self.rotate_obs(nobs_mirrored_hor)
        action_mirrored_dia_2 = self.rotate_action(action_mirrored_hor)
        transitions.append((obs_mirrored_dia_1, action_mirrored_dia_1, reward, nobs_mirrored_dia_1, done))
        transitions.append((obs_mirrored_dia_2, action_mirrored_dia_2, reward, nobs_mirrored_dia_2, done))
        return transitions

    def rotate_obs(self, obs):
        """
        Rotate an agent's observation by 90 degrees counter-clockwise.

        NOTE(review): the dict copy is shallow and np.rot90 returns views,
        so the rotated arrays share memory with the originals -- confirm
        downstream code never mutates them in place.
        """
        rotated_obs = obs.copy()
        rotated_obs['board'] = np.rot90(obs['board'])
        rotated_obs['bomb_blast_strength'] = np.rot90(obs['bomb_blast_strength'])
        rotated_obs['bomb_life'] = np.rot90(obs['bomb_life'])
        rotated_obs['bomb_moving_direction'] = np.rot90(obs['bomb_moving_direction'])
        rotated_obs['flame_life'] = np.rot90(obs['flame_life'])
        # Rotate the agent position; 10 is presumably board_size - 1 for
        # an 11x11 board -- TODO confirm against the environment config.
        rotated_obs['position'] = (-obs['position'][1] + 10, obs['position'][0])
        return rotated_obs

    def rotate_action(self, action):
        """
        Rotate an agent's action by 90 degrees counter-clockwise.
        Stop and Bomb are rotation-invariant.
        """
        if action == Action.Stop.value or action == Action.Bomb.value:
            action_rotated = action
        elif action == Action.Up.value:
            action_rotated = Action.Left.value
        elif action == Action.Down.value:
            action_rotated = Action.Right.value
        elif action == Action.Left.value:
            action_rotated = Action.Down.value
        elif action == Action.Right.value:
            action_rotated = Action.Up.value
        return action_rotated

    def mirror_horizontal_obs(self, obs):
        """
        Mirror an agent's observation horizontally (flip along axis 1).

        NOTE(review): the board is flipped along axis 1 but position[0]
        is the coordinate mirrored -- this assumes position is
        (column, row); confirm against the environment's convention.
        """
        mirrored_obs = obs.copy()
        mirrored_obs['board'] = np.flip(obs['board'], 1)
        mirrored_obs['bomb_blast_strength'] = np.flip(obs['bomb_blast_strength'], 1)
        mirrored_obs['bomb_life'] = np.flip(obs['bomb_life'], 1)
        mirrored_obs['bomb_moving_direction'] = np.flip(obs['bomb_moving_direction'], 1)
        mirrored_obs['flame_life'] = np.flip(obs['flame_life'], 1)
        # Mirror the agent position across the board's centre line.
        mirrored_obs['position'] = (10 - obs['position'][0], obs['position'][1])
        return mirrored_obs

    def mirror_horizontal_action(self, action):
        """
        Mirror an agent's action horizontally (swap Left and Right).
        """
        if action == Action.Stop.value or action == Action.Bomb.value or action == Action.Up.value or action == Action.Down.value:
            action_mirrored = action
        elif action == Action.Left.value:
            action_mirrored = Action.Right.value
        elif action == Action.Right.value:
            action_mirrored = Action.Left.value
        return action_mirrored

    def mirror_vertical_obs(self, obs):
        """
        Mirror an agent's observation vertically (flip along axis 0).
        """
        mirrored_obs = obs.copy()
        mirrored_obs['board'] = np.flip(obs['board'], 0)
        mirrored_obs['bomb_blast_strength'] = np.flip(obs['bomb_blast_strength'], 0)
        mirrored_obs['bomb_life'] = np.flip(obs['bomb_life'], 0)
        mirrored_obs['bomb_moving_direction'] = np.flip(obs['bomb_moving_direction'], 0)
        mirrored_obs['flame_life'] = np.flip(obs['flame_life'], 0)
        # Mirror the agent position across the board's centre line.
        mirrored_obs['position'] = (obs['position'][0], 10 - obs['position'][1])
        return mirrored_obs

    def mirror_vertical_action(self, action):
        """
        Mirror an agent's action vertically (swap Up and Down).
        """
        if action == Action.Stop.value or action == Action.Bomb.value or action == Action.Left.value or action == Action.Right.value:
            action_mirrored = action
        elif action == Action.Up.value:
            action_mirrored = Action.Down.value
        elif action == Action.Down.value:
            action_mirrored = Action.Up.value
        return action_mirrored
| [
"numpy.flip",
"numpy.rot90"
] | [((4511, 4533), 'numpy.rot90', 'np.rot90', (["obs['board']"], {}), "(obs['board'])\n", (4519, 4533), True, 'import numpy as np\n'), ((4580, 4616), 'numpy.rot90', 'np.rot90', (["obs['bomb_blast_strength']"], {}), "(obs['bomb_blast_strength'])\n", (4588, 4616), True, 'import numpy as np\n'), ((4654, 4680), 'numpy.rot90', 'np.rot90', (["obs['bomb_life']"], {}), "(obs['bomb_life'])\n", (4662, 4680), True, 'import numpy as np\n'), ((4730, 4768), 'numpy.rot90', 'np.rot90', (["obs['bomb_moving_direction']"], {}), "(obs['bomb_moving_direction'])\n", (4738, 4768), True, 'import numpy as np\n'), ((4807, 4834), 'numpy.rot90', 'np.rot90', (["obs['flame_life']"], {}), "(obs['flame_life'])\n", (4815, 4834), True, 'import numpy as np\n'), ((5806, 5830), 'numpy.flip', 'np.flip', (["obs['board']", '(1)'], {}), "(obs['board'], 1)\n", (5813, 5830), True, 'import numpy as np\n'), ((5878, 5916), 'numpy.flip', 'np.flip', (["obs['bomb_blast_strength']", '(1)'], {}), "(obs['bomb_blast_strength'], 1)\n", (5885, 5916), True, 'import numpy as np\n'), ((5955, 5983), 'numpy.flip', 'np.flip', (["obs['bomb_life']", '(1)'], {}), "(obs['bomb_life'], 1)\n", (5962, 5983), True, 'import numpy as np\n'), ((6034, 6074), 'numpy.flip', 'np.flip', (["obs['bomb_moving_direction']", '(1)'], {}), "(obs['bomb_moving_direction'], 1)\n", (6041, 6074), True, 'import numpy as np\n'), ((6114, 6143), 'numpy.flip', 'np.flip', (["obs['flame_life']", '(1)'], {}), "(obs['flame_life'], 1)\n", (6121, 6143), True, 'import numpy as np\n'), ((6993, 7017), 'numpy.flip', 'np.flip', (["obs['board']", '(0)'], {}), "(obs['board'], 0)\n", (7000, 7017), True, 'import numpy as np\n'), ((7065, 7103), 'numpy.flip', 'np.flip', (["obs['bomb_blast_strength']", '(0)'], {}), "(obs['bomb_blast_strength'], 0)\n", (7072, 7103), True, 'import numpy as np\n'), ((7142, 7170), 'numpy.flip', 'np.flip', (["obs['bomb_life']", '(0)'], {}), "(obs['bomb_life'], 0)\n", (7149, 7170), True, 'import numpy as np\n'), ((7221, 7261), 'numpy.flip', 
'np.flip', (["obs['bomb_moving_direction']", '(0)'], {}), "(obs['bomb_moving_direction'], 0)\n", (7228, 7261), True, 'import numpy as np\n'), ((7301, 7330), 'numpy.flip', 'np.flip', (["obs['flame_life']", '(0)'], {}), "(obs['flame_life'], 0)\n", (7308, 7330), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""Ploting data."""
import numpy as np
import matplotlib.pyplot as plt
import datetime
import math
from scipy.interpolate import spline
def get_sec():
    """Return the current wall-clock second (0-59) as an integer."""
    return datetime.datetime.now().second
def setup(graph, kind):
"""Setup pyplot graph."""
if kind == 'time':
graph.set_title('Data on time')
graph.set_xlabel('Time (s)')
graph.set_ylabel('s(t)')
graph.axis([0, 60, -1, 1])
elif kind == 'freq':
graph.set_title('Data on freq')
graph.set_xlabel('Freq (Hz)')
graph.set_ylabel('|S(f)|')
def plot_sig(data):
    """Scatter the newest sample on the time subplot and connect it to
    the previous sample with a line segment."""
    latest_t = data['t'][-1]
    latest_v = data['v'][-1]
    t_plot.scatter(latest_t, latest_v)
    if len(data['t']) > 1:
        t_plot.plot(data['t'][-2:], data['v'][-2:])
def plot_fft(data):
    """Compute the power spectrum of the sampled signal and redraw the
    frequency subplot; once enough samples exist, overlay a smooth
    cubic-spline interpolation of the spectrum."""
    if len(data['t']) > 1:
        fft = np.fft.fft(data['v'])
        spec = np.abs(fft) ** 2          # power spectrum
        freq = np.fft.fftfreq(len(fft), delta)
        # Sort by frequency so the scatter/plot run left to right.
        freq, spec = zip(*sorted(zip(freq, spec)))
        f_plot.clear()
        setup(f_plot, 'freq')
        f_plot.scatter(freq, spec)
        if len(data['t']) > 10:
            # scipy.interpolate.spline was removed in SciPy 1.0;
            # make_interp_spline is the supported replacement (cubic by
            # default, matching spline's old order=3).
            from scipy.interpolate import make_interp_spline
            x_sm = np.array(freq)
            x_smooth = np.linspace(x_sm.min(), x_sm.max(), 100)
            y_smooth = make_interp_spline(x_sm, np.array(spec))(x_smooth)
            f_plot.plot(x_smooth, y_smooth)
f_plot.plot(x_smooth, y_smooth)
# Two subplots, the axes array is time
f, [t_plot, f_plot] = plt.subplots(2)
delta = 0.0005
max_freq = 1.0 / (2.0 * delta)
setup(t_plot, 'time')
setup(f_plot, 'freq')
plt.ion()
data = {'t': [], 'v': []}
for i in range(30):
data['t'].append(get_sec())
data['v'].append(math.sin(0.1047 * 5 * get_sec()))
plot_sig(data)
plot_fft(data)
plt.pause(delta)
while True:
plt.pause(delta)
| [
"numpy.abs",
"numpy.fft.fft",
"numpy.array",
"datetime.datetime.now",
"scipy.interpolate.spline",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots"
] | [((1449, 1464), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1461, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1566), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1564, 1566), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1759), 'matplotlib.pyplot.pause', 'plt.pause', (['delta'], {}), '(delta)\n', (1752, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1793), 'matplotlib.pyplot.pause', 'plt.pause', (['delta'], {}), '(delta)\n', (1786, 1793), True, 'import matplotlib.pyplot as plt\n'), ((918, 939), 'numpy.fft.fft', 'np.fft.fft', (["data['v']"], {}), "(data['v'])\n", (928, 939), True, 'import numpy as np\n'), ((955, 966), 'numpy.abs', 'np.abs', (['fft'], {}), '(fft)\n', (961, 966), True, 'import numpy as np\n'), ((1211, 1225), 'numpy.array', 'np.array', (['freq'], {}), '(freq)\n', (1219, 1225), True, 'import numpy as np\n'), ((1313, 1341), 'scipy.interpolate.spline', 'spline', (['freq', 'spec', 'x_smooth'], {}), '(freq, spec, x_smooth)\n', (1319, 1341), False, 'from scipy.interpolate import spline\n'), ((237, 260), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (258, 260), False, 'import datetime\n')] |
import numpy as np
from typing import Tuple
from typing import List
from typing import Any
import matplotlib.pyplot as plt
import cv2
from GroundedScan.gym_minigrid.minigrid import DIR_TO_VEC
# TODO faster
def topo_sort(items, constraints):
    """Topologically order *items* under (before, after) *constraints*.

    Repeatedly removes the first remaining item with no incoming edge.
    Asserts that such a root always exists, i.e. the constraints are
    acyclic. When there are no constraints, *items* is returned as-is.
    """
    if not constraints:
        return items
    remaining = list(items)
    pending = list(constraints)
    ordered = []
    while remaining:
        roots = [candidate for candidate in remaining
                 if all(edge[1] != candidate for edge in pending)]
        assert roots, (remaining, pending)
        chosen = roots[0]
        remaining.remove(chosen)
        pending = [edge for edge in pending if edge[0] != chosen]
        ordered.append(chosen)
    return ordered
def random_weights(size: int) -> np.ndarray:
    """Sample *size* weights uniformly from the interval [-1, 1)."""
    raw = np.random.random(size)
    return 2 * (raw - 0.5)
def accept_weights(size: int) -> np.ndarray:
    """Return a weight vector of ones, i.e. accept everything equally."""
    return np.ones(shape=size)
def plan_step(position: Tuple[int, int], move_direction: int):
    """
    Advance one grid cell from *position* along *move_direction*.

    :param position: current position of form (x-axis, y-axis) (i.e. column, row)
    :param move_direction: East is 0, south is 1, west is 2, north is 3.
    :return: next position of form (x-axis, y-axis) (i.e. column, row)
    """
    assert 0 <= move_direction < 4
    step_vector = DIR_TO_VEC[move_direction]
    return position + step_vector
def one_hot(size: int, idx: int) -> np.ndarray:
    """Return an integer vector of length *size* that is 1 at *idx* and 0 elsewhere."""
    encoding = np.zeros(size, dtype=int)
    encoding[idx] = 1
    return encoding
def generate_possible_object_names(color: str, shape: str) -> List[str]:
    """Return the referring expressions for an object: the bare shape
    and the color-qualified shape.

    TODO: does this still make sense when size is not small or large?
    """
    return [shape, f"{color} {shape}"]
def save_counter(description, counter, file):
    """Write *counter* to an open *file* as a titled, indented key/count listing."""
    file.write(description + ": \n")
    for key, occurrences in counter.items():
        file.write(f"  {key}: {occurrences}\n")
def bar_plot(values: dict, title: str, save_path: str, errors={}, y_axis_label="Occurrence"):
    """Save a bar chart of *values* ({label: count}) sorted ascending by count.

    :param errors: optional {label: error} used for y error bars.
    NOTE(review): `errors={}` is a mutable default; it is never mutated
    here so it is harmless, but a None default would be safer.
    """
    sorted_values = list(values.items())
    # Swap to (value, label) so that sorting orders the bars by value.
    sorted_values = [(y, x) for x, y in sorted_values]
    sorted_values.sort()
    values_per_label = [value[0] for value in sorted_values]
    if len(errors) > 0:
        # Re-align the error bars with the value-sorted label order.
        sorted_errors = [errors[value[1]] for value in sorted_values]
    else:
        sorted_errors = None
    labels = [value[1] for value in sorted_values]
    assert len(labels) == len(values_per_label)
    y_pos = np.arange(len(labels))
    plt.bar(y_pos, values_per_label, yerr=sorted_errors, align='center', alpha=0.5)
    plt.gcf().subplots_adjust(bottom=0.2, )  # leave room for rotated labels
    plt.xticks(y_pos, labels, rotation=90, fontsize="xx-small")
    plt.ylabel(y_axis_label)
    plt.title(title)
    plt.savefig(save_path)
    plt.close()
def grouped_bar_plot(values: dict, group_one_key: Any, group_two_key: Any, title: str, save_path: str,
                     errors_group_one={}, errors_group_two={}, y_axis_label="Occurence", sort_on_key=True):
    """Save a two-series grouped bar chart.

    :param values: {label: {group_one_key: v1, group_two_key: v2}}
    :param errors_group_one: optional {label: error} for the first series
    :param errors_group_two: optional {label: error} for the second series
    :param sort_on_key: when True, order the bars by label.
    NOTE(review): the "Occurence" typo is part of the public default
    value; changing it would change rendered output, so it is kept.
    """
    sorted_values = list(values.items())
    if sort_on_key:
        sorted_values.sort()
    values_group_one = [value[1][group_one_key] for value in sorted_values]
    values_group_two = [value[1][group_two_key] for value in sorted_values]
    if len(errors_group_one) > 0:
        sorted_errors_group_one = [errors_group_one[value[0]] for value in sorted_values]
        sorted_errors_group_two = [errors_group_two[value[0]] for value in sorted_values]
    else:
        sorted_errors_group_one = None
        sorted_errors_group_two = None
    labels = [value[0] for value in sorted_values]
    assert len(labels) == len(values_group_one)
    assert len(labels) == len(values_group_two)
    y_pos = np.arange(len(labels))
    fig, ax = plt.subplots()
    width = 0.35  # bar width; the second series is offset by this amount
    p1 = ax.bar(y_pos, values_group_one, width, yerr=sorted_errors_group_one, align='center', alpha=0.5)
    p2 = ax.bar(y_pos + width, values_group_two, width, yerr=sorted_errors_group_two, align='center', alpha=0.5)
    plt.gcf().subplots_adjust(bottom=0.2, )  # leave room for rotated labels
    plt.xticks(y_pos, labels, rotation=90, fontsize="xx-small")
    plt.ylabel(y_axis_label)
    plt.title(title)
    ax.legend((p1[0], p2[0]), (group_one_key, group_two_key))
    plt.savefig(save_path)
    plt.close()
def numpy_array_to_image(numpy_array, image_name):
    """Save an RGB(A) numpy array to disk as an image via matplotlib."""
    plt.imsave(image_name, numpy_array)
def image_to_numpy_array(image_path):
    """Read an image from disk and return it as an RGB numpy array.

    NOTE(review): cv2.imread returns None for unreadable paths, in which
    case np.flip would raise -- no explicit error handling here.
    """
    im = cv2.imread(image_path)
    return np.flip(im, 2)  # cv2 returns image in BGR order; flip channels to RGB
| [
"numpy.flip",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.imsave",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.title",
"cv2.imre... | [((865, 878), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (872, 878), True, 'import numpy as np\n'), ((1364, 1389), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'int'}), '(size, dtype=int)\n', (1372, 1389), True, 'import numpy as np\n'), ((2402, 2481), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values_per_label'], {'yerr': 'sorted_errors', 'align': '"""center"""', 'alpha': '(0.5)'}), "(y_pos, values_per_label, yerr=sorted_errors, align='center', alpha=0.5)\n", (2409, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2589), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {'rotation': '(90)', 'fontsize': '"""xx-small"""'}), "(y_pos, labels, rotation=90, fontsize='xx-small')\n", (2540, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2594, 2618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_axis_label'], {}), '(y_axis_label)\n', (2604, 2618), True, 'import matplotlib.pyplot as plt\n'), ((2623, 2639), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2632, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2645, 2667), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (2656, 2667), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2683), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2681, 2683), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3652), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3650, 3652), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3995), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {'rotation': '(90)', 'fontsize': '"""xx-small"""'}), "(y_pos, labels, rotation=90, fontsize='xx-small')\n", (3946, 3995), True, 'import matplotlib.pyplot as plt\n'), ((4000, 4024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_axis_label'], {}), '(y_axis_label)\n', (4010, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4045), 'matplotlib.pyplot.title', 'plt.title', (['title'], 
{}), '(title)\n', (4038, 4045), True, 'import matplotlib.pyplot as plt\n'), ((4113, 4135), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (4124, 4135), True, 'import matplotlib.pyplot as plt\n'), ((4140, 4151), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4149, 4151), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4244), 'matplotlib.pyplot.imsave', 'plt.imsave', (['image_name', 'numpy_array'], {}), '(image_name, numpy_array)\n', (4219, 4244), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4316), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4304, 4316), False, 'import cv2\n'), ((4328, 4342), 'numpy.flip', 'np.flip', (['im', '(2)'], {}), '(im, 2)\n', (4335, 4342), True, 'import numpy as np\n'), ((777, 799), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (793, 799), True, 'import numpy as np\n'), ((2486, 2495), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2493, 2495), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3901), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3899, 3901), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 09:34:07 2020
kinematics and kinetics diagrams for multi-index dataframes in human gait
@author: nikorose
"""
# import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import math
from itertools import combinations
from matplotlib.font_manager import FontProperties
import matplotlib as mpl
import inspect
import pandas as pd
import numpy as np
from curvature import smoothness
from shapely.geometry import MultiPoint, Polygon
from descartes import PolygonPatch
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from pathlib import PurePath
import os
import pylab
class plot_dynamic:
def __init__(self, SD = False, ext='png',
dpi=500, save=False, plt_style='seaborn', alpha=1.5,
folder='Figures', axs_size=None, fig_size=[5,7]):
"""
Parameters
----------
SD : bool, optional
DESCRIPTION. The default is False.
ext : str, optional
DESCRIPTION. The default is 'png'.
dpi : int, optional
DESCRIPTION. The default is 500.
save : bool, optional
DESCRIPTION. The default is False.
plt_style : TYPE, optional
DESCRIPTION. The default is 'seaborn'.
alpha : TYPE, optional
DESCRIPTION. The default is 1.5.
Returns
-------
None.
"""
self.sd = SD
self.ext = ext
self.dpi = dpi
self.save = save
self.alpha = alpha
self.axs_size = axs_size
self.fig_size = fig_size
plt.style.use(plt_style)
self.colors = plt.rcParams['axes.prop_cycle'].by_key()['color']*5
self.root_path = PurePath(os.getcwd())
if not os.path.exists(folder):
os.makedirs(folder)
self.save_folder = self.root_path / folder
    def rc_params(self, proportion = 1, nrows=5, ncols=7):
        """Tune matplotlib's global rcParams for figures of a given scale.

        proportion scales fonts and legend sizing (>= 4 is treated as a
        big multi-panel figure); nrows/ncols set the figure size in inches.
        Side effect: mutates matplotlib's global rcParams, so every figure
        created afterwards is affected.
        """
        mpl.rcParams["text.usetex"] = True  # render text with LaTeX
        if proportion >= 4:
            # Adjusting plot parameters for bigger figures
            mpl.rcParams['axes.titlesize'] = 4*proportion
            mpl.rcParams['axes.labelsize'] = 6*proportion
            # mpl.rcParams['lines.linewidth'] = proportion*0.3
            # mpl.rcParams['lines.markersize'] = 4*proportion
            mpl.rcParams['xtick.labelsize'] = 5*proportion
            mpl.rcParams['ytick.labelsize'] = 5*proportion
            mpl.rcParams['legend.fontsize'] = 7*proportion
            mpl.rcParams['legend.handlelength'] = proportion*3
        else:
            # Adjusting plot parameters for small figures
            mpl.rcParams['axes.titlesize'] = 4*proportion
            mpl.rcParams['axes.labelsize'] = 5*proportion
            # mpl.rcParams['lines.linewidth'] = proportion
            mpl.rcParams['lines.markersize'] = 4*proportion
            mpl.rcParams['xtick.labelsize'] = 4*proportion
            mpl.rcParams['ytick.labelsize'] = 4*proportion
            mpl.rcParams['legend.fontsize'] = 5*proportion
            mpl.rcParams['legend.handlelength'] = proportion / 2
        mpl.rcParams['figure.figsize'] = ncols, nrows
        # background color
        mpl.rcParams['axes.facecolor'] = 'white'
def get_mult_num(self, integer):
"""
Function to generate the best combination among a number of plots
From https://stackoverflow.com/questions/54556363/finding-two-integers-that-multiply-to-20-can-i-make-this-code-more-pythonic
"""
given_comb = [1,1,2,2,3,3,4,4,5,5,6,6,7,7]
self.given_comb = [(i, j) for i, j in list(combinations(given_comb,2)) \
if i * j == integer]
return self.given_comb
def if_object(self, df_object, cols, rows):
# Getting speed labels
if cols is not None:
columns_0 = df_object.labels_first_col[cols]
else:
columns_0 = df_object.labels_first_col
# Getting positions
if rows is not None:
rows_0 = df_object.labels_first_rows[rows]
else:
rows_0 = df_object.labels_first_rows
# Mean and SD
columns_1 = df_object.labels_second_col
#Gait cycle discretization
rows_1 = df_object.labels_second_rows
# Dataframe from the object
df_= df_object.df_
y_label = df_object.y_label
cycle_title = df_object.cycle_title
return columns_0, columns_1, rows_0, rows_1, df_, y_label, cycle_title
def if_df(self, df_object, cols, rows):
# Getting speed labels
columns_0 = df_object.columns.get_level_values(0).unique()
if cols is not None:
columns_0 = columns_0[cols]
# Getting positions
rows_0 = df_object.index.get_level_values(0).unique()
if rows is not None:
rows_0 = rows_0[rows]
# Mean and SD
columns_1 = df_object.columns.get_level_values(1).unique()
#Gait cycle discretization
rows_1 = df_object.index.get_level_values(1).unique()
# Dataframe from the object
df_= df_object
cycle_title = df_object.index.names[1]
y_label = ''
return columns_0, columns_1, rows_0, rows_1, df_, y_label, cycle_title
def gait_plot(self, df_object, cols=None, rows=None, title=False, legend=True,
show=True):
"""
Parameters
----------
df_object : object
Object of the data that containts DFs, labels name, titles, options
cols : int list
Integer list with column numbers positions.
rows : TYPE
Integer list with index numbers positions.
title : str, optional
Figure suptitle. The default is False.
Returns
-------
fig : fig object
fig object to combine later
"""
# Cleaning the plots
self.clear_plot_mem()
if isinstance(df_object, pd.DataFrame): # <- if is an object
columns_0, columns_1, rows_0, rows_1, df_, y_label, cycle_title = \
self.if_df(df_object, cols, rows)
else:
columns_0, columns_1, rows_0, rows_1, df_, y_label, cycle_title = \
self.if_object(df_object, cols, rows)
if self.axs_size is not None:
nrows, ncols = self.axs_size
else:
# Suitable distribution for plotting
nrows, ncols = self.get_mult_num(len(rows_0))[-1]
# Adjusting the plot settings
self.rc_params(self.alpha, 8, 10) #Letter size
fig, axs = plt.subplots(nrows=nrows, ncols=ncols,
squeeze=False, figsize=self.fig_size)
fig.tight_layout(pad=2*self.alpha/ncols)
count = 0
for k, ax in np.ndenumerate(axs):
for i in columns_0:
if self.sd:
sd_min = df_[i,columns_1[0]][rows_0[count]].values
mean = df_[i,columns_1[1]][rows_0[count]].values
sd_max = df_[i,columns_1[2]][rows_0[count]].values
ax.fill_between(rows_1, sd_min, sd_max, alpha=0.2)
else:
try:
mean = df_[i,columns_1[1]][rows_0[count]].values
except IndexError:
mean = df_[i,columns_1[0]][rows_0[count]].values
ax.plot(rows_1, mean, '-')
ax.set_xlabel(cycle_title)
ax.set_ylabel('{} {}'.format(rows_0[count], y_label))
count += 1
if legend:
fig.legend(columns_0, bbox_to_anchor=[0.5, 0.5], loc='center',
ncol=int(len(columns_0)/2), fancybox=True)
#In case y label is not displyed vary this parameter
plt.subplots_adjust(left=0.05)
if title: plt.suptitle('{} {}'.format(title, y_label))
if show: plt.show()
if self.save:
self.save_fig(fig, title)
return fig
def save_fig(self, fig, title):
os.chdir(self.save_folder)
fig.savefig('{}.{}'.format(title, self.ext),
format= self.ext, dpi=self.dpi)
os.chdir(self.root_path)
def clear_plot_mem(self):
"""
In order to not accumulate memory data on every plot
Returns
-------
None.
"""
plt.cla()
plt.clf()
plt.close()
class plot_ankle_DJS(plot_dynamic):
def __init__(self, SD = False, ext='png',
dpi=500, save=False, plt_style='seaborn',
alpha=1.5, sep=True, fig_size=[5,5], params=None):
#Setting plot parameters
# Cleaning the plots
super().__init__(SD, ext, dpi, save, plt_style, alpha)
self.sd = SD
self.ext = ext
self.dpi = dpi
self.save = save
self.alpha = alpha
self.sep = sep
self.idx = pd.IndexSlice
self.fig_size = fig_size
self.params ={'sharex':True, 'sharey':True, 'arr_size': int(self.alpha*3),
'color_DJS': self.colors, 'color_symbols': self.colors,
'color_reg': self.colors, 'left_margin': 0.15,
'yticks': None,
'xticks': None,
'hide_labels':(False, False),
'alpha_prod': 0.3,
'alpha_absorb': 0.1,
'DJS_linewidth': 0.4,
'reg_linewidth': 0.8,
'sd_linewidth': 0.3*self.alpha/self.fig_size[0],
'grid':True,
'text': False,
'tp_labels' : {'I.C.':(3,3),'ERP':(2.5,2.5),
'LRP':(1.2,1.0),'DP':(1,1.1),'S':(1.2,1.1),
'TS':(1,1)},
'instances': ['CP','ERP', 'LRP', 'DP', 'S']}
if params is not None:
self.params.update(params)
plt.style.use(plt_style)
def deg2rad(self, row_name):
self.df_.loc[self.idx[row_name,:],:] = self.df_.loc[self.idx[row_name,:],
:].apply(np.deg2rad, axis=0)
def rm_static_pos(self, row_name):
self.df_.loc[self.idx[row_name,:],:] = self.df_.loc[self.idx[row_name,:],
:].apply(lambda x: x - x[0])
def separared(self, rows):
areas_prod = []
areas_abs = []
direction = []
for _ , self.ax in np.ndenumerate(self.axs):
try:
#This exception is for unpaired plots in order to get rid of empty axes
if self.sd:
self.ang_mean = self.extract_data([rows[0], self.count, 1]).squeeze()
self.mom_mean = self.extract_data([rows[1], self.count, 1]).squeeze()
label = self.columns_first[self.count]
else:
self.ang_mean = self.extract_data([rows[0], 0, self.count]).squeeze()
self.mom_mean = self.extract_data([rows[1], 0, self.count]).squeeze()
label = self.columns_second[self.count]
if self.sd:
self.sd_plot(rows)
if not self.params['grid']:
self.ax.grid(False)
self.ax.spines['right'].set_visible(False)
self.ax.spines['top'].set_visible(False)
line_plot = self.ax.plot(self.ang_mean, self.mom_mean,
color= self.params['color_DJS'][self.count],
label= label,
linewidth=self.params['DJS_linewidth'])
if self.integrate:
_prod, _abs, _dir = self.areas()
areas_abs.append(_abs)
areas_prod.append(_prod)
direction.append(_dir)
if isinstance(self.TP, pd.DataFrame):
self.reg_lines()
if self.ang_mean.shape[0] <= 300:
arr_space = 2
else:
arr_space = 20
self.add_arrow(line_plot, step=arr_space)
if self.params['text']:
self.labels_inside()
if not self.params['hide_labels'][1]:
#showing only 1 column y labels and last row x labels
if self.count % self.sep[1] == 0:
self.ax.set_ylabel(self.y_label)
if not self.params['hide_labels'][0]:
if self.count >= (self.sep[0]-1)*self.sep[1]:
self.ax.set_xlabel(self.x_label)
if self.legend:
self.ax.legend(ncol=int(len(self.columns_first)/2), fancybox=True,
loc = 'upper left')
self.count +=1
except ValueError:
#Because plots does not match
continue
if self.integrate:
if self.integrate: self.df_areas(areas_abs, areas_prod, direction)
def together(self, rows):
areas_prod = []
areas_abs = []
direction = []
for _ in enumerate(self.columns_first):
if self.sd:
self.ang_mean = self.extract_data([rows[0], self.count, 1]).squeeze()
self.mom_mean = self.extract_data([rows[1], self.count, 1]).squeeze()
else:
self.ang_mean = self.extract_data([rows[0], self.count, self.count]).squeeze()
self.mom_mean = self.extract_data([rows[1], self.count, self.count]).squeeze()
if self.sd:
self.sd_plot(rows)
if not self.params['grid']:
self.ax.grid(False)
self.ax.spines['right'].set_visible(False)
self.ax.spines['top'].set_visible(False)
line_plot = self.ax.plot(self.ang_mean, self.mom_mean,
color= self.params['color_DJS'][self.count],
label= self.columns_first[self.count],
linewidth=self.params['DJS_linewidth'])
if self.integrate:
_prod, _abs, _dir = self.areas()
areas_abs.append(_abs)
areas_prod.append(_prod)
direction.append(_dir)
if isinstance(self.TP, pd.DataFrame):
self.reg_lines()
if self.ang_mean.shape[0] <= 400:
arr_space = 2
else:
arr_space = 20
self.add_arrow(line_plot, step=arr_space)
if self.params['text']:
self.labels_inside()
if not self.params['hide_labels'][0]:
self.ax.set_xlabel(self.x_label)
if not self.params['hide_labels'][1]:
self.ax.set_ylabel(self.y_label)
if self.legend == True:
try:
self.ax.legend(ncol=1, fancybox=True, #int(len(self.columns_first)/2)
loc = 'upper left')
except ZeroDivisionError:
self.ax.legend(ncol=1, fancybox=True,
loc = 'upper left', fontsize=self.alpha*3)
elif self.legend == 'sep':
figLegend = pylab.figure(figsize = self.fig_size)
pylab.figlegend(*self.ax.get_legend_handles_labels(),
loc = 'upper left')
self.save_fig(figLegend, "legend_{}".format(self.title))
else:
pass
self.count +=1
if self.integrate: self.df_areas(areas_abs, areas_prod, direction)
def labels_inside(self):
#Printing point labels
for n_, (tp_lab, val) in enumerate(self.params['tp_labels'].items()):
self.ax.text(self.ang_mean[self.TP.iloc[self.count, n_]]*val[0],
self.mom_mean[self.TP.iloc[self.count, n_]]*val[1],
tp_lab,
color=self.params['color_reg'][self.count])
self.ax.text(0.5, 0.3, r'$W_{net}$', horizontalalignment='center',
verticalalignment='center',
color=self.params['color_reg'][self.count],
transform=self.ax.transAxes)
self.ax.text(0.85, 0.2, r'$W_{abs}$', horizontalalignment='center',
verticalalignment='center', transform=self.ax.transAxes,
color=self.params['color_reg'][self.count])
def df_areas(self, areas_abs, areas_prod, direction):
"""
Returns areas in a df way
Returns
-------
None.
"""
try:
if self.sd:
ind_ = self.columns_first
else:
ind_ = self.columns_second
self.areas = pd.DataFrame(np.array([areas_abs, areas_prod, direction]).T,
columns = ['work abs', 'work prod', 'direction'], index=ind_)
except ValueError:
self.areas = pd.DataFrame(np.array([areas_abs, areas_prod, direction]).T,
columns = ['work abs', 'work prod', 'direction'],
index=self.reg_info_df.index.get_level_values(1).unique())
def is_positive(self):
signedarea = 0
for len_arr in range(self.ang_mean.shape[0]-1):
signedarea += self.ang_mean[len_arr]*self.mom_mean[len_arr+1] - \
self.ang_mean[len_arr+1]*self.mom_mean[len_arr]
if signedarea < 0:
return 'cw'
else:
return 'ccw'
def areas(self):
#We are making the integration of the closed loop
try:
prod = self.integration(self.ang_mean, self.mom_mean,
self.params['color_DJS'][self.count],
alpha = self.params['alpha_prod'])
#To discover which direction the DJS is turning
# If angle at 40% of the gait is bigger than angle in 65% of the gait
# and if the moment at 55 % is bigger than 40%, so It is counter clockwise
# otherwise clockwise
len_vars = self.ang_mean.shape[0]
try:
#Taking the 10 highest values
max_vals = self.ang_mean.argsort()[-15:].values
# Proving that higher moments are generated in max values >0.25 Nm/kg
max_ang = max_vals[self.mom_mean[max_vals].values > 0.25][-1]
except IndexError:
#In few cases no max ang is found, setting and average
max_ang = int(len_vars*0.5)
direction = self.is_positive()
#We pretend to integer the area under the loop
if direction == 'ccw':
X = self.ang_mean[0:max_ang].values
Y = self.mom_mean[0:max_ang].values
pos_init = [-1,0,0,0]
else:
X = self.ang_mean[max_ang:].values
Y = self.mom_mean[max_ang:].values
pos_init = [0,-1,0,0]
#closing the loop with another point in zero
# going to 0 in Y axis and Adding another point in (0,0)
X = np.append(X,[X[pos_init[0]],X[pos_init[2]]])
Y = np.append(Y,[Y[pos_init[1]],Y[pos_init[3]]])
absorb = self.integration(X,Y,
self.params['color_DJS'][self.count],
alpha=self.params['alpha_absorb'])
except ValueError:
#Integration cannot be done
prod = 0
absorb = 0
direction = 0
return prod, absorb, direction
def reg_lines(self):
# Plotting TP
self.ax.scatter(self.ang_mean[self.TP.iloc[self.count]],
self.mom_mean[self.TP.iloc[self.count]],
color=self.params['color_symbols'][self.count],
linewidth = self.alpha/3)
for i in range(self.TP.shape[1]-2):
ang_data = self.ang_mean[self.TP.iloc[self.count][i]: \
self.TP.iloc[self.count][i+1]].values.reshape(-1,1)
mom_data = self.mom_mean[self.TP.iloc[self.count][i]: \
self.TP.iloc[self.count][i+1]].values.reshape(-1,1)
# print(ang_data, mom_data, self.TP.iloc[self.count])
info = self.ridge(ang_data, mom_data)
if i == 0:
info2 = info
else:
for key, item in info.items():
info2[key].extend(item)
reg_info = info2
self.reg_data = reg_info.pop('pred_data')
#We are placing the second level, so if there is only one item we need to take the 0 item
#otherwise the first
if self.columns_second.shape[0] == 3:
num = 1
else:
num = 0
reg_info_df = pd.DataFrame(reg_info)
instance = self.params['instances']
instance = instance[:reg_info_df.shape[0]]
if self.sd:
reg_idx= pd.MultiIndex.from_product([[self.columns_first[self.count]], [self.columns_second[num]],
instance], names=['Speed', 'instance','QS phase'])
else:
reg_idx= pd.MultiIndex.from_product([self.columns_first, [self.columns_second[self.count]],
instance], names=['Speed', 'instance','QS phase'])
reg_info_df.index = reg_idx
if hasattr(self, 'reg_info_df'):
self.reg_info_df = pd.concat([self.reg_info_df, reg_info_df])
else:
self.reg_info_df = reg_info_df
style= ['--', '-.', ':', '--', '-.', ':']
for i, reg in enumerate(self.reg_data):
if self.params['text']:
self.ax.plot(reg[:,0], reg[:,1],
color = self.params['color_reg'][self.count],
linestyle = style[i], zorder=15,
linewidth = self.params['reg_linewidth'],
label=instance[i])
else:
self.ax.plot(reg[:,0], reg[:,1],
color = self.params['color_reg'][self.count],
linestyle = style[i], zorder=15, #style[i] -> for plot in paper
linewidth = self.params['reg_linewidth'])
# Ridge regression
def ridge(self, var1, var2, alpha = 0.001):
"""Function to do Ridge regression
when the slope is so high, it is better to do regression in the opposite
side, it means, normally we predict moment through angles, when the bounds
have 0.03 radians of range we are predicting the angles through moments.
"""
if var1[-1]-var1[0] < 0.03 and var1[-1]-var1[0] > -0.03:
X = var2
Y = var1
inverted = True
else:
X = var1
Y= var2
inverted = False
y_linear_lr = linear_model.Ridge(alpha= alpha)
y_linear_lr.fit(X, Y)
pred = y_linear_lr.predict(X)
# R2 = y_linear_lr.score(X, Y)
SS_Residual = np.sum((Y-pred)**2)
SS_Total = np.sum((Y-np.mean(Y))**2)
R2 = 1 - (float(SS_Residual))/SS_Total
if inverted == False:
pred_mod = (X, pred)
else:
pred_mod = (pred, X)
meanSquare = mean_squared_error(Y, pred)
return {'intercept': [y_linear_lr.intercept_.item(0)],
'stiffness': [y_linear_lr.coef_.item(0)],
'MSE':[meanSquare],
'R2':[R2],
'pred_data': [np.hstack(pred_mod)],
'inverted': [inverted]}
def linear_fun(self,a,b,x):
return a*x+b
def add_reg_lines(self, pred_df, label='Predicted'):
pred_df_ind = pred_df.index.get_level_values(0)
for i, phase in enumerate(self.params['instances'][:-1]):
stiffness = pred_df.loc[self.idx[pred_df_ind[self.count], phase]][1]
intercept = pred_df.loc[self.idx[pred_df_ind[self.count], phase]][0]
ang_data = self.ang_mean[self.TP.iloc[self.count][i]: \
self.TP.iloc[self.count][i+1]].values.reshape(-1,1)
pred_data = self.linear_fun(stiffness,
intercept,
ang_data)
self.ax.plot(ang_data, pred_data,
color= self.params['color_reg'][self.count+1],
linestyle = 'dashdot', label=label)
return pred_data
def plot_DJS(self, df_, cols=None, rows= [0,2],
title='No name given', legend=True, reg=None,
integration= True, rad= True, sup_static= True, header=None):
self.clear_plot_mem()
# Suitable distribution for plotting
self.TP = reg
self.legend = legend
self.header = header
self.integrate = integration
self.index_first = df_.index.get_level_values(0).unique()
self.index_second = df_.index.get_level_values(1).unique()
self.columns_first = df_.columns.get_level_values(0).unique()
self.columns_second = df_.columns.get_level_values(1).unique()
self.y_label = 'Moment '+ r'$[\frac{Nm}{kg}]$'
self.x_label = 'Angle [deg]'
self.title = title
if cols is None:
cols = self.columns_first
self.df_ = df_.loc[:,self.idx[self.columns_first,:]]
else:
self.df_ = df_.loc[:,self.idx[self.columns_first[cols],:]]
#To keep the index column order
self.df_ = self.df_.reindex(self.columns_first[cols], level=0, axis=1)
#To keep the order of the TP
if self.TP is not None:
#check if always you will have at least two levels
#If so this is ok
self.TP = self.TP.reindex(self.columns_first[cols], axis=0, level=-2)
if rad:
self.deg2rad(self.index_first[rows[0]])
self.x_label = 'Angle [rad]'
if sup_static:
self.rm_static_pos(self.index_first[rows[0]])
if rows is None:
rows = self.index_first
self.columns_first = self.df_.columns.get_level_values(0).unique()
if self.sep == True:
nrows, ncols = self.get_mult_num(len(cols))[-1]
elif self.sep == False:
nrows = 1
ncols = 1
elif isinstance(self.sep , list):
nrows, ncols = self.sep
# Adjusting the plot settings
self.rc_params(self.alpha/nrows, self.fig_size[0], self.fig_size[1])
self.count = 0
if self.sep:
self.fig, self.axs = plt.subplots(nrows=nrows, ncols=ncols, sharey=True,
sharex=False, squeeze=False)
self.fig.tight_layout()
self.separared(rows)
else:
self.fig, self.ax = plt.subplots(nrows=nrows, ncols=ncols, sharey=self.params['sharey'],
sharex=self.params['sharex'], squeeze=True)
self.together(rows)
if self.header is not None:
self.fig.suptitle(self.header)
#In case y label is not displyed vary this parameter
plt.subplots_adjust(left=self.params['left_margin'])
#Ticks
if self.params['yticks'] is not None:
plt.yticks(self.params['yticks'])
plt.ylim((self.params['yticks'][0], self.params['yticks'][-1]))
if self.params['xticks'] is not None:
plt.xticks(self.params['xticks'])
plt.xlim((self.params['xticks'][0], self.params['xticks'][-1]))
if self.save:
self.save_fig(self.fig, self.title)
#Setting margins of figure
return self.fig
def sd_plot(self, rows):
"""
Generates the plot for SD either for rows and columns
Parameters
----------
rows : List with rows indexes for angles and moments
Returns
-------
None.
"""
self.ang_sd1 = self.extract_data([rows[0], self.count, 0])
self.ang_sd2 = self.extract_data([rows[0], self.count, 2])
self.mom_sd1 = self.extract_data([rows[1], self.count, 0])
self.mom_sd2 = self.extract_data([rows[1], self.count, 2])
self.err_ang = [self.ang_mean - self.ang_sd1,
self.ang_sd2 - self.ang_mean]
self.err_mom = [self.mom_mean - self.mom_sd1,
self.mom_sd2 - self.mom_mean]
self.ax.errorbar(self.ang_mean, self.mom_mean, xerr=self.err_ang,
color= self.params['color_DJS'][self.count],
elinewidth = self.params['sd_linewidth'])
self.ax.errorbar(self.ang_mean, self.mom_mean, yerr=self.err_mom,
color= self.params['color_DJS'][self.count],
elinewidth = self.params['sd_linewidth'])
return
def extract_data(self, idx_):
"""
Extract the specific feature information
Parameters
----------
idx : list with three items
The first specifies the first index position.
The second specifies the first column position.
The third specifies the second column position
Returns
-------
data : TYPE
DESCRIPTION.
"""
data = self.df_.loc[self.idx[self.index_first[idx_[0]] , :],
self.idx[self.columns_first[idx_[1]],
self.columns_second[idx_[2]]]]
return data
def add_arrow(self, axs, direction='right', step=2):
"""
Add an arrow to a line.
line: Line2D object
position: x-position of the arrow. If None, mean of xdata is taken
direction: 'left' or 'right'
size: size of the arrow in fontsize points
"""
#How often will be repeated
arr_num = self.params['arr_size']*step
axs = axs[0]
color = self.params['color_symbols'][self.count]
xdata = axs.get_xdata()
ydata = axs.get_ydata()
#Selecting the positions
position = xdata[0:-20:arr_num]
# find closest index
start_ind = self.ang_mean[5:-1:arr_num].index.get_level_values(1)
if direction == 'right':
# Be careful when there is no index with decimals
end_ind = self.ang_mean[6::arr_num].index.get_level_values(1)
else:
end_ind = self.ang_mean[0:-1:arr_num].index.get_level_values(1)
for n, ind in enumerate(start_ind):
axs.axes.annotate('',
xytext=(self.ang_mean.loc[self.idx[:,ind]],
self.mom_mean.loc[self.idx[:,ind]]),
xy=(self.ang_mean.loc[self.idx[:,end_ind[n]]],
self.mom_mean.loc[self.idx[:,end_ind[n]]]),
arrowprops=dict(arrowstyle="-|>", color=color),
size=self.params['arr_size'])
def integration(self, var1, var2, color, alpha=0.3): #, dx= 0.5, Min = 0, Max = None):
"""
integration based on two variables with shapely
Parameters
----------
var1 (angle data) : DataFrame, Array containing the values for angles.
var2 (moment data) : DataFrame, Array containing the values for Moment.
Returns
-------
FLOAT
Integral under the curve
"""
# Making pairs
list_area = list(zip(var1, var2))
multi_point = MultiPoint(list_area)
poly2 = Polygon([[p.x, p.y] for p in multi_point])
x,y = poly2.exterior.xy
poly1patch = PolygonPatch(poly2, fc= color, ec=color,
alpha=alpha, zorder=2)
self.ax.add_patch(poly1patch)
return poly2.area
| [
"numpy.hstack",
"numpy.array",
"shapely.geometry.Polygon",
"os.path.exists",
"pandas.MultiIndex.from_product",
"numpy.mean",
"matplotlib.pyplot.style.use",
"numpy.ndenumerate",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.py... | [((1656, 1680), 'matplotlib.pyplot.style.use', 'plt.style.use', (['plt_style'], {}), '(plt_style)\n', (1669, 1680), True, 'import matplotlib.pyplot as plt\n'), ((6671, 6747), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'squeeze': '(False)', 'figsize': 'self.fig_size'}), '(nrows=nrows, ncols=ncols, squeeze=False, figsize=self.fig_size)\n', (6683, 6747), True, 'import matplotlib.pyplot as plt\n'), ((6870, 6889), 'numpy.ndenumerate', 'np.ndenumerate', (['axs'], {}), '(axs)\n', (6884, 6889), True, 'import numpy as np\n'), ((7873, 7903), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)'}), '(left=0.05)\n', (7892, 7903), True, 'import matplotlib.pyplot as plt\n'), ((8125, 8151), 'os.chdir', 'os.chdir', (['self.save_folder'], {}), '(self.save_folder)\n', (8133, 8151), False, 'import os\n'), ((8270, 8294), 'os.chdir', 'os.chdir', (['self.root_path'], {}), '(self.root_path)\n', (8278, 8294), False, 'import os\n'), ((8475, 8484), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (8482, 8484), True, 'import matplotlib.pyplot as plt\n'), ((8493, 8502), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8500, 8502), True, 'import matplotlib.pyplot as plt\n'), ((8511, 8522), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8520, 8522), True, 'import matplotlib.pyplot as plt\n'), ((10085, 10109), 'matplotlib.pyplot.style.use', 'plt.style.use', (['plt_style'], {}), '(plt_style)\n', (10098, 10109), True, 'import matplotlib.pyplot as plt\n'), ((10658, 10682), 'numpy.ndenumerate', 'np.ndenumerate', (['self.axs'], {}), '(self.axs)\n', (10672, 10682), True, 'import numpy as np\n'), ((21412, 21434), 'pandas.DataFrame', 'pd.DataFrame', (['reg_info'], {}), '(reg_info)\n', (21424, 21434), True, 'import pandas as pd\n'), ((23569, 23600), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (23587, 23600), False, 'from sklearn import 
linear_model\n'), ((23731, 23754), 'numpy.sum', 'np.sum', (['((Y - pred) ** 2)'], {}), '((Y - pred) ** 2)\n', (23737, 23754), True, 'import numpy as np\n'), ((23995, 24022), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Y', 'pred'], {}), '(Y, pred)\n', (24013, 24022), False, 'from sklearn.metrics import mean_squared_error\n'), ((28029, 28081), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': "self.params['left_margin']"}), "(left=self.params['left_margin'])\n", (28048, 28081), True, 'import matplotlib.pyplot as plt\n'), ((32485, 32506), 'shapely.geometry.MultiPoint', 'MultiPoint', (['list_area'], {}), '(list_area)\n', (32495, 32506), False, 'from shapely.geometry import MultiPoint, Polygon\n'), ((32523, 32565), 'shapely.geometry.Polygon', 'Polygon', (['[[p.x, p.y] for p in multi_point]'], {}), '([[p.x, p.y] for p in multi_point])\n', (32530, 32565), False, 'from shapely.geometry import MultiPoint, Polygon\n'), ((32619, 32681), 'descartes.PolygonPatch', 'PolygonPatch', (['poly2'], {'fc': 'color', 'ec': 'color', 'alpha': 'alpha', 'zorder': '(2)'}), '(poly2, fc=color, ec=color, alpha=alpha, zorder=2)\n', (32631, 32681), False, 'from descartes import PolygonPatch\n'), ((1789, 1800), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1798, 1800), False, 'import os\n'), ((1817, 1839), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1831, 1839), False, 'import os\n'), ((1853, 1872), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1864, 1872), False, 'import os\n'), ((7985, 7995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7993, 7995), True, 'import matplotlib.pyplot as plt\n'), ((19640, 19686), 'numpy.append', 'np.append', (['X', '[X[pos_init[0]], X[pos_init[2]]]'], {}), '(X, [X[pos_init[0]], X[pos_init[2]]])\n', (19649, 19686), True, 'import numpy as np\n'), ((19701, 19747), 'numpy.append', 'np.append', (['Y', '[Y[pos_init[1]], Y[pos_init[3]]]'], {}), '(Y, [Y[pos_init[1]], 
Y[pos_init[3]]])\n', (19710, 19747), True, 'import numpy as np\n'), ((21571, 21717), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[[self.columns_first[self.count]], [self.columns_second[num]], instance]'], {'names': "['Speed', 'instance', 'QS phase']"}), "([[self.columns_first[self.count]], [self.\n columns_second[num]], instance], names=['Speed', 'instance', 'QS phase'])\n", (21597, 21717), True, 'import pandas as pd\n'), ((21776, 21915), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[self.columns_first, [self.columns_second[self.count]], instance]'], {'names': "['Speed', 'instance', 'QS phase']"}), "([self.columns_first, [self.columns_second[self.\n count]], instance], names=['Speed', 'instance', 'QS phase'])\n", (21802, 21915), True, 'import pandas as pd\n'), ((22048, 22090), 'pandas.concat', 'pd.concat', (['[self.reg_info_df, reg_info_df]'], {}), '([self.reg_info_df, reg_info_df])\n', (22057, 22090), True, 'import pandas as pd\n'), ((27453, 27538), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharey': '(True)', 'sharex': '(False)', 'squeeze': '(False)'}), '(nrows=nrows, ncols=ncols, sharey=True, sharex=False, squeeze=False\n )\n', (27465, 27538), True, 'import matplotlib.pyplot as plt\n'), ((27693, 27810), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharey': "self.params['sharey']", 'sharex': "self.params['sharex']", 'squeeze': '(True)'}), "(nrows=nrows, ncols=ncols, sharey=self.params['sharey'], sharex\n =self.params['sharex'], squeeze=True)\n", (27705, 27810), True, 'import matplotlib.pyplot as plt\n'), ((28155, 28188), 'matplotlib.pyplot.yticks', 'plt.yticks', (["self.params['yticks']"], {}), "(self.params['yticks'])\n", (28165, 28188), True, 'import matplotlib.pyplot as plt\n'), ((28201, 28264), 'matplotlib.pyplot.ylim', 'plt.ylim', (["(self.params['yticks'][0], self.params['yticks'][-1])"], {}), "((self.params['yticks'][0], 
self.params['yticks'][-1]))\n", (28209, 28264), True, 'import matplotlib.pyplot as plt\n'), ((28326, 28359), 'matplotlib.pyplot.xticks', 'plt.xticks', (["self.params['xticks']"], {}), "(self.params['xticks'])\n", (28336, 28359), True, 'import matplotlib.pyplot as plt\n'), ((28372, 28435), 'matplotlib.pyplot.xlim', 'plt.xlim', (["(self.params['xticks'][0], self.params['xticks'][-1])"], {}), "((self.params['xticks'][0], self.params['xticks'][-1]))\n", (28380, 28435), True, 'import matplotlib.pyplot as plt\n'), ((24240, 24259), 'numpy.hstack', 'np.hstack', (['pred_mod'], {}), '(pred_mod)\n', (24249, 24259), True, 'import numpy as np\n'), ((3677, 3704), 'itertools.combinations', 'combinations', (['given_comb', '(2)'], {}), '(given_comb, 2)\n', (3689, 3704), False, 'from itertools import combinations\n'), ((15563, 15598), 'pylab.figure', 'pylab.figure', ([], {'figsize': 'self.fig_size'}), '(figsize=self.fig_size)\n', (15575, 15598), False, 'import pylab\n'), ((17196, 17240), 'numpy.array', 'np.array', (['[areas_abs, areas_prod, direction]'], {}), '([areas_abs, areas_prod, direction])\n', (17204, 17240), True, 'import numpy as np\n'), ((23787, 23797), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (23794, 23797), True, 'import numpy as np\n'), ((17410, 17454), 'numpy.array', 'np.array', (['[areas_abs, areas_prod, direction]'], {}), '([areas_abs, areas_prod, direction])\n', (17418, 17454), True, 'import numpy as np\n')] |
#-------by HYH -------#
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
##
world=np.array([['red','green','green','red', 'red'],
['red','red', 'green','red', 'red'],
['red','red', 'green','green','red'],
['red','red', 'red', 'red', 'red']])
color=np.array([['r','b','g','w','y'],
['r','b','g','w','y'],
['r','b','g','w','y'],
['r','b','g','w','y']])
nRow,nCol=np.shape(world)
stop=[0,0]
right=[0,1]
left=[0,-1]
down=[1,0]
up=[-1,0]
pMoveCorrect=0.8
pSenseCorrect=0.7
compEntropy=lambda p:-np.sum(np.sum(p*np.log2(p)))
p=1/(nRow*nCol)*np.ones([nRow,nCol])
##
motions=[stop, right, down, down, right]
measurements=['green','green','green','green','green']
##
def sense(p,z,world,pSenseCorrect):
nRow,nCol=np.shape(p)
q=np.zeros([nRow,nCol])
for r in range(nRow):
for c in range(nCol):
if z==world[r][c]:
hit=1
elif z!=world[r][c]:
hit=0
q[r][c]=p[r][c]*(hit*pSenseCorrect+(1-hit)*(1-pSenseCorrect))
q=q/sum(sum(q))
return q
##
def move(p,u,pMoveCorrect):
nRow,nCol=np.shape(p)
q=np.zeros([nRow,nCol])
for r in range(nRow):
for c in range(nCol):
q[r][c]=pMoveCorrect*p[(r-u[0])%nRow][(c-u[1]%nCol)]+(1-pMoveCorrect)*p[r][c]
return q
##
def maxMat(g):
r=g.argmax(axis=0)
temp=g[r,range(g.shape[1])]
index_Row=np.argsort(temp)
c=index_Row[len(index_Row)-1]
r=r[c]
return r,c
##
assert len(motions)==len(measurements),'The variable ''motions'' should be of the same size as ''measurements''!'
entropy=np.zeros([2,len(motions)])
##
plt.ion()
fig1=plt.figure(figsize=(10,10),dpi=80)
_x=np.arange(1,5)
_y=np.arange(1,6)
_xx,_yy=np.meshgrid(_x,_y)
x,y=_xx.ravel(),_yy.ravel()
for i in range(len(motions)):
ax=fig1.add_subplot(2,1,1, projection='3d')
ax.cla()
p=move(p,motions[i],pMoveCorrect)
p0=p
p=sense(p,measurements[i],world,pSenseCorrect)
p1=p
entropy[:,i]=[compEntropy(p0),compEntropy(p1)]
p0=p0.T
p1=p1.T
color0=color.T
p0=p0.reshape(1,-1)
p0=p0.tolist()
p0=sum(p0,[])
p1=p1.reshape(1,-1)
p1=p1.tolist()
p1=sum(p1,[])
color0=color0.reshape(1,-1)
color0=color0.tolist()
color0=sum(color0,[])
ax.bar3d(x,y,z=np.zeros_like(p0),dx=1,dy=1,dz=p0,color=color0,shade=True)
ax.set_title('Step %s\n The probability before sensing'%(i+1))
ax.set_zlim3d(0,0.3)
ax.set_ylim3d(0.5,4.5)
ax=fig1.add_subplot(2,1,2, projection='3d')
ax.bar3d(x,y,z=np.zeros_like(p1),dx=1,dy=1,dz=p1,color=color0,shade=True)
plt.title('The probability after sensing')
ax.set_zlim3d(0,0.3)
ax.set_ylim3d(0.5,4.5)
plt.pause(1)
##
print('The Posterior:','\n',p)
r,c=maxMat(p)
print('The largest probability %s occurs at cell (%s, %s)\n'%(p[r][c],r,c))
fig2=plt.figure(figsize=(10,10),dpi=80)
plt.plot(range(1,len(motions)+1),entropy[0,:],'bo',MarkerSize=10,label='After sensing')
plt.plot(range(1,len(motions)+1),entropy[1,:],'rx',MarkerSize=10,label='Before sensing')
plt.xlim(0,len(motions)+1)
plt.xlabel('Step')
plt.ylabel('Entropy')
plt.legend()
plt.ioff()
plt.show() | [
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.log2",
"matplotlib.pyplot.ioff",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.ion",
"numpy.meshgrid",
"numpy.... | [((124, 300), 'numpy.array', 'np.array', (["[['red', 'green', 'green', 'red', 'red'], ['red', 'red', 'green', 'red',\n 'red'], ['red', 'red', 'green', 'green', 'red'], ['red', 'red', 'red',\n 'red', 'red']]"], {}), "([['red', 'green', 'green', 'red', 'red'], ['red', 'red', 'green',\n 'red', 'red'], ['red', 'red', 'green', 'green', 'red'], ['red', 'red',\n 'red', 'red', 'red']])\n", (132, 300), True, 'import numpy as np\n'), ((345, 467), 'numpy.array', 'np.array', (["[['r', 'b', 'g', 'w', 'y'], ['r', 'b', 'g', 'w', 'y'], ['r', 'b', 'g', 'w',\n 'y'], ['r', 'b', 'g', 'w', 'y']]"], {}), "([['r', 'b', 'g', 'w', 'y'], ['r', 'b', 'g', 'w', 'y'], ['r', 'b',\n 'g', 'w', 'y'], ['r', 'b', 'g', 'w', 'y']])\n", (353, 467), True, 'import numpy as np\n'), ((497, 512), 'numpy.shape', 'np.shape', (['world'], {}), '(world)\n', (505, 512), True, 'import numpy as np\n'), ((1603, 1612), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1610, 1612), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(80)'}), '(figsize=(10, 10), dpi=80)\n', (1628, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1671), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1665, 1671), True, 'import numpy as np\n'), ((1674, 1689), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (1683, 1689), True, 'import numpy as np\n'), ((1697, 1716), 'numpy.meshgrid', 'np.meshgrid', (['_x', '_y'], {}), '(_x, _y)\n', (1708, 1716), True, 'import numpy as np\n'), ((2725, 2761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(80)'}), '(figsize=(10, 10), dpi=80)\n', (2735, 2761), True, 'import matplotlib.pyplot as plt\n'), ((2964, 2982), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step"""'], {}), "('Step')\n", (2974, 2982), True, 'import matplotlib.pyplot as plt\n'), ((2983, 3004), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entropy"""'], {}), 
"('Entropy')\n", (2993, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3017), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3015, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3028), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3026, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3037, 3039), True, 'import matplotlib.pyplot as plt\n'), ((671, 692), 'numpy.ones', 'np.ones', (['[nRow, nCol]'], {}), '([nRow, nCol])\n', (678, 692), True, 'import numpy as np\n'), ((841, 852), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (849, 852), True, 'import numpy as np\n'), ((856, 878), 'numpy.zeros', 'np.zeros', (['[nRow, nCol]'], {}), '([nRow, nCol])\n', (864, 878), True, 'import numpy as np\n'), ((1126, 1137), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (1134, 1137), True, 'import numpy as np\n'), ((1141, 1163), 'numpy.zeros', 'np.zeros', (['[nRow, nCol]'], {}), '([nRow, nCol])\n', (1149, 1163), True, 'import numpy as np\n'), ((1379, 1395), 'numpy.argsort', 'np.argsort', (['temp'], {}), '(temp)\n', (1389, 1395), True, 'import numpy as np\n'), ((2493, 2535), 'matplotlib.pyplot.title', 'plt.title', (['"""The probability after sensing"""'], {}), "('The probability after sensing')\n", (2502, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2595), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (2592, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2220), 'numpy.zeros_like', 'np.zeros_like', (['p0'], {}), '(p0)\n', (2216, 2220), True, 'import numpy as np\n'), ((2433, 2450), 'numpy.zeros_like', 'np.zeros_like', (['p1'], {}), '(p1)\n', (2446, 2450), True, 'import numpy as np\n'), ((642, 652), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (649, 652), True, 'import numpy as np\n')] |
"""
Name: inherent
Coder: <NAME> (BGI-Research)[V1]
Current Version: 1
Function(s):
(1) Some inherent concepts.
"""
import numpy
# mapping of integer and char
A = 65 # ord('A')
B = 66 # ord('B')
C = 67 # ord('C')
D = 68 # ord('D')
E = 69 # ord('E')
F = 70 # ord('F')
G = 71 # ord('G')
H = 72 # ord('H')
I = 73 # ord('I')
K = 75 # ord('K')
L = 76 # ord('L')
M = 77 # ord('M')
N = 78 # ord('N')
P = 80 # ord('P')
Q = 81 # ord('Q')
R = 82 # ord('R')
S = 83 # ord('S')
T = 84 # ord('T')
V = 86 # ord('V')
W = 87 # ord('W')
Y = 89 # ord('Y')
e = 42 # ord('*')
# double mapping of bases or degenerate bases.
simple_bases = numpy.array([
A, C, G, T,
M, R, W, S, Y, K,
V, H, D, B,
N
])
detailed_bases = numpy.array([
[A], [C], [G], [T],
[A, C], [A, G], [A, T], [C, G], [C, T], [G, T],
[A, C, G], [A, C, T], [A, G, T], [C, G, T],
[A, C, G, T]
])
# principle of complementary base pairing
t_pairing = numpy.array([
A, C, G, T, M, R, W, S, Y, K, V, H, D, B, N
])
c_pairing = numpy.array([
T, G, C, A, K, Y, W, S, R, M, B, D, H, V, N
])
# mapping between normal proteins and codons.
codons = numpy.array([
A, C, D, E, F,
G, H, I, K, L,
L, M, N, P, Q,
R, R, S, S, T,
V, W, Y, e, e
])
base_maps = numpy.array([
[G, C, N], [T, G, Y], [G, A, Y], [G, A, R], [T, T, Y],
[G, G, N], [C, A, Y], [A, T, H], [A, A, R], [T, T, R],
[C, T, N], [A, T, G], [A, A, Y], [C, C, N], [C, A, R],
[C, G, N], [A, G, R], [A, G, Y], [T, C, N], [A, C, N],
[G, T, N], [T, G, G], [T, A, Y], [T, A, R], [T, G, A]
])
| [
"numpy.array"
] | [((681, 739), 'numpy.array', 'numpy.array', (['[A, C, G, T, M, R, W, S, Y, K, V, H, D, B, N]'], {}), '([A, C, G, T, M, R, W, S, Y, K, V, H, D, B, N])\n', (692, 739), False, 'import numpy\n'), ((783, 926), 'numpy.array', 'numpy.array', (['[[A], [C], [G], [T], [A, C], [A, G], [A, T], [C, G], [C, T], [G, T], [A, C,\n G], [A, C, T], [A, G, T], [C, G, T], [A, C, G, T]]'], {}), '([[A], [C], [G], [T], [A, C], [A, G], [A, T], [C, G], [C, T], [G,\n T], [A, C, G], [A, C, T], [A, G, T], [C, G, T], [A, C, G, T]])\n', (794, 926), False, 'import numpy\n'), ((1006, 1064), 'numpy.array', 'numpy.array', (['[A, C, G, T, M, R, W, S, Y, K, V, H, D, B, N]'], {}), '([A, C, G, T, M, R, W, S, Y, K, V, H, D, B, N])\n', (1017, 1064), False, 'import numpy\n'), ((1086, 1144), 'numpy.array', 'numpy.array', (['[T, G, C, A, K, Y, W, S, R, M, B, D, H, V, N]'], {}), '([T, G, C, A, K, Y, W, S, R, M, B, D, H, V, N])\n', (1097, 1144), False, 'import numpy\n'), ((1212, 1304), 'numpy.array', 'numpy.array', (['[A, C, D, E, F, G, H, I, K, L, L, M, N, P, Q, R, R, S, S, T, V, W, Y, e, e]'], {}), '([A, C, D, E, F, G, H, I, K, L, L, M, N, P, Q, R, R, S, S, T, V,\n W, Y, e, e])\n', (1223, 1304), False, 'import numpy\n'), ((1342, 1643), 'numpy.array', 'numpy.array', (['[[G, C, N], [T, G, Y], [G, A, Y], [G, A, R], [T, T, Y], [G, G, N], [C, A, Y\n ], [A, T, H], [A, A, R], [T, T, R], [C, T, N], [A, T, G], [A, A, Y], [C,\n C, N], [C, A, R], [C, G, N], [A, G, R], [A, G, Y], [T, C, N], [A, C, N],\n [G, T, N], [T, G, G], [T, A, Y], [T, A, R], [T, G, A]]'], {}), '([[G, C, N], [T, G, Y], [G, A, Y], [G, A, R], [T, T, Y], [G, G,\n N], [C, A, Y], [A, T, H], [A, A, R], [T, T, R], [C, T, N], [A, T, G], [\n A, A, Y], [C, C, N], [C, A, R], [C, G, N], [A, G, R], [A, G, Y], [T, C,\n N], [A, C, N], [G, T, N], [T, G, G], [T, A, Y], [T, A, R], [T, G, A]])\n', (1353, 1643), False, 'import numpy\n')] |
from PIL import Image
import numpy as np
import tensorflow as tf
# colour map
# Colour map: one RGB triple per PASCAL-VOC class index.
label_colours = [
    (0, 0, 0),        # 0=background
    (128, 0, 0),      # 1=aeroplane
    (0, 128, 0),      # 2=bicycle
    (128, 128, 0),    # 3=bird
    (0, 0, 128),      # 4=boat
    (128, 0, 128),    # 5=bottle
    (0, 128, 128),    # 6=bus
    (128, 128, 128),  # 7=car
    (64, 0, 0),       # 8=cat
    (192, 0, 0),      # 9=chair
    (64, 128, 0),     # 10=cow
    (192, 128, 0),    # 11=diningtable
    (64, 0, 128),     # 12=dog
    (192, 0, 128),    # 13=horse
    (64, 128, 128),   # 14=motorbike
    (192, 128, 128),  # 15=person
    (0, 64, 0),       # 16=potted plant
    (128, 64, 0),     # 17=sheep
    (0, 192, 0),      # 18=sofa
    (128, 192, 0),    # 19=train
    (0, 64, 128),     # 20=tv/monitor
]

# Binary (2-class) palette: background black, foreground white.
label_colours_bin = [(0, 0, 0), (255, 255, 255)]


def decode_labels(mask, num_images=1, num_classes=21, include=False):
    """Decode a batch of segmentation masks into RGB images.

    Vectorised numpy palette lookup replacing the original per-pixel
    PIL loop -- same output, no PIL dependency, orders of magnitude faster.

    Args:
      mask: int array of shape (batch, h, w, 1), result of inference after
        taking the argmax.
      num_images: number of images from the batch to decode.
      num_classes: number of classes predicted (including background).
      include: if True, paint out-of-range class ids with a sentinel colour
        instead of leaving them black.

    Returns:
      uint8 array of shape (num_images, h, w, 3) with RGB images.
    """
    palette = label_colours_bin if num_classes == 2 else label_colours
    n, h, w, c = mask.shape
    assert n >= num_images, (
        'Batch size %d should be greater or equal than number of images to save %d.'
        % (n, num_images))
    pal = np.array(palette, dtype=np.uint8)
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for i in range(num_images):
        m = mask[i, :, :, 0]
        valid = m < num_classes
        # In-range ids map through the palette (negative ids wrap, matching
        # Python-list indexing in the original implementation).
        outputs[i][valid] = pal[m[valid]]
        if include:
            # Out-of-range ids get a sentinel colour (original behaviour);
            # without `include` they stay black.
            outputs[i][~valid] = (255, 255, 200) if num_classes > 2 else (128, 0, 0)
    return outputs
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
    """Resize masks and perform one-hot encoding.

    NOTE(review): this uses the TensorFlow 1.x API
    (``tf.image.resize_nearest_neighbor`` and the ``squeeze_dims`` keyword);
    it will not run unmodified under TF 2.x -- confirm the project pins TF1.

    Args:
      input_batch: input tensor of shape [batch_size H W 1].
      new_size: a tensor with new height and width.
      num_classes: number of classes to predict (including background).
      one_hot: whether perform one-hot encoding.

    Returns:
      Outputs a tensor of shape [batch_size h w 21]
      with last dimension comprised of 0's and 1's only.
    """
    with tf.name_scope('label_encode'):
        # Nearest-neighbour keeps label values integral (no blending).
        input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
        input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
        if one_hot:
            input_batch = tf.one_hot(input_batch, depth=num_classes)
    return input_batch
def inv_preprocess(imgs, num_images, img_mean):
    """Inverse preprocessing of a batch of images.

    Adds back the mean vector and converts from BGR to RGB, vectorised over
    the whole batch instead of the original per-image Python loop
    (identical output, one numpy expression).

    Args:
      imgs: batch of input images, shape (batch, h, w, c), mean-subtracted BGR.
      num_images: number of images to apply the inverse transformations on.
      img_mean: vector of mean colour values (BGR order, like the input).

    Returns:
      uint8 array of shape (num_images, h, w, c) in RGB.
    """
    n, h, w, c = imgs.shape
    assert n >= num_images, (
        'Batch size %d should be greater or equal than number of images to save %d.'
        % (n, num_images))
    # Add the mean back, flip the channel axis (BGR -> RGB), cast to uint8.
    return ((imgs[:num_images] + img_mean)[:, :, :, ::-1]).astype(np.uint8)
def dice_coef(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
    """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
    of two batches of data, usually used for binary image segmentation
    (i.e. binary labels). The coefficient is between 0 and 1; 1 is a perfect match.

    Parameters
    -----------
    output : Tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : Tensor
        The target distribution, format the same with `output`.
    loss_type : str
        ``jaccard`` or ``sorensen``, default is ``jaccard``.
    axis : tuple of int
        All dimensions are reduced, default ``[1,2,3]``.
    smooth : float
        This small value will be added to the numerator and denominator.
            - If both output and target are empty, it makes sure dice is 1.
            - If either output or target are empty (all pixels are background), dice = ```smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even for image values lower than the threshold), so a higher smooth yields a higher dice.

    Examples
    ---------
    predictions = tf.nn.softmax(predictions)
    dice_loss = 1 - dice_coe(predictions, gt)

    References
    -----------
    - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`
    """
    # Soft intersection: 2 * sum(output * target) per sample.
    numerator = 2. * tf.reduce_sum(output * target, axis=axis)
    if loss_type == 'jaccard':
        # Squared-magnitude denominator: |A|^2 + |B|^2.
        denominator = tf.reduce_sum(output * output, axis=axis) + tf.reduce_sum(target * target, axis=axis)
    elif loss_type == 'sorensen':
        # Plain-sum denominator: |A| + |B|.
        denominator = tf.reduce_sum(output, axis=axis) + tf.reduce_sum(target, axis=axis)
    else:
        # NOTE(review): message contains a typo ("Unknow"); left unchanged here
        # since it is a runtime string.
        raise Exception("Unknow loss_type")
    dice = (numerator + smooth) / (denominator + smooth)
    # Average the per-sample dice over the batch.
    dice = tf.reduce_mean(dice, name='dice_coe')
return dice | [
"tensorflow.one_hot",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.reduce_sum",
"numpy.array",
"numpy.zeros",
"tensorflow.name_scope",
"tensorflow.reduce_mean",
"tensorflow.squeeze"
] | [((1402, 1449), 'numpy.zeros', 'np.zeros', (['(num_images, h, w, 3)'], {'dtype': 'np.uint8'}), '((num_images, h, w, 3), dtype=np.uint8)\n', (1410, 1449), True, 'import numpy as np\n'), ((3385, 3432), 'numpy.zeros', 'np.zeros', (['(num_images, h, w, c)'], {'dtype': 'np.uint8'}), '((num_images, h, w, c), dtype=np.uint8)\n', (3393, 3432), True, 'import numpy as np\n'), ((5378, 5415), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dice'], {'name': '"""dice_coe"""'}), "(dice, name='dice_coe')\n", (5392, 5415), True, 'import tensorflow as tf\n'), ((1863, 1876), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1871, 1876), True, 'import numpy as np\n'), ((2401, 2430), 'tensorflow.name_scope', 'tf.name_scope', (['"""label_encode"""'], {}), "('label_encode')\n", (2414, 2430), True, 'import tensorflow as tf\n'), ((2454, 2509), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['input_batch', 'new_size'], {}), '(input_batch, new_size)\n', (2486, 2509), True, 'import tensorflow as tf\n'), ((2588, 2629), 'tensorflow.squeeze', 'tf.squeeze', (['input_batch'], {'squeeze_dims': '[3]'}), '(input_batch, squeeze_dims=[3])\n', (2598, 2629), True, 'import tensorflow as tf\n'), ((4946, 4987), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(output * target)'], {'axis': 'axis'}), '(output * target, axis=axis)\n', (4959, 4987), True, 'import tensorflow as tf\n'), ((2710, 2752), 'tensorflow.one_hot', 'tf.one_hot', (['input_batch'], {'depth': 'num_classes'}), '(input_batch, depth=num_classes)\n', (2720, 2752), True, 'import tensorflow as tf\n'), ((5041, 5082), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(output * output)'], {'axis': 'axis'}), '(output * output, axis=axis)\n', (5054, 5082), True, 'import tensorflow as tf\n'), ((5085, 5126), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(target * target)'], {'axis': 'axis'}), '(target * target, axis=axis)\n', (5098, 5126), True, 'import tensorflow as tf\n'), ((5183, 5215), 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['output'], {'axis': 'axis'}), '(output, axis=axis)\n', (5196, 5215), True, 'import tensorflow as tf\n'), ((5218, 5250), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target'], {'axis': 'axis'}), '(target, axis=axis)\n', (5231, 5250), True, 'import tensorflow as tf\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
def open_avro(fname):
    """Return the first record (packet) of an avro alert file.

    NOTE(review): relies on ``fastavro``, which is not imported in this
    module's visible import block -- confirm it is imported elsewhere.

    :param fname: path to the .avro file.
    :return: dict, first record in the file (implicitly None for an empty file).
    """
    with open(fname,'rb') as f:
        freader = fastavro.reader(f)
        schema = freader.writer_schema
        for packet in freader:
            return packet
def make_dataframe(packet):
    """Build a light-curve DataFrame from an alert packet.

    Concatenates the triggering candidate with the previous candidates into
    a single frame (columns sorted, index reset).

    :param packet: dict with 'candidate', 'prv_candidates', 'objectId', 'candid'.
    :return: pandas DataFrame with one row per detection.
    """
    cand = pd.DataFrame([packet['candidate']])
    prv = pd.DataFrame(packet['prv_candidates'])
    lightcurve = pd.concat([cand, prv], ignore_index=True, sort=True)
    # NOTE: these are attribute assignments on the frame, not columns.
    lightcurve.objectId = packet['objectId']
    lightcurve.candid = packet['candid']
    return lightcurve
def dcmag(dflc, match_radius_arcsec=1.5, star_galaxy_threshold=0.4, band=2):
    """Compute DC (direct) magnitudes for a ZTF alert light curve.

    Combines the difference-image PSF photometry (magpsf/sigmapsf) with the
    reference-image photometry (magnr/sigmagnr) in flux space to recover the
    total (DC) magnitude of the source.

    Bug fix vs. the original: the denominator of ``dc_sigmag`` was not
    parenthesised, so ``sign * 10**(-0.4*magpsf)`` was *added to* the error
    instead of being part of the total-flux denominator. The standard ZTF
    recipe divides the flux-space error by the total flux.

    Args:
      dflc: DataFrame of alert candidates (one row per detection); must
        contain distpsnr1, sgscore1, magnr, sigmagnr, magpsf, sigmapsf,
        diffmaglim, isdiffpos, fid, field, rcid.
      match_radius_arcsec: max PS1 cross-match distance to treat the nearest
        source as the counterpart.
      star_galaxy_threshold: PS1 star/galaxy score below which the nearest
        counterpart is considered a galaxy.
      band: unused here; kept for interface compatibility.

    Returns:
      The input DataFrame unchanged if the object is not a point source,
      otherwise with dc_mag / dc_sigmag / dc_mag_ulim / dc_mag_llim columns.
    """
    if (dflc.loc[0, 'distpsnr1'] > match_radius_arcsec) & (dflc.loc[0, 'sgscore1'] < star_galaxy_threshold):
        print('Object is not a variable star.')
        return dflc

    dflc = dflc.fillna(np.nan)

    def robust_median(x):
        # Median over finite values only; NaN for an empty group.
        if len(x) == 0:
            return np.nan
        return np.median(x[np.isfinite(x)])

    # Impute missing reference magnitudes per (filter, field, readout channel).
    grp = dflc.groupby(['fid', 'field', 'rcid'])
    impute_magnr = grp['magnr'].agg(robust_median)
    impute_sigmagnr = grp['sigmagnr'].agg(robust_median)
    for idx, grpi in grp:
        missing = grpi[np.isnan(grpi['magnr'])].index
        dflc.loc[missing, 'magnr'] = impute_magnr[idx]
        dflc.loc[missing, 'sigmagnr'] = impute_sigmagnr[idx]

    # +1 when the difference image is positive (source brighter than
    # reference), -1 otherwise.
    dflc['sign'] = 2 * (dflc['isdiffpos'] == 't') - 1

    ref_flux = 10**(-0.4 * dflc['magnr'])
    dif_flux = 10**(-0.4 * dflc['magpsf'])
    total_flux = ref_flux + dflc['sign'] * dif_flux

    dflc['dc_mag'] = -2.5 * np.log10(total_flux)
    # Propagate the errors in flux space, then divide by the *total* flux
    # (parenthesised denominator -- this was the bug).
    dflc['dc_sigmag'] = np.sqrt(
        (ref_flux * dflc['sigmagnr'])**2. +
        (dif_flux * dflc['sigmapsf'])**2.) / total_flux
    # Upper/lower limits from the difference-image limiting magnitude; the
    # lower limit is NaN when the limiting flux exceeds the reference flux.
    dflc['dc_mag_ulim'] = -2.5 * np.log10(ref_flux + 10**(-0.4 * dflc['diffmaglim']))
    dflc['dc_mag_llim'] = -2.5 * np.log10(ref_flux - 10**(-0.4 * dflc['diffmaglim']))
    return dflc
def band_amplitude(dflc, band=2):
    """Report the magnitude amplitude of the light curve in one band.

    Uses the DC magnitudes when present, the PSF magnitudes otherwise,
    prints a short summary and returns max - min for the requested filter.

    :param dflc: light-curve DataFrame (needs 'fid' plus a magnitude column).
    :param band: ZTF filter id to select (default 2 = r).
    :return: float, magnitude amplitude in the selected band.
    """
    mag_key = 'dc_mag' if 'dc_mag' in dflc.columns.values else 'magpsf'
    in_band = dflc[dflc.fid == band]
    brightest = in_band[mag_key].max()
    faintest = in_band[mag_key].min()
    ampli = brightest - faintest
    print('Max:', brightest)
    print('Min:', faintest)
    print('Amplitude:', ampli)
    print('Is amplitude > 1.0 mag?', ampli >= 1)
    return ampli
def plot_dc_lightcurve(dflc, days_ago=True):
    """Plot the DC-magnitude light curve, one colour per ZTF filter.

    Detections are drawn as error bars (dc_mag / dc_sigmag); epochs without
    a detection are drawn as upper/lower-limit markers.

    :param dflc: light-curve DataFrame with jd, fid, magpsf and the
        dc_mag / dc_sigmag / dc_mag_ulim / dc_mag_llim columns.
    :param days_ago: if True, plot against days before now; else raw JD.
    :return: the matplotlib Figure.
    """
    plt.rcParams["figure.figsize"] = (10,7)
    # ZTF filter ids: 1=g, 2=r, 3=i -- presumably; colours chosen per band.
    filter_color = {1:'green', 2:'red', 3:'pink'}
    if days_ago:
        now = Time.now().jd
        t = dflc.jd - now
        xlabel = 'Days Ago'
    else:
        t = dflc.jd
        xlabel = 'Time (JD)'
    fig=plt.figure()
    for fid, color in filter_color.items():
        # Plot detections in this filter (rows with a measured magpsf):
        w = (dflc.fid == fid) & ~dflc.magpsf.isnull()
        if np.sum(w):
            plt.errorbar(t[w],dflc.loc[w,'dc_mag'], dflc.loc[w,'dc_sigmag'],fmt='.',color=color)
        # Non-detections: show the upper/lower limits as triangles.
        wnodet = (dflc.fid == fid) & dflc.magpsf.isnull()
        if np.sum(wnodet):
            plt.scatter(t[wnodet],dflc.loc[wnodet,'dc_mag_ulim'], marker='v',color=color,alpha=0.25)
            plt.scatter(t[wnodet],dflc.loc[wnodet,'dc_mag_llim'], marker='^',color=color,alpha=0.25)
    # Magnitudes: smaller is brighter, so flip the y axis.
    plt.gca().invert_yaxis()
    plt.xlabel(xlabel)
    plt.ylabel('Magnitude')
    return fig
def get_dc_mag(dflc, band=2, days_ago=True):
    """Ensure DC magnitudes exist, then report the amplitude and plot.

    :param dflc: light-curve DataFrame (dc_mag columns added if missing).
    :param band: filter id passed to band_amplitude.
    :param days_ago: x-axis choice passed to plot_dc_lightcurve.
    :return: (DataFrame with DC columns, amplitude, matplotlib Figure).
    """
    if 'dc_mag' not in dflc.columns.values:
        dflc = dcmag(dflc)
    amplitude = band_amplitude(dflc, band=band)
    lc_figure = plot_dc_lightcurve(dflc, days_ago=days_ago)
    return dflc, amplitude, lc_figure
| [
"numpy.log10",
"astropy.time.Time.now",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.isfinite",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.errorbar",
"pandas.DataFrame",
"p... | [((328, 372), 'pandas.DataFrame', 'pd.DataFrame', (["packet['candidate']"], {'index': '[0]'}), "(packet['candidate'], index=[0])\n", (340, 372), True, 'import pandas as pd\n'), ((386, 424), 'pandas.DataFrame', 'pd.DataFrame', (["packet['prv_candidates']"], {}), "(packet['prv_candidates'])\n", (398, 424), True, 'import pandas as pd\n'), ((436, 490), 'pandas.concat', 'pd.concat', (['[dfc, df_prv]'], {'ignore_index': '(True)', 'sort': '(True)'}), '([dfc, df_prv], ignore_index=True, sort=True)\n', (445, 490), True, 'import pandas as pd\n'), ((2896, 2908), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2906, 2908), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3504, 3512), True, 'import matplotlib.pyplot as plt\n'), ((3517, 3540), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude"""'], {}), "('Magnitude')\n", (3527, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3060, 3069), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (3066, 3069), True, 'import numpy as np\n'), ((3237, 3251), 'numpy.sum', 'np.sum', (['wnodet'], {}), '(wnodet)\n', (3243, 3251), True, 'import numpy as np\n'), ((1333, 1356), 'numpy.isnan', 'np.isnan', (["grpi['magnr']"], {}), "(grpi['magnr'])\n", (1341, 1356), True, 'import numpy as np\n'), ((1598, 1688), 'numpy.log10', 'np.log10', (["(10 ** (-0.4 * dflc['magnr']) + dflc['sign'] * 10 ** (-0.4 * dflc['magpsf']))"], {}), "(10 ** (-0.4 * dflc['magnr']) + dflc['sign'] * 10 ** (-0.4 * dflc[\n 'magpsf']))\n", (1606, 1688), True, 'import numpy as np\n'), ((1957, 2031), 'numpy.log10', 'np.log10', (["(10 ** (-0.4 * dflc['magnr']) + 10 ** (-0.4 * dflc['diffmaglim']))"], {}), "(10 ** (-0.4 * dflc['magnr']) + 10 ** (-0.4 * dflc['diffmaglim']))\n", (1965, 2031), True, 'import numpy as np\n'), ((2066, 2140), 'numpy.log10', 'np.log10', (["(10 ** (-0.4 * dflc['magnr']) - 10 ** (-0.4 * dflc['diffmaglim']))"], {}), "(10 ** (-0.4 * dflc['magnr']) - 10 
** (-0.4 * dflc['diffmaglim']))\n", (2074, 2140), True, 'import numpy as np\n'), ((2756, 2766), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (2764, 2766), False, 'from astropy.time import Time\n'), ((3083, 3176), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['t[w]', "dflc.loc[w, 'dc_mag']", "dflc.loc[w, 'dc_sigmag']"], {'fmt': '"""."""', 'color': 'color'}), "(t[w], dflc.loc[w, 'dc_mag'], dflc.loc[w, 'dc_sigmag'], fmt='.',\n color=color)\n", (3095, 3176), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3362), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t[wnodet]', "dflc.loc[wnodet, 'dc_mag_ulim']"], {'marker': '"""v"""', 'color': 'color', 'alpha': '(0.25)'}), "(t[wnodet], dflc.loc[wnodet, 'dc_mag_ulim'], marker='v', color=\n color, alpha=0.25)\n", (3276, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3463), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t[wnodet]', "dflc.loc[wnodet, 'dc_mag_llim']"], {'marker': '"""^"""', 'color': 'color', 'alpha': '(0.25)'}), "(t[wnodet], dflc.loc[wnodet, 'dc_mag_llim'], marker='^', color=\n color, alpha=0.25)\n", (3377, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3465, 3474), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3472, 3474), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1839), 'numpy.sqrt', 'np.sqrt', (["((10 ** (-0.4 * dflc['magnr']) * dflc['sigmagnr']) ** 2.0 + (10 ** (-0.4 *\n dflc['magpsf']) * dflc['sigmapsf']) ** 2.0)"], {}), "((10 ** (-0.4 * dflc['magnr']) * dflc['sigmagnr']) ** 2.0 + (10 ** (\n -0.4 * dflc['magpsf']) * dflc['sigmapsf']) ** 2.0)\n", (1716, 1839), True, 'import numpy as np\n'), ((1042, 1056), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (1053, 1056), True, 'import numpy as np\n')] |
"""
It contains the functions to compute the cases that presents an analytical
solutions.
All functions output the analytical solution in kcal/mol
"""
import numpy
from numpy import pi
from scipy import special, linalg
from scipy.misc import factorial
from math import gamma
def an_spherical(q, xq, E_1, E_2, E_0, R, N):
    """
    It computes the analytical solution of the potential of a sphere with
    Nq charges inside, following Kirkwood (1934).

    Bug fix vs. the original: the charge factor inside the triple sum was
    ``q[K]`` (the *target* charge) instead of ``q[k]`` (the *source* charge),
    which broke superposition over the sources; the sibling ``an_P`` uses the
    correct ``q[k]`` in the analogous sum.

    Arguments
    ----------
    q  : array, charges.
    xq : array, positions of the charges.
    E_1: float, dielectric constant inside the sphere.
    E_2: float, dielectric constant outside the sphere.
    E_0: float, dielectric constant of vacuum.
    R  : float, radius of the sphere.
    N  : int, number of terms desired in the spherical harmonic expansion.

    Returns
    --------
    PHI: array, reaction potential at each charge location.
    """
    PHI = numpy.zeros(len(q))
    for K in range(len(q)):
        # Spherical coordinates of the evaluation (target) charge.
        rho = numpy.sqrt(numpy.sum(xq[K]**2))
        zenit = numpy.arccos(xq[K, 2] / rho)
        azim = numpy.arctan2(xq[K, 1], xq[K, 0])

        phi = 0. + 0. * 1j
        for n in range(N):
            for m in range(-n, n + 1):
                sph1 = special.sph_harm(m, n, zenit, azim)
                # Kirkwood image-series coefficient for mode (n, m).
                cons1 = rho**n / (E_1 * E_0 * R**(2 * n + 1)) * (E_1 - E_2) * (
                    n + 1) / (E_1 * n + E_2 * (n + 1))
                cons2 = 4 * pi / (2 * n + 1)

                for k in range(len(q)):
                    # Spherical coordinates of the source charge.
                    rho_k = numpy.sqrt(numpy.sum(xq[k]**2))
                    zenit_k = numpy.arccos(xq[k, 2] / rho_k)
                    azim_k = numpy.arctan2(xq[k, 1], xq[k, 0])
                    sph2 = numpy.conj(special.sph_harm(m, n, zenit_k, azim_k))
                    # q[k]: superpose the contribution of every source charge.
                    phi += cons1 * cons2 * q[k] * rho_k**n * sph1 * sph2

        PHI[K] = numpy.real(phi) / (4 * pi)

    return PHI
def get_K(x, n):
    """
    It computes the polynomials K needed for the Kirkwood-1934 solutions:
    K_n(x) in Equation 4 of Kirkwood 1934.

    Improvement vs. the original: uses exact integer factorials from the
    standard library instead of ``scipy.misc.factorial``, which was
    deprecated and has been removed from modern scipy releases.

    Arguments
    ----------
    x: float, evaluation point of K.
    n: int, order of the polynomial (number of terms is n + 1).

    Returns
    --------
    K: float, the polynomial K_n evaluated at x.
    """
    from math import factorial  # stdlib, exact; keeps the fix self-contained

    K = 0.
    n_fact = factorial(n)
    n_fact2 = factorial(2 * n)
    for s in range(n + 1):
        # Coefficient of x**s from Kirkwood (1934), Eq. 4.
        K += 2**s * n_fact * factorial(2 * n - s) / (
            factorial(s) * n_fact2 * factorial(n - s)) * x**s
    return K
def an_P(q, xq, E_1, E_2, R, kappa, a, N):
    """
    It computes the solvation energy according to Kirkwood-1934
    (sphere of radius R with a Stern layer of radius a, in salt water).

    Arguments
    ----------
    q    : array, charges.
    xq   : array, positions of the charges.
    E_1  : float, dielectric constant inside the sphere.
    E_2  : float, dielectric constant outside the sphere.
    R    : float, radius of the sphere.
    kappa: float, reciprocal of Debye length.
    a    : float, radius of the Stern Layer.
    N    : int, number of terms desired in the polynomial expansion.

    Returns
    --------
    E_P  : float, solvation energy (kcal/mol).
    """
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule
    PHI = numpy.zeros(len(q))
    for K in range(len(q)):
        # Spherical coordinates of the evaluation charge.
        rho = numpy.sqrt(numpy.sum(xq[K]**2))
        zenit = numpy.arccos(xq[K, 2] / rho)
        azim = numpy.arctan2(xq[K, 1], xq[K, 0])
        phi = 0. + 0. * 1j
        for n in range(N):
            for m in range(-n, n + 1):
                # Associated Legendre function at the target colatitude.
                P1 = special.lpmv(numpy.abs(m), n, numpy.cos(zenit))
                # Multipole moment E_nm of the interior charge distribution.
                Enm = 0.
                for k in range(len(q)):
                    rho_k = numpy.sqrt(numpy.sum(xq[k]**2))
                    zenit_k = numpy.arccos(xq[k, 2] / rho_k)
                    azim_k = numpy.arctan2(xq[k, 1], xq[k, 0])
                    P2 = special.lpmv(numpy.abs(m), n, numpy.cos(zenit_k))
                    Enm += q[k] * rho_k**n * factorial(n - numpy.abs(
                        m)) / factorial(n + numpy.abs(m)) * P2 * numpy.exp(
                            -1j * m * azim_k)
                # Salt-screening correction built from the K polynomials
                # (get_K) of Kirkwood's Eq. 4.
                C2 = (kappa * a)**2 * get_K(kappa * a, n - 1) / (
                    get_K(kappa * a, n + 1) + n * (E_2 - E_1) / (
                        (n + 1) * E_2 + n * E_1) * (R / a)**(2 * n + 1) *
                    (kappa * a)**2 * get_K(kappa * a, n - 1) / ((2 * n - 1) *
                                                                (2 * n + 1)))
                C1 = Enm / (E_2 * E_0 * a**
                            (2 * n + 1)) * (2 * n + 1) / (2 * n - 1) * (E_2 / (
                                (n + 1) * E_2 + n * E_1))**2
                # Reaction-field coefficient; the monopole (n=m=0) term has
                # its own closed form.
                if n == 0 and m == 0:
                    Bnm = Enm / (E_0 * R) * (
                        1 / E_2 - 1 / E_1) - Enm * kappa * a / (
                            E_0 * E_2 * a * (1 + kappa * a))
                else:
                    Bnm = 1. / (E_1 * E_0 * R**(2 * n + 1)) * (E_1 - E_2) * (
                        n + 1) / (E_1 * n + E_2 * (n + 1)) * Enm - C1 * C2
                phi += Bnm * rho**n * P1 * numpy.exp(1j * m * azim)
        PHI[K] = numpy.real(phi) / (4 * pi)
    # Unit conversion to kcal/mol.
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J)
    E_P = 0.5 * C0 * numpy.sum(q * PHI)
    return E_P
def two_sphere(a, R, kappa, E_1, E_2, q):
    """
    It computes the analytical solution of a spherical surface and a spherical
    molecule with a center charge, both of radius R.
    Follows Cooper&Barba 2016

    Arguments
    ----------
    a    : float, center to center distance.
    R    : float, radius of surface and molecule.
    kappa: float, reciprocal of Debye length.
    E_1  : float, dielectric constant inside the sphere.
    E_2  : float, dielectric constant outside the sphere.
    q    : float, number of qe to be assigned to the charge.

    Returns
    --------
    Einter  : float, interaction energy.
    E1sphere: float, solvation energy of one sphere.
    E2sphere: float, solvation energy of two spheres together.

    Note:
    Einter should match (E2sphere - 2xE1sphere)
    """
    N = 20  # Number of terms in expansion.
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule
    # Half-integer orders: the kv/iv Bessel functions scaled by
    # sqrt(pi/(2x)) below give the modified *spherical* Bessel functions
    # k_n and i_n, and K1p/I1p their recurrence-based derivatives.
    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]
    K1 = special.kv(index2, kappa * a)
    K1p = index / (kappa * a) * K1[0:-1] - K1[1:]
    k1 = special.kv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.kv(
        index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * K1p
    I1 = special.iv(index2, kappa * a)
    I1p = index / (kappa * a) * I1[0:-1] + I1[1:]
    i1 = special.iv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
    i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.iv(
        index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * I1p
    # Re-expansion (translation) coefficients B[n, m] coupling the two
    # spheres' harmonics across the center-to-center distance R.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n >= nu and m >= nu:
                    g1 = gamma(n - nu + 0.5)
                    g2 = gamma(m - nu + 0.5)
                    g3 = gamma(nu + 0.5)
                    g4 = gamma(m + n - nu + 1.5)
                    f1 = factorial(n + m - nu)
                    f2 = factorial(n - nu)
                    f3 = factorial(m - nu)
                    f4 = factorial(nu)
                    Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
                        pi * g4 * f2 * f3 * f4)
                    kB = special.kv(n + m - 2 * nu + 0.5, kappa *
                                    R) * numpy.sqrt(pi / (2 * kappa * R))
                    B[n, m] += Anm * kB
    # Linear system for the exterior expansion coefficients, from matching
    # the interface conditions mode by mode.
    M = numpy.zeros((N, N), float)
    E_hat = E_1 / E_2
    for i in range(N):
        for j in range(N):
            M[i, j] = (2 * i + 1) * B[i, j] * (
                kappa * i1p[i] - E_hat * i * i1[i] / a)
            if i == j:
                M[i, j] += kappa * k1p[i] - E_hat * i * k1[i] / a
    RHS = numpy.zeros(N)
    RHS[0] = -E_hat * q / (4 * pi * E_1 * a * a)
    a_coeff = linalg.solve(M, RHS)
    a0 = a_coeff[0]
    # a0_inf: monopole coefficient for an isolated sphere (R -> infinity).
    a0_inf = -E_hat * q / (4 * pi * E_1 * a * a) * 1 / (kappa * k1p[0])
    # Surface potentials: two-sphere (phi_2) and isolated-sphere (phi_1).
    phi_2 = a0 * k1[0] + i1[0] * numpy.sum(a_coeff * B[:, 0]) - q / (4 * pi *
                                                                    E_1 * a)
    phi_1 = a0_inf * k1[0] - q / (4 * pi * E_1 * a)
    phi_inter = phi_2 - phi_1
    # Unit conversion to kcal/mol.
    CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    Einter = 0.5 * CC0 * q * phi_inter
    E1sphere = 0.5 * CC0 * q * phi_1
    E2sphere = 0.5 * CC0 * q * phi_2
    return Einter, E1sphere, E2sphere
def constant_potential_single_point(phi0, a, r, kappa):
    """Potential at distance r from a sphere held at constant potential.

    Screened-Coulomb (linearized Poisson-Boltzmann) solution outside a
    sphere of radius a whose surface is held at phi0.

    Arguments
    ----------
    phi0 : float, constant potential on the surface of the sphere.
    a    : float, radius of the sphere.
    r    : float, distance from the sphere center to the evaluation point.
    kappa: float, reciprocal of Debye length.

    Returns
    --------
    float, potential at r.
    """
    screening = numpy.exp(kappa * (a - r))  # Debye screening factor
    return phi0 * (a / r) * screening
def constant_charge_single_point(sigma0, a, r, kappa, epsilon):
    """Potential at distance r from a sphere carrying constant charge.

    Screened-Coulomb (linearized Poisson-Boltzmann) solution outside a
    sphere of radius a with uniform surface charge density sigma0.

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    a      : float, radius of the sphere.
    r      : float, distance from the sphere center to the evaluation point.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    float, potential at r.
    """
    surface_slope = sigma0 / epsilon  # -dphi/dr at the surface
    screening = numpy.exp(kappa * (a - r))
    return surface_slope * a * a * screening / ((1 + kappa * a) * r)
def constant_potential_single_charge(phi0, radius, kappa, epsilon):
    """Surface charge of a sphere held at constant potential in water.

    From the screened-Coulomb exterior solution, sigma = -epsilon * dphi/dr
    evaluated at the surface.

    Arguments
    ----------
    phi0   : float, constant potential on the surface of the sphere.
    radius : float, radius of the sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    float, surface charge.
    """
    return epsilon * phi0 * (1. + kappa * radius) / radius
def constant_charge_single_potential(sigma0, radius, kappa, epsilon):
    """Surface potential of a sphere carrying constant charge in water.

    Inverse of constant_potential_single_charge: phi0 such that the
    screened-Coulomb exterior solution carries surface charge sigma0.

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    radius : float, radius of the sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    float, surface potential.
    """
    return sigma0 * radius / (epsilon * (1. + kappa * radius))
def constant_potential_twosphere(phi01, phi02, r1, r2, R, kappa, epsilon):
    """
    It computes the solvation energy of two spheres at constant potential,
    immersed in water (monopole term of the two-sphere expansion).

    Arguments
    ----------
    phi01  : float, constant potential on the surface of the sphere 1.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E_solv : float, solvation energy (kcal/mol). Also prints the
             dimensionless per-sphere energies U1, U2 and their sum.
    """
    kT = 4.1419464e-21       # thermal energy at 300K [J]
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule
    # Non-dimensionalize the potentials by kT/qe.
    C0 = kT / qe
    phi01 /= C0
    phi02 /= C0
    # Zeroth-order modified spherical Bessel functions:
    # kv/iv of order 1/2 scaled by sqrt(pi/(2x)) give k_0 and i_0.
    k1 = special.kv(0.5, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k2 = special.kv(0.5, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
    B00 = special.kv(0.5, kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
    i1 = special.iv(0.5, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    i2 = special.iv(0.5, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
    # Monopole expansion coefficients from the two surface-potential
    # boundary conditions.
    a0 = (phi02 * B00 * i1 - phi01 * k2) / (B00 * B00 * i2 * i1 - k1 * k2)
    b0 = (phi02 * k1 - phi01 * B00 * i2) / (k2 * k1 - B00 * B00 * i1 * i2)
    # Dimensionless energies of each sphere.
    U1 = 2 * pi * phi01 * (phi01 * numpy.exp(kappa * r1) * (kappa * r1) *
                           (kappa * r1) / numpy.sinh(kappa * r1) - pi * a0 /
                           (2 * i1))
    U2 = 2 * pi * phi02 * (phi02 * numpy.exp(kappa * r2) * (kappa * r2) *
                           (kappa * r2) / numpy.sinh(kappa * r2) - pi * b0 /
                           (2 * i2))
    print('U1: {}'.format(U1))
    print('U2: {}'.format(U2))
    print('E: {}'.format(U1 + U2))
    # Restore dimensions and convert to kcal/mol.
    C1 = C0 * C0 * epsilon / kappa
    u1 = U1 * C1
    u2 = U2 * C1
    CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E_solv = CC0 * (u1 + u2)
    return E_solv
def constant_potential_twosphere_2(phi01, phi02, r1, r2, R, kappa, epsilon):
    """
    It computes the interaction energy of two spheres at constant potential,
    immersed in water, using the superposition (Derjaguin-type) formula.

    Bug fix vs. the original: ``log`` was called without being imported
    (only ``pi`` is imported from numpy), so the function raised
    ``NameError`` at runtime; it now uses ``numpy.log`` explicitly.
    The unused local ``kT`` was also removed.

    Arguments
    ----------
    phi01  : float, constant potential on the surface of the sphere 1.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E_solv : float, energy in kcal/mol.
    """
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule

    h = R - r1 - r2  # surface-to-surface separation

    E_solv = epsilon * r1 * r2 * (phi01**2 + phi02**2) / (4 * (r1 + r2)) * (
        (2 * phi01 * phi02) / (phi01**2 + phi02**2) * numpy.log(
            (1 + numpy.exp(-kappa * h)) /
            (1 - numpy.exp(-kappa * h))) + numpy.log(1 - numpy.exp(-2 * kappa * h)))

    # Unit conversion to kcal/mol.
    CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E_solv *= CC0

    return E_solv
def constant_potential_single_energy(phi0, r1, kappa, epsilon):
    """
    It computes the total energy of a single sphere at constant potential,
    immersed in water (only the monopole term is needed, hence N = 1).

    Arguments
    ----------
    phi0   : float, constant potential on the surface of the sphere.
    r1     : float, radius of sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E      : float, total energy.
    """
    N = 1  # Number of terms in expansion
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule
    # Half-integer orders; kv scaled by sqrt(pi/(2x)) gives the modified
    # spherical Bessel function k_n, and K1p its derivative recurrence.
    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]
    K1 = special.kv(index2, kappa * r1)
    K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
    k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
        index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
    # Monopole coefficient fixed by the surface-potential condition.
    a0_inf = phi0 / k1[0]
    U1_inf = a0_inf * k1p[0]
    # Unit conversion to kcal/mol.
    C1 = 2 * pi * kappa * phi0 * r1 * r1 * epsilon
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E = C0 * C1 * U1_inf
    return E
def constant_charge_single_energy(sigma0, r1, kappa, epsilon):
    """
    It computes the total energy of a single sphere at constant charge,
    immersed in water (only index 0 of the expansion is used below).

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    r1     : float, radius of sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E      : float, total energy.
    """
    N = 20  # Number of terms in expansion
    qe = 1.60217646e-19      # elementary charge [C]
    Na = 6.0221415e23        # Avogadro's number
    E_0 = 8.854187818e-12    # vacuum permittivity [F/m]
    cal2J = 4.184            # calories per joule
    # Half-integer orders; kv scaled by sqrt(pi/(2x)) gives the modified
    # spherical Bessel function k_n, and K1p its derivative recurrence.
    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]
    K1 = special.kv(index2, kappa * r1)
    K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
    k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
        index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
    # Monopole coefficient fixed by the surface-charge condition.
    a0_inf = -sigma0 / (epsilon * kappa * k1p[0])
    U1_inf = a0_inf * k1[0]
    # Unit conversion to kcal/mol.
    C1 = 2 * pi * sigma0 * r1 * r1
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E = C0 * C1 * U1_inf
    return E
def constant_potential_twosphere_dissimilar(phi01, phi02, r1, r2, R, kappa,
                                            epsilon):
    """
    It computes the interaction energy for dissimilar spheres at constant
    potential, immersed in water.
    Arguments
    ----------
    phi01  : float, constant potential on the surface of the sphere 1.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    N = 20  # number of terms kept in the multipole expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    def scaled_bessel(rad):
        # Scaled modified spherical Bessel functions of the second (k) and
        # first (i) kind, with their radial derivatives, at kappa*rad.
        x = kappa * rad
        K = special.kv(index2, x)
        Kp = index / x * K[0:-1] - K[1:]
        k = special.kv(index, x) * numpy.sqrt(pi / (2 * x))
        kp = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.kv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Kp
        I = special.iv(index2, x)
        Ip = index / x * I[0:-1] + I[1:]
        i = special.iv(index, x) * numpy.sqrt(pi / (2 * x))
        ip = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.iv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Ip
        return k, kp, i, ip

    k1, k1p, i1, i1p = scaled_bessel(r1)
    k2, k2p, i2, i2p = scaled_bessel(r2)

    # Re-expansion (coupling) coefficients between the two sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(min(n, m) + 1):
                Anm = gamma(n - nu + 0.5) * gamma(m - nu + 0.5) * gamma(
                    nu + 0.5) * factorial(n + m - nu) * (
                        n + m - 2 * nu + 0.5) / (
                            pi * gamma(m + n - nu + 1.5) * factorial(n - nu) *
                            factorial(m - nu) * factorial(nu))
                kB = special.kv(n + m - 2 * nu + 0.5,
                                kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
                B[n, m] += Anm * kB

    # Linear system enforcing the constant-potential boundary condition on
    # both surfaces simultaneously.
    M = numpy.zeros((2 * N, 2 * N), float)
    for j in range(N):
        for n in range(N):
            M[j, n + N] = (2 * j + 1) * B[j, n] * i1[j] / k2[n]
            M[j + N, n] = (2 * j + 1) * B[j, n] * i2[j] / k1[n]
        M[j, j] = 1
        M[j + N, j + N] = 1

    RHS = numpy.zeros(2 * N)
    RHS[0] = phi01
    RHS[N] = phi02
    coeff = linalg.solve(M, RHS)
    a = coeff[0:N] / k1
    b = coeff[N:2 * N] / k2

    # Normal derivative of the potential at each surface, with (_h) and
    # without (_inf) the influence of the other sphere.
    U1_inf = (phi01 / k1[0]) * k1p[0]
    U1_h = a[0] * k1p[0] + i1p[0] * numpy.sum(b * B[:, 0])
    U2_inf = (phi02 / k2[0]) * k2p[0]
    U2_h = b[0] * k2p[0] + i2p[0] * numpy.sum(a * B[:, 0])

    C1 = 2 * pi * kappa * phi01 * r1 * r1 * epsilon
    C2 = 2 * pi * kappa * phi02 * r2 * r2 * epsilon
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    E_inter = C0 * (C1 * (U1_h - U1_inf) + C2 * (U2_h - U2_inf))

    return E_inter
def constant_charge_twosphere_dissimilar(sigma01, sigma02, r1, r2, R, kappa,
                                         epsilon):
    """
    It computes the interaction energy between two dissimilar spheres at
    constant charge, immersed in water.
    Arguments
    ----------
    sigma01: float, constant charge on the surface of the sphere 1.
    sigma02: float, constant charge on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    N = 20  # number of terms kept in the multipole expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    def scaled_bessel(rad):
        # Scaled modified spherical Bessel functions of the second (k) and
        # first (i) kind, with their radial derivatives, at kappa*rad.
        x = kappa * rad
        K = special.kv(index2, x)
        Kp = index / x * K[0:-1] - K[1:]
        k = special.kv(index, x) * numpy.sqrt(pi / (2 * x))
        kp = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.kv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Kp
        I = special.iv(index2, x)
        Ip = index / x * I[0:-1] + I[1:]
        i = special.iv(index, x) * numpy.sqrt(pi / (2 * x))
        ip = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.iv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Ip
        return k, kp, i, ip

    k1, k1p, i1, i1p = scaled_bessel(r1)
    k2, k2p, i2, i2p = scaled_bessel(r2)

    # Re-expansion (coupling) coefficients between the two sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(min(n, m) + 1):
                Anm = gamma(n - nu + 0.5) * gamma(m - nu + 0.5) * gamma(
                    nu + 0.5) * factorial(n + m - nu) * (
                        n + m - 2 * nu + 0.5) / (
                            pi * gamma(m + n - nu + 1.5) * factorial(n - nu) *
                            factorial(m - nu) * factorial(nu))
                kB = special.kv(n + m - 2 * nu + 0.5,
                                kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
                B[n, m] += Anm * kB

    # Linear system enforcing the constant-charge boundary condition on
    # both surfaces simultaneously.
    M = numpy.zeros((2 * N, 2 * N), float)
    for j in range(N):
        for n in range(N):
            M[j, n + N] = (2 * j + 1) * B[j, n] * r1 * i1p[j] / (r2 * k2p[n])
            M[j + N, n] = (2 * j + 1) * B[j, n] * r2 * i2p[j] / (r1 * k1p[n])
        M[j, j] = 1
        M[j + N, j + N] = 1

    RHS = numpy.zeros(2 * N)
    RHS[0] = sigma01 * r1 / epsilon
    RHS[N] = sigma02 * r2 / epsilon
    coeff = linalg.solve(M, RHS)
    a = coeff[0:N] / (-r1 * kappa * k1p)
    b = coeff[N:2 * N] / (-r2 * kappa * k2p)

    # Surface potential of each sphere, with (_h) and without (_inf) the
    # influence of the other sphere.
    U1_inf = (-sigma01 / (epsilon * kappa * k1p[0])) * k1[0]
    U1_h = a[0] * k1[0] + i1[0] * numpy.sum(b * B[:, 0])
    U2_inf = (-sigma02 / (epsilon * kappa * k2p[0])) * k2[0]
    U2_h = b[0] * k2[0] + i2[0] * numpy.sum(a * B[:, 0])

    C1 = 2 * pi * sigma01 * r1 * r1
    C2 = 2 * pi * sigma02 * r2 * r2
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    E_inter = C0 * (C1 * (U1_h - U1_inf) + C2 * (U2_h - U2_inf))

    return E_inter
def molecule_constant_potential(q, phi02, r1, r2, R, kappa, E_1, E_2):
    """
    It computes the interaction energy between a molecule (sphere with
    point-charge in the center) and a sphere at constant potential, immersed
    in water.
    Arguments
    ----------
    q      : float, number of qe to be asigned to the charge.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1, i.e the molecule.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    E_1    : float, dielectric constant inside the sphere/molecule.
    E_2    : float, dielectric constant outside the sphere/molecule.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    N = 20  # number of terms kept in the multipole expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    def scaled_bessel(rad):
        # Scaled modified spherical Bessel functions of the second (k) and
        # first (i) kind, with their radial derivatives, at kappa*rad.
        x = kappa * rad
        K = special.kv(index2, x)
        Kp = index / x * K[0:-1] - K[1:]
        k = special.kv(index, x) * numpy.sqrt(pi / (2 * x))
        kp = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.kv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Kp
        I = special.iv(index2, x)
        Ip = index / x * I[0:-1] + I[1:]
        i = special.iv(index, x) * numpy.sqrt(pi / (2 * x))
        ip = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.iv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Ip
        return k, kp, i, ip

    k1, k1p, i1, i1p = scaled_bessel(r1)
    k2, k2p, i2, i2p = scaled_bessel(r2)

    # Re-expansion (coupling) coefficients between the two sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(min(n, m) + 1):
                Anm = gamma(n - nu + 0.5) * gamma(m - nu + 0.5) * gamma(
                    nu + 0.5) * factorial(n + m - nu) * (
                        n + m - 2 * nu + 0.5) / (
                            pi * gamma(m + n - nu + 1.5) * factorial(n - nu) *
                            factorial(m - nu) * factorial(nu))
                kB = special.kv(n + m - 2 * nu + 0.5,
                                kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
                B[n, m] += Anm * kB

    E_hat = E_1 / E_2  # dielectric contrast of the molecule

    # Linear system coupling the molecule (dielectric interface) with the
    # constant-potential sphere.
    M = numpy.zeros((2 * N, 2 * N), float)
    for j in range(N):
        for n in range(N):
            M[j, n + N] = (2 * j + 1) * B[j, n] * (
                kappa * i1p[j] / k2[n] - E_hat * j / r1 * i1[j] / k2[n])
            M[j + N, n] = (2 * j + 1) * B[j, n] * i2[j] * 1 / (
                kappa * k1p[n] - E_hat * n / r1 * k1[n])
        M[j, j] = 1
        M[j + N, j + N] = 1

    RHS = numpy.zeros(2 * N)
    RHS[0] = -E_hat * q / (4 * pi * E_1 * r1 * r1)
    RHS[N] = phi02
    coeff = linalg.solve(M, RHS)
    a = coeff[0:N] / (kappa * k1p - E_hat * numpy.arange(N) / r1 * k1)
    b = coeff[N:2 * N] / k2

    # Potential at the point charge, with (_h) and without (_inf) the
    # second sphere.
    a0_inf = -E_hat * q / (4 * pi * E_1 * r1 * r1) * 1 / (kappa * k1p[0])
    phi_inf = a0_inf * k1[0] - q / (4 * pi * E_1 * r1)
    phi_h = a[0] * k1[0] + i1[0] * numpy.sum(b * B[:, 0]) - q / (4 * pi * E_1 *
                                                                 r1)

    # Normal derivative of the potential on sphere 2, with/without sphere 1.
    U_inf = (phi02 / k2[0]) * k2p[0]
    U_h = b[0] * k2p[0] + i2p[0] * numpy.sum(a * B[:, 0])

    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    C1 = q * 0.5
    C2 = 2 * pi * kappa * phi02 * r2 * r2 * E_2
    E_inter = C0 * (C1 * (phi_h - phi_inf) + C2 * (U_h - U_inf))

    return E_inter
def molecule_constant_charge(q, sigma02, r1, r2, R, kappa, E_1, E_2):
    """
    It computes the interaction energy between a molecule (sphere with
    point-charge in the center) and a sphere at constant charge, immersed
    in water.
    Arguments
    ----------
    q      : float, number of qe to be asigned to the charge.
    sigma02: float, constant charge on the surface of the sphere 2.
    r1     : float, radius of sphere 1, i.e the molecule.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    E_1    : float, dielectric constant inside the sphere/molecule.
    E_2    : float, dielectric constant outside the sphere/molecule.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    N = 20  # number of terms kept in the multipole expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    def scaled_bessel(rad):
        # Scaled modified spherical Bessel functions of the second (k) and
        # first (i) kind, with their radial derivatives, at kappa*rad.
        x = kappa * rad
        K = special.kv(index2, x)
        Kp = index / x * K[0:-1] - K[1:]
        k = special.kv(index, x) * numpy.sqrt(pi / (2 * x))
        kp = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.kv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Kp
        I = special.iv(index2, x)
        Ip = index / x * I[0:-1] + I[1:]
        i = special.iv(index, x) * numpy.sqrt(pi / (2 * x))
        ip = -numpy.sqrt(pi / 2) * 1 / (2 * x**(3 / 2.)) * special.iv(
            index, x) + numpy.sqrt(pi / (2 * x)) * Ip
        return k, kp, i, ip

    k1, k1p, i1, i1p = scaled_bessel(r1)
    k2, k2p, i2, i2p = scaled_bessel(r2)

    # Re-expansion (coupling) coefficients between the two sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(min(n, m) + 1):
                Anm = gamma(n - nu + 0.5) * gamma(m - nu + 0.5) * gamma(
                    nu + 0.5) * factorial(n + m - nu) * (
                        n + m - 2 * nu + 0.5) / (
                            pi * gamma(m + n - nu + 1.5) * factorial(n - nu) *
                            factorial(m - nu) * factorial(nu))
                kB = special.kv(n + m - 2 * nu + 0.5,
                                kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
                B[n, m] += Anm * kB

    E_hat = E_1 / E_2  # dielectric contrast of the molecule

    # Linear system coupling the molecule (dielectric interface) with the
    # constant-charge sphere.
    M = numpy.zeros((2 * N, 2 * N), float)
    for j in range(N):
        for n in range(N):
            M[j, n + N] = (2 * j + 1) * B[j, n] * (
                i1p[j] / k2p[n] - E_hat * j / r1 * i1[j] / (kappa * k2p[n]))
            M[j + N, n] = (2 * j + 1) * B[j, n] * i2p[j] * kappa * 1 / (
                kappa * k1p[n] - E_hat * n / r1 * k1[n])
        M[j, j] = 1
        M[j + N, j + N] = 1

    RHS = numpy.zeros(2 * N)
    RHS[0] = -E_hat * q / (4 * pi * E_1 * r1 * r1)
    RHS[N] = -sigma02 / E_2
    coeff = linalg.solve(M, RHS)
    a = coeff[0:N] / (kappa * k1p - E_hat * numpy.arange(N) / r1 * k1)
    b = coeff[N:2 * N] / (kappa * k2p)

    # Potential at the point charge, with (_h) and without (_inf) the
    # second sphere.
    a0_inf = -E_hat * q / (4 * pi * E_1 * r1 * r1) * 1 / (kappa * k1p[0])
    phi_inf = a0_inf * k1[0] - q / (4 * pi * E_1 * r1)
    phi_h = a[0] * k1[0] + i1[0] * numpy.sum(b * B[:, 0]) - q / (4 * pi * E_1 *
                                                                 r1)

    # Surface potential on sphere 2, with/without the molecule.
    b0_inf = -sigma02 / (E_2 * kappa * k2p[0])
    U_inf = b0_inf * k2[0]
    U_h = b[0] * k2[0] + i2[0] * numpy.sum(a * B[:, 0])

    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    C1 = q * 0.5
    C2 = 2 * pi * sigma02 * r2 * r2
    E_inter = C0 * (C1 * (phi_h - phi_inf) + C2 * (U_h - U_inf))

    return E_inter
def constant_potential_twosphere_identical(phi01, phi02, r1, r2, R, kappa,
                                           epsilon):
    """
    It computes the interaction energy for two spheres at constants surface
    potential, according to Carnie&Chan-1993.
    Arguments
    ----------
    phi01  : float, constant potential on the surface of the sphere 1.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.
    Note:
    Even though it admits phi01 and phi02, they should be identical; and
    the same is applicable to r1 and r2.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    # Formulation from Carnie+Chan 1993.
    N = 20  # number of terms kept in the expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index = numpy.arange(N, dtype=float) + 0.5

    # Scaled modified spherical Bessel functions at each sphere surface
    # (k2/i2 kept for symmetry with the sphere-2 radius argument).
    k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
    i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))

    # Re-expansion (coupling) coefficients between the sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(min(n, m) + 1):
                Anm = gamma(n - nu + 0.5) * gamma(m - nu + 0.5) * gamma(
                    nu + 0.5) * factorial(n + m - nu) * (
                        n + m - 2 * nu + 0.5) / (
                            pi * gamma(m + n - nu + 1.5) * factorial(n - nu) *
                            factorial(m - nu) * factorial(nu))
                kB = special.kv(n + m - 2 * nu + 0.5,
                                kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
                B[n, m] += Anm * kB

    # Linear system for the expansion coefficients; identical spheres, so
    # a single coefficient set describes both.
    M = numpy.zeros((N, N), float)
    for row in range(N):
        for col in range(N):
            M[row, col] = (2 * row + 1) * B[row, col] * i1[row]
        M[row, row] += k1[row]
    RHS = numpy.zeros(N)
    RHS[0] = phi01
    a = linalg.solve(M, RHS)

    U = 4 * pi * (-pi / 2 * a[0] / phi01 * 1 / numpy.sinh(kappa * r1) +
                  kappa * r1 + kappa * r1 / numpy.tanh(kappa * r1))

    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    C1 = r1 * epsilon * phi01 * phi01
    E_inter = U * C1 * C0

    return E_inter
def constant_charge_twosphere_identical(sigma, a, R, kappa, epsilon):
    """
    It computes the interaction energy for two spheres at constants surface
    charge, according to Carnie&Chan-1993.
    Arguments
    ----------
    sigma  : float, constant charge on the surface of the spheres.
    a      : float, radius of spheres.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.
    Returns
    --------
    E_inter: float, interaction energy.
    """
    # From Carnie+Chan 1993
    N = 10  # Number of terms in expansion
    E_p = 0  # Permittivity inside sphere
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    # Scaled modified spherical Bessel functions of the second (k1) and
    # first (i1) kind, with their radial derivatives, at kappa*a.
    K1 = special.kv(index2, kappa * a)
    K1p = index / (kappa * a) * K1[0:-1] - K1[1:]
    k1 = special.kv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.kv(
        index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * K1p
    I1 = special.iv(index2, kappa * a)
    I1p = index / (kappa * a) * I1[0:-1] + I1[1:]
    i1 = special.iv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
    i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.iv(
        index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * I1p

    # Re-expansion (coupling) coefficients between the sphere centers.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n >= nu and m >= nu:
                    g1 = gamma(n - nu + 0.5)
                    g2 = gamma(m - nu + 0.5)
                    g3 = gamma(nu + 0.5)
                    g4 = gamma(m + n - nu + 1.5)
                    f1 = factorial(n + m - nu)
                    f2 = factorial(n - nu)
                    f3 = factorial(m - nu)
                    f4 = factorial(nu)
                    Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
                        pi * g4 * f2 * f3 * f4)
                    kB = special.kv(n + m - 2 * nu + 0.5, kappa *
                                    R) * numpy.sqrt(pi / (2 * kappa * R))
                    B[n, m] += Anm * kB

    # Linear system enforcing the constant-charge boundary condition.
    M = numpy.zeros((N, N), float)
    for i in range(N):
        for j in range(N):
            M[i, j] = (2 * i + 1) * B[i, j] * (
                E_p / epsilon * i * i1[i] - a * kappa * i1p[i])
            if i == j:
                M[i, j] += (E_p / epsilon * i * k1[i] - a * kappa * k1p[i])
    RHS = numpy.zeros(N)
    RHS[0] = a * sigma / epsilon
    a_coeff = linalg.solve(M, RHS)
    a0 = a_coeff[0]

    C0 = a * sigma / epsilon
    CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)  # unit conversion factor
    # BUG FIX: `CC0` (a float) was previously *called* as `CC0(...)`, which
    # raised TypeError on every invocation; the conversion factor must
    # multiply the bracketed Carnie&Chan expression.
    E_inter = 4 * pi * a * epsilon * C0 * C0 * CC0 * (
        pi * a0 / (2 * C0 * (kappa * a * numpy.cosh(kappa * a) -
                             numpy.sinh(kappa * a))) - 1 / (1 + kappa * a) -
        1 / (kappa * a * 1 / numpy.tanh(kappa * a) - 1))
    return E_inter
def Cext_analytical(radius, wavelength, diel_out, diel_in):
    """
    Calculates the analytical solution of the extinction cross section.
    This solution is valid when the nano particle involved is a sphere.
    Arguments
    ----------
    radius    : float, radius of the sphere in [nm].
    wavelength: float/array of floats, wavelength of the incident
                electric field in [nm].
    diel_out  : complex/array of complex, dielectric constant outside surface.
    diel_in   : complex/array of complex, dielectric constant inside surface.
    Returns
    --------
    Cext_an   : float/array of floats, extinction cross section.
    """
    k = 2 * numpy.pi * numpy.sqrt(diel_out) / wavelength  # wavenumber
    ratio = diel_in / diel_out
    # (ratio - 1) / (ratio + 2) is the Clausius-Mossotti polarizability factor
    C1 = k**2 * ((ratio - 1) / (ratio + 2))
    return 4 * numpy.pi * radius**3 / k.real * C1.imag
| [
"numpy.arccos",
"scipy.misc.factorial",
"numpy.sqrt",
"scipy.special.kv",
"numpy.sinh",
"numpy.arctan2",
"scipy.special.sph_harm",
"numpy.arange",
"math.gamma",
"numpy.tanh",
"numpy.exp",
"numpy.real",
"scipy.special.iv",
"numpy.abs",
"numpy.cos",
"scipy.linalg.solve",
"numpy.sum",
... | [((2216, 2228), 'scipy.misc.factorial', 'factorial', (['n'], {}), '(n)\n', (2225, 2228), False, 'from scipy.misc import factorial\n'), ((2243, 2259), 'scipy.misc.factorial', 'factorial', (['(2 * n)'], {}), '(2 * n)\n', (2252, 2259), False, 'from scipy.misc import factorial\n'), ((6216, 6245), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * a)'], {}), '(index2, kappa * a)\n', (6226, 6245), False, 'from scipy import special, linalg\n'), ((6525, 6554), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * a)'], {}), '(index2, kappa * a)\n', (6535, 6554), False, 'from scipy import special, linalg\n'), ((6832, 6864), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (6843, 6864), False, 'import numpy\n'), ((7650, 7676), 'numpy.zeros', 'numpy.zeros', (['(N, N)', 'float'], {}), '((N, N), float)\n', (7661, 7676), False, 'import numpy\n'), ((7953, 7967), 'numpy.zeros', 'numpy.zeros', (['N'], {}), '(N)\n', (7964, 7967), False, 'import numpy\n'), ((8032, 8052), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (8044, 8052), False, 'from scipy import special, linalg\n'), ((15426, 15456), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (15436, 15456), False, 'from scipy import special, linalg\n'), ((16617, 16647), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (16627, 16647), False, 'from scipy import special, linalg\n'), ((18066, 18096), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (18076, 18096), False, 'from scipy import special, linalg\n'), ((18381, 18411), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (18391, 18411), False, 'from scipy import special, linalg\n'), ((18696, 18726), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (18706, 18726), False, 'from scipy 
import special, linalg\n'), ((19011, 19041), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (19021, 19041), False, 'from scipy import special, linalg\n'), ((19325, 19357), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (19336, 19357), False, 'import numpy\n'), ((20143, 20177), 'numpy.zeros', 'numpy.zeros', (['(2 * N, 2 * N)', 'float'], {}), '((2 * N, 2 * N), float)\n', (20154, 20177), False, 'import numpy\n'), ((20454, 20472), 'numpy.zeros', 'numpy.zeros', (['(2 * N)'], {}), '(2 * N)\n', (20465, 20472), False, 'import numpy\n'), ((20524, 20544), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (20536, 20544), False, 'from scipy import special, linalg\n'), ((22031, 22061), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (22041, 22061), False, 'from scipy import special, linalg\n'), ((22346, 22376), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (22356, 22376), False, 'from scipy import special, linalg\n'), ((22661, 22691), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (22671, 22691), False, 'from scipy import special, linalg\n'), ((22976, 23006), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (22986, 23006), False, 'from scipy import special, linalg\n'), ((23290, 23322), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (23301, 23322), False, 'import numpy\n'), ((24108, 24142), 'numpy.zeros', 'numpy.zeros', (['(2 * N, 2 * N)', 'float'], {}), '((2 * N, 2 * N), float)\n', (24119, 24142), False, 'import numpy\n'), ((24447, 24465), 'numpy.zeros', 'numpy.zeros', (['(2 * N)'], {}), '(2 * N)\n', (24458, 24465), False, 'import numpy\n'), ((24551, 24571), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (24563, 
24571), False, 'from scipy import special, linalg\n'), ((26199, 26229), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (26209, 26229), False, 'from scipy import special, linalg\n'), ((26514, 26544), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (26524, 26544), False, 'from scipy import special, linalg\n'), ((26829, 26859), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (26839, 26859), False, 'from scipy import special, linalg\n'), ((27144, 27174), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (27154, 27174), False, 'from scipy import special, linalg\n'), ((27458, 27490), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (27469, 27490), False, 'import numpy\n'), ((28298, 28332), 'numpy.zeros', 'numpy.zeros', (['(2 * N, 2 * N)', 'float'], {}), '((2 * N, 2 * N), float)\n', (28309, 28332), False, 'import numpy\n'), ((28727, 28745), 'numpy.zeros', 'numpy.zeros', (['(2 * N)'], {}), '(2 * N)\n', (28738, 28745), False, 'import numpy\n'), ((28829, 28849), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (28841, 28849), False, 'from scipy import special, linalg\n'), ((30637, 30667), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (30647, 30667), False, 'from scipy import special, linalg\n'), ((30952, 30982), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (30962, 30982), False, 'from scipy import special, linalg\n'), ((31267, 31297), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r1)'], {}), '(index2, kappa * r1)\n', (31277, 31297), False, 'from scipy import special, linalg\n'), ((31582, 31612), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * r2)'], {}), '(index2, kappa * r2)\n', (31592, 31612), False, 'from scipy 
import special, linalg\n'), ((31896, 31928), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (31907, 31928), False, 'import numpy\n'), ((32736, 32770), 'numpy.zeros', 'numpy.zeros', (['(2 * N, 2 * N)', 'float'], {}), '((2 * N, 2 * N), float)\n', (32747, 32770), False, 'import numpy\n'), ((33178, 33196), 'numpy.zeros', 'numpy.zeros', (['(2 * N)'], {}), '(2 * N)\n', (33189, 33196), False, 'import numpy\n'), ((33289, 33309), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (33301, 33309), False, 'from scipy import special, linalg\n'), ((35475, 35507), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (35486, 35507), False, 'import numpy\n'), ((36293, 36319), 'numpy.zeros', 'numpy.zeros', (['(N, N)', 'float'], {}), '((N, N), float)\n', (36304, 36319), False, 'import numpy\n'), ((36489, 36503), 'numpy.zeros', 'numpy.zeros', (['N'], {}), '(N)\n', (36500, 36503), False, 'import numpy\n'), ((36532, 36552), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (36544, 36552), False, 'from scipy import special, linalg\n'), ((37689, 37718), 'scipy.special.kv', 'special.kv', (['index2', '(kappa * a)'], {}), '(index2, kappa * a)\n', (37699, 37718), False, 'from scipy import special, linalg\n'), ((37998, 38027), 'scipy.special.iv', 'special.iv', (['index2', '(kappa * a)'], {}), '(index2, kappa * a)\n', (38008, 38027), False, 'from scipy import special, linalg\n'), ((38305, 38337), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (38316, 38337), False, 'import numpy\n'), ((39123, 39149), 'numpy.zeros', 'numpy.zeros', (['(N, N)', 'float'], {}), '((N, N), float)\n', (39134, 39149), False, 'import numpy\n'), ((39422, 39436), 'numpy.zeros', 'numpy.zeros', (['N'], {}), '(N)\n', (39433, 39436), False, 'import numpy\n'), ((39485, 39505), 'scipy.linalg.solve', 'linalg.solve', (['M', 'RHS'], {}), '(M, RHS)\n', (39497, 
39505), False, 'from scipy import special, linalg\n'), ((1018, 1046), 'numpy.arccos', 'numpy.arccos', (['(xq[K, 2] / rho)'], {}), '(xq[K, 2] / rho)\n', (1030, 1046), False, 'import numpy\n'), ((1062, 1095), 'numpy.arctan2', 'numpy.arctan2', (['xq[K, 1]', 'xq[K, 0]'], {}), '(xq[K, 1], xq[K, 0])\n', (1075, 1095), False, 'import numpy\n'), ((3273, 3301), 'numpy.arccos', 'numpy.arccos', (['(xq[K, 2] / rho)'], {}), '(xq[K, 2] / rho)\n', (3285, 3301), False, 'import numpy\n'), ((3317, 3350), 'numpy.arctan2', 'numpy.arctan2', (['xq[K, 1]', 'xq[K, 0]'], {}), '(xq[K, 1], xq[K, 0])\n', (3330, 3350), False, 'import numpy\n'), ((5146, 5164), 'numpy.sum', 'numpy.sum', (['(q * PHI)'], {}), '(q * PHI)\n', (5155, 5164), False, 'import numpy\n'), ((6142, 6174), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (6154, 6174), False, 'import numpy\n'), ((6306, 6334), 'scipy.special.kv', 'special.kv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (6316, 6334), False, 'from scipy import special, linalg\n'), ((6337, 6369), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (6347, 6369), False, 'import numpy\n'), ((6614, 6642), 'scipy.special.iv', 'special.iv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (6624, 6642), False, 'from scipy import special, linalg\n'), ((6645, 6677), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (6655, 6677), False, 'import numpy\n'), ((9200, 9226), 'numpy.exp', 'numpy.exp', (['(kappa * (a - r))'], {}), '(kappa * (a - r))\n', (9209, 9226), False, 'import numpy\n'), ((12030, 12057), 'scipy.special.kv', 'special.kv', (['(0.5)', '(kappa * r1)'], {}), '(0.5, kappa * r1)\n', (12040, 12057), False, 'from scipy import special, linalg\n'), ((12060, 12093), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (12070, 12093), False, 'import numpy\n'), ((12103, 12130), 
'scipy.special.kv', 'special.kv', (['(0.5)', '(kappa * r2)'], {}), '(0.5, kappa * r2)\n', (12113, 12130), False, 'from scipy import special, linalg\n'), ((12133, 12166), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (12143, 12166), False, 'import numpy\n'), ((12177, 12203), 'scipy.special.kv', 'special.kv', (['(0.5)', '(kappa * R)'], {}), '(0.5, kappa * R)\n', (12187, 12203), False, 'from scipy import special, linalg\n'), ((12206, 12238), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (12216, 12238), False, 'import numpy\n'), ((12449, 12476), 'scipy.special.iv', 'special.iv', (['(0.5)', '(kappa * r1)'], {}), '(0.5, kappa * r1)\n', (12459, 12476), False, 'from scipy import special, linalg\n'), ((12479, 12512), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (12489, 12512), False, 'import numpy\n'), ((12522, 12549), 'scipy.special.iv', 'special.iv', (['(0.5)', '(kappa * r2)'], {}), '(0.5, kappa * r2)\n', (12532, 12549), False, 'from scipy import special, linalg\n'), ((12552, 12585), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (12562, 12585), False, 'import numpy\n'), ((15352, 15384), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (15364, 15384), False, 'import numpy\n'), ((15517, 15546), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (15527, 15546), False, 'from scipy import special, linalg\n'), ((15549, 15582), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (15559, 15582), False, 'import numpy\n'), ((16543, 16575), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (16555, 16575), False, 'import numpy\n'), ((16708, 16737), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', 
(16718, 16737), False, 'from scipy import special, linalg\n'), ((16740, 16773), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (16750, 16773), False, 'import numpy\n'), ((17992, 18024), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (18004, 18024), False, 'import numpy\n'), ((18157, 18186), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (18167, 18186), False, 'from scipy import special, linalg\n'), ((18189, 18222), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (18199, 18222), False, 'import numpy\n'), ((18472, 18501), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (18482, 18501), False, 'from scipy import special, linalg\n'), ((18504, 18537), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (18514, 18537), False, 'import numpy\n'), ((18787, 18816), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (18797, 18816), False, 'from scipy import special, linalg\n'), ((18819, 18852), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (18829, 18852), False, 'import numpy\n'), ((19102, 19131), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (19112, 19131), False, 'from scipy import special, linalg\n'), ((19134, 19167), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (19144, 19167), False, 'import numpy\n'), ((21957, 21989), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (21969, 21989), False, 'import numpy\n'), ((22122, 22151), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (22132, 22151), False, 'from scipy import special, linalg\n'), ((22154, 22187), 
'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (22164, 22187), False, 'import numpy\n'), ((22437, 22466), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (22447, 22466), False, 'from scipy import special, linalg\n'), ((22469, 22502), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (22479, 22502), False, 'import numpy\n'), ((22752, 22781), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (22762, 22781), False, 'from scipy import special, linalg\n'), ((22784, 22817), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (22794, 22817), False, 'import numpy\n'), ((23067, 23096), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (23077, 23096), False, 'from scipy import special, linalg\n'), ((23099, 23132), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (23109, 23132), False, 'import numpy\n'), ((26125, 26157), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (26137, 26157), False, 'import numpy\n'), ((26290, 26319), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (26300, 26319), False, 'from scipy import special, linalg\n'), ((26322, 26355), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (26332, 26355), False, 'import numpy\n'), ((26605, 26634), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (26615, 26634), False, 'from scipy import special, linalg\n'), ((26637, 26670), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (26647, 26670), False, 'import numpy\n'), ((26920, 26949), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa 
* r1)\n', (26930, 26949), False, 'from scipy import special, linalg\n'), ((26952, 26985), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (26962, 26985), False, 'import numpy\n'), ((27235, 27264), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (27245, 27264), False, 'from scipy import special, linalg\n'), ((27267, 27300), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (27277, 27300), False, 'import numpy\n'), ((30563, 30595), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (30575, 30595), False, 'import numpy\n'), ((30728, 30757), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (30738, 30757), False, 'from scipy import special, linalg\n'), ((30760, 30793), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (30770, 30793), False, 'import numpy\n'), ((31043, 31072), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (31053, 31072), False, 'from scipy import special, linalg\n'), ((31075, 31108), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (31085, 31108), False, 'import numpy\n'), ((31358, 31387), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (31368, 31387), False, 'from scipy import special, linalg\n'), ((31390, 31423), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (31400, 31423), False, 'import numpy\n'), ((31673, 31702), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (31683, 31702), False, 'from scipy import special, linalg\n'), ((31705, 31738), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (31715, 31738), False, 'import numpy\n'), ((35129, 
35157), 'numpy.arange', 'numpy.arange', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (35141, 35157), False, 'import numpy\n'), ((35174, 35203), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (35184, 35203), False, 'from scipy import special, linalg\n'), ((35206, 35239), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (35216, 35239), False, 'import numpy\n'), ((35249, 35278), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (35259, 35278), False, 'from scipy import special, linalg\n'), ((35281, 35314), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (35291, 35314), False, 'import numpy\n'), ((35325, 35354), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (35335, 35354), False, 'from scipy import special, linalg\n'), ((35357, 35390), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (35367, 35390), False, 'import numpy\n'), ((35400, 35429), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (35410, 35429), False, 'from scipy import special, linalg\n'), ((35432, 35465), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (35442, 35465), False, 'import numpy\n'), ((37615, 37647), 'numpy.arange', 'numpy.arange', (['(N + 1)'], {'dtype': 'float'}), '(N + 1, dtype=float)\n', (37627, 37647), False, 'import numpy\n'), ((37779, 37807), 'scipy.special.kv', 'special.kv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (37789, 37807), False, 'from scipy import special, linalg\n'), ((37810, 37842), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (37820, 37842), False, 'import numpy\n'), ((38087, 38115), 'scipy.special.iv', 'special.iv', (['index', '(kappa * a)'], {}), '(index, kappa * 
a)\n', (38097, 38115), False, 'from scipy import special, linalg\n'), ((38118, 38150), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (38128, 38150), False, 'import numpy\n'), ((981, 1002), 'numpy.sum', 'numpy.sum', (['(xq[K] ** 2)'], {}), '(xq[K] ** 2)\n', (990, 1002), False, 'import numpy\n'), ((1824, 1839), 'numpy.real', 'numpy.real', (['phi'], {}), '(phi)\n', (1834, 1839), False, 'import numpy\n'), ((3236, 3257), 'numpy.sum', 'numpy.sum', (['(xq[K] ** 2)'], {}), '(xq[K] ** 2)\n', (3245, 3257), False, 'import numpy\n'), ((5053, 5068), 'numpy.real', 'numpy.real', (['phi'], {}), '(phi)\n', (5063, 5068), False, 'import numpy\n'), ((6436, 6464), 'scipy.special.kv', 'special.kv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (6446, 6464), False, 'from scipy import special, linalg\n'), ((6476, 6508), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (6486, 6508), False, 'import numpy\n'), ((6744, 6772), 'scipy.special.iv', 'special.iv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (6754, 6772), False, 'from scipy import special, linalg\n'), ((6784, 6816), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (6794, 6816), False, 'import numpy\n'), ((9965, 9991), 'numpy.exp', 'numpy.exp', (['(kappa * (a - r))'], {}), '(kappa * (a - r))\n', (9974, 9991), False, 'import numpy\n'), ((15650, 15679), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (15660, 15679), False, 'from scipy import special, linalg\n'), ((15691, 15724), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (15701, 15724), False, 'import numpy\n'), ((16841, 16870), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (16851, 16870), False, 'from scipy import special, linalg\n'), ((16882, 16915), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * 
r1))'], {}), '(pi / (2 * kappa * r1))\n', (16892, 16915), False, 'import numpy\n'), ((18290, 18319), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (18300, 18319), False, 'from scipy import special, linalg\n'), ((18331, 18364), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (18341, 18364), False, 'import numpy\n'), ((18605, 18634), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (18615, 18634), False, 'from scipy import special, linalg\n'), ((18646, 18679), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (18656, 18679), False, 'import numpy\n'), ((18920, 18949), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (18930, 18949), False, 'from scipy import special, linalg\n'), ((18961, 18994), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (18971, 18994), False, 'import numpy\n'), ((19235, 19264), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (19245, 19264), False, 'from scipy import special, linalg\n'), ((19276, 19309), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (19286, 19309), False, 'import numpy\n'), ((20745, 20767), 'numpy.sum', 'numpy.sum', (['(b * B[:, 0])'], {}), '(b * B[:, 0])\n', (20754, 20767), False, 'import numpy\n'), ((20832, 20854), 'numpy.sum', 'numpy.sum', (['(a * B[:, 0])'], {}), '(a * B[:, 0])\n', (20841, 20854), False, 'import numpy\n'), ((22255, 22284), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (22265, 22284), False, 'from scipy import special, linalg\n'), ((22296, 22329), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (22306, 22329), False, 'import numpy\n'), ((22570, 22599), 'scipy.special.kv', 
'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (22580, 22599), False, 'from scipy import special, linalg\n'), ((22611, 22644), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (22621, 22644), False, 'import numpy\n'), ((22885, 22914), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (22895, 22914), False, 'from scipy import special, linalg\n'), ((22926, 22959), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (22936, 22959), False, 'import numpy\n'), ((23200, 23229), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (23210, 23229), False, 'from scipy import special, linalg\n'), ((23241, 23274), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (23251, 23274), False, 'import numpy\n'), ((24851, 24873), 'numpy.sum', 'numpy.sum', (['(b * B[:, 0])'], {}), '(b * B[:, 0])\n', (24860, 24873), False, 'import numpy\n'), ((24935, 24957), 'numpy.sum', 'numpy.sum', (['(a * B[:, 0])'], {}), '(a * B[:, 0])\n', (24944, 24957), False, 'import numpy\n'), ((26423, 26452), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (26433, 26452), False, 'from scipy import special, linalg\n'), ((26464, 26497), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (26474, 26497), False, 'import numpy\n'), ((26738, 26767), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (26748, 26767), False, 'from scipy import special, linalg\n'), ((26779, 26812), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (26789, 26812), False, 'import numpy\n'), ((27053, 27082), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (27063, 27082), False, 'from scipy import special, 
linalg\n'), ((27094, 27127), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (27104, 27127), False, 'import numpy\n'), ((27368, 27397), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (27378, 27397), False, 'from scipy import special, linalg\n'), ((27409, 27442), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (27419, 27442), False, 'import numpy\n'), ((29375, 29397), 'numpy.sum', 'numpy.sum', (['(a * B[:, 0])'], {}), '(a * B[:, 0])\n', (29384, 29397), False, 'import numpy\n'), ((30861, 30890), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (30871, 30890), False, 'from scipy import special, linalg\n'), ((30902, 30935), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (30912, 30935), False, 'import numpy\n'), ((31176, 31205), 'scipy.special.kv', 'special.kv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (31186, 31205), False, 'from scipy import special, linalg\n'), ((31217, 31250), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (31227, 31250), False, 'import numpy\n'), ((31491, 31520), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r1)'], {}), '(index, kappa * r1)\n', (31501, 31520), False, 'from scipy import special, linalg\n'), ((31532, 31565), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r1))'], {}), '(pi / (2 * kappa * r1))\n', (31542, 31565), False, 'import numpy\n'), ((31806, 31835), 'scipy.special.iv', 'special.iv', (['index', '(kappa * r2)'], {}), '(index, kappa * r2)\n', (31816, 31835), False, 'from scipy import special, linalg\n'), ((31847, 31880), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * r2))'], {}), '(pi / (2 * kappa * r2))\n', (31857, 31880), False, 'import numpy\n'), ((33863, 33885), 'numpy.sum', 'numpy.sum', (['(a * B[:, 0])'], {}), '(a * B[:, 0])\n', 
(33872, 33885), False, 'import numpy\n'), ((37909, 37937), 'scipy.special.kv', 'special.kv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (37919, 37937), False, 'from scipy import special, linalg\n'), ((37949, 37981), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (37959, 37981), False, 'import numpy\n'), ((38217, 38245), 'scipy.special.iv', 'special.iv', (['index', '(kappa * a)'], {}), '(index, kappa * a)\n', (38227, 38245), False, 'from scipy import special, linalg\n'), ((38257, 38289), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * a))'], {}), '(pi / (2 * kappa * a))\n', (38267, 38289), False, 'import numpy\n'), ((40556, 40576), 'numpy.sqrt', 'numpy.sqrt', (['diel_out'], {}), '(diel_out)\n', (40566, 40576), False, 'import numpy\n'), ((1213, 1248), 'scipy.special.sph_harm', 'special.sph_harm', (['m', 'n', 'zenit', 'azim'], {}), '(m, n, zenit, azim)\n', (1229, 1248), False, 'from scipy import special, linalg\n'), ((8180, 8208), 'numpy.sum', 'numpy.sum', (['(a_coeff * B[:, 0])'], {}), '(a_coeff * B[:, 0])\n', (8189, 8208), False, 'import numpy\n'), ((12854, 12876), 'numpy.sinh', 'numpy.sinh', (['(kappa * r1)'], {}), '(kappa * r1)\n', (12864, 12876), False, 'import numpy\n'), ((13042, 13064), 'numpy.sinh', 'numpy.sinh', (['(kappa * r2)'], {}), '(kappa * r2)\n', (13052, 13064), False, 'import numpy\n'), ((29169, 29191), 'numpy.sum', 'numpy.sum', (['(b * B[:, 0])'], {}), '(b * B[:, 0])\n', (29178, 29191), False, 'import numpy\n'), ((33660, 33682), 'numpy.sum', 'numpy.sum', (['(b * B[:, 0])'], {}), '(b * B[:, 0])\n', (33669, 33682), False, 'import numpy\n'), ((36683, 36705), 'numpy.tanh', 'numpy.tanh', (['(kappa * r1)'], {}), '(kappa * r1)\n', (36693, 36705), False, 'import numpy\n'), ((1560, 1590), 'numpy.arccos', 'numpy.arccos', (['(xq[k, 2] / rho_k)'], {}), '(xq[k, 2] / rho_k)\n', (1572, 1590), False, 'import numpy\n'), ((1620, 1653), 'numpy.arctan2', 'numpy.arctan2', (['xq[k, 1]', 'xq[k, 0]'], {}), '(xq[k, 
1], xq[k, 0])\n', (1633, 1653), False, 'import numpy\n'), ((2316, 2336), 'scipy.misc.factorial', 'factorial', (['(2 * n - s)'], {}), '(2 * n - s)\n', (2325, 2336), False, 'from scipy.misc import factorial\n'), ((2418, 2434), 'scipy.misc.factorial', 'factorial', (['(n - s)'], {}), '(n - s)\n', (2427, 2434), False, 'from scipy.misc import factorial\n'), ((3479, 3491), 'numpy.abs', 'numpy.abs', (['m'], {}), '(m)\n', (3488, 3491), False, 'import numpy\n'), ((3496, 3512), 'numpy.cos', 'numpy.cos', (['zenit'], {}), '(zenit)\n', (3505, 3512), False, 'import numpy\n'), ((3670, 3700), 'numpy.arccos', 'numpy.arccos', (['(xq[k, 2] / rho_k)'], {}), '(xq[k, 2] / rho_k)\n', (3682, 3700), False, 'import numpy\n'), ((3730, 3763), 'numpy.arctan2', 'numpy.arctan2', (['xq[k, 1]', 'xq[k, 0]'], {}), '(xq[k, 1], xq[k, 0])\n', (3743, 3763), False, 'import numpy\n'), ((5010, 5036), 'numpy.exp', 'numpy.exp', (['(1.0j * m * azim)'], {}), '(1.0j * m * azim)\n', (5019, 5036), False, 'import numpy\n'), ((7013, 7032), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (7018, 7032), False, 'from math import gamma\n'), ((7058, 7077), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (7063, 7077), False, 'from math import gamma\n'), ((7103, 7118), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (7108, 7118), False, 'from math import gamma\n'), ((7144, 7167), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (7149, 7167), False, 'from math import gamma\n'), ((7193, 7214), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (7202, 7214), False, 'from scipy.misc import factorial\n'), ((7240, 7257), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (7249, 7257), False, 'from scipy.misc import factorial\n'), ((7283, 7300), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (7292, 7300), False, 'from scipy.misc import factorial\n'), ((7326, 7339), 
'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (7335, 7339), False, 'from scipy.misc import factorial\n'), ((14615, 14640), 'numpy.exp', 'numpy.exp', (['(-2 * kappa * h)'], {}), '(-2 * kappa * h)\n', (14624, 14640), False, 'import numpy\n'), ((19506, 19525), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (19511, 19525), False, 'from math import gamma\n'), ((19551, 19570), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (19556, 19570), False, 'from math import gamma\n'), ((19596, 19611), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (19601, 19611), False, 'from math import gamma\n'), ((19637, 19660), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (19642, 19660), False, 'from math import gamma\n'), ((19686, 19707), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (19695, 19707), False, 'from scipy.misc import factorial\n'), ((19733, 19750), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (19742, 19750), False, 'from scipy.misc import factorial\n'), ((19776, 19793), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (19785, 19793), False, 'from scipy.misc import factorial\n'), ((19819, 19832), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (19828, 19832), False, 'from scipy.misc import factorial\n'), ((23471, 23490), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (23476, 23490), False, 'from math import gamma\n'), ((23516, 23535), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (23521, 23535), False, 'from math import gamma\n'), ((23561, 23576), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (23566, 23576), False, 'from math import gamma\n'), ((23602, 23625), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (23607, 23625), False, 'from math import gamma\n'), ((23651, 23672), 'scipy.misc.factorial', 
'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (23660, 23672), False, 'from scipy.misc import factorial\n'), ((23698, 23715), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (23707, 23715), False, 'from scipy.misc import factorial\n'), ((23741, 23758), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (23750, 23758), False, 'from scipy.misc import factorial\n'), ((23784, 23797), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (23793, 23797), False, 'from scipy.misc import factorial\n'), ((27639, 27658), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (27644, 27658), False, 'from math import gamma\n'), ((27684, 27703), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (27689, 27703), False, 'from math import gamma\n'), ((27729, 27744), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (27734, 27744), False, 'from math import gamma\n'), ((27770, 27793), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (27775, 27793), False, 'from math import gamma\n'), ((27819, 27840), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (27828, 27840), False, 'from scipy.misc import factorial\n'), ((27866, 27883), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (27875, 27883), False, 'from scipy.misc import factorial\n'), ((27909, 27926), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (27918, 27926), False, 'from scipy.misc import factorial\n'), ((27952, 27965), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (27961, 27965), False, 'from scipy.misc import factorial\n'), ((32077, 32096), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (32082, 32096), False, 'from math import gamma\n'), ((32122, 32141), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (32127, 32141), False, 'from math import gamma\n'), ((32167, 32182), 
'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (32172, 32182), False, 'from math import gamma\n'), ((32208, 32231), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (32213, 32231), False, 'from math import gamma\n'), ((32257, 32278), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (32266, 32278), False, 'from scipy.misc import factorial\n'), ((32304, 32321), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (32313, 32321), False, 'from scipy.misc import factorial\n'), ((32347, 32364), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (32356, 32364), False, 'from scipy.misc import factorial\n'), ((32390, 32403), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (32399, 32403), False, 'from scipy.misc import factorial\n'), ((35656, 35675), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (35661, 35675), False, 'from math import gamma\n'), ((35701, 35720), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (35706, 35720), False, 'from math import gamma\n'), ((35746, 35761), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (35751, 35761), False, 'from math import gamma\n'), ((35787, 35810), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (35792, 35810), False, 'from math import gamma\n'), ((35836, 35857), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (35845, 35857), False, 'from scipy.misc import factorial\n'), ((35883, 35900), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (35892, 35900), False, 'from scipy.misc import factorial\n'), ((35926, 35943), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (35935, 35943), False, 'from scipy.misc import factorial\n'), ((35969, 35982), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (35978, 35982), False, 'from scipy.misc import 
factorial\n'), ((36614, 36636), 'numpy.sinh', 'numpy.sinh', (['(kappa * r1)'], {}), '(kappa * r1)\n', (36624, 36636), False, 'import numpy\n'), ((38486, 38505), 'math.gamma', 'gamma', (['(n - nu + 0.5)'], {}), '(n - nu + 0.5)\n', (38491, 38505), False, 'from math import gamma\n'), ((38531, 38550), 'math.gamma', 'gamma', (['(m - nu + 0.5)'], {}), '(m - nu + 0.5)\n', (38536, 38550), False, 'from math import gamma\n'), ((38576, 38591), 'math.gamma', 'gamma', (['(nu + 0.5)'], {}), '(nu + 0.5)\n', (38581, 38591), False, 'from math import gamma\n'), ((38617, 38640), 'math.gamma', 'gamma', (['(m + n - nu + 1.5)'], {}), '(m + n - nu + 1.5)\n', (38622, 38640), False, 'from math import gamma\n'), ((38666, 38687), 'scipy.misc.factorial', 'factorial', (['(n + m - nu)'], {}), '(n + m - nu)\n', (38675, 38687), False, 'from scipy.misc import factorial\n'), ((38713, 38730), 'scipy.misc.factorial', 'factorial', (['(n - nu)'], {}), '(n - nu)\n', (38722, 38730), False, 'from scipy.misc import factorial\n'), ((38756, 38773), 'scipy.misc.factorial', 'factorial', (['(m - nu)'], {}), '(m - nu)\n', (38765, 38773), False, 'from scipy.misc import factorial\n'), ((38799, 38812), 'scipy.misc.factorial', 'factorial', (['nu'], {}), '(nu)\n', (38808, 38812), False, 'from scipy.misc import factorial\n'), ((1509, 1530), 'numpy.sum', 'numpy.sum', (['(xq[k] ** 2)'], {}), '(xq[k] ** 2)\n', (1518, 1530), False, 'import numpy\n'), ((1692, 1731), 'scipy.special.sph_harm', 'special.sph_harm', (['m', 'n', 'zenit_k', 'azim_k'], {}), '(m, n, zenit_k, azim_k)\n', (1708, 1731), False, 'from scipy import special, linalg\n'), ((2340, 2352), 'scipy.misc.factorial', 'factorial', (['s'], {}), '(s)\n', (2349, 2352), False, 'from scipy.misc import factorial\n'), ((3619, 3640), 'numpy.sum', 'numpy.sum', (['(xq[k] ** 2)'], {}), '(xq[k] ** 2)\n', (3628, 3640), False, 'import numpy\n'), ((3802, 3814), 'numpy.abs', 'numpy.abs', (['m'], {}), '(m)\n', (3811, 3814), False, 'import numpy\n'), ((3819, 3837), 'numpy.cos', 
'numpy.cos', (['zenit_k'], {}), '(zenit_k)\n', (3828, 3837), False, 'import numpy\n'), ((3975, 4004), 'numpy.exp', 'numpy.exp', (['(-1.0j * m * azim_k)'], {}), '(-1.0j * m * azim_k)\n', (3984, 4004), False, 'import numpy\n'), ((6381, 6399), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (6391, 6399), False, 'import numpy\n'), ((6689, 6707), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (6699, 6707), False, 'import numpy\n'), ((7486, 7529), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (7496, 7529), False, 'from scipy import special, linalg\n'), ((7568, 7600), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (7578, 7600), False, 'import numpy\n'), ((15594, 15612), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (15604, 15612), False, 'import numpy\n'), ((16785, 16803), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (16795, 16803), False, 'import numpy\n'), ((18234, 18252), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (18244, 18252), False, 'import numpy\n'), ((18549, 18567), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (18559, 18567), False, 'import numpy\n'), ((18864, 18882), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (18874, 18882), False, 'import numpy\n'), ((19179, 19197), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (19189, 19197), False, 'import numpy\n'), ((19979, 20022), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (19989, 20022), False, 'from scipy import special, linalg\n'), ((20061, 20093), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (20071, 20093), False, 'import numpy\n'), ((22199, 22217), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (22209, 22217), False, 'import 
numpy\n'), ((22514, 22532), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (22524, 22532), False, 'import numpy\n'), ((22829, 22847), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (22839, 22847), False, 'import numpy\n'), ((23144, 23162), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (23154, 23162), False, 'import numpy\n'), ((23944, 23987), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (23954, 23987), False, 'from scipy import special, linalg\n'), ((24026, 24058), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (24036, 24058), False, 'import numpy\n'), ((26367, 26385), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (26377, 26385), False, 'import numpy\n'), ((26682, 26700), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (26692, 26700), False, 'import numpy\n'), ((26997, 27015), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (27007, 27015), False, 'import numpy\n'), ((27312, 27330), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (27322, 27330), False, 'import numpy\n'), ((28112, 28155), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (28122, 28155), False, 'from scipy import special, linalg\n'), ((28194, 28226), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (28204, 28226), False, 'import numpy\n'), ((28895, 28910), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (28907, 28910), False, 'import numpy\n'), ((30805, 30823), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (30815, 30823), False, 'import numpy\n'), ((31120, 31138), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (31130, 31138), False, 'import numpy\n'), ((31435, 31453), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', 
(31445, 31453), False, 'import numpy\n'), ((31750, 31768), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (31760, 31768), False, 'import numpy\n'), ((32550, 32593), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (32560, 32593), False, 'from scipy import special, linalg\n'), ((32632, 32664), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (32642, 32664), False, 'import numpy\n'), ((33355, 33370), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (33367, 33370), False, 'import numpy\n'), ((36129, 36172), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (36139, 36172), False, 'from scipy import special, linalg\n'), ((36211, 36243), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (36221, 36243), False, 'import numpy\n'), ((37854, 37872), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (37864, 37872), False, 'import numpy\n'), ((38162, 38180), 'numpy.sqrt', 'numpy.sqrt', (['(pi / 2)'], {}), '(pi / 2)\n', (38172, 38180), False, 'import numpy\n'), ((38959, 39002), 'scipy.special.kv', 'special.kv', (['(n + m - 2 * nu + 0.5)', '(kappa * R)'], {}), '(n + m - 2 * nu + 0.5, kappa * R)\n', (38969, 39002), False, 'from scipy import special, linalg\n'), ((39041, 39073), 'numpy.sqrt', 'numpy.sqrt', (['(pi / (2 * kappa * R))'], {}), '(pi / (2 * kappa * R))\n', (39051, 39073), False, 'import numpy\n'), ((12773, 12794), 'numpy.exp', 'numpy.exp', (['(kappa * r1)'], {}), '(kappa * r1)\n', (12782, 12794), False, 'import numpy\n'), ((12961, 12982), 'numpy.exp', 'numpy.exp', (['(kappa * r2)'], {}), '(kappa * r2)\n', (12970, 12982), False, 'import numpy\n'), ((14539, 14560), 'numpy.exp', 'numpy.exp', (['(-kappa * h)'], {}), '(-kappa * h)\n', (14548, 14560), False, 'import numpy\n'), ((14581, 14602), 'numpy.exp', 'numpy.exp', 
(['(-kappa * h)'], {}), '(-kappa * h)\n', (14590, 14602), False, 'import numpy\n'), ((39808, 39829), 'numpy.tanh', 'numpy.tanh', (['(kappa * a)'], {}), '(kappa * a)\n', (39818, 39829), False, 'import numpy\n'), ((39726, 39747), 'numpy.sinh', 'numpy.sinh', (['(kappa * a)'], {}), '(kappa * a)\n', (39736, 39747), False, 'import numpy\n'), ((39702, 39723), 'numpy.cosh', 'numpy.cosh', (['(kappa * a)'], {}), '(kappa * a)\n', (39712, 39723), False, 'import numpy\n'), ((3954, 3966), 'numpy.abs', 'numpy.abs', (['m'], {}), '(m)\n', (3963, 3966), False, 'import numpy\n'), ((3899, 3911), 'numpy.abs', 'numpy.abs', (['m'], {}), '(m)\n', (3908, 3911), False, 'import numpy\n')] |
"""
Filename: ifp.py
Authors: <NAME>, <NAME>
Tools for solving the standard optimal savings / income fluctuation
problem for an infinitely lived consumer facing an exogenous income
process that evolves according to a Markov chain.
References
----------
http://quant-econ.net/ifp.html
"""
import numpy as np
from scipy.optimize import fminbound, brentq
from scipy import interp
class ConsumerProblem:
    """
    A class for solving the income fluctuation problem. Iteration with
    either the Coleman or Bellman operators from appropriate initial
    conditions leads to convergence to the optimal consumption policy.
    The income process is a finite state Markov chain. Note that the
    Coleman operator is the preferred method, as it is almost always
    faster and more accurate. The Bellman operator is only provided for
    comparison.

    Parameters
    ----------
    r : scalar(float), optional(default=0.01)
        A strictly positive scalar giving the interest rate
    beta : scalar(float), optional(default=0.96)
        The discount factor, must satisfy (1 + r) * beta < 1
    Pi : array_like(float), optional(default=((0.60, 0.40), (0.05, 0.95)))
        A 2D NumPy array giving the Markov matrix for {z_t}
    z_vals : array_like(float), optional(default=(0.5, 1.0))
        The state space of {z_t}
    b : scalar(float), optional(default=0)
        The borrowing constraint
    grid_max : scalar(float), optional(default=16)
        Max of the grid used to solve the problem
    grid_size : scalar(int), optional(default=50)
        Number of grid points to solve problem, a grid on [-b, grid_max]
    u : callable, optional(default=np.log)
        The utility function
    du : callable, optional(default=lambda x: 1/x)
        The derivative of u

    Attributes
    ----------
    r : scalar(float)
        A strictly positive scalar giving the interest rate
    beta : scalar(float)
        The discount factor, must satisfy (1 + r) * beta < 1
    Pi : array_like(float)
        A 2D NumPy array giving the Markov matrix for {z_t}
    z_vals : array_like(float)
        The state space of {z_t}
    b : scalar(float)
        The borrowing constraint
    u : callable
        The utility function
    du : callable
        The derivative of u
    """

    def __init__(self, r=0.01, beta=0.96, Pi=((0.6, 0.4), (0.05, 0.95)),
                 z_vals=(0.5, 1.0), b=0, grid_max=16, grid_size=50,
                 u=np.log, du=lambda x: 1/x):
        self.u, self.du = u, du
        self.r, self.R = r, 1 + r
        self.beta, self.b = beta, b
        self.Pi, self.z_vals = np.array(Pi), tuple(z_vals)
        self.asset_grid = np.linspace(-b, grid_max, grid_size)

    def bellman_operator(self, V, return_policy=False):
        """
        The approximate Bellman operator, which computes and returns the
        updated value function TV (or the V-greedy policy c if
        return_policy is True).

        Parameters
        ----------
        V : array_like(float)
            A NumPy array of dim len(cp.asset_grid) x len(cp.z_vals)
        return_policy : bool, optional(default=False)
            Indicates whether to return the greedy policy given V or the
            updated value function TV. Default is TV.

        Returns
        -------
        array_like(float)
            Returns either the greedy policy given V or the updated value
            function TV.
        """
        # === Simplify names, set up arrays === #
        R, Pi, beta, u, b = self.R, self.Pi, self.beta, self.u, self.b
        asset_grid, z_vals = self.asset_grid, self.z_vals
        new_V = np.empty(V.shape)
        new_c = np.empty(V.shape)
        z_idx = list(range(len(z_vals)))

        # === Linear interpolation of V along the asset grid === #
        # Use np.interp directly: `scipy.interp` was merely an alias of
        # np.interp and has been removed from modern SciPy releases.
        vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])

        # === Solve r.h.s. of Bellman equation === #
        for i_a, a in enumerate(asset_grid):
            for i_z, z in enumerate(z_vals):
                def obj(c):  # objective function to be *minimized*
                    y = sum(vf(R * a + z - c, j) * Pi[i_z, j] for j in z_idx)
                    return - u(c) - beta * y
                c_star = fminbound(obj, np.min(z_vals), R * a + z + b)
                new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

        if return_policy:
            return new_c
        else:
            return new_V

    def coleman_operator(self, c):
        """
        The approximate Coleman operator.

        Iteration with this operator corresponds to policy function
        iteration. Computes and returns the updated consumption policy
        c. The array c is replaced with a function cf that implements
        univariate linear interpolation over the asset grid for each
        possible value of z.

        Parameters
        ----------
        c : array_like(float)
            A NumPy array of dim len(cp.asset_grid) x len(cp.z_vals)

        Returns
        -------
        array_like(float)
            The updated policy, where updating is by the Coleman
            operator.
        """
        # === simplify names, set up arrays === #
        R, Pi, beta, du, b = self.R, self.Pi, self.beta, self.du, self.b
        asset_grid, z_vals = self.asset_grid, self.z_vals
        z_size = len(z_vals)
        gamma = R * beta
        vals = np.empty(z_size)

        # === linear interpolation to get consumption function === #
        def cf(a):
            """
            The call cf(a) returns an array containing the values c(a,
            z) for each z in z_vals. For each such z, the value c(a, z)
            is constructed by univariate linear approximation over asset
            space, based on the values in the array c
            """
            for i in range(z_size):
                # np.interp replaces the removed scipy.interp alias.
                vals[i] = np.interp(a, asset_grid, c[:, i])
            return vals

        # === solve for root to get Kc === #
        Kc = np.empty(c.shape)
        for i_a, a in enumerate(asset_grid):
            for i_z, z in enumerate(z_vals):
                def h(t):
                    expectation = np.dot(du(cf(R * a + z - t)), Pi[i_z, :])
                    return du(t) - max(gamma * expectation, du(R * a + z + b))
                Kc[i_a, i_z] = brentq(h, np.min(z_vals), R * a + z + b)

        return Kc

    def initialize(self):
        """
        Creates a suitable initial conditions V and c for value function
        and policy function iteration respectively.

        Returns
        -------
        V : array_like(float)
            Initial condition for value function iteration
        c : array_like(float)
            Initial condition for Coleman operator iteration
        """
        # === Simplify names, set up arrays === #
        R, beta, u, b = self.R, self.beta, self.u, self.b
        asset_grid, z_vals = self.asset_grid, self.z_vals
        shape = len(asset_grid), len(z_vals)
        V, c = np.empty(shape), np.empty(shape)

        # === Populate V and c === #
        for i_a, a in enumerate(asset_grid):
            for i_z, z in enumerate(z_vals):
                c_max = R * a + z + b
                c[i_a, i_z] = c_max
                V[i_a, i_z] = u(c_max) / (1 - beta)

        return V, c
| [
"scipy.interp",
"numpy.array",
"numpy.linspace",
"numpy.empty",
"numpy.min"
] | [((2661, 2697), 'numpy.linspace', 'np.linspace', (['(-b)', 'grid_max', 'grid_size'], {}), '(-b, grid_max, grid_size)\n', (2672, 2697), True, 'import numpy as np\n'), ((3619, 3636), 'numpy.empty', 'np.empty', (['V.shape'], {}), '(V.shape)\n', (3627, 3636), True, 'import numpy as np\n'), ((3653, 3670), 'numpy.empty', 'np.empty', (['V.shape'], {}), '(V.shape)\n', (3661, 3670), True, 'import numpy as np\n'), ((5369, 5385), 'numpy.empty', 'np.empty', (['z_size'], {}), '(z_size)\n', (5377, 5385), True, 'import numpy as np\n'), ((5954, 5971), 'numpy.empty', 'np.empty', (['c.shape'], {}), '(c.shape)\n', (5962, 5971), True, 'import numpy as np\n'), ((2607, 2619), 'numpy.array', 'np.array', (['Pi'], {}), '(Pi)\n', (2615, 2619), True, 'import numpy as np\n'), ((3808, 3840), 'scipy.interp', 'interp', (['a', 'asset_grid', 'V[:, i_z]'], {}), '(a, asset_grid, V[:, i_z])\n', (3814, 3840), False, 'from scipy import interp\n'), ((6950, 6965), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (6958, 6965), True, 'import numpy as np\n'), ((6967, 6982), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (6975, 6982), True, 'import numpy as np\n'), ((5840, 5870), 'scipy.interp', 'interp', (['a', 'asset_grid', 'c[:, i]'], {}), '(a, asset_grid, c[:, i])\n', (5846, 5870), False, 'from scipy import interp\n'), ((4216, 4230), 'numpy.min', 'np.min', (['z_vals'], {}), '(z_vals)\n', (4222, 4230), True, 'import numpy as np\n'), ((6284, 6298), 'numpy.min', 'np.min', (['z_vals'], {}), '(z_vals)\n', (6290, 6298), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from tqdm import tqdm
from collections import deque
from plasticity.utils import _check_activation
from plasticity.utils.activations import Linear
from plasticity.model.optimizer import Optimizer
from plasticity.model.weights import BaseWeights
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils import check_array
from sklearn.utils import check_X_y
from sklearn.utils.validation import check_is_fitted
__author__ = ['<NAME>', '<NAME>', 'SimoneGasperini']
__email__ = ['<EMAIL>', '<EMAIL>', '<EMAIL>']
class BasePlasticity(BaseEstimator, TransformerMixin):

  '''
  Abstract base class for plasticity models

  Parameters
  ----------
    outputs : int (default=100)
      Number of hidden units

    num_epochs : int (default=100)
      Maximum number of epochs for model convergency

    batch_size : int (default=100)
      Size of the minibatch

    weights_init : BaseWeights object (default=None)
      Weights initialization strategy. If None a fresh BaseWeights object
      is created for this instance.

    activation : str (default="Linear")
      Name of the activation function

    optimizer : Optimizer (default=None)
      Optimizer object (derived by the base class Optimizer). If None a
      fresh default Optimizer object is created for this instance.

    precision : float (default=1e-30)
      Parameter that controls numerical precision of the weight updates

    epochs_for_convergency : int (default=None)
      Number of stable epochs requested for the convergency.
      If None the training proceeds up to the maximum number of epochs (num_epochs).

    convergency_atol : float (default=0.01)
      Absolute tolerance requested for the convergency

    decay : float (default=0.)
      Weight decay scale factor.

    random_state : int (default=None)
      Random seed for batch subdivisions

    verbose : bool (default=True)
      Turn on/off the verbosity
  '''

  def __init__(self, outputs: int = 100, num_epochs: int = 100,
      activation: str = 'Linear', optimizer: 'Optimizer' = None,
      batch_size: int = 100, weights_init: 'BaseWeights' = None,
      precision: float = 1e-30,
      epochs_for_convergency: int = None,
      convergency_atol: float = 0.01,
      decay: float = 0.,
      moving_average: bool = False,
      memory_factor: float = 0.9,
      random_state: int = None,
      checkpoints: int = 0,
      checkpoint_path: str = './',
      verbose: bool = True):

    _, activation = _check_activation(self, activation)

    self.outputs = outputs
    self.num_epochs = num_epochs
    self.batch_size = batch_size
    self.activation = activation
    # NOTE: the optimizer and weights-init defaults are instantiated here
    # (and not in the signature) to avoid the mutable-default-argument
    # pitfall: with `optimizer=Optimizer()` in the signature a single,
    # possibly stateful, object would be shared by every instance that
    # relies on the default.
    self.optimizer = Optimizer() if optimizer is None else optimizer
    self.weights_init = BaseWeights() if weights_init is None else weights_init
    self.precision = precision
    self.epochs_for_convergency = epochs_for_convergency if epochs_for_convergency is not None else num_epochs
    self.epochs_for_convergency = max(self.epochs_for_convergency, 1)
    self.decay = decay
    self.convergency_atol = convergency_atol
    self.random_state = random_state
    self.verbose = verbose
    self.checkpoints = checkpoints
    self.checkpoint_path = checkpoint_path
    self.theta = None
    self.moving_average = moving_average
    self.memory_factor = memory_factor

  def _weights_update(self, X: np.ndarray, output: np.ndarray) -> tuple:
    '''
    Compute the weights update using the given learning rule.

    Parameters
    ----------
      X : array-like (2D)
        Input array of data

      output : array-like (2D)
        Output of the model estimated by the predict function

    Returns
    -------
      weight_update : array-like (2D)
        Weight updates matrix to apply

      theta : array-like
        Array of learning progress
    '''
    raise NotImplementedError

  def _lebesgue_norm(self, w: np.ndarray) -> np.ndarray:
    '''
    Apply the Lebesgue norm to the weights.

    Parameters
    ----------
      w : array-like (2D)
        Array to normalize using Lebesgue norm

    Returns
    -------
      wnorm : array-like (2D)
        Normalized version of the input array
    '''
    # NOTE: self.p is expected to be provided by the concrete subclass.
    if self.p != 2:
      sign = np.sign(w)
      return sign * np.absolute(w)**(self.p - 1)
    else:
      return w

  def _fit_step(self, X: np.ndarray) -> np.ndarray:
    '''
    Core function of fit step (forward + backward + updates).
    We divide the step into a function to allow an easier visualization
    of the weight matrix (if necessary).

    Parameters
    ----------
      X : array-like (2D)
        Input array of data

    Returns
    -------
      theta : array-like
        Array of learning progress
    '''
    # predict the encoded values
    output = self._predict(X)

    # update weights
    w_update, theta = self._weights_update(X, output)

    # apply the weight decay
    if self.decay != 0.:
      w_update -= self.decay * self.weights

    # -update for compatibility with optimizers (they minimize, we ascend)
    self.weights, = self.optimizer.update(params=[self.weights], gradients=[-w_update])

    return theta

  @property
  def _check_convergency(self) -> bool:
    '''
    Check if the current training has reached the convergency.

    Returns
    -------
      check : bool
        Check if the learning history of the model is stable.

    Notes
    -----
    .. note::
      The convergency is estimated by the stability or not of the
      learning parameter in a fixed (epochs_for_convergency) number
      of epochs for all the outputs.
    '''
    if len(self.history) < self.epochs_for_convergency:
      return False

    last = np.full_like(self.history, fill_value=self.history[-1])
    return np.allclose(self.history, last, atol=self.convergency_atol)

  def _join_input_label(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
    '''
    Join the input data matrix to the labels.
    In this way the labels array/matrix is considered as a new
    set of inputs for the model and the plasticity model can
    perform classification tasks without any extra supervised learning.

    Parameters
    ----------
      X : array-like (2D)
        Input array of data

      y : array-like (1D or 2D)
        Labels array/matrix

    Returns
    -------
      join : array-like (2D)
        Matrix of the merged data in which the first n_sample columns
        are occupied by the original data and the remaining ones store
        the labels.

    Notes
    -----
    .. note::
      The labels can be a 1D array or multi-dimensional array: the given
      shape is internally reshaped according to the required dimensions.
    '''
    X, y = check_X_y(X, y, multi_output=True, y_numeric=True)

    # reshape the labels if it is a single array
    y = y.reshape(-1, 1) if len(y.shape) == 1 else y
    # concatenate the labels as new inputs for neurons
    X = np.concatenate((X, y), axis=1)

    return X

  def _fit(self, X: np.ndarray) -> 'BasePlasticity':
    '''
    Core function for the fit member

    Parameters
    ----------
      X : array-like (2D)
        Input array of data

    Returns
    -------
      self
    '''
    num_samples, _ = X.shape
    indices = np.arange(0, num_samples).astype('int64')
    num_batches = num_samples // self.batch_size

    for epoch in tqdm(range(self.num_epochs), disable=(not self.verbose)):

      # random shuffle the input
      np.random.shuffle(indices)

      # zero-copy view of the shuffled indices as (num_batches, batch_size);
      # use the actual itemsize instead of a hard-coded 8 so the stride stays
      # correct even if the index dtype ever changes.
      batches = np.lib.stride_tricks.as_strided(indices,
                                                shape=(num_batches, self.batch_size),
                                                strides=(self.batch_size * indices.itemsize, indices.itemsize))

      # init null values of theta for iterative summation
      theta = np.zeros(shape=(self.outputs,), dtype=float)

      for batch in batches:
        batch_data = X[batch, ...]

        theta += self._fit_step(X=batch_data)

      # Note: the theta must be normalized according to number of batches!
      self.history.append(theta * (1. / num_batches))

      # check if the model has reached the convergency (early stopping criteria)
      if self._check_convergency:
        if self.verbose:
          print('Early stopping: the training has reached the convergency criteria')
        break

      if self.checkpoints:
        if (epoch % self.checkpoints) == 0:
          self.save_weights(self.checkpoint_path + "weights" + str(epoch) + ".bin")
        elif epoch == (self.num_epochs - 1):
          self.save_weights(self.checkpoint_path + "weights" + str(self.num_epochs) + ".bin")

      # WEIGHTS SYMMETRIC ORTHOGONALIZATION (once at the end of each epoch)
      # the two methods are actually exactly equivalent
      # (provable by simple linear algebra in real domain)
      # but the first using the numpy svd function is faster
      # (1)
      #U, _, Vt = np.linalg.svd(self.weights, full_matrices=False)
      #self.weights = np.einsum('ij, jk -> ik', U, Vt, optimize=True)
      # (2)
      #from scipy.linalg import sqrtm
      # self.weights = np.real(self.weights @ np.linalg.inv(sqrtm(self.weights.T @ self.weights)))

    return self

  def fit(self, X: np.ndarray, y: np.ndarray = None) -> 'BasePlasticity':
    '''
    Fit the Plasticity model weights.

    Parameters
    ----------
      X : array-like of shape (n_samples, n_features)
        The training input samples

      y : array-like, default=None
        The array of labels

    Returns
    -------
      self : object
        Return self

    Notes
    -----
    .. note::
      The model tries to memorize the given input producing a valid encoding.

    .. warning::
      If the array of labels is provided, it will be considered as a set of new inputs
      for the neurons. The labels can be 1D array or multi-dimensional array: the given
      shape is internally reshaped according to the required dimensions.
    '''
    if y is not None:
      X = self._join_input_label(X=X, y=y)

    X = check_array(X)
    np.random.seed(self.random_state)
    num_samples, num_features = X.shape

    if self.batch_size > num_samples:
      raise ValueError('Incorrect batch_size found. The batch_size must be less or equal to the number of samples. '
                       'Given {:d} for {:d} samples'.format(self.batch_size, num_samples))

    self.weights = self.weights_init.get(size=(self.outputs, num_features))
    self.history = deque(maxlen=self.epochs_for_convergency)

    self._fit(X)

    return self

  def _predict(self, X: np.ndarray) -> np.ndarray:
    '''
    Core function for the predict member
    '''
    raise NotImplementedError

  def predict(self, X: np.ndarray, y: np.ndarray = None) -> np.ndarray:
    '''
    Reduce X applying the Plasticity encoding.

    Parameters
    ----------
      X : array of shape (n_samples, n_features)
        The input samples

      y : array-like, default=None
        The array of labels

    Returns
    -------
      Xnew : array of shape (n_values, n_samples)
        The encoded features

    Notes
    -----
    .. warning::
      If the array of labels is provided, it will be considered as a set of new inputs
      for the neurons. The labels can be 1D array or multi-dimensional array: the given
      shape is internally reshaped according to the required dimensions.
    '''
    check_is_fitted(self, 'weights')

    if y is not None:
      X = self._join_input_label(X=X, y=y)

      # temporarily disable the non-linearity so the raw projection
      # (weights @ X) is returned for the supervised case
      old_activation = self.activation
      self.activation = Linear()
      result = self._predict(X).transpose()
      self.activation = old_activation
      return result

    X = check_array(X)
    return self._predict(X)

  def transform(self, X: np.ndarray) -> np.ndarray:
    '''
    Apply the data reduction according to the features in the best signature found.

    Parameters
    ----------
      X : array-like of shape (n_samples, n_features)
        The input samples

    Returns
    -------
      Xnew : array-like of shape (n_samples, encoded_features)
        The data encoded according to the model weights.
    '''
    check_is_fitted(self, 'weights')
    Xnew = self._predict(X)
    return Xnew.transpose()

  def fit_transform(self, X: np.ndarray, y: np.ndarray = None) -> np.ndarray:
    '''
    Fit the model model meta-transformer and apply the data encoding transformation.

    Parameters
    ----------
      X : array-like of shape (n_samples, n_features)
        The training input samples

      y : array-like, shape (n_samples,)
        The target values

    Returns
    -------
      Xnew : array-like of shape (n_samples, encoded_features)
        The data encoded according to the model weights.

    Notes
    -----
    .. warning::
      If the array of labels is provided, it will be considered as a set of new inputs
      for the neurons. The labels can be 1D array or multi-dimensional array: the given
      shape is internally reshaped according to the required dimensions.
    '''
    self.fit(X, y=y)
    Xnew = self.transform(X)
    return Xnew

  def save_weights(self, filename: str) -> bool:
    '''
    Save the current weights to a binary file.

    Parameters
    ----------
      filename : str
        Filename or path

    Returns
    -------
      True if everything is ok
    '''
    check_is_fitted(self, 'weights')
    with open(filename, 'wb') as fp:
      self.weights.tofile(fp, sep='')
    return True

  def load_weights(self, filename: str) -> bool:
    '''
    Load the weight matrix from a binary file.

    Parameters
    ----------
      filename : str
        Filename or path

    Returns
    -------
      self : object
        Return self
    '''
    with open(filename, 'rb') as fp:
      # NOTE: use the builtin `float` (i.e. float64): the `np.float` alias
      # was deprecated in NumPy 1.20 and removed in NumPy 1.24.
      self.weights = np.fromfile(fp, dtype=float, count=-1)

    # reshape the loaded weights since the numpy function loads
    # only in ravel format!!
    self.weights = self.weights.reshape(self.outputs, -1)

    return self

  def __repr__(self) -> str:
    '''
    Object representation
    '''
    class_name = self.__class__.__qualname__
    params = self.__init__.__code__.co_varnames
    params = set(params) - {'self'}
    args = ', '.join(['{0}={1}'.format(k, str(getattr(self, k)))
                      if not isinstance(getattr(self, k), str) else '{0}="{1}"'.format(k, str(getattr(self, k)))
                      for k in params])
    return '{0}({1})'.format(class_name, args)
| [
"numpy.fromfile",
"plasticity.model.weights.BaseWeights",
"numpy.arange",
"collections.deque",
"numpy.full_like",
"plasticity.utils._check_activation",
"numpy.random.seed",
"numpy.concatenate",
"sklearn.utils.validation.check_is_fitted",
"plasticity.utils.activations.Linear",
"numpy.allclose",
... | [((2051, 2062), 'plasticity.model.optimizer.Optimizer', 'Optimizer', ([], {}), '()\n', (2060, 2062), False, 'from plasticity.model.optimizer import Optimizer\n'), ((2134, 2147), 'plasticity.model.weights.BaseWeights', 'BaseWeights', ([], {}), '()\n', (2145, 2147), False, 'from plasticity.model.weights import BaseWeights\n'), ((2615, 2650), 'plasticity.utils._check_activation', '_check_activation', (['self', 'activation'], {}), '(self, activation)\n', (2632, 2650), False, 'from plasticity.utils import _check_activation\n'), ((6091, 6146), 'numpy.full_like', 'np.full_like', (['self.history'], {'fill_value': 'self.history[-1]'}), '(self.history, fill_value=self.history[-1])\n', (6103, 6146), True, 'import numpy as np\n'), ((6163, 6222), 'numpy.allclose', 'np.allclose', (['self.history', 'last'], {'atol': 'self.convergency_atol'}), '(self.history, last, atol=self.convergency_atol)\n', (6174, 6222), True, 'import numpy as np\n'), ((7242, 7292), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'multi_output': '(True)', 'y_numeric': '(True)'}), '(X, y, multi_output=True, y_numeric=True)\n', (7251, 7292), False, 'from sklearn.utils import check_X_y\n'), ((7474, 7504), 'numpy.concatenate', 'np.concatenate', (['(X, y)'], {'axis': '(1)'}), '((X, y), axis=1)\n', (7488, 7504), True, 'import numpy as np\n'), ((10822, 10836), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (10833, 10836), False, 'from sklearn.utils import check_array\n'), ((10845, 10878), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (10859, 10878), True, 'import numpy as np\n'), ((11397, 11438), 'collections.deque', 'deque', ([], {'maxlen': 'self.epochs_for_convergency'}), '(maxlen=self.epochs_for_convergency)\n', (11402, 11438), False, 'from collections import deque\n'), ((12420, 12452), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""weights"""'], {}), "(self, 'weights')\n", (12435, 12452), False, 'from 
sklearn.utils.validation import check_is_fitted\n'), ((12894, 12908), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (12905, 12908), False, 'from sklearn.utils import check_array\n'), ((13404, 13436), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""weights"""'], {}), "(self, 'weights')\n", (13419, 13436), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((14754, 14786), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""weights"""'], {}), "(self, 'weights')\n", (14769, 14786), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((4444, 4454), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (4451, 4454), True, 'import numpy as np\n'), ((8072, 8098), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (8089, 8098), True, 'import numpy as np\n'), ((8122, 8239), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['indices'], {'shape': '(num_batches, self.batch_size)', 'strides': '(self.batch_size * 8, 8)'}), '(indices, shape=(num_batches, self.\n batch_size), strides=(self.batch_size * 8, 8))\n', (8153, 8239), True, 'import numpy as np\n'), ((8319, 8363), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.outputs,)', 'dtype': 'float'}), '(shape=(self.outputs,), dtype=float)\n', (8327, 8363), True, 'import numpy as np\n'), ((12657, 12665), 'plasticity.utils.activations.Linear', 'Linear', ([], {}), '()\n', (12663, 12665), False, 'from plasticity.utils.activations import Linear\n'), ((15261, 15302), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.float', 'count': '(-1)'}), '(fp, dtype=np.float, count=-1)\n', (15272, 15302), True, 'import numpy as np\n'), ((7845, 7870), 'numpy.arange', 'np.arange', (['(0)', 'num_samples'], {}), '(0, num_samples)\n', (7854, 7870), True, 'import numpy as np\n'), ((4481, 4495), 'numpy.absolute', 'np.absolute', (['w'], {}), '(w)\n', (4492, 4495), True, 'import numpy as np\n')] |
import numpy as np
from py_diff_stokes_flow.env.env_base import EnvBase
from py_diff_stokes_flow.common.common import ndarray
from py_diff_stokes_flow.core.py_diff_stokes_flow_core import ShapeComposition2d, StdIntArray2d
class FluidicTwisterEnv3d(EnvBase):
    # 3D fluidic "twister" design environment: fluid enters through a circular
    # inlet on the bottom layer (z = 0) and the design target is a swirling
    # (rigid-body-rotation) velocity profile at the outlet.
    def __init__(self, seed, folder):
        """Set up the simulation grid, material constants, the parametric
        shape, the inlet boundary conditions, and the target velocity field.

        Parameters
        ----------
        seed : int
            Seed fed to NumPy's global RNG for reproducibility.
        folder : str
            Output folder, forwarded to EnvBase.
        """
        np.random.seed(seed)
        # Grid resolution (in cells) and material parameters; nu = 0.499 is
        # close to the incompressible limit of 0.5.
        cell_nums = (32, 32, 16)
        E = 100
        nu = 0.499
        vol_tol = 1e-2
        edge_sample_num = 2
        EnvBase.__init__(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)

        # Initialize the parametric shapes.
        # A single polar-Bezier shape described by 51 scalar parameters.
        self._parametric_shape_info = [ ('polar_bezier-6', 51)]
        # Initialize the node conditions.
        self._node_boundary_info = []
        # Radii are fractions of the x extent; velocities are the prescribed
        # inlet speed and the desired outlet rim speed used below.
        inlet_radius = 0.3
        outlet_radius = 0.3
        inlet_velocity = 1.0
        outlet_velocity = 2.0
        cx, cy, _ = self.cell_nums()
        # The polar-Bezier construction assumes a square x-y cross section.
        assert cx == cy
        nx, ny, nz = self.node_nums()
        def get_bezier(radius):
            # Build a 2D polar-Bezier region of the given (relative) radius
            # centered at the midpoint of the x-y cross section.
            bezier = ShapeComposition2d()
            # 8 radial control values (in cell units) + center (cx/2, cy/2)
            # and a trailing angle parameter of 0.
            params = np.concatenate([
                np.full(8, radius) * cx,
                ndarray([0.5 * cx, 0.5 * cy, 0])
            ])
            bezier.AddParametricShape('polar_bezier', params.size)
            cxy = StdIntArray2d((int(cx), int(cy)))
            bezier.Initialize(cxy, params, True)
            return bezier
        inlet_bezier = get_bezier(inlet_radius)
        outlet_bezier = get_bezier(outlet_radius)
        # Dirichlet conditions on the bottom layer (k = 0): nodes inside the
        # inlet circle get velocity (0, 0, inlet_velocity).
        # NOTE(review): signed_distance > 0 is taken here to mean "inside the
        # shape" -- confirm against ShapeComposition2d's sign convention.
        for i in range(nx):
            for j in range(ny):
                if inlet_bezier.signed_distance((i, j)) > 0:
                    self._node_boundary_info.append(((i, j, 0, 0), 0))
                    self._node_boundary_info.append(((i, j, 0, 1), 0))
                    self._node_boundary_info.append(((i, j, 0, 2), inlet_velocity))
        # Initialize the interface.
        self._interface_boundary_type = 'free-slip'

        # Compute the target velocity field (for rendering purposes only).
        # The target is a rigid-body rotation about the domain's center axis;
        # omega is chosen so the curl matches 2 * outlet_velocity / (cx * r).
        desired_omega = 2 * outlet_velocity / (cx * outlet_radius)
        target_velocity_field = np.zeros((nx, ny, 3))
        for i in range(nx):
            for j in range(ny):
                if outlet_bezier.signed_distance((i, j)) > 0:
                    # (x, y) are relative coordinates in [0, 1].
                    x, y = i / cx, j / cy
                    # u = (-(j - ny / 2), (i - nx / 2), 0) * c.
                    # ux_pos = (-j, i + 1, 0) * c.
                    # uy_pos = (-j - 1, i, 0) * c.
                    # curl = (i + 1) * c + (j + 1) * c - i * c - j * c.
                    #      = (i + j + 2 - i - j) * c = 2 * c.
                    # c = outlet_vel / (num_cells[0] * outlet_radius).
                    c = desired_omega / 2
                    target_velocity_field[i, j] = ndarray([
                        -(y - 0.5) * c,
                        (x - 0.5) * c,
                        0
                    ])

        # Other data members.
        # Cache the configuration so other member functions can reuse it.
        self._inlet_radius = inlet_radius
        self._outlet_radius = outlet_radius
        self._inlet_velocity = inlet_velocity
        self._target_velocity_field = target_velocity_field
        self._inlet_bezier = inlet_bezier
        self._outlet_bezier = outlet_bezier
        self._desired_omega = desired_omega
def _variables_to_shape_params(self, x):
x = ndarray(x).copy().ravel()
assert x.size == 32
cx, cy, _ = self._cell_nums
assert cx == cy
params = np.concatenate([
np.full(8, self._inlet_radius),
x,
np.full(8, self._outlet_radius),
ndarray([0.5, 0.5, 0]),
])
params[:-1] *= cx
# Jacobian.
J = np.zeros((params.size, x.size))
for i in range(x.size):
J[8 + i, i] = cx
return ndarray(params).copy(), ndarray(J).copy()
def _loss_and_grad_on_velocity_field(self, u):
u_field = self.reshape_velocity_field(u)
grad = np.zeros(u_field.shape)
nx, ny, nz = self.node_nums()
assert nx == ny
loss = 0
cnt = 0
for i in range(nx):
for j in range(ny):
if self._outlet_bezier.signed_distance((i, j)) > 0:
cnt += 1
uxy = u_field[i, j, nz - 1, :2]
ux_pos = u_field[i + 1, j, nz - 1, :2]
uy_pos = u_field[i, j + 1, nz - 1, :2]
# Compute the curl.
curl = ux_pos[1] - uy_pos[0] - uxy[1] + uxy[0]
loss += (curl - self._desired_omega) ** 2
# ux_pos[1]
grad[i + 1, j, nz - 1, 1] += 2 * (curl - self._desired_omega)
grad[i, j + 1, nz - 1, 0] += -2 * (curl - self._desired_omega)
grad[i, j, nz - 1, 1] += -2 * (curl - self._desired_omega)
grad[i, j, nz - 1, 0] += 2 * (curl - self._desired_omega)
loss /= cnt
grad /= cnt
return loss, ndarray(grad).ravel()
def _color_velocity(self, u):
return float(np.linalg.norm(u) / 2)
def sample(self):
return np.random.uniform(low=self.lower_bound(), high=self.upper_bound())
def lower_bound(self):
return np.full(32, 0.1)
def upper_bound(self):
return np.full(32, 0.4) | [
"py_diff_stokes_flow.common.common.ndarray",
"numpy.zeros",
"py_diff_stokes_flow.env.env_base.EnvBase.__init__",
"numpy.random.seed",
"py_diff_stokes_flow.core.py_diff_stokes_flow_core.ShapeComposition2d",
"numpy.linalg.norm",
"numpy.full"
] | [((306, 326), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (320, 326), True, 'import numpy as np\n'), ((455, 529), 'py_diff_stokes_flow.env.env_base.EnvBase.__init__', 'EnvBase.__init__', (['self', 'cell_nums', 'E', 'nu', 'vol_tol', 'edge_sample_num', 'folder'], {}), '(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)\n', (471, 529), False, 'from py_diff_stokes_flow.env.env_base import EnvBase\n'), ((2054, 2075), 'numpy.zeros', 'np.zeros', (['(nx, ny, 3)'], {}), '((nx, ny, 3))\n', (2062, 2075), True, 'import numpy as np\n'), ((3611, 3642), 'numpy.zeros', 'np.zeros', (['(params.size, x.size)'], {}), '((params.size, x.size))\n', (3619, 3642), True, 'import numpy as np\n'), ((3877, 3900), 'numpy.zeros', 'np.zeros', (['u_field.shape'], {}), '(u_field.shape)\n', (3885, 3900), True, 'import numpy as np\n'), ((5156, 5172), 'numpy.full', 'np.full', (['(32)', '(0.1)'], {}), '(32, 0.1)\n', (5163, 5172), True, 'import numpy as np\n'), ((5216, 5232), 'numpy.full', 'np.full', (['(32)', '(0.4)'], {}), '(32, 0.4)\n', (5223, 5232), True, 'import numpy as np\n'), ((987, 1007), 'py_diff_stokes_flow.core.py_diff_stokes_flow_core.ShapeComposition2d', 'ShapeComposition2d', ([], {}), '()\n', (1005, 1007), False, 'from py_diff_stokes_flow.core.py_diff_stokes_flow_core import ShapeComposition2d, StdIntArray2d\n'), ((3413, 3443), 'numpy.full', 'np.full', (['(8)', 'self._inlet_radius'], {}), '(8, self._inlet_radius)\n', (3420, 3443), True, 'import numpy as np\n'), ((3472, 3503), 'numpy.full', 'np.full', (['(8)', 'self._outlet_radius'], {}), '(8, self._outlet_radius)\n', (3479, 3503), True, 'import numpy as np\n'), ((3517, 3539), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[0.5, 0.5, 0]'], {}), '([0.5, 0.5, 0])\n', (3524, 3539), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4985, 5002), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (4999, 5002), True, 'import numpy as np\n'), ((1103, 1135), 
'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[0.5 * cx, 0.5 * cy, 0]'], {}), '([0.5 * cx, 0.5 * cy, 0])\n', (1110, 1135), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2703, 2746), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[-(y - 0.5) * c, (x - 0.5) * c, 0]'], {}), '([-(y - 0.5) * c, (x - 0.5) * c, 0])\n', (2710, 2746), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((3719, 3734), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['params'], {}), '(params)\n', (3726, 3734), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((3743, 3753), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['J'], {}), '(J)\n', (3750, 3753), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4907, 4920), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['grad'], {}), '(grad)\n', (4914, 4920), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((1062, 1080), 'numpy.full', 'np.full', (['(8)', 'radius'], {}), '(8, radius)\n', (1069, 1080), True, 'import numpy as np\n'), ((3252, 3262), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['x'], {}), '(x)\n', (3259, 3262), False, 'from py_diff_stokes_flow.common.common import ndarray\n')] |
# -*-coding:utf-8 -*-
import numpy as np
from bs4 import BeautifulSoup
import random
def scrapePage(retX, retY, inFile, yr, numPce, origPrc):
"""
函数说明:从页面读取数据,生成retX和retY列表
Parameters:
retX - 数据X
retY - 数据Y
inFile - HTML文件
yr - 年份
numPce - 乐高部件数目
origPrc - 原价
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
# 打开并读取HTML文件
with open(inFile, encoding='utf-8') as f:
html = f.read()
soup = BeautifulSoup(html)
i = 1
# 根据HTML页面结构进行解析
currentRow = soup.find_all('table', r = "%d" % i)
while(len(currentRow) != 0):
currentRow = soup.find_all('table', r = "%d" % i)
title = currentRow[0].find_all('a')[1].text
lwrTitle = title.lower()
# 查找是否有全新标签
if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
newFlag = 1.0
else:
newFlag = 0.0
# 查找是否已经标志出售,我们只收集已出售的数据
soldUnicde = currentRow[0].find_all('td')[3].find_all('span')
if len(soldUnicde) == 0:
print("商品 #%d 没有出售" % i)
else:
# 解析页面获取当前价格
soldPrice = currentRow[0].find_all('td')[4]
priceStr = soldPrice.text
priceStr = priceStr.replace('$','')
priceStr = priceStr.replace(',','')
if len(soldPrice) > 1:
priceStr = priceStr.replace('Free shipping', '')
sellingPrice = float(priceStr)
# 去掉不完整的套装价格
if sellingPrice > origPrc * 0.5:
print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
retX.append([yr, numPce, newFlag, origPrc])
retY.append(sellingPrice)
i += 1
currentRow = soup.find_all('table', r = "%d" % i)
def ridgeRegres(xMat, yMat, lam = 0.2):
"""
函数说明:岭回归
Parameters:
xMat - x数据集
yMat - y数据集
lam - 缩减系数
Returns:
ws - 回归系数
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
xTx = xMat.T * xMat
denom = xTx + np.eye(np.shape(xMat)[1]) * lam
if np.linalg.det(denom) == 0.0:
print("矩阵为奇异矩阵,不能求逆")
return
ws = denom.I * (xMat.T * yMat)
return ws
def setDataCollect(retX, retY):
"""
函数说明:依次读取六种乐高套装的数据,并生成数据矩阵
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
scrapePage(retX, retY, './lego/lego8288.html', 2006, 800, 49.99) #2006年的乐高8288,部件数目800,原价49.99
scrapePage(retX, retY, './lego/lego10030.html', 2002, 3096, 269.99) #2002年的乐高10030,部件数目3096,原价269.99
scrapePage(retX, retY, './lego/lego10179.html', 2007, 5195, 499.99) #2007年的乐高10179,部件数目5195,原价499.99
scrapePage(retX, retY, './lego/lego10181.html', 2007, 3428, 199.99) #2007年的乐高10181,部件数目3428,原价199.99
scrapePage(retX, retY, './lego/lego10189.html', 2008, 5922, 299.99) #2008年的乐高10189,部件数目5922,原价299.99
scrapePage(retX, retY, './lego/lego10196.html', 2009, 3263, 249.99) #2009年的乐高10196,部件数目3263,原价249.99
def regularize(xMat, yMat):
"""
函数说明:数据标准化
Parameters:
xMat - x数据集
yMat - y数据集
Returns:
inxMat - 标准化后的x数据集
inyMat - 标准化后的y数据集
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
inxMat = xMat.copy() #数据拷贝
inyMat = yMat.copy()
yMean = np.mean(yMat, 0) #行与行操作,求均值
inyMat = yMat - yMean #数据减去均值
inMeans = np.mean(inxMat, 0) #行与行操作,求均值
inVar = np.var(inxMat, 0) #行与行操作,求方差
# print(inxMat)
print(inMeans)
# print(inVar)
inxMat = (inxMat - inMeans) / inVar #数据减去均值除以方差实现标准化
return inxMat, inyMat
def rssError(yArr,yHatArr):
"""
函数说明:计算平方误差
Parameters:
yArr - 预测值
yHatArr - 真实值
Returns:
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
return ((yArr-yHatArr)**2).sum()
def standRegres(xArr,yArr):
"""
函数说明:计算回归系数w
Parameters:
xArr - x数据集
yArr - y数据集
Returns:
ws - 回归系数
Website:
http://www.cuijiahua.com/
Modify:
2017-11-12
"""
xMat = np.mat(xArr); yMat = np.mat(yArr).T
xTx = xMat.T * xMat #根据文中推导的公示计算回归系数
if np.linalg.det(xTx) == 0.0:
print("矩阵为奇异矩阵,不能求逆")
return
ws = xTx.I * (xMat.T*yMat)
return ws
def crossValidation(xArr, yArr, numVal = 10):
"""
函数说明:交叉验证岭回归
Parameters:
xArr - x数据集
yArr - y数据集
numVal - 交叉验证次数
Returns:
wMat - 回归系数矩阵
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
m = len(yArr) #统计样本个数
indexList = list(range(m)) #生成索引值列表
errorMat = np.zeros((numVal,30)) #create error mat 30columns numVal rows
for i in range(numVal): #交叉验证numVal次
trainX = []; trainY = [] #训练集
testX = []; testY = [] #测试集
random.shuffle(indexList) #打乱次序
for j in range(m): #划分数据集:90%训练集,10%测试集
if j < m * 0.9:
trainX.append(xArr[indexList[j]])
trainY.append(yArr[indexList[j]])
else:
testX.append(xArr[indexList[j]])
testY.append(yArr[indexList[j]])
wMat = ridgeTest(trainX, trainY) #获得30个不同lambda下的岭回归系数
for k in range(30): #遍历所有的岭回归系数
matTestX = np.mat(testX); matTrainX = np.mat(trainX) #测试集
meanTrain = np.mean(matTrainX,0) #测试集均值
varTrain = np.var(matTrainX,0) #测试集方差
matTestX = (matTestX - meanTrain) / varTrain #测试集标准化
yEst = matTestX * np.mat(wMat[k,:]).T + np.mean(trainY) #根据ws预测y值
errorMat[i, k] = rssError(yEst.T.A, np.array(testY)) #统计误差
meanErrors = np.mean(errorMat,0) #计算每次交叉验证的平均误差
minMean = float(min(meanErrors)) #找到最小误差
bestWeights = wMat[np.nonzero(meanErrors == minMean)] #找到最佳回归系数
xMat = np.mat(xArr); yMat = np.mat(yArr).T
meanX = np.mean(xMat,0); varX = np.var(xMat,0)
unReg = bestWeights / varX #数据经过标准化,因此需要还原
print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % ((-1 * np.sum(np.multiply(meanX,unReg)) + np.mean(yMat)), unReg[0,0], unReg[0,1], unReg[0,2], unReg[0,3]))
def ridgeTest(xArr, yArr):
"""
函数说明:岭回归测试
Parameters:
xMat - x数据集
yMat - y数据集
Returns:
wMat - 回归系数矩阵
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
xMat = np.mat(xArr); yMat = np.mat(yArr).T
#数据标准化
yMean = np.mean(yMat, axis = 0) #行与行操作,求均值
yMat = yMat - yMean #数据减去均值
xMeans = np.mean(xMat, axis = 0) #行与行操作,求均值
xVar = np.var(xMat, axis = 0) #行与行操作,求方差
xMat = (xMat - xMeans) / xVar #数据减去均值除以方差实现标准化
numTestPts = 30 #30个不同的lambda测试
wMat = np.zeros((numTestPts, np.shape(xMat)[1])) #初始回归系数矩阵
for i in range(numTestPts): #改变lambda计算回归系数
ws = ridgeRegres(xMat, yMat, np.exp(i - 10)) #lambda以e的指数变化,最初是一个非常小的数,
wMat[i, :] = ws.T #计算回归系数矩阵
return wMat
def useStandRegres():
"""
函数说明:使用简单的线性回归
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-11-12
"""
lgX = []
lgY = []
setDataCollect(lgX, lgY)
data_num, features_num = np.shape(lgX)
lgX1 = np.mat(np.ones((data_num, features_num + 1)))
lgX1[:, 1:5] = np.mat(lgX)
ws = standRegres(lgX1, lgY)
print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % (ws[0],ws[1],ws[2],ws[3],ws[4]))
def usesklearn():
"""
函数说明:使用sklearn
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-08
"""
from sklearn import linear_model
reg = linear_model.Ridge(alpha = .5)
lgX = []
lgY = []
setDataCollect(lgX, lgY)
reg.fit(lgX, lgY)
print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % (reg.intercept_, reg.coef_[0], reg.coef_[1], reg.coef_[2], reg.coef_[3]))
if __name__ == '__main__':
usesklearn() | [
"numpy.mean",
"numpy.mat",
"numpy.multiply",
"random.shuffle",
"numpy.ones",
"sklearn.linear_model.Ridge",
"numpy.linalg.det",
"bs4.BeautifulSoup",
"numpy.exp",
"numpy.zeros",
"numpy.array",
"numpy.nonzero",
"numpy.shape",
"numpy.var"
] | [((439, 458), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {}), '(html)\n', (452, 458), False, 'from bs4 import BeautifulSoup\n'), ((2962, 2978), 'numpy.mean', 'np.mean', (['yMat', '(0)'], {}), '(yMat, 0)\n', (2969, 2978), True, 'import numpy as np\n'), ((3057, 3075), 'numpy.mean', 'np.mean', (['inxMat', '(0)'], {}), '(inxMat, 0)\n', (3064, 3075), True, 'import numpy as np\n'), ((3110, 3127), 'numpy.var', 'np.var', (['inxMat', '(0)'], {}), '(inxMat, 0)\n', (3116, 3127), True, 'import numpy as np\n'), ((3676, 3688), 'numpy.mat', 'np.mat', (['xArr'], {}), '(xArr)\n', (3682, 3688), True, 'import numpy as np\n'), ((4201, 4223), 'numpy.zeros', 'np.zeros', (['(numVal, 30)'], {}), '((numVal, 30))\n', (4209, 4223), True, 'import numpy as np\n'), ((5211, 5231), 'numpy.mean', 'np.mean', (['errorMat', '(0)'], {}), '(errorMat, 0)\n', (5218, 5231), True, 'import numpy as np\n'), ((5393, 5405), 'numpy.mat', 'np.mat', (['xArr'], {}), '(xArr)\n', (5399, 5405), True, 'import numpy as np\n'), ((5438, 5454), 'numpy.mean', 'np.mean', (['xMat', '(0)'], {}), '(xMat, 0)\n', (5445, 5454), True, 'import numpy as np\n'), ((5462, 5477), 'numpy.var', 'np.var', (['xMat', '(0)'], {}), '(xMat, 0)\n', (5468, 5477), True, 'import numpy as np\n'), ((5871, 5883), 'numpy.mat', 'np.mat', (['xArr'], {}), '(xArr)\n', (5877, 5883), True, 'import numpy as np\n'), ((5924, 5945), 'numpy.mean', 'np.mean', (['yMat'], {'axis': '(0)'}), '(yMat, axis=0)\n', (5931, 5945), True, 'import numpy as np\n'), ((6011, 6032), 'numpy.mean', 'np.mean', (['xMat'], {'axis': '(0)'}), '(xMat, axis=0)\n', (6018, 6032), True, 'import numpy as np\n'), ((6058, 6078), 'numpy.var', 'np.var', (['xMat'], {'axis': '(0)'}), '(xMat, axis=0)\n', (6064, 6078), True, 'import numpy as np\n'), ((6640, 6653), 'numpy.shape', 'np.shape', (['lgX'], {}), '(lgX)\n', (6648, 6653), True, 'import numpy as np\n'), ((6724, 6735), 'numpy.mat', 'np.mat', (['lgX'], {}), '(lgX)\n', (6730, 6735), True, 'import numpy as np\n'), ((7019, 7048), 
'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (7037, 7048), False, 'from sklearn import linear_model\n'), ((1794, 1814), 'numpy.linalg.det', 'np.linalg.det', (['denom'], {}), '(denom)\n', (1807, 1814), True, 'import numpy as np\n'), ((3697, 3709), 'numpy.mat', 'np.mat', (['yArr'], {}), '(yArr)\n', (3703, 3709), True, 'import numpy as np\n'), ((3760, 3778), 'numpy.linalg.det', 'np.linalg.det', (['xTx'], {}), '(xTx)\n', (3773, 3778), True, 'import numpy as np\n'), ((4419, 4444), 'random.shuffle', 'random.shuffle', (['indexList'], {}), '(indexList)\n', (4433, 4444), False, 'import random\n'), ((5332, 5365), 'numpy.nonzero', 'np.nonzero', (['(meanErrors == minMean)'], {}), '(meanErrors == minMean)\n', (5342, 5365), True, 'import numpy as np\n'), ((5414, 5426), 'numpy.mat', 'np.mat', (['yArr'], {}), '(yArr)\n', (5420, 5426), True, 'import numpy as np\n'), ((5892, 5904), 'numpy.mat', 'np.mat', (['yArr'], {}), '(yArr)\n', (5898, 5904), True, 'import numpy as np\n'), ((6669, 6706), 'numpy.ones', 'np.ones', (['(data_num, features_num + 1)'], {}), '((data_num, features_num + 1))\n', (6676, 6706), True, 'import numpy as np\n'), ((4834, 4847), 'numpy.mat', 'np.mat', (['testX'], {}), '(testX)\n', (4840, 4847), True, 'import numpy as np\n'), ((4861, 4875), 'numpy.mat', 'np.mat', (['trainX'], {}), '(trainX)\n', (4867, 4875), True, 'import numpy as np\n'), ((4901, 4922), 'numpy.mean', 'np.mean', (['matTrainX', '(0)'], {}), '(matTrainX, 0)\n', (4908, 4922), True, 'import numpy as np\n'), ((4953, 4973), 'numpy.var', 'np.var', (['matTrainX', '(0)'], {}), '(matTrainX, 0)\n', (4959, 4973), True, 'import numpy as np\n'), ((6334, 6348), 'numpy.exp', 'np.exp', (['(i - 10)'], {}), '(i - 10)\n', (6340, 6348), True, 'import numpy as np\n'), ((5098, 5113), 'numpy.mean', 'np.mean', (['trainY'], {}), '(trainY)\n', (5105, 5113), True, 'import numpy as np\n'), ((5168, 5183), 'numpy.array', 'np.array', (['testY'], {}), '(testY)\n', (5176, 5183), 
True, 'import numpy as np\n'), ((6222, 6236), 'numpy.shape', 'np.shape', (['xMat'], {}), '(xMat)\n', (6230, 6236), True, 'import numpy as np\n'), ((1765, 1779), 'numpy.shape', 'np.shape', (['xMat'], {}), '(xMat)\n', (1773, 1779), True, 'import numpy as np\n'), ((5646, 5659), 'numpy.mean', 'np.mean', (['yMat'], {}), '(yMat)\n', (5653, 5659), True, 'import numpy as np\n'), ((5076, 5094), 'numpy.mat', 'np.mat', (['wMat[k, :]'], {}), '(wMat[k, :])\n', (5082, 5094), True, 'import numpy as np\n'), ((5618, 5643), 'numpy.multiply', 'np.multiply', (['meanX', 'unReg'], {}), '(meanX, unReg)\n', (5629, 5643), True, 'import numpy as np\n')] |
# plotting.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, <NAME> and <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scqubits.core.constants as constants
import scqubits.utils.misc as utils
import scqubits.utils.plot_defaults as defaults
try:
from labellines import labelLines
_LABELLINES_ENABLED = True
except ImportError:
_LABELLINES_ENABLED = False
def _process_options(figure, axes, opts=None, **kwargs):
"""
Processes plotting options.
Parameters
----------
figure: matplotlib.Figure
axes: matplotlib.Axes
opts: dict
keyword dictionary with custom options
**kwargs: dict
standard plotting option (see separate documentation)
"""
opts = opts or {}
option_dict = {**opts, **kwargs}
for key, value in option_dict.items():
if key in defaults.SPECIAL_PLOT_OPTIONS:
_process_special_option(figure, axes, key, value)
else:
set_method = getattr(axes, 'set_' + key)
set_method(value)
filename = kwargs.get('filename')
if filename:
figure.savefig(os.path.splitext(filename)[0] + '.pdf')
def _process_special_option(figure, axes, key, value):
"""Processes a single 'special' option, i.e., one internal to scqubits and not to be handed further down to
matplotlib.
Parameters
----------
figure: matplotlib.Figure
axes: matplotlib.Axes
key: str
value: anything
"""
if key == 'x_range':
warnings.warn('x_range is deprecated, use xlim instead', FutureWarning)
axes.set_xlim(value)
elif key == 'y_range':
warnings.warn('y_range is deprecated, use ylim instead', FutureWarning)
axes.set_ylim(value)
elif key == 'ymax':
ymax = value
ymin, _ = axes.get_ylim()
ymin = ymin - (ymax - ymin) * 0.05
axes.set_ylim(ymin, ymax)
elif key == 'figsize':
figure.set_size_inches(value)
def wavefunction1d(wavefunc, potential_vals=None, offset=0, scaling=1, **kwargs):
"""
Plots the amplitude of a single real-valued 1d wave function, along with the potential energy if provided.
Parameters
----------
wavefunc: WaveFunction object
basis and amplitude data of wave function to be plotted
potential_vals: array of float
potential energies, array length must match basis array of `wavefunc`
offset: float
y-offset for the wave function (e.g., shift by eigenenergy)
scaling: float, optional
scaling factor for wave function amplitudes
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
x_vals = wavefunc.basis_labels
y_vals = offset + scaling * wavefunc.amplitudes
offset_vals = [offset] * len(x_vals)
if potential_vals is not None:
axes.plot(x_vals, potential_vals, color='gray')
axes.plot(x_vals, y_vals)
axes.fill_between(x_vals, y_vals, offset_vals, where=(y_vals != offset_vals), interpolate=True)
_process_options(fig, axes, **kwargs)
return fig, axes
def wavefunction1d_discrete(wavefunc, **kwargs):
"""
Plots the amplitude of a real-valued 1d wave function in a discrete basis. (Example: transmon in the charge basis.)
Parameters
----------
wavefunc: WaveFunction object
basis and amplitude data of wave function to be plotted
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
x_vals = wavefunc.basis_labels
width = .75
axes.bar(x_vals, wavefunc.amplitudes, width=width)
axes.set_xticks(x_vals)
axes.set_xticklabels(x_vals)
_process_options(fig, axes, defaults.wavefunction1d_discrete(), **kwargs)
return fig, axes
def wavefunction2d(wavefunc, zero_calibrate=False, **kwargs):
"""
Creates a density plot of the amplitude of a real-valued wave function in 2 "spatial" dimensions.
Parameters
----------
wavefunc: WaveFunctionOnGrid object
basis and amplitude data of wave function to be plotted
zero_calibrate: bool, optional
whether to calibrate plot to zero amplitude
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
min_vals = wavefunc.gridspec.min_vals
max_vals = wavefunc.gridspec.max_vals
if zero_calibrate:
absmax = np.amax(np.abs(wavefunc.amplitudes))
imshow_minval = -absmax
imshow_maxval = absmax
cmap = plt.get_cmap('PRGn')
else:
imshow_minval = np.min(wavefunc.amplitudes)
imshow_maxval = np.max(wavefunc.amplitudes)
cmap = plt.cm.viridis
im = axes.imshow(wavefunc.amplitudes, extent=[min_vals[0], max_vals[0], min_vals[1], max_vals[1]],
cmap=cmap, vmin=imshow_minval, vmax=imshow_maxval, origin='lower', aspect='auto')
divider = make_axes_locatable(axes)
cax = divider.append_axes("right", size="2%", pad=0.05)
fig.colorbar(im, cax=cax)
_process_options(fig, axes, defaults.wavefunction2d(), **kwargs)
return fig, axes
def contours(x_vals, y_vals, func, contour_vals=None, show_colorbar=True, **kwargs):
"""Contour plot of a 2d function `func(x,y)`.
Parameters
----------
x_vals: (ordered) list
x values for the x-y evaluation grid
y_vals: (ordered) list
y values for the x-y evaluation grid
func: function f(x,y)
function for which contours are to be plotted
contour_vals: list of float, optional
contour values can be specified if so desired
show_colorbar: bool, optional
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
x_grid, y_grid = np.meshgrid(x_vals, y_vals)
z_array = func(x_grid, y_grid)
im = axes.contourf(x_grid, y_grid, z_array, levels=contour_vals, cmap=plt.cm.viridis, origin="lower")
if show_colorbar:
divider = make_axes_locatable(axes)
cax = divider.append_axes("right", size="2%", pad=0.05)
fig.colorbar(im, cax=cax)
_process_options(fig, axes, opts=defaults.contours(x_vals, y_vals), **kwargs)
return fig, axes
def matrix(data_matrix, mode='abs', **kwargs):
"""
Create a "skyscraper" plot and a 2d color-coded plot of a matrix.
Parameters
----------
data_matrix: ndarray of float or complex
2d matrix data
mode: str from `constants.MODE_FUNC_DICT`
choice of processing function to be applied to data
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
Figure, (Axes1, Axes2)
figure and axes objects for further editing
"""
if 'fig_ax' in kwargs:
fig, (ax1, ax2) = kwargs['fig_ax']
else:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
ax2 = plt.subplot(1, 2, 2)
matsize = len(data_matrix)
element_count = matsize ** 2 # num. of elements to plot
xgrid, ygrid = np.meshgrid(range(matsize), range(matsize))
xgrid = xgrid.T.flatten() - 0.5 # center bars on integer value of x-axis
ygrid = ygrid.T.flatten() - 0.5 # center bars on integer value of y-axis
zbottom = np.zeros(element_count) # all bars start at z=0
dx = 0.75 * np.ones(element_count) # width of bars in x-direction
dy = dx # width of bars in y-direction (same as x-direction)
modefunction = constants.MODE_FUNC_DICT[mode]
zheight = modefunction(data_matrix).flatten() # height of bars from matrix elements
nrm = mpl.colors.Normalize(0, max(zheight)) # <-- normalize colors to max. data
colors = plt.cm.viridis(nrm(zheight)) # list of colors for each bar
# skyscraper plot
ax1.view_init(azim=210, elev=23)
ax1.bar3d(xgrid, ygrid, zbottom, dx, dy, zheight, color=colors)
ax1.axes.w_xaxis.set_major_locator(plt.IndexLocator(1, -0.5)) # set x-ticks to integers
ax1.axes.w_yaxis.set_major_locator(plt.IndexLocator(1, -0.5)) # set y-ticks to integers
ax1.set_zlim3d([0, max(zheight)])
# 2d plot
ax2.matshow(modefunction(data_matrix), cmap=plt.cm.viridis)
cax, _ = mpl.colorbar.make_axes(ax2, shrink=.75, pad=.02) # add colorbar with normalized range
_ = mpl.colorbar.ColorbarBase(cax, cmap=plt.cm.viridis, norm=nrm)
_process_options(fig, ax1, opts=defaults.matrix(), **kwargs)
return fig, (ax1, ax2)
def data_vs_paramvals(xdata, ydata, label_list=None, **kwargs):
"""Plot of a set of yadata vs xdata.
The individual points correspond to the a provided array of parameter values.
Parameters
----------
xdata, ydata: ndarray
must have compatible shapes for matplotlib.pyplot.plot
label_list: list(str), optional
list of labels associated with the individual curves to be plotted
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
if label_list is None:
axes.plot(xdata, ydata)
else:
for idx, ydataset in enumerate(ydata.T):
axes.plot(xdata, ydataset, label=label_list[idx])
axes.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_process_options(fig, axes, **kwargs)
return fig, axes
def evals_vs_paramvals(specdata, which=-1, subtract_ground=False, label_list=None, **kwargs):
"""Generates a simple plot of a set of eigenvalues as a function of one parameter.
The individual points correspond to the a provided array of parameter values.
Parameters
----------
specdata: SpectrumData
object includes parameter name, values, and resulting eigenenergies
which: int or list(int)
number of desired eigenvalues (sorted from smallest to largest); default: -1, signals all eigenvalues
or: list of specific eigenvalues to include
subtract_ground: bool
whether to subtract the ground state energy
label_list: list(str), optional
list of labels associated with the individual curves to be plotted
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
index_list = utils.process_which(which, specdata.energy_table[0].size)
xdata = specdata.param_vals
ydata = specdata.energy_table[:, index_list]
if subtract_ground:
ydata = (ydata.T - ydata[:, 0]).T
return data_vs_paramvals(xdata, ydata, label_list=label_list,
**defaults.evals_vs_paramvals(specdata, **kwargs))
def matelem_vs_paramvals(specdata, select_elems=4, mode='abs', **kwargs):
"""Generates a simple plot of matrix elements as a function of one parameter.
The individual points correspond to the a provided array of parameter values.
Parameters
----------
specdata: SpectrumData
object includes parameter name, values, and matrix elements
select_elems: int or list
either maximum index of desired matrix elements, or list [(i1, i2), (i3, i4), ...] of index tuples
for specific desired matrix elements
mode: str from `constants.MODE_FUNC_DICT`, optional
choice of processing function to be applied to data (default value = 'abs')
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
tuple(Figure, Axes)
matplotlib objects for further editing
"""
def request_range(sel_elems):
return isinstance(sel_elems, int)
fig, axes = kwargs.get('fig_ax') or plt.subplots()
x = specdata.param_vals
modefunction = constants.MODE_FUNC_DICT[mode]
if request_range(select_elems):
index_pairs = [(row, col) for row in range(select_elems) for col in range(row + 1)]
else:
index_pairs = select_elems
for (row, col) in index_pairs:
y = modefunction(specdata.matrixelem_table[:, row, col])
axes.plot(x, y, label=str(row) + ',' + str(col))
if _LABELLINES_ENABLED:
labelLines(axes.get_lines(), zorder=1.5)
else:
axes.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_process_options(fig, axes, opts=defaults.matelem_vs_paramvals(specdata), **kwargs)
return fig, axes
def print_matrix(matrix, show_numbers=True, **kwargs):
"""Pretty print a matrix, optionally printing the numerical values of the data.
"""
fig, axes = kwargs.get('fig_ax') or plt.subplots()
m = axes.matshow(matrix, cmap=plt.cm.viridis, interpolation='none')
fig.colorbar(m, ax=axes)
if show_numbers:
for y_index in range(matrix.shape[0]):
for x_index in range(matrix.shape[1]):
axes.text(x_index, y_index, "{:.03f}".format(matrix[y_index, x_index]),
va='center', ha='center', fontsize=8, rotation=45, color='white')
# shift the grid
for axis, locs in [(axes.xaxis, np.arange(matrix.shape[1])), (axes.yaxis, np.arange(matrix.shape[0]))]:
axis.set_ticks(locs + 0.5, minor=True)
axis.set(ticks=locs, ticklabels=locs)
axes.grid(True, which='minor')
axes.grid(False, which='major')
_process_options(fig, axes, **kwargs)
return fig, axes
| [
"matplotlib.colorbar.ColorbarBase",
"scqubits.utils.plot_defaults.wavefunction2d",
"matplotlib.pyplot.IndexLocator",
"matplotlib.colorbar.make_axes",
"scqubits.utils.plot_defaults.contours",
"numpy.arange",
"scqubits.utils.misc.process_which",
"scqubits.utils.plot_defaults.evals_vs_paramvals",
"nump... | [((5669, 5694), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axes'], {}), '(axes)\n', (5688, 5694), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((6660, 6687), 'numpy.meshgrid', 'np.meshgrid', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (6671, 6687), True, 'import numpy as np\n'), ((8150, 8173), 'numpy.zeros', 'np.zeros', (['element_count'], {}), '(element_count)\n', (8158, 8173), True, 'import numpy as np\n'), ((9078, 9128), 'matplotlib.colorbar.make_axes', 'mpl.colorbar.make_axes', (['ax2'], {'shrink': '(0.75)', 'pad': '(0.02)'}), '(ax2, shrink=0.75, pad=0.02)\n', (9100, 9128), True, 'import matplotlib as mpl\n'), ((9173, 9234), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['cax'], {'cmap': 'plt.cm.viridis', 'norm': 'nrm'}), '(cax, cmap=plt.cm.viridis, norm=nrm)\n', (9198, 9234), True, 'import matplotlib as mpl\n'), ((11276, 11333), 'scqubits.utils.misc.process_which', 'utils.process_which', (['which', 'specdata.energy_table[0].size'], {}), '(which, specdata.energy_table[0].size)\n', (11295, 11333), True, 'import scqubits.utils.misc as utils\n'), ((1864, 1935), 'warnings.warn', 'warnings.warn', (['"""x_range is deprecated, use xlim instead"""', 'FutureWarning'], {}), "('x_range is deprecated, use xlim instead', FutureWarning)\n", (1877, 1935), False, 'import warnings\n'), ((3159, 3173), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3171, 3173), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4136), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4134, 4136), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4372), 'scqubits.utils.plot_defaults.wavefunction1d_discrete', 'defaults.wavefunction1d_discrete', ([], {}), '()\n', (4370, 4372), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((5027, 5041), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5039, 5041), True, 'import matplotlib.pyplot as plt\n'), 
((5283, 5303), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""PRGn"""'], {}), "('PRGn')\n", (5295, 5303), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5365), 'numpy.min', 'np.min', (['wavefunc.amplitudes'], {}), '(wavefunc.amplitudes)\n', (5344, 5365), True, 'import numpy as np\n'), ((5390, 5417), 'numpy.max', 'np.max', (['wavefunc.amplitudes'], {}), '(wavefunc.amplitudes)\n', (5396, 5417), True, 'import numpy as np\n'), ((5818, 5843), 'scqubits.utils.plot_defaults.wavefunction2d', 'defaults.wavefunction2d', ([], {}), '()\n', (5841, 5843), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((6623, 6637), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6635, 6637), True, 'import matplotlib.pyplot as plt\n'), ((6871, 6896), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axes'], {}), '(axes)\n', (6890, 6896), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((7718, 7730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7728, 7730), True, 'import matplotlib.pyplot as plt\n'), ((7801, 7821), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (7812, 7821), True, 'import matplotlib.pyplot as plt\n'), ((8215, 8237), 'numpy.ones', 'np.ones', (['element_count'], {}), '(element_count)\n', (8222, 8237), True, 'import numpy as np\n'), ((8801, 8826), 'matplotlib.pyplot.IndexLocator', 'plt.IndexLocator', (['(1)', '(-0.5)'], {}), '(1, -0.5)\n', (8817, 8826), True, 'import matplotlib.pyplot as plt\n'), ((8894, 8919), 'matplotlib.pyplot.IndexLocator', 'plt.IndexLocator', (['(1)', '(-0.5)'], {}), '(1, -0.5)\n', (8910, 8919), True, 'import matplotlib.pyplot as plt\n'), ((9973, 9987), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9985, 9987), True, 'import matplotlib.pyplot as plt\n'), ((12618, 12632), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12630, 12632), True, 'import matplotlib.pyplot as plt\n'), ((13493, 
13507), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13505, 13507), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2071), 'warnings.warn', 'warnings.warn', (['"""y_range is deprecated, use ylim instead"""', 'FutureWarning'], {}), "('y_range is deprecated, use ylim instead', FutureWarning)\n", (2013, 2071), False, 'import warnings\n'), ((5176, 5203), 'numpy.abs', 'np.abs', (['wavefunc.amplitudes'], {}), '(wavefunc.amplitudes)\n', (5182, 5203), True, 'import numpy as np\n'), ((7033, 7066), 'scqubits.utils.plot_defaults.contours', 'defaults.contours', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (7050, 7066), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((9272, 9289), 'scqubits.utils.plot_defaults.matrix', 'defaults.matrix', ([], {}), '()\n', (9287, 9289), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((11579, 11626), 'scqubits.utils.plot_defaults.evals_vs_paramvals', 'defaults.evals_vs_paramvals', (['specdata'], {}), '(specdata, **kwargs)\n', (11606, 11626), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((13232, 13271), 'scqubits.utils.plot_defaults.matelem_vs_paramvals', 'defaults.matelem_vs_paramvals', (['specdata'], {}), '(specdata)\n', (13261, 13271), True, 'import scqubits.utils.plot_defaults as defaults\n'), ((13967, 13993), 'numpy.arange', 'np.arange', (['matrix.shape[1]'], {}), '(matrix.shape[1])\n', (13976, 13993), True, 'import numpy as np\n'), ((14009, 14035), 'numpy.arange', 'np.arange', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (14018, 14035), True, 'import numpy as np\n'), ((1478, 1504), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1494, 1504), False, 'import os\n')] |
import torch
from torch import nn
from .utils import EarlyStopping, appendabledict, \
calculate_multiclass_accuracy, calculate_multiclass_f1_score,\
append_suffix, compute_dict_average
from copy import deepcopy
import numpy as np
from torch.utils.data import RandomSampler, BatchSampler
from .categorization import summary_key_dict
class LinearProbe(nn.Module):
    """A single affine layer that maps feature vectors to per-class logits."""

    def __init__(self, input_dim, num_classes=255):
        super().__init__()
        # One linear map from the representation space to the label space.
        self.model = nn.Linear(input_dim, num_classes)

    def forward(self, feature_vectors):
        logits = self.model(feature_vectors)
        return logits
class FullySupervisedLinearProbe(nn.Module):
    """Encoder plus linear probe trained end to end (fully supervised baseline)."""

    def __init__(self, encoder, num_classes=255):
        super().__init__()
        # Deep-copy so probe training never mutates the caller's encoder.
        self.encoder = deepcopy(encoder)
        self.probe = LinearProbe(input_dim=self.encoder.hidden_size,
                                 num_classes=num_classes)

    def forward(self, x):
        # Encode raw observations, then classify the resulting features.
        return self.probe(self.encoder(x))
class ProbeTrainer():
    """Trains one probe per state variable to predict labels from representations.

    One probe / optimizer / LR scheduler / early stopper is created per label key
    (lazily, in ``create_probes``); all of them are trained and evaluated jointly.
    """

    def __init__(self,
                 encoder=None,
                 method_name="my_method",
                 wandb=None,
                 patience=15,
                 num_classes=256,
                 fully_supervised=False,
                 save_dir=".models",
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
                 lr=5e-4,
                 epochs=100,
                 batch_size=64,
                 representation_len=256):
        # encoder may be None, in which case inputs are assumed to already be
        # feature vectors (see ``probe``).
        self.encoder = encoder
        self.wandb = wandb
        self.device = device
        self.fully_supervised = fully_supervised
        self.save_dir = save_dir
        self.num_classes = num_classes
        self.epochs = epochs
        self.lr = lr
        self.batch_size = batch_size
        self.patience = patience
        self.method = method_name
        self.feature_size = representation_len
        self.loss_fn = nn.CrossEntropyLoss()

        # bad convention, but these get set in "create_probes"
        self.probes = self.early_stoppers = self.optimizers = self.schedulers = None

    def create_probes(self, sample_label):
        """Instantiate one probe, early stopper, optimizer and scheduler per label key.

        ``sample_label`` is a single label dict whose keys name the state variables.
        """
        if self.fully_supervised:
            assert self.encoder != None, "for fully supervised you must provide an encoder!"
            self.probes = {k: FullySupervisedLinearProbe(encoder=self.encoder,
                                                         num_classes=self.num_classes).to(self.device) for k in
                           sample_label.keys()}
        else:
            self.probes = {k: LinearProbe(input_dim=self.feature_size,
                                          num_classes=self.num_classes).to(self.device) for k in sample_label.keys()}

        # One early stopper per probe; each checkpoints its probe under save_dir.
        self.early_stoppers = {
            k: EarlyStopping(patience=self.patience, verbose=False, name=k + "_probe", save_dir=self.save_dir)
            for k in sample_label.keys()}

        self.optimizers = {k: torch.optim.Adam(list(self.probes[k].parameters()),
                                               eps=1e-5, lr=self.lr) for k in sample_label.keys()}
        # 'max' mode: schedulers step on validation accuracy, not loss.
        self.schedulers = {
            k: torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizers[k], patience=5, factor=0.2, verbose=True,
                                                           mode='max', min_lr=1e-5) for k in sample_label.keys()}

    def generate_batch(self, episodes, episode_labels):
        """Yield (frames, labels) minibatches, sampling one random timestep per episode."""
        total_steps = sum([len(e) for e in episodes])
        assert total_steps > self.batch_size
        print('Total Steps: {}'.format(total_steps))
        # Episode sampler
        # Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
        sampler = BatchSampler(RandomSampler(range(len(episodes)),
                                           replacement=True, num_samples=total_steps),
                               self.batch_size, drop_last=True)
        for indices in sampler:
            episodes_batch = [episodes[x] for x in indices]
            episode_labels_batch = [episode_labels[x] for x in indices]
            xs, labels = [], appendabledict()
            for ep_ind, episode in enumerate(episodes_batch):
                # Get one sample from this episode
                t = np.random.randint(len(episode))
                xs.append(episode[t])
                labels.append_update(episode_labels_batch[ep_ind][t])
            # Scale raw frames into [0, 1] before handing them to the probe
            # (assumes uint8 pixel observations -- TODO confirm with callers).
            yield torch.stack(xs).float().to(self.device) / 255., labels

    def probe(self, batch, k):
        """Return logits of the probe for label key *k* on *batch*.

        Encodes the batch first unless training is fully supervised (probe
        contains its own encoder) or no encoder was given (inputs are vectors).
        """
        probe = self.probes[k]
        probe.to(self.device)
        if self.fully_supervised:
            # if method is supervised batch is a batch of frames and probe is a full encoder + linear or nonlinear probe
            preds = probe(batch)

        elif not self.encoder:
            # if encoder is None then inputs are vectors
            f = batch.detach()
            assert len(f.squeeze().shape) == 2, "if input is not a batch of vectors you must specify an encoder!"
            preds = probe(f)

        else:
            # Freeze the encoder: only the linear probe receives gradients.
            with torch.no_grad():
                self.encoder.to(self.device)
                f = self.encoder(batch).detach()
            preds = probe(f)
        return preds

    def do_one_epoch(self, episodes, label_dicts):
        """Run one pass over the data; return (mean loss, mean accuracy) per label key.

        Probes whose early stopper already fired are skipped entirely; gradient
        steps are only taken while the corresponding probe is in training mode,
        so the same method doubles as an evaluation pass (see ``evaluate``).
        """
        sample_label = label_dicts[0][0]
        epoch_loss, accuracy = {k + "_loss": [] for k in sample_label.keys() if
                                not self.early_stoppers[k].early_stop}, \
                               {k + "_acc": [] for k in sample_label.keys() if
                                not self.early_stoppers[k].early_stop}
        data_generator = self.generate_batch(episodes, label_dicts)
        for step, (x, labels_batch) in enumerate(data_generator):
            for k, label in labels_batch.items():
                if self.early_stoppers[k].early_stop:
                    continue
                optim = self.optimizers[k]
                optim.zero_grad()

                label = torch.tensor(label).long().to(self.device)
                preds = self.probe(x, k)

                loss = self.loss_fn(preds, label)

                epoch_loss[k + "_loss"].append(loss.detach().item())
                # Argmax over class logits gives the predicted class index.
                preds = preds.cpu().detach().numpy()
                preds = np.argmax(preds, axis=1)
                label = label.cpu().detach().numpy()
                accuracy[k + "_acc"].append(calculate_multiclass_accuracy(preds,
                                                                          label))
                if self.probes[k].training:
                    loss.backward()
                    optim.step()

        epoch_loss = {k: np.mean(loss) for k, loss in epoch_loss.items()}
        accuracy = {k: np.mean(acc) for k, acc in accuracy.items()}

        return epoch_loss, accuracy

    def do_test_epoch(self, episodes, label_dicts):
        """Collect predictions over the whole set; return (accuracy, f1) dicts per key."""
        sample_label = label_dicts[0][0]
        accuracy_dict, f1_score_dict = {}, {}
        pred_dict, all_label_dict = {k: [] for k in sample_label.keys()}, \
                                    {k: [] for k in sample_label.keys()}

        # collect all predictions first
        data_generator = self.generate_batch(episodes, label_dicts)
        for step, (x, labels_batch) in enumerate(data_generator):
            for k, label in labels_batch.items():
                label = torch.tensor(label).long().cpu()
                all_label_dict[k].append(label)
                preds = self.probe(x, k).detach().cpu()
                pred_dict[k].append(preds)

        # Then compute metrics over the concatenated predictions/labels.
        for k in all_label_dict.keys():
            preds, labels = torch.cat(pred_dict[k]).cpu().detach().numpy(),\
                            torch.cat(all_label_dict[k]).cpu().detach().numpy()

            preds = np.argmax(preds, axis=1)
            accuracy = calculate_multiclass_accuracy(preds, labels)
            f1score = calculate_multiclass_f1_score(preds, labels)

            accuracy_dict[k] = accuracy
            f1_score_dict[k] = f1score

        return accuracy_dict, f1_score_dict

    def train(self, tr_eps, val_eps, tr_labels, val_labels):
        """Train all probes until every early stopper fires or ``self.epochs`` is hit."""
        # if not self.encoder:
        #     assert len(tr_eps[0][0].squeeze().shape) == 2, "if input is a batch of vectors you must specify an encoder!"
        sample_label = tr_labels[0][0]

        self.create_probes(sample_label)
        e = 0
        all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])
        while (not all_probes_stopped) and e < self.epochs:
            epoch_loss, accuracy = self.do_one_epoch(tr_eps, tr_labels)
            self.log_results(e, epoch_loss, accuracy)

            val_loss, val_accuracy = self.evaluate(val_eps, val_labels, epoch=e)

            # update all early stoppers
            for k in sample_label.keys():
                if not self.early_stoppers[k].early_stop:
                    self.early_stoppers[k](val_accuracy["val_" + k + "_acc"], self.probes[k])

            # Schedulers track validation accuracy ('max' mode, see create_probes).
            for k, scheduler in self.schedulers.items():
                if not self.early_stoppers[k].early_stop:
                    scheduler.step(val_accuracy['val_' + k + '_acc'])
            e += 1
            all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])

        print("All probes early stopped!")

    def evaluate(self, val_episodes, val_label_dicts, epoch=None):
        """Evaluate all probes in eval mode; return ('val_'-prefixed loss, accuracy) dicts."""
        for k, probe in self.probes.items():
            probe.eval()
        # eval() disables gradient steps inside do_one_epoch (training flag check).
        epoch_loss, accuracy = self.do_one_epoch(val_episodes, val_label_dicts)
        epoch_loss = {"val_" + k: v for k, v in epoch_loss.items()}
        accuracy = {"val_" + k: v for k, v in accuracy.items()}
        self.log_results(epoch, epoch_loss, accuracy)
        for k, probe in self.probes.items():
            probe.train()
        return epoch_loss, accuracy

    def test(self, test_episodes, test_label_dicts, epoch=None):
        """Run the final test pass; return postprocessed (accuracy, f1) dicts."""
        # Reset early-stop flags so every probe is evaluated on the test set.
        for k in self.early_stoppers.keys():
            self.early_stoppers[k].early_stop = False
        for k, probe in self.probes.items():
            probe.eval()
        acc_dict, f1_dict = self.do_test_epoch(test_episodes, test_label_dicts)

        acc_dict, f1_dict = postprocess_raw_metrics(acc_dict, f1_dict)

        print("""In our paper, we report F1 scores and accuracies averaged across each category.
              That is, we take a mean across all state variables in a category to get the average score for that category.
              Then we average all the category averages to get the final score that we report per game for each method.
              These scores are called \'across_categories_avg_acc\' and \'across_categories_avg_f1\' respectively
              We do this to prevent categories with large number of state variables dominating the mean F1 score.
              """)
        self.log_results("Test", acc_dict, f1_dict)
        return acc_dict, f1_dict

    def log_results(self, epoch_idx, *dictionaries):
        """Pretty-print one or more metric dicts for the given epoch."""
        print("Epoch: {}".format(epoch_idx))
        for dictionary in dictionaries:
            for k, v in dictionary.items():
                print("\t {}: {:8.4f}".format(k, v))
            print("\t --")
def postprocess_raw_metrics(acc_dict, f1_dict):
    """Augment raw per-variable metrics with overall and per-category averages.

    Both input dicts are extended in place with category averages plus
    'overall_avg' and 'across_categories_avg' entries, then every key is
    suffixed with '_acc' / '_f1' respectively. Returns the two suffixed dicts.
    """
    # Plain mean over every individual state variable.
    acc_overall = compute_dict_average(acc_dict)
    f1_overall = compute_dict_average(f1_dict)
    # Mean within each category, then the mean of those category means.
    acc_by_category = compute_category_avgs(acc_dict)
    f1_by_category = compute_category_avgs(f1_dict)
    acc_across_categories = compute_dict_average(acc_by_category)
    f1_across_categories = compute_dict_average(f1_by_category)

    acc_dict.update(acc_by_category)
    f1_dict.update(f1_by_category)
    acc_dict["overall_avg"] = acc_overall
    f1_dict["overall_avg"] = f1_overall
    acc_dict["across_categories_avg"] = acc_across_categories
    f1_dict["across_categories_avg"] = f1_across_categories

    # Tag every key with the metric it belongs to.
    acc_dict = append_suffix(acc_dict, "_acc")
    f1_dict = append_suffix(f1_dict, "_f1")

    return acc_dict, f1_dict
def compute_category_avgs(metric_dict):
    """Average metric values within each summary category that has any entries.

    Returns a dict mapping '<category>_avg' to the mean of the metric values
    whose keys belong to that category (per ``summary_key_dict``); categories
    with no matching keys are omitted.
    """
    averages = {}
    for category_name, category_keys in summary_key_dict.items():
        values = [val for key, val in metric_dict.items() if key in category_keys]
        if values:
            averages[category_name + "_avg"] = np.mean(values)
    return averages
| [
"numpy.mean",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss",
"torch.stack",
"numpy.argmax",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"copy.deepcopy",
"torch.no_grad",
"torch.cat"
] | [((473, 531), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'input_dim', 'out_features': 'num_classes'}), '(in_features=input_dim, out_features=num_classes)\n', (482, 531), False, 'from torch import nn\n'), ((763, 780), 'copy.deepcopy', 'deepcopy', (['encoder'], {}), '(encoder)\n', (771, 780), False, 'from copy import deepcopy\n'), ((1951, 1972), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1970, 1972), False, 'from torch import nn\n'), ((12505, 12529), 'numpy.mean', 'np.mean', (['category_values'], {}), '(category_values)\n', (12512, 12529), True, 'import numpy as np\n'), ((3146, 3276), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['self.optimizers[k]'], {'patience': '(5)', 'factor': '(0.2)', 'verbose': '(True)', 'mode': '"""max"""', 'min_lr': '(1e-05)'}), "(self.optimizers[k], patience=5,\n factor=0.2, verbose=True, mode='max', min_lr=1e-05)\n", (3188, 3276), False, 'import torch\n'), ((6639, 6652), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6646, 6652), True, 'import numpy as np\n'), ((6711, 6723), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (6718, 6723), True, 'import numpy as np\n'), ((7730, 7754), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (7739, 7754), True, 'import numpy as np\n'), ((1350, 1375), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1373, 1375), False, 'import torch\n'), ((6259, 6283), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (6268, 6283), True, 'import numpy as np\n'), ((5049, 5064), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5062, 5064), False, 'import torch\n'), ((5977, 5996), 'torch.tensor', 'torch.tensor', (['label'], {}), '(label)\n', (5989, 5996), False, 'import torch\n'), ((7331, 7350), 'torch.tensor', 'torch.tensor', (['label'], {}), '(label)\n', (7343, 7350), False, 'import torch\n'), ((4418, 4433), 'torch.stack', 
'torch.stack', (['xs'], {}), '(xs)\n', (4429, 4433), False, 'import torch\n'), ((7580, 7603), 'torch.cat', 'torch.cat', (['pred_dict[k]'], {}), '(pred_dict[k])\n', (7589, 7603), False, 'import torch\n'), ((7657, 7685), 'torch.cat', 'torch.cat', (['all_label_dict[k]'], {}), '(all_label_dict[k])\n', (7666, 7685), False, 'import torch\n')] |
import argparse
import json
import os
from os import listdir
from os.path import isfile
import shutil
from genson import SchemaBuilder
from enum import Enum
import copy
import flatdict
import pandas as pd
import numpy as np
from collections import OrderedDict
from functools import reduce # forward compatibility for Python 3
import operator
import sys
from echr.utils.folders import make_build_folder
from echr.utils.cli import TAB
from echr.utils.logger import getlogger
from rich.markdown import Markdown
from rich.console import Console
# Module-wide logger and a recording console used for all status output.
log = getlogger()
__console = Console(record=True)
# Separator used for flattened (dotted) field paths throughout this module.
DELIMITER = '.'
# Casts tried in priority order when sorting a mixed-type array
# (see flatten_dataset); OrderedDict keeps the trial order explicit.
type_priority = OrderedDict([
    ('number', float),
    ('integer', int),
    ('string', str)
])
class COL_HINT(str, Enum):
    """How a flattened column should be encoded in the output table."""
    HOT_ONE = 'hot_one'        # expanded into one column per observed value (see normalize)
    POSITIONAL = 'positional'  # left to positional flattening (see flatten_dataset)
def format_structured_json(cases_list):
    """Load case JSON files and split them into flat records plus lookup tables.

    Returns (cases, representatives, extracted_appnos, scl, decision_body);
    the four lookup tables are keyed by the raw (unsplit) 'appno' string.
    """
    res = []
    representents = {}
    extractedapp = {}
    scl = {}
    decision_body = {}
    for name in cases_list:
        with open(name, 'r') as f:
            c = json.load(f)
        # The raw appno (possibly ';'-separated) keys all side tables.
        key = c['appno']
        c['representedby'] = [r for r in c['representedby'] if r != 'N/A']
        representents[key] = {'representedby': c['representedby']}
        extractedapp[key] = {'appnos': c['extractedappno']}
        members = c['decision_body']
        decision_body[key] = {
            'name': [member['name'] for member in members],
            'role': {member['name']: member['role'] for member in members if 'role' in member}
        }
        scl[key] = {'scl': c['scl']}
        # Split multi-valued string fields; keep only the date part of timestamps.
        c['respondent'] = c['respondent'].split(';')
        c['applicability'] = c['applicability'].strip().split(';')
        c['appno'] = key.split(';')[0]
        for date_field in ('decisiondate', 'judgementdate', 'introductiondate', 'kpdate'):
            c[date_field] = c[date_field].split(' ')[0]
        c['separateopinion'] = c['separateopinion'] == 'TRUE'
        # Drop fields folded into the side tables or unused downstream.
        for dropped in ('representedby', 'extractedappno', 'decision_body', 'scl',
                        'documents', 'content', 'externalsources', 'kpthesaurus',
                        '__conclusion', '__articles'):
            del c[dropped]
        if c['issue']:
            c['issue'] = sorted(c['issue'])
        else:
            del c['issue']
        if not c['applicability']:
            del c['applicability']
        res.append(c)
    return res, representents, extractedapp, scl, decision_body
def get_by_path(root, items):
    """Walk *root* through the successive keys/indices in *items*; return the leaf.

    An empty *items* sequence returns *root* itself.
    """
    node = root
    for item in items:
        node = node[item]
    return node
def set_by_path(root, items, value):
    """Assign *value* at the nested location in *root* addressed by *items*."""
    parent = get_by_path(root, items[:-1])
    parent[items[-1]] = value
def determine_schema(X):
    """Infer a JSON schema covering every record in *X* via genson's SchemaBuilder."""
    builder = SchemaBuilder()
    for record in X:
        builder.add_object(record)
    return builder
def get_flat_type_mapping(flat_schema):
    """Map flattened field paths to their JSON-schema type names.

    Keeps only '<path>.type' entries of the flattened schema, stripping the
    'properties.' prefix and the trailing '.type' suffix from each key.
    """
    type_suffix = DELIMITER + 'type'
    mapping = {}
    for full_key in flat_schema.keys():
        if full_key.endswith(type_suffix):
            field = full_key.replace('properties' + DELIMITER, '').replace(type_suffix, '')
            mapping[field] = flat_schema[full_key]
    return mapping
def get_flat_domain_mapping(X, flat_type_mapping):
    """Collect the set of observed values for every flattened field across *X*.

    Array-typed fields contribute their elements, scalar fields the value
    itself. Extraction failures (missing path, unhashable value) are treated
    as best-effort skips: the field is dropped entirely only if nothing was
    ever collected for it.

    Returns a dict mapping flattened field path -> list of observed values.
    """
    flat_domain_mapping = {}
    for x in X:
        flat = flatdict.FlatterDict(x, delimiter='.')
        for k in flat_type_mapping.keys():
            v = flat.get(k)
            if v is not None:
                if k not in flat_domain_mapping:
                    flat_domain_mapping[k] = set()
                type_ = flat_type_mapping[k]
                try:
                    if type_ == 'array':
                        flat_domain_mapping[k].update(get_by_path(x, k.split('.')))
                    else:
                        flat_domain_mapping[k].add(get_by_path(x, k.split('.')))
                # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
                # are no longer swallowed by this best-effort collection.
                except Exception:
                    if not flat_domain_mapping[k]:
                        del flat_domain_mapping[k]
    # Sets are not JSON-serializable downstream; freeze them as lists.
    for k in flat_domain_mapping:
        flat_domain_mapping[k] = list(flat_domain_mapping[k])
    return flat_domain_mapping
def flatten_dataset(X, flat_type_mapping, schema_hints=None):
    """Flatten every record of *X* into a FlatterDict, sorting hinted array fields.

    Fields whose schema hint has a col_type other than POSITIONAL are skipped
    here (they are encoded later in ``normalize``). Array fields hinted with
    'sort': True are sorted; mixed-type arrays are coerced element-wise via
    ``type_priority`` until one cast succeeds, else an Exception is raised.
    """
    if schema_hints is None:
        schema_hints = {}
    flat_X = []
    for x in X:
        flat = flatdict.FlatterDict(x, delimiter=DELIMITER)
        # Work on a deep copy so sorting never mutates the caller's records.
        c_x = copy.deepcopy(x)
        for k in flat_type_mapping.keys():
            col_type = schema_hints.get(k, {}).get('col_type')
            if col_type not in [None, COL_HINT.POSITIONAL]:
                continue
            v = flat.get(k)
            if v is not None:
                sort = schema_hints.get(k, {}).get('sort', False)
                if sort:
                    type_ = flat_type_mapping[k]
                    if type_ == 'array':
                        item_types = flat_type_mapping.get(k + '.items')
                        a = get_by_path(c_x, k.split('.'))
                        # A list of item types means the array mixes types and
                        # may not be directly comparable.
                        if isinstance(item_types, list):
                            try:
                                a = sorted(a)
                            # NOTE(review): bare except -- also catches SystemExit/KeyboardInterrupt.
                            except:
                                print('# Warning: mix-type array with types: {}'.format(', '.join(item_types)))
                                print('# Warning; no comparison operator provided. Try to assess the proper cast...')
                                for t in type_priority:
                                    try:
                                        a = list(map(type_priority[t], a))
                                        print('# Casting \'{}\' to {}'.format(k, t))
                                        break
                                    except:
                                        log.error('Could not cast \'{}\' to {}'.format(k, t))
                                else:
                                    # for-else: no cast succeeded at all.
                                    print('# Error: Could not find any way to sort {}'.format(k))
                                    raise Exception('Could not find any way to sort {}'.format(k))
                        set_by_path(c_x, k.split('.'), sorted(a))
        # Re-flatten the (possibly sorted) copy for the output.
        flat = flatdict.FlatterDict(c_x, delimiter=DELIMITER)
        flat_X.append(flat)
    return flat_X
def hot_one_encoder_on_list(df, column):
    """Count-encode a DataFrame column whose cells are lists of labels.

    Each distinct label x becomes a column '<column>=<x>' holding how many
    times x occurs in that row's list (so it is a hot-one encoding only when
    labels are unique per row). Non-list cells are treated as empty lists.
    The original column is dropped from the result.
    """
    # Normalise cells to lists so factorization sees one flat label stream.
    v = [x if isinstance(x, list) else [] for x in df[column].values]
    l = [len(x) for x in v]
    f, u = pd.factorize(np.concatenate(v))
    n, m = len(v), u.size
    # Row index repeated once per label occurrence, so a single bincount
    # fills the n*m (row, label) count grid.
    i = np.arange(n).repeat(l)
    dummies = pd.DataFrame(
        np.bincount(i * m + f, minlength=n * m).reshape(n, m),
        df.index, map(lambda x: str(column) + '=' + str(x), u)
    )
    # Fix: pandas 2.0 removed the positional 'axis' argument of DataFrame.drop
    # (the old `df.drop(column, 1)` now raises TypeError).
    return df.drop(columns=[column]).join(dummies)
def normalize(X, schema_hints=None):
    """Flatten a list of nested case dicts into a tabular DataFrame.

    *schema_hints* maps flattened field paths to options, notably
    {'col_type': COL_HINT.HOT_ONE} for fields to one-hot encode.
    The 'conclusion' field gets a special ternary encoding:
    1 = violation, -1 = other type, 0 = article absent.

    Returns (df, schema, flat_schema, flat_type_mapping, flat_domain_mapping).
    """
    if schema_hints is None:
        schema_hints = {}

    def hot_one_encoder(df, columns):
        # Scalar categorical columns: plain dummies with 'col=value' names.
        return pd.get_dummies(df, prefix_sep="=", columns=columns)

    schema = determine_schema(X)
    flat_schema = flatdict.FlatDict(schema.to_schema(), delimiter=DELIMITER)
    flat_type_mapping = get_flat_type_mapping(flat_schema)
    flat_domain_mapping = get_flat_domain_mapping(X, flat_type_mapping)
    flat_X = flatten_dataset(X, flat_type_mapping, schema_hints)
    columns_to_encode = [k for k, v in schema_hints.items() if v['col_type'] == COL_HINT.HOT_ONE]
    df = pd.DataFrame(flat_X)
    # Remove raw columns that will be re-added in encoded form below.
    for c in df.columns:
        f = next((k for k in columns_to_encode if c.startswith(k)), None)
        if f:
            # Fix: pandas 2.0 removed the positional 'axis' argument of
            # DataFrame.drop (the old `df.drop(c, 1)` now raises TypeError).
            df = df.drop(columns=c)
    encoded = []
    for c in columns_to_encode:
        type_ = flat_type_mapping[c]
        if type_ == 'array':
            if c == 'conclusion':
                # Ternary encoding per article: 1 violation, -1 no violation, 0 absent.
                articles = set()
                for x in X:
                    for e in x[c]:
                        if 'article' in e:
                            articles.add(e['article'])
                articles = sorted(articles)
                df2 = []
                for x in X:
                    e = []
                    xart = {v['article']: v['type'] for v in x['conclusion'] if 'article' in v}
                    for a in articles:
                        v = 0
                        if a in xart:
                            if xart[a] == 'violation':
                                v = 1
                            else:
                                v = -1
                        e.append(v)
                    df2.append(e)
                df2 = pd.DataFrame(df2, columns=list(map(lambda x: 'ccl_article={}'.format(x), articles)))
                encoded.append(df2)
            else:
                df2 = pd.DataFrame(X)[[c]]
                e = hot_one_encoder_on_list(df2, c)
                encoded.append(e)
        else:
            df2 = pd.DataFrame(X)[c]
            e = hot_one_encoder(df2, [c])
            encoded.append(e)
    df = pd.concat([df] + encoded, axis=1)
    return df, schema, flat_schema, flat_type_mapping, flat_domain_mapping
def run(console, build, title, output_prefix='cases', force=False):
    """Build the release: normalize raw cases, write matrices, zip the artifacts.

    Reads preprocessed case JSON from <build>/raw/preprocessed_documents and
    writes unstructured/structured outputs plus zip archives under <build>.
    """
    __console = console
    # NOTE(review): rebinds the module-level builtin name `print` to the rich
    # console's print for the rest of the process -- deliberate but global.
    global print
    print = __console.print
    print(Markdown("- **Step configuration**"))
    print(TAB + "> Prepare release folder structure")
    paths = ['unstructured', 'structured', 'raw']
    for p in paths:
        make_build_folder(console, os.path.join(build, p), force, strict=False)

    print(Markdown("- **Normalize database**"))
    input_folder = os.path.join(build, 'raw', 'preprocessed_documents')
    cases_files = [os.path.join(input_folder, f) for f in listdir(input_folder)
                   if isfile(os.path.join(input_folder, f)) and '.json' in f]

    print(TAB + "> Prepare unstructured cases [green][DONE]")
    # Unstructured: concatenate every case file into one JSON array by hand.
    with open(os.path.join(build, 'unstructured', 'cases.json'), 'w') as outfile:
        outfile.write('[\n')
        for i, f in enumerate(cases_files):
            with open(f) as json_file:
                data = json.load(json_file)
                json.dump(data, outfile, indent=4)
                if i != len(cases_files) - 1:
                    outfile.write(',\n')
        outfile.write('\n]')

    # Structured
    print(TAB + "> Generate flat cases [green][DONE]")
    flat_cases , representatives, extractedapp, scl, decision_body = format_structured_json(cases_files)
    # NOTE(review): sys.getsizeof is shallow, so this size is approximate.
    print(TAB + "> Flat cases size: {}MiB".format(sys.getsizeof(flat_cases) / 1000))

    # Fields to one-hot encode during normalization (see COL_HINT).
    schema_hints = {
        'article': {
            'col_type': COL_HINT.HOT_ONE
        },
        'documentcollectionid': {
            'col_type': COL_HINT.HOT_ONE
        },
        'applicability': {
            'col_type': COL_HINT.HOT_ONE
        },
        'paragraphs': {
            'col_type': COL_HINT.HOT_ONE
        },
        'conclusion': {
            'col_type': COL_HINT.HOT_ONE,
            'sub_element': 'flatten'
        }
    }

    output_path = os.path.join(build, 'structured')
    with open(os.path.join(output_path, 'flat_cases.json'), 'w') as outfile:
        json.dump(flat_cases, outfile, indent=4)
    with open(os.path.join(output_path, 'schema_hint.json'), 'w') as outfile:
        json.dump(schema_hints, outfile, indent=4)

    X = flat_cases
    df, schema, flat_schema, flat_type_mapping, flat_domain_mapping = normalize(X, schema_hints)
    df.to_json(os.path.join(output_path, '{}.json'.format(output_prefix)), orient='records')
    df.to_csv(os.path.join(output_path, '{}.csv'.format(output_prefix)))
    json_files = [
        ('schema', schema.to_schema()),
        ('flat_schema', flat_schema.as_dict()),
        ('flat_type_mapping', flat_type_mapping),
        ('flat_domain_mapping', flat_domain_mapping)
    ]
    for f in json_files:
        with open(os.path.join(output_path, '{}_{}.json'.format(output_prefix, f[0])), 'w') as outfile:
            json.dump(f[1], outfile, indent=4)
    # Intermediate files only; drop them from the final release.
    os.remove(os.path.join(output_path, 'flat_cases.json'))
    os.remove(os.path.join(output_path, 'cases_flat_schema.json'))
    os.remove(os.path.join(output_path, 'cases_flat_type_mapping.json'))

    print(TAB + '> Generate appnos matrice [green][DONE]')
    matrice_appnos = {}
    for k, v in extractedapp.items():
        matrice_appnos[k] = {e:1 for e in v['appnos']}
    with open(os.path.join(output_path, 'matrice_appnos.json'), 'w') as outfile:
        json.dump(matrice_appnos, outfile, indent=4)

    print(TAB + '> Generate scl matrice [green][DONE]')
    matrice_scl = {}
    for k, v in scl.items():
        matrice_scl[k] = {e: 1 for e in v['scl']}
    with open(os.path.join(output_path, 'matrice_scl.json'), 'w') as outfile:
        json.dump(matrice_scl, outfile, indent=4)

    print(TAB + '> Generate representatives matrice [green][DONE]')
    matrice_representedby = {}
    for k, v in representatives.items():
        matrice_representedby[k] = {e: 1 for e in v['representedby']}
    with open(os.path.join(output_path, 'matrice_representatives.json'), 'w') as outfile:
        json.dump(matrice_representedby, outfile, indent=4)

    print(TAB + '> Generate decision body matrice [green][DONE]')
    matrice_decision_body = {}
    for k, v in decision_body.items():
        matrice_decision_body[k] = {k:v for k,v in v['role'].items()}
    with open(os.path.join(output_path, 'matrice_decision_body.json'), 'w') as outfile:
        json.dump(matrice_decision_body, outfile, indent=4)

    print(TAB + '> Create archives [green][DONE]')
    # Raw
    shutil.make_archive(os.path.join(build, 'raw', 'judgments'), 'zip',
                        os.path.join(build, 'raw', 'judgments'))
    # All
    from zipfile import ZipFile
    with ZipFile(os.path.join(build, 'all.zip'), 'w') as zipObj:
        # Iterate over all the files in directory
        folders = ['unstructured', 'raw', 'structured']
        for f in folders:
            for folderName, _, filenames in os.walk(os.path.join(build, f)):
                for filename in filenames:
                    # Skip existing archives so the zip does not nest itself.
                    if not filename.endswith('.zip'):
                        filePath = os.path.join(folderName, filename)
                        zipObj.write(filePath)
def main(args):
    """CLI entry point: create a recording console and run the normalization step."""
    recording_console = Console(record=True)
    run(recording_console, build=args.build, title=args.title, force=args.f)
def parse_args(parser):
    """Parse command-line arguments from sys.argv.

    Placeholder for future validation of the parsed paths.
    """
    # Check path
    return parser.parse_args()
if __name__ == "__main__":
    # Standalone CLI mirroring the arguments consumed by parse_args()/main().
    parser = argparse.ArgumentParser(description='Normalize any databse of arbitrarily nested documents.')
    parser.add_argument('--build', type=str, default="./build/echr_database/")
    parser.add_argument('--title', type=str)
    parser.add_argument('--schema_hints', type=str)
    parser.add_argument('--output_prefix', type=str)
    parser.add_argument('-f', action='store_true')  # force: overwrite existing build folders
    parser.add_argument('-u', action='store_true')
    args = parse_args(parser)
    main(args)
| [
"copy.deepcopy",
"numpy.arange",
"os.listdir",
"genson.SchemaBuilder",
"argparse.ArgumentParser",
"sys.getsizeof",
"flatdict.FlatterDict",
"numpy.concatenate",
"pandas.DataFrame",
"collections.OrderedDict",
"functools.reduce",
"rich.console.Console",
"pandas.get_dummies",
"numpy.bincount",... | [((550, 561), 'echr.utils.logger.getlogger', 'getlogger', ([], {}), '()\n', (559, 561), False, 'from echr.utils.logger import getlogger\n'), ((575, 595), 'rich.console.Console', 'Console', ([], {'record': '(True)'}), '(record=True)\n', (582, 595), False, 'from rich.console import Console\n'), ((630, 697), 'collections.OrderedDict', 'OrderedDict', (["[('number', float), ('integer', int), ('string', str)]"], {}), "([('number', float), ('integer', int), ('string', str)])\n", (641, 697), False, 'from collections import OrderedDict\n'), ((2576, 2613), 'functools.reduce', 'reduce', (['operator.getitem', 'items', 'root'], {}), '(operator.getitem, items, root)\n', (2582, 2613), False, 'from functools import reduce\n'), ((2747, 2762), 'genson.SchemaBuilder', 'SchemaBuilder', ([], {}), '()\n', (2760, 2762), False, 'from genson import SchemaBuilder\n'), ((7197, 7217), 'pandas.DataFrame', 'pd.DataFrame', (['flat_X'], {}), '(flat_X)\n', (7209, 7217), True, 'import pandas as pd\n'), ((8690, 8723), 'pandas.concat', 'pd.concat', (['([df] + encoded)'], {'axis': '(1)'}), '([df] + encoded, axis=1)\n', (8699, 8723), True, 'import pandas as pd\n'), ((9259, 9311), 'os.path.join', 'os.path.join', (['build', '"""raw"""', '"""preprocessed_documents"""'], {}), "(build, 'raw', 'preprocessed_documents')\n", (9271, 9311), False, 'import os\n'), ((10693, 10726), 'os.path.join', 'os.path.join', (['build', '"""structured"""'], {}), "(build, 'structured')\n", (10705, 10726), False, 'import os\n'), ((13933, 13953), 'rich.console.Console', 'Console', ([], {'record': '(True)'}), '(record=True)\n', (13940, 13953), False, 'from rich.console import Console\n'), ((14178, 14276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Normalize any databse of arbitrarily nested documents."""'}), "(description=\n 'Normalize any databse of arbitrarily nested documents.')\n", (14201, 14276), False, 'import argparse\n'), ((3277, 3315), 'flatdict.FlatterDict', 
'flatdict.FlatterDict', (['x'], {'delimiter': '"""."""'}), "(x, delimiter='.')\n", (3297, 3315), False, 'import flatdict\n'), ((4234, 4278), 'flatdict.FlatterDict', 'flatdict.FlatterDict', (['x'], {'delimiter': 'DELIMITER'}), '(x, delimiter=DELIMITER)\n', (4254, 4278), False, 'import flatdict\n'), ((4293, 4309), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (4306, 4309), False, 'import copy\n'), ((6302, 6319), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (6316, 6319), True, 'import numpy as np\n'), ((6731, 6782), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'prefix_sep': '"""="""', 'columns': 'columns'}), "(df, prefix_sep='=', columns=columns)\n", (6745, 6782), True, 'import pandas as pd\n'), ((8949, 8985), 'rich.markdown.Markdown', 'Markdown', (['"""- **Step configuration**"""'], {}), "('- **Step configuration**')\n", (8957, 8985), False, 'from rich.markdown import Markdown\n'), ((9202, 9238), 'rich.markdown.Markdown', 'Markdown', (['"""- **Normalize database**"""'], {}), "('- **Normalize database**')\n", (9210, 9238), False, 'from rich.markdown import Markdown\n'), ((9331, 9360), 'os.path.join', 'os.path.join', (['input_folder', 'f'], {}), '(input_folder, f)\n', (9343, 9360), False, 'import os\n'), ((10812, 10852), 'json.dump', 'json.dump', (['flat_cases', 'outfile'], {'indent': '(4)'}), '(flat_cases, outfile, indent=4)\n', (10821, 10852), False, 'import json\n'), ((10940, 10982), 'json.dump', 'json.dump', (['schema_hints', 'outfile'], {'indent': '(4)'}), '(schema_hints, outfile, indent=4)\n', (10949, 10982), False, 'import json\n'), ((11674, 11718), 'os.path.join', 'os.path.join', (['output_path', '"""flat_cases.json"""'], {}), "(output_path, 'flat_cases.json')\n", (11686, 11718), False, 'import os\n'), ((11734, 11785), 'os.path.join', 'os.path.join', (['output_path', '"""cases_flat_schema.json"""'], {}), "(output_path, 'cases_flat_schema.json')\n", (11746, 11785), False, 'import os\n'), ((11801, 11858), 'os.path.join', 
'os.path.join', (['output_path', '"""cases_flat_type_mapping.json"""'], {}), "(output_path, 'cases_flat_type_mapping.json')\n", (11813, 11858), False, 'import os\n'), ((12126, 12170), 'json.dump', 'json.dump', (['matrice_appnos', 'outfile'], {'indent': '(4)'}), '(matrice_appnos, outfile, indent=4)\n', (12135, 12170), False, 'import json\n'), ((12414, 12455), 'json.dump', 'json.dump', (['matrice_scl', 'outfile'], {'indent': '(4)'}), '(matrice_scl, outfile, indent=4)\n', (12423, 12455), False, 'import json\n'), ((12765, 12816), 'json.dump', 'json.dump', (['matrice_representedby', 'outfile'], {'indent': '(4)'}), '(matrice_representedby, outfile, indent=4)\n', (12774, 12816), False, 'import json\n'), ((13120, 13171), 'json.dump', 'json.dump', (['matrice_decision_body', 'outfile'], {'indent': '(4)'}), '(matrice_decision_body, outfile, indent=4)\n', (13129, 13171), False, 'import json\n'), ((13258, 13297), 'os.path.join', 'os.path.join', (['build', '"""raw"""', '"""judgments"""'], {}), "(build, 'raw', 'judgments')\n", (13270, 13297), False, 'import os\n'), ((13330, 13369), 'os.path.join', 'os.path.join', (['build', '"""raw"""', '"""judgments"""'], {}), "(build, 'raw', 'judgments')\n", (13342, 13369), False, 'import os\n'), ((1010, 1022), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1019, 1022), False, 'import json\n'), ((6355, 6367), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6364, 6367), True, 'import numpy as np\n'), ((9146, 9168), 'os.path.join', 'os.path.join', (['build', 'p'], {}), '(build, p)\n', (9158, 9168), False, 'import os\n'), ((9370, 9391), 'os.listdir', 'listdir', (['input_folder'], {}), '(input_folder)\n', (9377, 9391), False, 'from os import listdir\n'), ((9566, 9615), 'os.path.join', 'os.path.join', (['build', '"""unstructured"""', '"""cases.json"""'], {}), "(build, 'unstructured', 'cases.json')\n", (9578, 9615), False, 'import os\n'), ((10741, 10785), 'os.path.join', 'os.path.join', (['output_path', '"""flat_cases.json"""'], {}), 
"(output_path, 'flat_cases.json')\n", (10753, 10785), False, 'import os\n'), ((10868, 10913), 'os.path.join', 'os.path.join', (['output_path', '"""schema_hint.json"""'], {}), "(output_path, 'schema_hint.json')\n", (10880, 10913), False, 'import os\n'), ((11624, 11658), 'json.dump', 'json.dump', (['f[1]', 'outfile'], {'indent': '(4)'}), '(f[1], outfile, indent=4)\n', (11633, 11658), False, 'import json\n'), ((12051, 12099), 'os.path.join', 'os.path.join', (['output_path', '"""matrice_appnos.json"""'], {}), "(output_path, 'matrice_appnos.json')\n", (12063, 12099), False, 'import os\n'), ((12342, 12387), 'os.path.join', 'os.path.join', (['output_path', '"""matrice_scl.json"""'], {}), "(output_path, 'matrice_scl.json')\n", (12354, 12387), False, 'import os\n'), ((12681, 12738), 'os.path.join', 'os.path.join', (['output_path', '"""matrice_representatives.json"""'], {}), "(output_path, 'matrice_representatives.json')\n", (12693, 12738), False, 'import os\n'), ((13038, 13093), 'os.path.join', 'os.path.join', (['output_path', '"""matrice_decision_body.json"""'], {}), "(output_path, 'matrice_decision_body.json')\n", (13050, 13093), False, 'import os\n'), ((13431, 13461), 'os.path.join', 'os.path.join', (['build', '"""all.zip"""'], {}), "(build, 'all.zip')\n", (13443, 13461), False, 'import os\n'), ((6044, 6090), 'flatdict.FlatterDict', 'flatdict.FlatterDict', (['c_x'], {'delimiter': 'DELIMITER'}), '(c_x, delimiter=DELIMITER)\n', (6064, 6090), False, 'import flatdict\n'), ((6415, 6454), 'numpy.bincount', 'np.bincount', (['(i * m + f)'], {'minlength': '(n * m)'}), '(i * m + f, minlength=n * m)\n', (6426, 6454), True, 'import numpy as np\n'), ((8590, 8605), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (8602, 8605), True, 'import pandas as pd\n'), ((9769, 9789), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (9778, 9789), False, 'import json\n'), ((9806, 9840), 'json.dump', 'json.dump', (['data', 'outfile'], {'indent': '(4)'}), '(data, outfile, 
indent=4)\n', (9815, 9840), False, 'import json\n'), ((13663, 13685), 'os.path.join', 'os.path.join', (['build', 'f'], {}), '(build, f)\n', (13675, 13685), False, 'import os\n'), ((8451, 8466), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (8463, 8466), True, 'import pandas as pd\n'), ((9421, 9450), 'os.path.join', 'os.path.join', (['input_folder', 'f'], {}), '(input_folder, f)\n', (9433, 9450), False, 'import os\n'), ((10185, 10210), 'sys.getsizeof', 'sys.getsizeof', (['flat_cases'], {}), '(flat_cases)\n', (10198, 10210), False, 'import sys\n'), ((13820, 13854), 'os.path.join', 'os.path.join', (['folderName', 'filename'], {}), '(folderName, filename)\n', (13832, 13854), False, 'import os\n')] |
from math import pi, cos, sin
from numpy.random.mtrand import uniform
from pydesim import Model
from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, \
Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue
from pycsmaca.simulations.modules.app_layer import ControlledSource
from pycsmaca.simulations.modules.station import Station
class _HalfDuplexNetworkBase(Model):
    """
    Base model for a network of half-duplex wireless stations.

    Builds one station per ``sim.params.num_stations``, each with a single
    wireless interface (queue + transmitter + receiver + radio) attached to a
    shared :class:`ConnectionManager`. Subclasses define the topology by
    implementing ``create_source``, ``get_position`` and
    ``write_switch_table`` (and optionally overriding ``create_queue``),
    plus the ``clients``/``server`` views.
    """

    def __init__(self, sim):
        super().__init__(sim)
        if sim.params.num_stations < 2:
            raise ValueError('minimum number of stations in network is 2')
        # Building connection manager:
        self.__conn_manager = ConnectionManager(sim)
        self.__stations = []
        conn_radius = sim.params.connection_radius
        for i in range(sim.params.num_stations):
            # Building elementary components:
            source = self.create_source(i)
            # Worst-case propagation delay over the connection radius.
            max_propagation = conn_radius / sim.params.speed_of_light
            transmitter = Transmitter(sim, max_propagation=max_propagation)
            receiver = Receiver(sim)
            queue = self.create_queue(i, source=source)
            radio = Radio(
                sim, self.__conn_manager,
                connection_radius=conn_radius,
                position=self.get_position(i)
            )
            # Building wireless interfaces (interface addresses are 1-based):
            iface = WirelessInterface(sim, i + 1, queue, transmitter,
                                      receiver, radio)
            # Building station:
            sta = Station(sim, source=source, interfaces=[iface])
            self.__stations.append(sta)
            # Writing switching table:
            self.write_switch_table(i)
        # Adding stations as children:
        self.children['stations'] = self.__stations

    @property
    def destination_address(self):
        # Address all client traffic is sent to; topology-specific.
        raise NotImplementedError

    def create_source(self, index):
        # Return a traffic source for station ``index`` (or None); topology-specific.
        raise NotImplementedError

    def create_queue(self, index, source=None):
        """Return the interface queue for station ``index`` (plain Queue by default)."""
        return Queue(self.sim)

    def get_position(self, index):
        # Return (x, y) coordinates of station ``index``; topology-specific.
        raise NotImplementedError

    def write_switch_table(self, index):
        # Fill the switching table of station ``index``; topology-specific.
        raise NotImplementedError

    @property
    def stations(self):
        return self.__stations

    @property
    def connection_manager(self):
        return self.__conn_manager

    @property
    def num_stations(self):
        return len(self.stations)

    def get_iface(self, index):
        """Return the last interface of station ``index``; raise ValueError if out of bounds."""
        if index < self.num_stations:
            return self.stations[index].interfaces[-1]
        raise ValueError(f'station index {index} out of bounds')

    @property
    def clients(self):
        raise NotImplementedError

    @property
    def server(self):
        # NOTE(review): base class *returns* NotImplementedError instead of
        # raising it — subclasses override, so this is never hit in practice.
        return NotImplementedError

    def __str__(self):
        return 'Network'

    # noinspection PyTypeChecker
    def describe_topology(self):
        """Return a human-readable, multi-line description of the network topology."""
        def str_sid(c):
            # Source ID of a station, or a placeholder if it has no source.
            return c.source.source_id if c.source else '<NONE>'

        def str_peers(iface):
            # Comma-separated addresses of stations reachable from this radio.
            _peers = self.connection_manager.get_peers(iface.radio)
            return ", ".join([str(peer.parent.address) for peer in _peers])

        def str_ifaces(c):
            _prefix = "\n\t\t\t"
            return _prefix + _prefix.join([
                f'[addr:{iface.address}], sends to: {str_peers(iface)}'
                for i, iface in enumerate(c.interfaces)
            ])

        def str_sw_table(c):
            # Render the switching table as "dest via next-hop (interface)" lines.
            d = c.switch.table.as_dict()
            if not d:
                return 'EMPTY'
            return "\n\t\t\t" + "\n\t\t\t".join([
                f'{key} via {val[1]} (interface "{val[0]}")'
                for key, val in d.items()
            ])

        s1 = 'NETWORK TOPOLOGY'
        s2 = f'- num stations: {self.num_stations}'
        s3 = '- clients:\n\t' + '\n\t'.join([
            f'{i}: SID={str_sid(cli)}\n\t\t- interfaces: {str_ifaces(cli)}'
            f'\n\t\t- switching table:{str_sw_table(cli)}'
            for i, cli in enumerate(self.clients)
        ])
        s4 = f'- server:\n\t\t- interfaces: {str_ifaces(self.server)}'
        return '\n'.join([s1, s2, s3, s4])

    # noinspection PyUnresolvedReferences
    def get_stats(self):
        """
        Collect per-client and server statistics as named tuples.

        Returns
        -------
        tuple
            ``((client_fields, clients), (server_fields, server))`` where
            ``clients`` is a list of ``Client`` namedtuples (one per client)
            and ``server`` is a single ``Server`` namedtuple.
        """
        from collections import namedtuple
        client_fields = [
            'index', 'service_time', 'num_retries', 'queue_size', 'tx_busy',
            'rx_busy', 'arrival_intervals', 'num_packets_sent', 'delay', 'sid',
            'num_rx_collided', 'num_rx_success',
        ]
        server_fields = [
            'arrival_intervals', 'num_rx_collided', 'num_rx_success',
            'num_packets_received',
        ]
        client_class = namedtuple('Client', client_fields)
        server_class = namedtuple('Server', server_fields)
        _client_sources = [cli.source for cli in self.clients]
        _client_ifaces = [cli.interfaces[0] for cli in self.clients]
        _srv = self.server
        clients = [
            client_class(
                index=i,
                service_time=iface.transmitter.service_time.mean(),
                num_retries=iface.transmitter.num_retries_vector.mean(),
                queue_size=iface.queue.size_trace.timeavg(),
                tx_busy=iface.transmitter.busy_trace.timeavg(),
                rx_busy=iface.receiver.busy_trace.timeavg(),
                # Source-dependent fields are None for stations without a source.
                arrival_intervals=(
                    src.arrival_intervals.statistic().mean() if src else None),
                num_packets_sent=iface.transmitter.num_sent,
                delay=(
                    _srv.sink.source_delays.get(src.source_id).mean()
                    if src else None),
                sid=(src.source_id if src else None),
                num_rx_collided=iface.receiver.num_collisions,
                num_rx_success=iface.receiver.num_received,
            ) for i, (src, iface) in enumerate(
                zip(_client_sources, _client_ifaces))
        ]
        server = server_class(
            arrival_intervals=_srv.sink.arrival_intervals.statistic().mean(),
            num_rx_collided=_srv.interfaces[0].receiver.num_collisions,
            num_rx_success=_srv.interfaces[0].receiver.num_received,
            num_packets_received=_srv.sink.num_packets_received,
        )
        return (client_fields, clients), (server_fields, server)
class WirelessHalfDuplexLineNetwork(_HalfDuplexNetworkBase):
    """
    Half-duplex line topology: stations sit on the X axis, traffic flows
    toward the last station. Stations 0..N-2 are clients, station N-1 is
    the server.
    """

    def __init__(self, sim):
        super().__init__(sim)

    def create_source(self, index):
        """Attach a random source only to stations listed in ``active_sources``."""
        if index not in self.sim.params.active_sources:
            return None
        params = self.sim.params
        return RandomSource(
            self.sim,
            params.payload_size,
            params.source_interval,
            source_id=index,
            dest_addr=self.destination_address,
        )

    @property
    def destination_address(self):
        """All traffic is addressed to the last station in the line."""
        return self.sim.params.num_stations

    def get_position(self, index):
        """Stations are evenly spaced along the X axis, ``distance`` apart."""
        return index * self.sim.params.distance, 0

    def write_switch_table(self, index):
        """Every non-terminal station forwards toward its right-hand neighbor."""
        if index >= self.sim.params.num_stations - 1:
            return
        station = self.stations[index]
        interface = station.interfaces[0]
        connection = station.get_switch_connection_for(interface)
        station.switch.table.add(
            self.destination_address,
            connection.name,
            interface.address + 1,
        )

    @property
    def clients(self):
        return self.stations[:-1]

    @property
    def server(self):
        return self.stations[-1]
class CollisionDomainNetwork(_HalfDuplexNetworkBase):
    """
    Single-collision-domain topology: all stations are dropped at random
    inside a disc small enough that everyone hears everyone. Station 0 is
    the server; all other stations send to it.
    """

    def __init__(self, sim):
        super().__init__(sim)

    @property
    def destination_address(self):
        """All traffic goes to station 0 (interface address 1)."""
        return 1

    def create_source(self, index):
        """Station 0 is the sink and gets no source; the rest get random sources."""
        if index <= 0:
            return None
        params = self.sim.params
        return RandomSource(
            self.sim,
            params.payload_size,
            params.source_interval,
            source_id=index,
            dest_addr=self.destination_address,
        )

    def get_position(self, index):
        """Place the station uniformly at random inside the shared-hearing disc."""
        area_radius = self.sim.params.connection_radius / 2.1
        # Keep the two uniform() draws in this order to preserve the RNG stream.
        distance = uniform(0.1, 1) * area_radius
        angle = uniform(0, 2 * pi)
        return distance * cos(angle), distance * sin(angle)

    def write_switch_table(self, index):
        """Every client forwards directly to the server's address."""
        if index <= 0:
            return
        station = self.stations[index]
        interface = station.interfaces[0]
        connection = station.get_switch_connection_for(interface)
        station.switch.table.add(
            self.destination_address,
            connection.name,
            self.destination_address,
        )

    @property
    def clients(self):
        return tuple(self.stations[1:])

    @property
    def server(self):
        return self.stations[0]
class CollisionDomainSaturatedNetwork(CollisionDomainNetwork):
    """Collision-domain network whose client queues are always backlogged."""

    def __init__(self, sim):
        super().__init__(sim)

    def create_source(self, index):
        """Clients (index > 0) get a controlled source; the server gets none."""
        if index <= 0:
            return None
        return ControlledSource(
            self.sim,
            self.sim.params.payload_size,
            source_id=index,
            dest_addr=self.destination_address,
        )

    def create_queue(self, index, source=None):
        """Clients use a saturated queue; the server keeps an ordinary queue."""
        if index > 0:
            return SaturatedQueue(self.sim, source=source)
        return Queue(self.sim)
| [
"pycsmaca.simulations.modules.Transmitter",
"pycsmaca.simulations.modules.SaturatedQueue",
"collections.namedtuple",
"pycsmaca.simulations.modules.station.Station",
"pycsmaca.simulations.modules.RandomSource",
"pycsmaca.simulations.modules.ConnectionManager",
"math.cos",
"pycsmaca.simulations.modules.... | [((658, 680), 'pycsmaca.simulations.modules.ConnectionManager', 'ConnectionManager', (['sim'], {}), '(sim)\n', (675, 680), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((2015, 2030), 'pycsmaca.simulations.modules.Queue', 'Queue', (['self.sim'], {}), '(self.sim)\n', (2020, 2030), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((4621, 4656), 'collections.namedtuple', 'namedtuple', (['"""Client"""', 'client_fields'], {}), "('Client', client_fields)\n", (4631, 4656), False, 'from collections import namedtuple\n'), ((4680, 4715), 'collections.namedtuple', 'namedtuple', (['"""Server"""', 'server_fields'], {}), "('Server', server_fields)\n", (4690, 4715), False, 'from collections import namedtuple\n'), ((9247, 9262), 'pycsmaca.simulations.modules.Queue', 'Queue', (['self.sim'], {}), '(self.sim)\n', (9252, 9262), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((997, 1046), 'pycsmaca.simulations.modules.Transmitter', 'Transmitter', (['sim'], {'max_propagation': 'max_propagation'}), '(sim, max_propagation=max_propagation)\n', (1008, 1046), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((1070, 1083), 'pycsmaca.simulations.modules.Receiver', 'Receiver', (['sim'], {}), '(sim)\n', (1078, 1083), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((1381, 1447), 'pycsmaca.simulations.modules.WirelessInterface', 'WirelessInterface', (['sim', '(i + 1)', 'queue', 'transmitter', 'receiver', 'radio'], {}), 
'(sim, i + 1, queue, transmitter, receiver, radio)\n', (1398, 1447), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((1537, 1584), 'pycsmaca.simulations.modules.station.Station', 'Station', (['sim'], {'source': 'source', 'interfaces': '[iface]'}), '(sim, source=source, interfaces=[iface])\n', (1544, 1584), False, 'from pycsmaca.simulations.modules.station import Station\n'), ((6494, 6637), 'pycsmaca.simulations.modules.RandomSource', 'RandomSource', (['self.sim', 'self.sim.params.payload_size', 'self.sim.params.source_interval'], {'source_id': 'index', 'dest_addr': 'self.destination_address'}), '(self.sim, self.sim.params.payload_size, self.sim.params.\n source_interval, source_id=index, dest_addr=self.destination_address)\n', (6506, 6637), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((7723, 7866), 'pycsmaca.simulations.modules.RandomSource', 'RandomSource', (['self.sim', 'self.sim.params.payload_size', 'self.sim.params.source_interval'], {'source_id': 'index', 'dest_addr': 'self.destination_address'}), '(self.sim, self.sim.params.payload_size, self.sim.params.\n source_interval, source_id=index, dest_addr=self.destination_address)\n', (7735, 7866), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((8099, 8117), 'numpy.random.mtrand.uniform', 'uniform', (['(0)', '(2 * pi)'], {}), '(0, 2 * pi)\n', (8106, 8117), False, 'from numpy.random.mtrand import uniform\n'), ((8926, 9039), 'pycsmaca.simulations.modules.app_layer.ControlledSource', 'ControlledSource', (['self.sim', 'self.sim.params.payload_size'], {'source_id': 'index', 'dest_addr': 'self.destination_address'}), '(self.sim, self.sim.params.payload_size, source_id=index,\n 
dest_addr=self.destination_address)\n', (8942, 9039), False, 'from pycsmaca.simulations.modules.app_layer import ControlledSource\n'), ((9192, 9231), 'pycsmaca.simulations.modules.SaturatedQueue', 'SaturatedQueue', (['self.sim'], {'source': 'source'}), '(self.sim, source=source)\n', (9206, 9231), False, 'from pycsmaca.simulations.modules import RandomSource, Queue, Transmitter, Receiver, Radio, ConnectionManager, WirelessInterface, SaturatedQueue\n'), ((8068, 8083), 'numpy.random.mtrand.uniform', 'uniform', (['(0.1)', '(1)'], {}), '(0.1, 1)\n', (8075, 8083), False, 'from numpy.random.mtrand import uniform\n'), ((8149, 8159), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (8152, 8159), False, 'from math import pi, cos, sin\n'), ((8172, 8182), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (8175, 8182), False, 'from math import pi, cos, sin\n')] |
#!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2020 All Rights Reserved.
------------------------------------------------------
File Name : NN
Author : <NAME>
Email: <EMAIL>
Create Time : 2020-09-11 14:29
Description : description what the main function of this file
"""
from stensorflow.ml.nn.layers.layer import Layer
from stensorflow.basic.basic_class.pair import SharedVariablePair
from stensorflow.basic.basic_class.private import PrivateVariable
from stensorflow.ml.nn.layers.input import Input
import tensorflow as tf
import time
import numpy as np
class NN:
    """
    A feed-forward neural network composed of :class:`Layer` objects.

    Layers must be added in topological order (parents before children);
    :meth:`compile` then wires the parent/child links and builds the forward
    and backward computation graphs.
    """

    def __init__(self):
        # Layers in insertion (topological) order.
        self.layers = []

    def addLayer(self, ly: Layer):
        """Register a layer; every parent of ``ly`` must already be registered."""
        for father in ly.fathers:
            if father not in self.layers:
                raise Exception("must add its fathers before adding it to the network")
        self.layers.append(ly)

    def compile(self):
        """Link layers, run the forward pass, and trigger backward from inputs."""
        for ly in self.layers:
            for father in ly.fathers:
                if isinstance(father, Layer):
                    father.add_child(ly)
                else:
                    raise Exception("father must be a layer")
        l_last = self.layers[-1]
        assert isinstance(l_last, Layer)
        l_last.forward()
        # Back-propagation is started from every input layer.
        for ly in self.layers:
            if isinstance(ly, Input):
                ly.backward()

    def get_train_sgd_op(self, learningRate, l2_regularization, momentum=0.0):
        """
        Build the (optionally momentum-based) SGD update op for all weights.

        Parameters
        ----------
        learningRate : float or tf.compat.v1.placeholder
            Step size; may be a placeholder so it can vary per batch.
        l2_regularization : float
            L2 penalty coefficient folded into each weight gradient.
        momentum : float
            Momentum factor; 0.0 yields plain SGD.

        Returns
        -------
        A grouped TensorFlow op that applies one SGD step to every weight.
        """
        train_ops = []
        for ly in self.layers:
            if isinstance(ly, Layer):
                for i in range(len(ly.w)):
                    wi = ly.w[i]
                    assert isinstance(wi, SharedVariablePair) or isinstance(wi, PrivateVariable)
                    # Gradient with L2 weight decay folded in.
                    ploss_pwi = ly.ploss_pw[i] + l2_regularization * wi
                    if momentum > 0.0:
                        # Velocity accumulator: v <- momentum * v + grad.
                        v = SharedVariablePair(ownerL=ploss_pwi.ownerL, ownerR=ploss_pwi.ownerR, shape=ploss_pwi.zeros_like())
                        v.load_from_numpy(np.zeros(shape=ploss_pwi.shape))
                        v_new = momentum * v + ploss_pwi
                        v_up_op = v.assign(v_new)
                        assign_op = wi.assign(wi - learningRate * v_new)
                        train_ops += [v_up_op, assign_op]
                    else:
                        assign_op = wi.assign(wi - learningRate * ploss_pwi)
                        train_ops += [assign_op]
        return tf.group(train_ops)

    def train_sgd(self, learning_rate, batch_num, l2_regularization, sess, momentum=0.0):
        """
        Train the network with SGD for ``batch_num`` batches.

        ``learning_rate`` may be a float (constant rate) or a list of
        per-batch rates, which must contain at least ``batch_num`` entries.
        """
        learning_rate_list = None
        # If a list of learning rates is passed, feed them through a placeholder.
        if isinstance(learning_rate, list):
            learning_rate_list = learning_rate
            self.learning_rate = tf.compat.v1.placeholder(dtype='float64', shape=[])
        else:
            self.learning_rate = learning_rate
        train_op = self.get_train_sgd_op(self.learning_rate, l2_regularization, momentum)
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        if learning_rate_list is not None:
            for i in range(batch_num):
                # Feed a (possibly different) learning rate for each batch.
                print("batch ", i)
                sess.run(train_op, feed_dict={self.learning_rate: learning_rate_list[i]})
                if i % 10 == 0:
                    print("time=", time.time() - start_time)
        else:
            for i in range(batch_num):
                print("batch ", i)
                sess.run(train_op)
                if i % 10 == 0:
                    print("time=", time.time() - start_time)

    def cut_off(self):
        """Invoke ``cut_off`` on every layer in the network."""
        for ly in self.layers:
            assert isinstance(ly, Layer)
            ly.cut_off()

    def predict(self, x):
        raise NotImplementedError
| [
"tensorflow.compat.v1.placeholder",
"tensorflow.group",
"numpy.zeros",
"time.time",
"tensorflow.compat.v1.global_variables_initializer"
] | [((2634, 2653), 'tensorflow.group', 'tf.group', (['train_ops'], {}), '(train_ops)\n', (2642, 2653), True, 'import tensorflow as tf\n'), ((3223, 3234), 'time.time', 'time.time', ([], {}), '()\n', (3232, 3234), False, 'import time\n'), ((2935, 2986), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': '"""float64"""', 'shape': '[]'}), "(dtype='float64', shape=[])\n", (2959, 2986), True, 'import tensorflow as tf\n'), ((3157, 3200), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3198, 3200), True, 'import tensorflow as tf\n'), ((2196, 2227), 'numpy.zeros', 'np.zeros', ([], {'shape': 'ploss_pwi.shape'}), '(shape=ploss_pwi.shape)\n', (2204, 2227), True, 'import numpy as np\n'), ((3540, 3551), 'time.time', 'time.time', ([], {}), '()\n', (3549, 3551), False, 'import time\n'), ((3754, 3765), 'time.time', 'time.time', ([], {}), '()\n', (3763, 3765), False, 'import time\n')] |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import math
import re
import os
import numpy
from ordered_set import OrderedSet
import matplotlib.pyplot as plt
import matplotlib.text as mpl_text
import matplotlib.collections
import matplotlib.patches
from matplotlib.widgets import Slider
from mpl_toolkits import axes_grid1
from armi import runLog
from armi.reactor.flags import Flags
from armi.reactor import grids
def colorGenerator(skippedColors=10):
    """
    Yield color names from the built-in wx color database, indefinitely.

    Parameters
    ----------
    skippedColors: int
        Stride used when walking the color database so that consecutive
        yielded colors differ visibly; without skipping, neighboring entries
        tend to look alike.

    Notes
    -----
    Cycles forever to accommodate large cores, so colors eventually repeat.
    """
    from wx.lib.colourdb import getColourList

    banned = {"WHITE", "CREAM", "BLACK", "MINTCREAM"}
    palette = getColourList()
    # Walk the palette with a moving start offset so repeats are delayed.
    for offset in itertools.cycle(range(20, 20 + skippedColors)):
        for name in palette[offset::skippedColors]:
            if name not in banned:
                yield name
def plotBlockDepthMap(
    core,
    param="pdens",
    fName=None,
    bare=False,
    cmapName="jet",
    labels=(),
    labelFmt="{0:.3f}",
    legendMap=None,
    fontSize=None,
    minScale=None,
    maxScale=None,
    axisEqual=False,
    makeColorBar=False,
    cBarLabel="",
    title="",
    shuffleArrows=False,
    titleSize=25,
    depthIndex=0,
):
    """
    Plot a param distribution in xy space with the ability to page through depth.

    Notes
    -----
    This is useful for visualizing the spatial distribution of a param through the core.
    Blocks could possibly not be in alignment between assemblies, but the depths
    viewable are based on the first fuel assembly.

    Parameters
    ----------
    The kwarg definitions are the same as those of ``plotFaceMap``.

    depthIndex: int
        The index of the elevation to show block params.
        The index is determined by the index of the blocks in the first fuel assembly.
    """
    fuelAssem = core.getFirstAssembly(typeSpec=Flags.FUEL)
    if not fuelAssem:
        raise ValueError(
            "Could not find fuel assembly. "
            "This method uses the first fuel blocks mesh for the axial mesh of the plot. "
            "Cannot proceed without fuel block."
        )

    # block mid point elevation
    elevations = [elev for _b, elev in fuelAssem.getBlocksAndZ()]

    # Build a (num elevations x num assemblies) array of param values.
    data = []
    for elevation in elevations:
        paramValsAtElevation = []
        for a in core:
            paramValsAtElevation.append(a.getBlockAtElevation(elevation).p[param])
        data.append(paramValsAtElevation)
    data = numpy.array(data)

    fig = plt.figure(figsize=(12, 12), dpi=100)

    # Make these now, so they are still referenceable after plotFaceMap.
    patches = _makeAssemPatches(core)
    collection = matplotlib.collections.PatchCollection(
        patches, cmap=cmapName, alpha=1.0
    )
    texts = []

    plotFaceMap(
        core,
        param=param,
        vals="peak",
        data=None,  # max values so legend is set correctly
        bare=bare,
        cmapName=cmapName,
        labels=labels,
        labelFmt=labelFmt,
        legendMap=legendMap,
        fontSize=fontSize,
        minScale=minScale,
        maxScale=maxScale,
        axisEqual=axisEqual,
        makeColorBar=makeColorBar,
        cBarLabel=cBarLabel,
        title=title,
        shuffleArrows=shuffleArrows,
        titleSize=titleSize,
        referencesToKeep=[patches, collection, texts],
    )

    # make space for the slider
    fig.subplots_adjust(bottom=0.15)
    ax_slider = fig.add_axes([0.1, 0.05, 0.8, 0.04])

    # This controls what the slider does: repaint patch colors and labels
    # for the chosen elevation index.
    def update(i):
        # int, since we are indexing an array.
        i = int(i)
        collection.set_array(data[i, :])
        for valToPrint, text in zip(data[i, :], texts):
            text.set_text(labelFmt.format(valToPrint))

    # Slider doesn't seem to work unless assigned to variable
    _slider = DepthSlider(
        ax_slider, "Depth(cm)", elevations, update, "green", valInit=depthIndex
    )

    if fName:
        plt.savefig(fName, dpi=150)
    else:
        plt.show()
    plt.close()

    return fName
def plotFaceMap(
    core,
    param="pdens",
    vals="peak",
    data=None,
    fName=None,
    bare=False,
    cmapName="jet",
    labels=(),
    labelFmt="{0:.3f}",
    legendMap=None,
    fontSize=None,
    minScale=None,
    maxScale=None,
    axisEqual=False,
    makeColorBar=False,
    cBarLabel="",
    title="",
    shuffleArrows=False,
    titleSize=25,
    referencesToKeep=None,
):
    """
    Plot a face map of the core.

    Parameters
    ----------
    core: Core
        The core to plot.

    param : str, optional
        The block-parameter to plot. Default: pdens

    vals : ['peak', 'average', 'sum'], optional
        the type of vals to produce. Will find peak, average, or sum of block values
        in an assembly. Default: peak

    data : list(numeric)
        rather than using param and vals, use the data supplied as is. It must be in the same order as iter(r).

    fName : str, optional
        File name to create. If none, will show on screen.

    bare : bool, optional
        If True, will skip axis labels, etc.

    cmapName : str
        The name of the matplotlib colormap to use. Default: jet
        Other possibilities: http://matplotlib.org/examples/pylab_examples/show_colormaps.html

    labels : iterable(str), optional
        Data labels corresponding to data values.

    labelFmt : str, optional
        A format string that determines how the data is printed if ``labels`` is not provided.

    fontSize : int, optional
        Font size in points

    minScale : float, optional
        The minimum value for the low color on your colormap (to set scale yourself)
        Default: autoscale

    maxScale : float, optional
        The maximum value for the high color on your colormap (to set scale yourself)
        Default: autoscale

    axisEqual : Boolean, optional
        If True, horizontal and vertical axes are scaled equally such that a circle
        appears as a circle rather than an ellipse.
        If False, this scaling constraint is not imposed.

    makeColorBar : Boolean, optional
        If True, a vertical color bar is added on the right-hand side of the plot.
        If False, no color bar is added.

    cBarLabel : String, optional
        If True, this string is the color bar quantity label.
        If False, the color bar will have no label.
        When makeColorBar=False, cBarLabel affects nothing.

    title : String, optional
        If True, the string is added as the plot title.
        If False, no plot title is added.

    shuffleArrows : list, optional
        Adds arrows indicating fuel shuffling maneuvers

    titleSize : int, optional
        Font size of the plot title in points.

    referencesToKeep : list, optional
        References to the parts of the plot (patches, collection, texts)
        to reuse and update, e.g. from ``plotBlockDepthMap``.

    Examples
    --------
    Plotting a BOL assembly type facemap with a legend:
    >>> plotFaceMap(core, param='typeNumAssem', cmapName='RdYlBu')
    """
    if referencesToKeep:
        # Reuse artists created by the caller (e.g. plotBlockDepthMap).
        patches, collection, texts = referencesToKeep
    else:
        plt.figure(figsize=(12, 12), dpi=100)
        # set patch (shapes such as hexagon) heat map values
        patches = _makeAssemPatches(core)
        collection = matplotlib.collections.PatchCollection(
            patches, cmap=cmapName, alpha=1.0
        )
        texts = []

    ax = plt.gca()
    plt.title(title, size=titleSize)

    # get param vals
    if data is None:
        data = []
        for a in core:
            if vals == "peak":
                data.append(a.getMaxParam(param))
            elif vals == "average":
                data.append(a.calcAvgParam(param))
            elif vals == "sum":
                data.append(a.calcTotalParam(param))
            else:
                raise ValueError(
                    "{0} is an invalid entry for `vals` in plotFaceMap. Use peak, average, or sum.".format(
                        vals
                    )
                )
    if not labels:
        labels = [None] * len(data)
    if len(data) != len(labels):
        raise ValueError(
            "Data had length {}, but lables had length {}. "
            "They should be equal length.".format(len(data), len(labels))
        )

    collection.set_array(numpy.array(data))
    if minScale or maxScale:
        collection.set_clim([minScale, maxScale])
    ax.add_collection(collection)
    collection.norm.autoscale(numpy.array(data))

    # Makes text in the center of each shape displaying the values.
    _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize)

    if makeColorBar:  # allow a color bar option
        # A second collection is needed: the first one is already attached to the axes.
        collection2 = matplotlib.collections.PatchCollection(
            patches, cmap=cmapName, alpha=1.0
        )
        collection2.set_array(numpy.array(data))

        if "radial" in cBarLabel:
            colbar = plt.colorbar(collection2, ticks=[x + 1 for x in range(max(data))])
        else:
            colbar = plt.colorbar(collection2)

        colbar.set_label(cBarLabel, size=20)
        colbar.ax.tick_params(labelsize=16)

    if legendMap is not None:
        legend = _createFaceMapLegend(
            legendMap, matplotlib.cm.get_cmap(cmapName), collection.norm
        )
    else:
        legend = None

    if axisEqual:  # don't "squish" patches vertically or horizontally
        ax.set_aspect("equal", "datalim")

    ax.autoscale_view(tight=True)

    # make it 2-D, for now...
    shuffleArrows = shuffleArrows or []
    for (sourceCoords, destinationCoords) in shuffleArrows:
        ax.annotate(
            "",
            xy=destinationCoords[:2],
            xytext=sourceCoords[:2],
            arrowprops={"arrowstyle": "->", "color": "white"},
        )

    if bare:
        # Strip ticks and frame for an unadorned face map.
        ax.set_xticks([])
        ax.set_yticks([])
        ax.spines["right"].set_visible(False)
        ax.spines["top"].set_visible(False)
        ax.spines["left"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
    else:
        plt.xlabel("x (cm)")
        plt.ylabel("y (cm)")

    if fName:
        if legend:
            # expand so the legend fits if necessary
            pltKwargs = {"bbox_extra_artists": (legend,), "bbox_inches": "tight"}
        else:
            pltKwargs = {}
        try:
            plt.savefig(fName, dpi=150, **pltKwargs)
        except IOError:
            runLog.warning(
                "Cannot update facemap at {0}: IOError. Is the file open?"
                "".format(fName)
            )
    elif referencesToKeep:
        # Don't show yet, since it will be updated.
        return fName
    else:
        plt.show()
    plt.close()

    return fName
def _makeAssemPatches(core):
    """
    Return a list of assembly shaped patch for each assembly.

    Hexagonal patches are used for hex grids, rectangles for Cartesian
    grids; ThetaRZ grids are not supported.
    """
    patches = []

    if isinstance(core.spatialGrid, grids.HexGrid):
        nSides = 6
    elif isinstance(core.spatialGrid, grids.ThetaRZGrid):
        raise ValueError(
            "This plot function is not currently supported for ThetaRZGrid grids."
        )
    else:
        nSides = 4

    # NOTE(review): pitch is used as a scalar for hex grids but indexed as a
    # 2-tuple for Cartesian grids below — getAssemblyPitch presumably returns
    # a grid-type-dependent value; confirm against the Core API.
    pitch = core.getAssemblyPitch()
    for a in core:
        x, y = a.getLocationObject().coords(pitch)
        if nSides == 6:
            # Hexagon circumradius is pitch / sqrt(3); rotate flat-side-up.
            assemPatch = matplotlib.patches.RegularPolygon(
                (x, y), nSides, pitch / math.sqrt(3), orientation=math.pi / 2.0
            )
        elif nSides == 4:
            # for rectangle x, y is defined as sides instead of center
            assemPatch = matplotlib.patches.Rectangle(
                (x - pitch[0] / 2, y - pitch[1] / 2), *pitch
            )
        else:
            raise ValueError(f"Unexpected number of sides: {nSides}.")
        patches.append(assemPatch)
    return patches
def _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize):
"""Write param values down, and return text so it can be edited later."""
pitch = core.getAssemblyPitch()
for a, val, label in zip(core, data, labels):
x, y = a.getLocationObject().coords(pitch)
# Write text on top of patch locations.
if label is None and labelFmt is not None:
# Write the value
labelText = labelFmt.format(val)
text = ax.text(
x, y, labelText, zorder=1, ha="center", va="center", fontsize=fontSize
)
elif label is not None:
text = ax.text(
x, y, label, zorder=1, ha="center", va="center", fontsize=fontSize
)
else:
# labelFmt was none, so they don't want any text plotted
continue
texts.append(text)
def _createFaceMapLegend(legendMap, cmap, norm):
    """
    Make special assembly-legend for the assembly face map plot with assembly counts.

    ``legendMap`` is an iterable of ``(value, label, description)`` tuples;
    ``cmap``/``norm`` map each value to the same color used on the face map.
    """

    class AssemblyLegend(object):
        """
        Custom Legend artist handler.

        Matplotlib allows you to define a class that implements ``legend_artist`` to give you
        full control over how the legend keys and labels are drawn. This is done here to get
        Hexagons with Letters in them on the legend, which is not a built-in legend option.

        See: http://matplotlib.org/users/legend_guide.html#implementing-a-custom-legend-handler
        """

        def legend_artist(self, legend, orig_handle, fontsize, handlebox):
            letter, index = orig_handle
            x0, y0 = handlebox.xdescent, handlebox.ydescent
            width, height = handlebox.width, handlebox.height
            # Center of the legend key box.
            x = x0 + width / 2.0
            y = y0 + height / 2.0
            # Color the hexagon the same way the face map colors this value.
            normVal = norm(index)
            colorRgb = cmap(normVal)
            patch = matplotlib.patches.RegularPolygon(
                (x, y),
                6,
                height,
                orientation=math.pi / 2.0,
                facecolor=colorRgb,
                transform=handlebox.get_transform(),
            )
            handlebox.add_artist(patch)
            # Overlay the letter on top of the hexagon.
            txt = mpl_text.Text(x=x, y=y, text=letter, ha="center", va="center", size=7)
            handlebox.add_artist(txt)
            return (patch, txt)

    ax = plt.gca()
    keys = []
    labels = []

    for value, label, description in legendMap:
        keys.append((label, value))
        labels.append(description)

    legend = ax.legend(
        keys,
        labels,
        handler_map={tuple: AssemblyLegend()},
        loc="center left",
        bbox_to_anchor=(1.0, 0.5),
        frameon=False,
        prop={"size": 9},
    )
    return legend
class DepthSlider(Slider):
"""
Page slider used to view params at different depths.
"""
def __init__(
self,
ax,
sliderLabel,
depths,
updateFunc,
selectedDepthColor,
fontsize=8,
valInit=0,
**kwargs,
):
# The color of the currently displayed depth page.
self.selectedDepthColor = selectedDepthColor
self.nonSelectedDepthColor = "w"
self.depths = depths
# Make the selection depth buttons
self.depthSelections = []
numDepths = float(len(depths))
rectangleBot = 0
textYCoord = 0.5
# startBoundaries go from zero to just below 1.
leftBoundary = [i / numDepths for i, _depths in enumerate(depths)]
for leftBoundary, depth in zip(leftBoundary, depths):
# First depth (leftBoundary==0) is on, rest are off.
if leftBoundary == 0:
color = self.selectedDepthColor
else:
color = self.nonSelectedDepthColor
depthSelectBox = matplotlib.patches.Rectangle(
(leftBoundary, rectangleBot),
1.0 / numDepths,
1,
transform=ax.transAxes,
facecolor=color,
)
ax.add_artist(depthSelectBox)
self.depthSelections.append(depthSelectBox)
# Make text halfway into box
textXCoord = leftBoundary + 0.5 / numDepths
ax.text(
textXCoord,
textYCoord,
"{:.1f}".format(depth),
ha="center",
va="center",
transform=ax.transAxes,
fontsize=fontsize,
)
# Make forward and backward button
backwardArrow, forwardArrow = "$\u25C0$", "$\u25B6$"
divider = axes_grid1.make_axes_locatable(ax)
buttonWidthPercent = "5%"
backwardAxes = divider.append_axes("right", size=buttonWidthPercent, pad=0.03)
forwardAxes = divider.append_axes("right", size=buttonWidthPercent, pad=0.03)
self.backButton = matplotlib.widgets.Button(
backwardAxes,
label=backwardArrow,
color=self.nonSelectedDepthColor,
hovercolor=self.selectedDepthColor,
)
self.backButton.label.set_fontsize(fontsize)
self.backButton.on_clicked(self.previous)
self.forwardButton = matplotlib.widgets.Button(
forwardAxes,
label=forwardArrow,
color=self.nonSelectedDepthColor,
hovercolor=self.selectedDepthColor,
)
self.forwardButton.label.set_fontsize(fontsize)
self.forwardButton.on_clicked(self.next)
# init at end since slider will set val to 0, and it needs to have state
# setup before doing that
Slider.__init__(self, ax, sliderLabel, 0, len(depths), valinit=0, **kwargs)
self.on_changed(updateFunc)
self.set_val(valInit) # need to set after updateFunc is added.
# Turn off value visibility since the buttons text shows the value
self.valtext.set_visible(False)
def set_val(self, val):
"""
Set the value and update the color.
Notes
-----
valmin/valmax are set on the parent to 0 and len(depths).
"""
val = int(val)
# valmax is not allowed, since it is out of the array.
# valmin is allowed since 0 index is in depth array.
if val < self.valmin or val >= self.valmax:
# invalid, so ignore
return
# activate color is first since we still have access to self.val
self.updatePageDepthColor(val)
Slider.set_val(self, val)
def next(self, _event):
"""Move forward to the next depth (page)."""
self.set_val(self.val + 1)
def previous(self, _event):
"""Move backward to the previous depth (page)."""
self.set_val(self.val - 1)
def updatePageDepthColor(self, newVal):
"""Update the page colors."""
self.depthSelections[self.val].set_facecolor(self.nonSelectedDepthColor)
self.depthSelections[newVal].set_facecolor(self.selectedDepthColor)
def plotAssemblyTypes(
blueprints,
coreName,
assems=None,
plotNumber=1,
maxAssems=None,
showBlockAxMesh=True,
):
"""
Generate a plot showing the axial block and enrichment distributions of each assembly type in the core.
Parameters
----------
bluepprints: Blueprints
The blueprints to plot assembly types of.
assems: list
list of assembly objects to be plotted.
plotNumber: integer
number of uniquely identify the assembly plot from others and to prevent plots from being overwritten.
maxAssems: integer
maximum number of assemblies to plot in the assems list.
showBlockAxMesh: bool
if true, the axial mesh information will be displayed on the right side of the assembly plot.
"""
if assems is None:
assems = list(blueprints.assemblies.values())
if not isinstance(assems, (list, set, tuple)):
assems = [assems]
if not isinstance(plotNumber, int):
raise TypeError("Plot number should be an integer")
if maxAssems is not None and not isinstance(maxAssems, int):
raise TypeError("Maximum assemblies should be an integer")
numAssems = len(assems)
if maxAssems is None:
maxAssems = numAssems
# Set assembly/block size constants
yBlockHeights = []
yBlockAxMesh = OrderedSet()
assemWidth = 5.0
assemSeparation = 0.3
xAssemLoc = 0.5
xAssemEndLoc = numAssems * (assemWidth + assemSeparation) + assemSeparation
# Setup figure
fig, ax = plt.subplots(figsize=(15, 15), dpi=300)
for index, assem in enumerate(assems):
isLastAssem = True if index == (numAssems - 1) else False
(xBlockLoc, yBlockHeights, yBlockAxMesh) = _plotBlocksInAssembly(
ax,
assem,
isLastAssem,
yBlockHeights,
yBlockAxMesh,
xAssemLoc,
xAssemEndLoc,
showBlockAxMesh,
)
xAxisLabel = re.sub(" ", "\n", assem.getType().upper())
ax.text(
xBlockLoc + assemWidth / 2.0,
-5,
xAxisLabel,
fontsize=13,
ha="center",
va="top",
)
xAssemLoc += assemWidth + assemSeparation
# Set up plot layout
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.yaxis.set_ticks_position("left")
yBlockHeights.insert(0, 0.0)
yBlockHeights.sort()
yBlockHeightDiffs = numpy.diff(
yBlockHeights
) # Compute differential heights between each block
ax.set_yticks([0.0] + list(set(numpy.cumsum(yBlockHeightDiffs))))
ax.xaxis.set_visible(False)
ax.set_title("Assembly Designs for {}".format(coreName), y=1.03)
ax.set_ylabel("Thermally Expanded Axial Heights (cm)".upper(), labelpad=20)
ax.set_xlim([0.0, 0.5 + maxAssems * (assemWidth + assemSeparation)])
# Plot and save figure
ax.plot()
figName = coreName + "AssemblyTypes{}.png".format(plotNumber)
runLog.debug("Writing assem layout {} in {}".format(figName, os.getcwd()))
fig.savefig(figName)
plt.close(fig)
return figName
def _plotBlocksInAssembly(
axis,
assem,
isLastAssem,
yBlockHeights,
yBlockAxMesh,
xAssemLoc,
xAssemEndLoc,
showBlockAxMesh,
):
# Set dictionary of pre-defined block types and colors for the plot
lightsage = "xkcd:light sage"
blockTypeColorMap = collections.OrderedDict(
{
"fuel": "tomato",
"shield": "cadetblue",
"reflector": "darkcyan",
"aclp": "lightslategrey",
"plenum": "white",
"duct": "plum",
"control": lightsage,
"handling socket": "lightgrey",
"grid plate": "lightgrey",
"inlet nozzle": "lightgrey",
}
)
# Initialize block positions
blockWidth = 5.0
yBlockLoc = 0
xBlockLoc = xAssemLoc
xTextLoc = xBlockLoc + blockWidth / 20.0
for b in assem:
blockHeight = b.getHeight()
blockXsId = b.p.xsType
yBlockCenterLoc = yBlockLoc + blockHeight / 2.5
# Get the basic text label for the block
try:
blockType = [
bType
for bType in blockTypeColorMap.keys()
if b.hasFlags(Flags.fromString(bType))
][0]
color = blockTypeColorMap[blockType]
except IndexError:
blockType = b.getType()
color = "grey"
# Get the detailed text label for the block
dLabel = ""
if b.hasFlags(Flags.FUEL):
dLabel = " {:0.2f}%".format(b.getFissileMassEnrich() * 100)
elif b.hasFlags(Flags.CONTROL):
blockType = "ctrl"
dLabel = " {:0.2f}%".format(b.getBoronMassEnrich() * 100)
dLabel += " ({})".format(blockXsId)
# Set up block rectangle
blockPatch = matplotlib.patches.Rectangle(
(xBlockLoc, yBlockLoc),
blockWidth,
blockHeight,
facecolor=color,
alpha=0.7,
edgecolor="k",
lw=1.0,
ls="solid",
)
axis.add_patch(blockPatch)
axis.text(
xTextLoc,
yBlockCenterLoc,
blockType.upper() + dLabel,
ha="left",
fontsize=10,
)
yBlockLoc += blockHeight
yBlockHeights.append(yBlockLoc)
# Add location, block heights, and axial mesh points to ordered set
yBlockAxMesh.add((yBlockCenterLoc, blockHeight, b.p.axMesh))
# Add the block heights, block number of axial mesh points on the far right of the plot.
if isLastAssem and showBlockAxMesh:
xEndLoc = 0.5 + xAssemEndLoc
for bCenter, bHeight, axMeshPoints in yBlockAxMesh:
axis.text(
xEndLoc,
bCenter,
"{} cm ({})".format(bHeight, axMeshPoints),
fontsize=10,
ha="left",
)
return xBlockLoc, yBlockHeights, yBlockAxMesh
| [
"matplotlib.pyplot.ylabel",
"math.sqrt",
"ordered_set.OrderedSet",
"numpy.array",
"armi.reactor.flags.Flags.fromString",
"matplotlib.pyplot.xlabel",
"wx.lib.colourdb.getColourList",
"numpy.diff",
"matplotlib.pyplot.close",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"collections.OrderedDict",... | [((1557, 1572), 'wx.lib.colourdb.getColourList', 'getColourList', ([], {}), '()\n', (1570, 1572), False, 'from wx.lib.colourdb import getColourList\n'), ((3398, 3415), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (3409, 3415), False, 'import numpy\n'), ((3427, 3464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)', 'dpi': '(100)'}), '(figsize=(12, 12), dpi=100)\n', (3437, 3464), True, 'import matplotlib.pyplot as plt\n'), ((4942, 4953), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4951, 4953), True, 'import matplotlib.pyplot as plt\n'), ((8309, 8318), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8316, 8318), True, 'import matplotlib.pyplot as plt\n'), ((8324, 8356), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': 'titleSize'}), '(title, size=titleSize)\n', (8333, 8356), True, 'import matplotlib.pyplot as plt\n'), ((11570, 11581), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11579, 11581), True, 'import matplotlib.pyplot as plt\n'), ((14996, 15005), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15003, 15005), True, 'import matplotlib.pyplot as plt\n'), ((21009, 21021), 'ordered_set.OrderedSet', 'OrderedSet', ([], {}), '()\n', (21019, 21021), False, 'from ordered_set import OrderedSet\n'), ((21203, 21242), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)', 'dpi': '(300)'}), '(figsize=(15, 15), dpi=300)\n', (21215, 21242), True, 'import matplotlib.pyplot as plt\n'), ((22195, 22220), 'numpy.diff', 'numpy.diff', (['yBlockHeights'], {}), '(yBlockHeights)\n', (22205, 22220), False, 'import numpy\n'), ((22827, 22841), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (22836, 22841), True, 'import matplotlib.pyplot as plt\n'), ((23153, 23427), 'collections.OrderedDict', 'collections.OrderedDict', (["{'fuel': 'tomato', 'shield': 'cadetblue', 'reflector': 'darkcyan', 'aclp':\n 'lightslategrey', 'plenum': 
'white', 'duct': 'plum', 'control':\n lightsage, 'handling socket': 'lightgrey', 'grid plate': 'lightgrey',\n 'inlet nozzle': 'lightgrey'}"], {}), "({'fuel': 'tomato', 'shield': 'cadetblue',\n 'reflector': 'darkcyan', 'aclp': 'lightslategrey', 'plenum': 'white',\n 'duct': 'plum', 'control': lightsage, 'handling socket': 'lightgrey',\n 'grid plate': 'lightgrey', 'inlet nozzle': 'lightgrey'})\n", (23176, 23427), False, 'import collections\n'), ((4880, 4907), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fName'], {'dpi': '(150)'}), '(fName, dpi=150)\n', (4891, 4907), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4934, 4936), True, 'import matplotlib.pyplot as plt\n'), ((8023, 8060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)', 'dpi': '(100)'}), '(figsize=(12, 12), dpi=100)\n', (8033, 8060), True, 'import matplotlib.pyplot as plt\n'), ((9208, 9225), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (9219, 9225), False, 'import numpy\n'), ((9370, 9387), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (9381, 9387), False, 'import numpy\n'), ((10936, 10956), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (cm)"""'], {}), "('x (cm)')\n", (10946, 10956), True, 'import matplotlib.pyplot as plt\n'), ((10965, 10985), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (cm)"""'], {}), "('y (cm)')\n", (10975, 10985), True, 'import matplotlib.pyplot as plt\n'), ((17275, 17309), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'axes_grid1.make_axes_locatable', (['ax'], {}), '(ax)\n', (17305, 17309), False, 'from mpl_toolkits import axes_grid1\n'), ((19153, 19178), 'matplotlib.widgets.Slider.set_val', 'Slider.set_val', (['self', 'val'], {}), '(self, val)\n', (19167, 19178), False, 'from matplotlib.widgets import Slider\n'), ((9727, 9744), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (9738, 9744), False, 'import numpy\n'), ((9904, 9929), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', (['collection2'], {}), '(collection2)\n', (9916, 9929), True, 'import matplotlib.pyplot as plt\n'), ((11221, 11261), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fName'], {'dpi': '(150)'}), '(fName, dpi=150, **pltKwargs)\n', (11232, 11261), True, 'import matplotlib.pyplot as plt\n'), ((11554, 11564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11562, 11564), True, 'import matplotlib.pyplot as plt\n'), ((14845, 14915), 'matplotlib.text.Text', 'mpl_text.Text', ([], {'x': 'x', 'y': 'y', 'text': 'letter', 'ha': '"""center"""', 'va': '"""center"""', 'size': '(7)'}), "(x=x, y=y, text=letter, ha='center', va='center', size=7)\n", (14858, 14915), True, 'import matplotlib.text as mpl_text\n'), ((22784, 22795), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22793, 22795), False, 'import os\n'), ((12224, 12236), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (12233, 12236), False, 'import math\n'), ((22321, 22352), 'numpy.cumsum', 'numpy.cumsum', (['yBlockHeightDiffs'], {}), '(yBlockHeightDiffs)\n', (22333, 22352), False, 'import numpy\n'), ((24043, 24066), 'armi.reactor.flags.Flags.fromString', 'Flags.fromString', (['bType'], {}), '(bType)\n', (24059, 24066), False, 'from armi.reactor.flags import Flags\n')] |
"""Class for a collection of grid properties"""
version = '24th November 2021'
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
log.debug('property.py version ' + version)
import os
import numpy as np
import resqpy.olio.ab_toolbox as abt
import resqpy.olio.box_utilities as bxu
import resqpy.olio.load_data as ld
import resqpy.olio.write_data as wd
import resqpy.olio.xml_et as rqet
from .property_collection import PropertyCollection
from .property_common import selective_version_of_collection
class GridPropertyCollection(PropertyCollection):
"""Class for RESQML Property collection for an IJK Grid, inheriting from PropertyCollection."""
def __init__(self, grid = None, property_set_root = None, realization = None):
"""Creates a new property collection related to an IJK grid.
arguments:
grid (grid.Grid object, optional): must be present unless creating a completely blank, empty collection
property_set_root (optional): if present, the collection is populated with the properties defined in the xml tree
of the property set; grid must not be None when using this argument
realization (integer, optional): if present, the single realisation (within an ensemble) that this collection is for;
if None, then the collection is either covering a whole ensemble (individual properties can each be flagged with a
realisation number), or is for properties that do not have multiple realizations
returns:
the new GridPropertyCollection object
note:
usually a grid should be passed, however a completely blank collection may be created prior to using
collection inheritance methods to populate from another collection, in which case the grid can be lazily left
as None here
:meta common:
"""
if grid is not None:
log.debug('initialising grid property collection for grid: ' + str(rqet.citation_title_for_node(grid.root)))
log.debug('grid uuid: ' + str(grid.uuid))
super().__init__(support = grid, property_set_root = property_set_root, realization = realization)
self._copy_support_to_grid_attributes()
# NB: RESQML documentation is not clear which order is correct; should be kept consistent with same data in fault.py
# face_index_map maps from (axis, p01) to face index value in range 0..5
# self.face_index_map = np.array([[0, 1], [4, 2], [5, 3]], dtype = int)
self.face_index_map = np.array([[0, 1], [2, 4], [5, 3]], dtype = int) # order: top, base, J-, I+, J+, I-
# and the inverse, maps from 0..5 to (axis, p01)
# self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 1], [2, 1], [1, 0], [2, 0]], dtype = int)
self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype = int)
def _copy_support_to_grid_attributes(self):
# following three pseudonyms are for backward compatibility
self.grid = self.support
self.grid_root = self.support_root
self.grid_uuid = self.support_uuid
def set_grid(self, grid, grid_root = None, modify_parts = True):
"""Sets the supporting representation object for the collection to be the given grid.
note:
this method does not need to be called if the grid was passed during object initialisation.
"""
self.set_support(support = grid, modify_parts = modify_parts)
self._copy_support_to_grid_attributes()
def h5_slice_for_box(self, part, box):
"""Returns a subset of the array for part, without loading the whole array (unless already cached).
arguments:
part (string): the part name for which the array slice is required
box (numpy int array of shape (2, 3)): the min, max indices for K, J, I axes for which an array
extract is required
returns:
numpy array that is a hyper-slice of the hdf5 array
note:
this method always fetches from the hdf5 file and does not attempt local caching; the whole array
is not loaded; all axes continue to exist in the returned array, even where the sliced extent of
an axis is 1; the upper indices indicated in the box are included in the data (unlike the python
protocol)
"""
slice_tuple = (slice(box[0, 0], box[1, 0] + 1), slice(box[0, 1],
box[1, 1] + 1), slice(box[0, 2], box[1, 2] + 1))
return self.h5_slice(part, slice_tuple)
def extend_imported_list_copying_properties_from_other_grid_collection(self,
other,
box = None,
refinement = None,
coarsening = None,
realization = None,
copy_all_realizations = False,
uncache_other_arrays = True):
"""Extends this collection's imported list with properties from other collection.
Optionally extract for a box.
arguments:
other: another GridPropertyCollection object which might relate to a different grid object
box: (numpy int array of shape (2, 3), optional): if present, a logical ijk cuboid subset of the source arrays
is extracted, box axes are (min:max, kji0); if None, the full arrays are copied
refinement (resqpy.olio.fine_coarse.FineCoarse object, optional): if present, other is taken to be a collection
for a coarser grid and the property values are sampled for a finer grid based on the refinement mapping
coarsening (resqpy.olio.fine_coarse.FineCoarse object, optional): if present, other is taken to be a collection
for a finer grid and the property values are upscaled for a coarser grid based on the coarsening mapping
realization (int, optional): if present, only properties for this realization are copied; if None, only
properties without a realization number are copied unless copy_all_realizations is True
copy_all_realizations (boolean, default False): if True (and realization is None), all properties are copied;
if False, only properties with a realization of None are copied; ignored if realization is not None
uncache_other_arrays (boolean, default True): if True, after each array is copied, the original is uncached from
the source collection (to free up memory)
notes:
this function can be used to copy properties between one grid object and another compatible one, for example
after a grid manipulation has generated a new version of the geometry; it can also be used to select an ijk box
subset of the property data and/or refine or coarsen the property data;
if a box is supplied, the first index indicates min (0) or max (1) and the second index indicates k (0), j (1) or i (2);
the values in box are zero based and cells matching the maximum indices are included (unlike with python ranges);
when coarsening or refining, this function ignores the 'within...box' attributes of the FineCoarse object so
the box argument must be used to effect a local grid coarsening or refinement
"""
# todo: optional use of active cell mask in coarsening
_extend_imported_initial_assertions(other, box, refinement, coarsening)
if coarsening is not None: # static upscaling of key property kinds, simple sampling of others
_extend_imported_with_coarsening(self, other, box, coarsening, realization, copy_all_realizations,
uncache_other_arrays)
else:
_extend_imported_no_coarsening(self, other, box, refinement, realization, copy_all_realizations,
uncache_other_arrays)
def import_nexus_property_to_cache(self,
file_name,
keyword,
extent_kji = None,
discrete = False,
uom = None,
time_index = None,
null_value = None,
property_kind = None,
local_property_kind_uuid = None,
facet_type = None,
facet = None,
realization = None,
use_binary = True):
"""Reads a property array from an ascii (or pure binary) file, caches and adds to imported list.
Does not add to collection dict.
arguments:
file_name (string): the name of the file to read the array data from; should contain data for one array only, without
the keyword
keyword (string): the keyword to associate with the imported data, which will become the citation title
extent_kji (optional, default None): if present, [nk, nj, ni] being the extent (shape) of the array to be imported;
if None, the shape of the grid associated with this collection is used
discrete (boolean, optional, default False): if True, integer data is imported, if False, float data
uom (string, optional, default None): the resqml units of measure applicable to the data
time_index (integer, optional, default None): if present, this array is for time varying data and this value is the
index into a time series associated with this collection
null_value (int or float, optional, default None): if present, this is used in the metadata to indicate that
this value is to be interpreted as a null value wherever it appears in the data (this does not change the
data during import)
property_kind (string): resqml property kind, or None
local_property_kind_uuid (uuid.UUID or string): uuid of local property kind, or None for standard property kind
facet_type (string): resqml facet type, or None
facet (string): resqml facet, or None
realization (int): realization number, or None
use_binary (boolean, optional, default True): if True, and an up-to-date pure binary version of the file exists,
then the data is loaded from that instead of from the ascii file; if True but the binary version does not
exist (or is older than the ascii file), the pure binary version is written as a side effect of the import;
if False, the ascii file is used even if a pure binary equivalent exists, and no binary file is written
note:
this function only performs the first importation step of actually reading the array into memory; other steps
must follow to include the array as a part in the resqml model and in this collection of properties (see doc
string for add_cached_array_to_imported_list() for the sequence of steps)
"""
log.debug(f'importing {keyword} array from file {file_name}')
# note: code in resqml_import adds imported arrays to model and to dict
if self.imported_list is None:
self.imported_list = []
if extent_kji is None:
extent_kji = self.grid.extent_kji
if discrete:
data_type = 'integer'
else:
data_type = 'real'
try:
import_array = ld.load_array_from_file(file_name,
extent_kji,
data_type = data_type,
comment_char = '!',
data_free_of_comments = False,
use_binary = use_binary)
except Exception:
log.exception('failed to import {} array from file {}'.format(keyword, file_name))
return None
self.add_cached_array_to_imported_list(import_array,
file_name,
keyword,
discrete = discrete,
uom = uom,
time_index = time_index,
null_value = null_value,
property_kind = property_kind,
local_property_kind_uuid = local_property_kind_uuid,
facet_type = facet_type,
facet = facet,
realization = realization,
points = False)
return import_array
def import_vdb_static_property_to_cache(self,
vdbase,
keyword,
grid_name = 'ROOT',
uom = None,
realization = None,
property_kind = None,
facet_type = None,
facet = None):
"""Reads a vdb static property array, caches and adds to imported list (but not collection dict).
arguments:
vdbase: an object of class vdb.VDB, already initialised with the path of the vdb
keyword (string): the Nexus keyword (or equivalent) of the static property to be loaded
grid_name (string): the grid name as used in the vdb
uom (string): The resqml unit of measure that applies to the data
realization (optional, int): The realization number that this property belongs to; use None
if not applicable
property_kind (string, optional): the RESQML property kind of the property
facet_type (string, optional): a RESQML facet type for the property
facet (string, optional): the RESQML facet value for the given facet type for the property
returns:
cached array containing the property data; the cached array is in an unpacked state
(ie. can be directly indexed with [k0, j0, i0])
note:
when importing from a vdb (or other sources), use methods such as this to build up a list
of imported arrays; then write hdf5 for the imported arrays; finally create xml for imported
properties
"""
log.info('importing vdb static property {} array'.format(keyword))
keyword = keyword.upper()
try:
discrete = True
dtype = None
if keyword[0].upper() == 'I' or keyword in ['KID', 'UID', 'UNPACK', 'DAD']:
# coerce to integer values (vdb stores integer data as reals!)
dtype = 'int32'
elif keyword in ['DEADCELL', 'LIVECELL']:
dtype = 'bool' # could use the default dtype of 64 bit integer
else:
dtype = 'float' # convert to 64 bit; could omit but RESQML states 64 bit
discrete = False
import_array = vdbase.grid_static_property(grid_name, keyword, dtype = dtype)
assert import_array is not None
except Exception:
log.exception(f'failed to import static property {keyword} from vdb')
return None
self.add_cached_array_to_imported_list(import_array,
vdbase.path,
keyword,
discrete = discrete,
uom = uom,
time_index = None,
null_value = None,
realization = realization,
property_kind = property_kind,
facet_type = facet_type,
facet = facet)
return import_array
def import_vdb_recurrent_property_to_cache(self,
vdbase,
timestep,
keyword,
grid_name = 'ROOT',
time_index = None,
uom = None,
realization = None,
property_kind = None,
facet_type = None,
facet = None):
"""Reads a vdb recurrent property array for one timestep, caches and adds to imported list.
Does not add to collection dict.
arguments:
vdbase: an object of class vdb.VDB, already initialised with the path of the vdb
timestep (int): the Nexus timestep number at which the property array was generated; NB. this is
not necessarily the same as a resqml time index
keyword (string): the Nexus keyword (or equivalent) of the recurrent property to be loaded
grid_name (string): the grid name as used in the vdb
time_index (int, optional): if present, used as the time index, otherwise timestep is used
uom (string): The resqml unit of measure that applies to the data
realization (optional, int): the realization number that this property belongs to; use None
if not applicable
property_kind (string, optional): the RESQML property kind of the property
facet_type (string, optional): a RESQML facet type for the property
facet (string, optional): the RESQML facet value for the given facet type for the property
returns:
cached array containing the property data; the cached array is in an unpacked state
(ie. can be directly indexed with [k0, j0, i0])
notes:
when importing from a vdb (or other sources), use methods such as this to build up a list
of imported arrays; then write hdf5 for the imported arrays; finally create xml for imported
properties
"""
log.info('importing vdb recurrent property {0} array for timestep {1}'.format(keyword, str(timestep)))
if time_index is None:
time_index = timestep
keyword = keyword.upper()
try:
import_array = vdbase.grid_recurrent_property_for_timestep(grid_name, keyword, timestep, dtype = 'float')
assert import_array is not None
except Exception:
# could raise an exception (as for static properties)
log.error(f'failed to import recurrent property {keyword} from vdb for timestep {timestep}')
return None
self.add_cached_array_to_imported_list(import_array,
vdbase.path,
keyword,
discrete = False,
uom = uom,
time_index = time_index,
null_value = None,
realization = realization,
property_kind = property_kind,
facet_type = facet_type,
facet = facet)
return import_array
def import_ab_property_to_cache(self,
file_name,
keyword,
extent_kji = None,
discrete = None,
uom = None,
time_index = None,
null_value = None,
property_kind = None,
local_property_kind_uuid = None,
facet_type = None,
facet = None,
realization = None):
"""Reads a property array from a pure binary file, caches and adds to imported list (but not collection dict).
arguments:
file_name (string): the name of the file to read the array data from; should contain data for one array only in
'pure binary' format (as used by ab_* suite of utilities)
keyword (string): the keyword to associate with the imported data, which will become the citation title
extent_kji (optional, default None): if present, [nk, nj, ni] being the extent (shape) of the array to be imported;
if None, the shape of the grid associated with this collection is used
discrete (boolean, optional, default False): if True, integer data is imported, if False, float data
uom (string, optional, default None): the resqml units of measure applicable to the data
time_index (integer, optional, default None): if present, this array is for time varying data and this value is the
index into a time series associated with this collection
null_value (int or float, optional, default None): if present, this is used in the metadata to indicate that
this value is to be interpreted as a null value wherever it appears in the data (this does not change the
data during import)
property_kind (string): resqml property kind, or None
local_property_kind_uuid (uuid.UUID or string): uuid of local property kind, or None
facet_type (string): resqml facet type, or None
facet (string): resqml facet, or None
realization (int): realization number, or None
note:
this function only performs the first importation step of actually reading the array into memory; other steps
must follow to include the array as a part in the resqml model and in this collection of properties (see doc
string for add_cached_array_to_imported_list() for the sequence of steps)
"""
if self.imported_list is None:
self.imported_list = []
if extent_kji is None:
extent_kji = self.grid.extent_kji
assert file_name[-3:] in ['.db', '.fb', '.ib', '.lb',
'.bb'], 'file extension not in pure binary array expected set for: ' + file_name
if discrete is None:
discrete = (file_name[-3:] in ['.ib', '.lb', '.bb'])
else:
assert discrete == (file_name[-3:]
in ['.ib', '.lb',
'.bb']), 'discrete argument is not consistent with file extension for: ' + file_name
try:
import_array = abt.load_array_from_ab_file(
file_name, extent_kji, return_64_bit = False) # todo: RESQML indicates 64 bit for everything
except Exception:
log.exception('failed to import property from pure binary file: ' + file_name)
return None
self.add_cached_array_to_imported_list(import_array,
file_name,
keyword,
discrete = discrete,
uom = uom,
time_index = time_index,
null_value = null_value,
property_kind = property_kind,
local_property_kind_uuid = local_property_kind_uuid,
facet_type = facet_type,
facet = facet,
realization = realization)
return import_array
def decoarsen_imported_list(self, decoarsen_array = None, reactivate = True):
"""Decoarsen imported Nexus properties if needed.
arguments:
decoarsen_array (int array, optional): if present, the naturalised cell index of the coarsened host cell, for each fine cell;
if None, the ICOARS keyword is searched for in the imported list and if not found KID data is used to derive the mapping
reactivate (boolean, default True): if True, the parent grid will have decoarsened cells' inactive flag set to that of the
host cell
returns:
a copy of the array used for decoarsening, if established, or None if no decoarsening array was identified
notes:
a return value of None indicates that no decoarsening occurred;
coarsened values are redistributed quite naively, with coarse volumes being split equally between fine cells, similarly for
length and area based properties; default used for most properties is simply to replicate the coarse value;
the ICOARS array itself is left unchanged, which means the method should only be called once for an imported list;
if no array is passed and no ICOARS array found, the KID values are inspected and the decoarsen array reverse engineered;
the method must be called before the imported arrays are written to hdf5;
reactivation only modifies the grid object attribute and does not write to hdf5, so the method should be called prior to
writing the grid in this situation
"""
# imported_list is list pf:
# (0: uuid, 1: file_name, 2: keyword, 3: cached_name, 4: discrete, 5: uom, 6: time_index, 7: null_value, 8: min_value, 9: max_value,
# 10: property_kind, 11: facet_type, 12: facet, 13: realization, 14: indexable_element, 15: count, 16: local_property_kind_uuid,
# 17: const_value)
skip_keywords = ['UID', 'ICOARS', 'KID', 'DAD'] # TODO: complete this list
decoarsen_length_kinds = ['length', 'cell length', 'thickness', 'permeability thickness', 'permeability length']
decoarsen_area_kinds = ['transmissibility']
decoarsen_volume_kinds = ['volume', 'rock volume', 'pore volume', 'fluid volume']
assert self.grid is not None
kid_attr_name = None
k_share = j_share = i_share = None
if decoarsen_array is None:
for import_item in self.imported_list:
if (import_item[14] is None or import_item[14] == 'cells') and import_item[4] and hasattr(
self, import_item[3]):
if import_item[2] == 'ICOARS':
decoarsen_array = self.__dict__[import_item[3]] - 1 # ICOARS values are one based
break
if import_item[2] == 'KID':
kid_attr_name = import_item[3]
if decoarsen_array is None and kid_attr_name is not None:
kid = self.__dict__[kid_attr_name]
kid_mask = (kid == -3) # -3 indicates cell inactive due to coarsening
assert kid_mask.shape == tuple(self.grid.extent_kji)
if np.any(kid_mask):
log.debug(f'{np.count_nonzero(kid_mask)} cells marked as requiring decoarsening in KID data')
decoarsen_array = np.full(self.grid.extent_kji, -1, dtype = int)
k_share = np.zeros(self.grid.extent_kji, dtype = int)
j_share = np.zeros(self.grid.extent_kji, dtype = int)
i_share = np.zeros(self.grid.extent_kji, dtype = int)
natural = 0
for k0 in range(self.grid.nk):
for j0 in range(self.grid.nj):
for i0 in range(self.grid.ni):
# if decoarsen_array[k0, j0, i0] < 0:
if kid[k0, j0, i0] == 0:
# assert not kid_mask[k0, j0, i0]
ke = k0 + 1
while ke < self.grid.nk and kid_mask[ke, j0, i0]:
ke += 1
je = j0 + 1
while je < self.grid.nj and kid_mask[k0, je, i0]:
je += 1
ie = i0 + 1
while ie < self.grid.ni and kid_mask[k0, j0, ie]:
ie += 1
# todo: check for conflict and resolve
decoarsen_array[k0:ke, j0:je, i0:ie] = natural
k_share[k0:ke, j0:je, i0:ie] = ke - k0
j_share[k0:ke, j0:je, i0:ie] = je - j0
i_share[k0:ke, j0:je, i0:ie] = ie - i0
elif not kid_mask[k0, j0, i0]: # inactive for reasons other than coarsening
decoarsen_array[k0, j0, i0] = natural
k_share[k0, j0, i0] = 1
j_share[k0, j0, i0] = 1
i_share[k0, j0, i0] = 1
natural += 1
assert np.all(decoarsen_array >= 0)
if decoarsen_array is None:
return None
cell_count = decoarsen_array.size
host_count = len(np.unique(decoarsen_array))
log.debug(f'{host_count} of {cell_count} are hosts; difference is {cell_count - host_count}')
assert cell_count == self.grid.cell_count()
if np.all(decoarsen_array.flatten() == np.arange(cell_count, dtype = int)):
return None # identity array
if k_share is None:
sharing_needed = False
for import_item in self.imported_list:
kind = import_item[10]
if kind in decoarsen_volume_kinds or kind in decoarsen_area_kinds or kind in decoarsen_length_kinds:
sharing_needed = True
break
if sharing_needed:
k_share = np.zeros(self.grid.extent_kji, dtype = int)
j_share = np.zeros(self.grid.extent_kji, dtype = int)
i_share = np.zeros(self.grid.extent_kji, dtype = int)
natural = 0
for k0 in range(self.grid.nk):
for j0 in range(self.grid.nj):
for i0 in range(self.grid.ni):
if k_share[k0, j0, i0] == 0:
ke = k0 + 1
while ke < self.grid.nk and decoarsen_array[ke, j0, i0] == natural:
ke += 1
je = j0 + 1
while je < self.grid.nj and decoarsen_array[k0, je, i0] == natural:
je += 1
ie = i0 + 1
while ie < self.grid.ni and decoarsen_array[k0, j0, ie] == natural:
ie += 1
k_share[k0:ke, j0:je, i0:ie] = ke - k0
j_share[k0:ke, j0:je, i0:ie] = je - j0
i_share[k0:ke, j0:je, i0:ie] = ie - i0
natural += 1
if k_share is not None:
assert np.all(k_share > 0) and np.all(j_share > 0) and np.all(i_share > 0)
volume_share = (k_share * j_share * i_share).astype(float)
k_share = k_share.astype(float)
j_share = j_share.astype(float)
i_share = i_share.astype(float)
property_count = 0
for import_item in self.imported_list:
if import_item[3] is None or not hasattr(self, import_item[3]):
continue # todo: handle decoarsening of const arrays?
if import_item[14] is not None and import_item[14] != 'cells':
continue
coarsened = self.__dict__[import_item[3]].flatten()
assert coarsened.size == cell_count
keyword = import_item[2]
if keyword.upper() in skip_keywords:
continue
kind = import_item[10]
if kind in decoarsen_volume_kinds:
redistributed = coarsened[decoarsen_array] / volume_share
elif kind in decoarsen_area_kinds:
# only transmissibilty currently in this set of supported property kinds
log.warning(
f'decoarsening of transmissibility {keyword} skipped due to simple methods not yielding correct values'
)
elif kind in decoarsen_length_kinds:
facet_dir = import_item[12] if import_item[11] == 'direction' else None
if kind in ['thickness', 'permeability thickness'] or (facet_dir == 'K'):
redistributed = coarsened[decoarsen_array] / k_share
elif facet_dir == 'J':
redistributed = coarsened[decoarsen_array] / j_share
elif facet_dir == 'I':
redistributed = coarsened[decoarsen_array] / i_share
else:
log.warning(f'decoarsening of length property {keyword} skipped as direction not established')
else:
redistributed = coarsened[decoarsen_array]
self.__dict__[import_item[3]] = redistributed.reshape(self.grid.extent_kji)
property_count += 1
if property_count:
log.debug(f'{property_count} properties decoarsened')
if reactivate and hasattr(self.grid, 'inactive'):
log.debug('reactivating cells inactive due to coarsening')
pre_count = np.count_nonzero(self.grid.inactive)
self.grid.inactive = self.grid.inactive.flatten()[decoarsen_array].reshape(self.grid.extent_kji)
post_count = np.count_nonzero(self.grid.inactive)
log.debug(f'{pre_count - post_count} cells reactivated')
return decoarsen_array
def write_nexus_property(
self,
part,
file_name,
keyword = None,
headers = True,
append = False,
columns = 20,
decimals = 3, # note: decimals only applicable to real numbers
blank_line_after_i_block = True,
blank_line_after_j_block = False,
space_separated = False, # default is tab separated
use_binary = False,
binary_only = False,
nan_substitute_value = None):
"""Writes the property array to a file in a format suitable for including as nexus input.
arguments:
part (string): the part name for which the array is to be exported
file_name (string): the path of the file to be created (any existing file will be overwritten)
keyword (string, optional, default None): if not None, the Nexus keyword to be included in the
ascii export file (otherwise data only is written, without a keyword)
headers (boolean, optional, default True): if True, some header comments are included in the
ascii export file, using a Nexus comment character
append (boolean, optional, default False): if True, any existing file is appended to rather than
overwritten
columns (integer, optional, default 20): the maximum number of data items to be written per line
decimals (integer, optional, default 3): the number of decimal places included in the values
written to the ascii export file (ignored for integer data)
blank_line_after_i_block (boolean, optional, default True): if True, a blank line is inserted
after each I-block of data (ie. when the J index changes)
blank_line_after_j_block (boolean, optional, default False): if True, a blank line is inserted
after each J-block of data (ie. when the K index changes)
space_separated (boolean, optional, default False): if True, a space is inserted between values;
if False, a tab is used
use_binary (boolean, optional, default False): if True, a pure binary copy of the array is
written
binary_only (boolean, optional, default False): if True, and if use_binary is True, then no
ascii file is generated; if False (or if use_binary is False) then an ascii file is written
nan_substitute_value (float, optional, default None): if a value is supplied, any not-a-number
values are replaced with this value in the exported file (the cached property array remains
unchanged); if None, then 'nan' or 'Nan' will appear in the ascii export file
"""
array_ref = self.cached_part_array_ref(part)
assert (array_ref is not None)
extent_kji = array_ref.shape
assert (len(extent_kji) == 3)
wd.write_array_to_ascii_file(file_name,
extent_kji,
array_ref,
headers = headers,
keyword = keyword,
columns = columns,
data_type = rqet.simplified_data_type(array_ref.dtype),
decimals = decimals,
target_simulator = 'nexus',
blank_line_after_i_block = blank_line_after_i_block,
blank_line_after_j_block = blank_line_after_j_block,
space_separated = space_separated,
append = append,
use_binary = use_binary,
binary_only = binary_only,
nan_substitute_value = nan_substitute_value)
def write_nexus_property_generating_filename(
self,
part,
directory,
use_title_for_keyword = False,
headers = True,
columns = 20,
decimals = 3, # note: decimals only applicable to real numbers
blank_line_after_i_block = True,
blank_line_after_j_block = False,
space_separated = False, # default is tab separated
use_binary = False,
binary_only = False,
nan_substitute_value = None):
"""Writes the property array to a file using a filename generated from the citation title etc.
arguments:
part (string): the part name for which the array is to be exported
directory (string): the path of the diractory into which the file will be written
use_title_for_keyword (boolean, optional, default False): if True, the citation title for the property part
is used as a keyword in the ascii export file
for other arguments, see the docstring for the write_nexus_property() function
note:
the generated filename consists of:
the citation title (with spaces replaced with underscores);
the facet type and facet, if present;
_t_ and the time_index, if the part has a time index
_r_ and the realisation number, if the part has a realisation number
"""
title = self.citation_title_for_part(part).replace(' ', '_')
if use_title_for_keyword:
keyword = title
else:
keyword = None
fname = title
facet_type = self.facet_type_for_part(part)
if facet_type is not None:
fname += '_' + facet_type.replace(' ', '_') + '_' + self.facet_for_part(part).replace(' ', '_')
time_index = self.time_index_for_part(part)
if time_index is not None:
fname += '_t_' + str(time_index)
realisation = self.realization_for_part(part)
if realisation is not None:
fname += '_r_' + str(realisation)
# could add .dat extension
self.write_nexus_property(part,
os.path.join(directory, fname),
keyword = keyword,
headers = headers,
append = False,
columns = columns,
decimals = decimals,
blank_line_after_i_block = blank_line_after_i_block,
blank_line_after_j_block = blank_line_after_j_block,
space_separated = space_separated,
use_binary = use_binary,
binary_only = binary_only,
nan_substitute_value = nan_substitute_value)
def write_nexus_collection(self,
directory,
use_title_for_keyword = False,
headers = True,
columns = 20,
decimals = 3,
blank_line_after_i_block = True,
blank_line_after_j_block = False,
space_separated = False,
use_binary = False,
binary_only = False,
nan_substitute_value = None):
"""Writes a set of files, one for each part in the collection.
arguments:
directory (string): the path of the diractory into which the files will be written
for other arguments, see the docstrings for the write_nexus_property_generating_filename() and
write_nexus_property() functions
note:
the generated filenames are based on the citation titles etc., as for
write_nexus_property_generating_filename()
"""
for part in self.dict.keys():
self.write_nexus_property_generating_filename(part,
directory,
use_title_for_keyword = use_title_for_keyword,
headers = headers,
columns = columns,
decimals = decimals,
blank_line_after_i_block = blank_line_after_i_block,
blank_line_after_j_block = blank_line_after_j_block,
space_separated = space_separated,
use_binary = use_binary,
binary_only = binary_only,
nan_substitute_value = nan_substitute_value)
def _array_box(collection, part, box = None, uncache_other_arrays = True):
full_array = collection.cached_part_array_ref(part)
if box is None:
a = full_array.copy()
else:
a = full_array[box[0, 0]:box[1, 0] + 1, box[0, 1]:box[1, 1] + 1, box[0, 2]:box[1, 2] + 1].copy()
full_array = None
if uncache_other_arrays:
collection.uncache_part_array(part)
return a
def _coarsening_sample(coarsening, a):
# for now just take value from first cell in box
# todo: find most common element in box
a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji), dtype = a.dtype)
assert a.shape == tuple(coarsening.fine_extent_kji)
# todo: try to figure out some numpy slice operations to avoid use of for loops
for k in range(coarsening.coarse_extent_kji[0]):
for j in range(coarsening.coarse_extent_kji[1]):
for i in range(coarsening.coarse_extent_kji[2]):
# local box within lgc space of fine cells, for 1 coarse cell
cell_box = coarsening.fine_box_for_coarse((k, j, i))
a_coarsened[k, j, i] = a[tuple(cell_box[0])]
return a_coarsened
def _coarsening_sum(coarsening, a, axis = None):
a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji))
assert a.shape == tuple(coarsening.fine_extent_kji)
# todo: try to figure out some numpy slice operations to avoid use of for loops
for k in range(coarsening.coarse_extent_kji[0]):
for j in range(coarsening.coarse_extent_kji[1]):
for i in range(coarsening.coarse_extent_kji[2]):
cell_box = coarsening.fine_box_for_coarse(
(k, j, i)) # local box within lgc space of fine cells, for 1 coarse cell
# yapf: disable
a_coarsened[k, j, i] = np.nansum(a[cell_box[0, 0]:cell_box[1, 0] + 1,
cell_box[0, 1]:cell_box[1, 1] + 1,
cell_box[0, 2]:cell_box[1, 2] + 1])
# yapf: enable
if axis is not None:
axis_1 = (axis + 1) % 3
axis_2 = (axis + 2) % 3
# yapf: disable
divisor = ((cell_box[1, axis_1] + 1 - cell_box[0, axis_1]) *
(cell_box[1, axis_2] + 1 - cell_box[0, axis_2]))
# yapf: enable
a_coarsened[k, j, i] = a_coarsened[k, j, i] / float(divisor)
return a_coarsened
def _coarsening_weighted_mean(coarsening, a, fine_weight, coarse_weight = None, zero_weight_result = np.nan):
    """Coarsen array a as a fine_weight weighted mean over each coarse cell's box of fine cells.

    arguments:
       coarsening: object mapping between fine and coarse grids (provides extents and fine_box_for_coarse())
       a (numpy array): fine grid values, shape coarsening.fine_extent_kji
       fine_weight (numpy array): per fine cell weights, same shape as a
       coarse_weight (numpy array, optional): pre-computed per coarse cell weight sums; if None, weights
          are summed per coarse cell from fine_weight
       zero_weight_result (float, default NaN): value used where the weight sum is zero or NaN

    returns:
       numpy array of shape coarsening.coarse_extent_kji holding the weighted mean values

    note:
       the default uses np.nan rather than the np.NaN alias, which was removed in NumPy 2.0
    """
    a_coarsened = np.empty(tuple(coarsening.coarse_extent_kji))
    assert a.shape == tuple(coarsening.fine_extent_kji)
    assert fine_weight.shape == a.shape
    if coarse_weight is not None:
        assert coarse_weight.shape == a_coarsened.shape
    for k in range(coarsening.coarse_extent_kji[0]):
        for j in range(coarsening.coarse_extent_kji[1]):
            for i in range(coarsening.coarse_extent_kji[2]):
                _coarsening_weighted_mean_singlecell(a_coarsened, a, coarsening, k, j, i, fine_weight, coarse_weight,
                                                     zero_weight_result)
    if coarse_weight is not None:
        # normalise by the supplied coarse weights, guarding against zero or NaN weights
        mask = np.logical_or(np.isnan(coarse_weight), coarse_weight == 0.0)
        a_coarsened = np.where(mask, zero_weight_result, a_coarsened / coarse_weight)
    return a_coarsened
def _coarsening_weighted_mean_singlecell(a_coarsened, a, coarsening, k, j, i, fine_weight, coarse_weight,
zero_weight_result):
cell_box = coarsening.fine_box_for_coarse((k, j, i)) # local box within lgc space of fine cells, for 1 coarse cell
a_coarsened[k, j, i] = np.nansum(
a[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1, cell_box[0, 2]:cell_box[1, 2] + 1] *
fine_weight[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1,
cell_box[0, 2]:cell_box[1, 2] + 1])
if coarse_weight is None:
weight = np.nansum(fine_weight[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1,
cell_box[0, 2]:cell_box[1, 2] + 1])
if np.isnan(weight) or weight == 0.0:
a_coarsened[k, j, i] = zero_weight_result
else:
a_coarsened[k, j, i] /= weight
def _add_to_imported(collection, a, title, info, null_value = None, const_value = None):
collection.add_cached_array_to_imported_list(
a,
title,
info[10], # citation_title
discrete = not info[4],
indexable_element = 'cells',
uom = info[15],
time_index = info[12],
null_value = null_value,
property_kind = info[7],
local_property_kind_uuid = info[17],
facet_type = info[8],
facet = info[9],
realization = info[0],
const_value = const_value,
points = info[21])
def _extend_imported_initial_assertions(other, box, refinement, coarsening):
    """Validate arguments prior to extending an imported list from another collection."""
    # imported here rather than at module level to avoid a circular reference (grid imports this module)
    import resqpy.grid as grr
    assert other.support is not None and isinstance(other.support,
                                                    grr.Grid), 'other property collection has no grid support'
    assert refinement is None or coarsening is None, 'refinement and coarsening both specified simultaneously'
    if box is not None:
        assert bxu.valid_box(box, other.grid.extent_kji)
        # when refining or coarsening, the box extent must match the corresponding grid extent
        if refinement is not None:
            assert tuple(bxu.extent_of_box(box)) == tuple(refinement.coarse_extent_kji)
        elif coarsening is not None:
            assert tuple(bxu.extent_of_box(box)) == tuple(coarsening.fine_extent_kji)
    # todo: any constraints on realization numbers?
def _extend_imported_with_coarsening(collection, other, box, coarsening, realization, copy_all_realizations,
                                     uncache_other_arrays):
    """Coarsen properties from other into collection's imported list, one property kind at a time.

    note:
       kinds are processed in dependency order, as later steps use earlier results as weights:
       rock volume, then ntg, porosity, saturation, permeability, cell lengths, and everything else
    """
    assert collection.support is not None and tuple(collection.support.extent_kji) == tuple(
        coarsening.coarse_extent_kji)
    rv_coll, ntg_coll, poro_coll, sat_coll, perm_coll = _extend_imported_get_fine_collections(other, realization)
    fine_rv, coarse_rv = _extend_imported_coarsen_rock_volume(rv_coll, other, box, collection, realization,
                                                             uncache_other_arrays, coarsening, copy_all_realizations)
    fine_ntg, coarse_ntg = _extend_imported_coarsen_ntg(ntg_coll, other, box, collection, realization,
                                                        uncache_other_arrays, coarsening, copy_all_realizations,
                                                        fine_rv, coarse_rv)
    # net rock volume arrays are used only as weights for later steps; they are not imported
    fine_nrv, coarse_nrv = _extend_imported_nrv_arrays(fine_ntg, coarse_ntg, fine_rv, coarse_rv)
    fine_poro, coarse_poro = _extend_imported_coarsen_poro(poro_coll, other, box, collection, realization,
                                                           uncache_other_arrays, coarsening, copy_all_realizations,
                                                           fine_nrv, coarse_nrv)
    _extend_imported_coarsen_sat(sat_coll, other, box, collection, realization, uncache_other_arrays, coarsening,
                                 copy_all_realizations, fine_nrv, coarse_nrv, fine_poro, coarse_poro)
    _extend_imported_coarsen_perm(perm_coll, other, box, collection, realization, uncache_other_arrays, coarsening,
                                  copy_all_realizations, fine_nrv, coarse_nrv)
    _extend_imported_coarsen_lengths(other, box, collection, realization, uncache_other_arrays, coarsening,
                                     copy_all_realizations)
    _extend_imported_coarsen_other(other, box, collection, realization, uncache_other_arrays, coarsening,
                                   copy_all_realizations, fine_rv, coarse_rv)
def _extend_imported_get_fine_collections(other, realization):
    """Return selective collections of other for the specially handled property kinds.

    returns:
       tuple of collections for: rock volume, net to gross ratio, porosity, saturation,
       permeability rock (in that order)
    """
    # todo: add kh and some other property kinds
    kinds = ('rock volume', 'net to gross ratio', 'porosity', 'saturation', 'permeability rock')
    return tuple(
        selective_version_of_collection(other, realization = realization, property_kind = kind) for kind in kinds)
def _extend_imported_coarsen_rock_volume(source_rv, other, box, collection, realization, uncache_other_arrays,
                                         coarsening, copy_all_realizations):
    """Coarsen bulk rock volume, from property data when present, otherwise from grid geometries.

    returns:
       (fine_rv_array, coarse_rv_array) pair of bulk rock volume arrays, reused as weights by later steps
    """
    rv_fine = rv_coarse = None
    if source_rv.number_of_parts() == 0:
        # no rock volume properties available: fall back to cell volumes from the grid geometries
        log.debug('computing bulk rock volume from fine and coarse grid geometries')
        geometry_rv = other.support.volume()
        if box is None:
            rv_fine = geometry_rv
        else:
            window = tuple(slice(box[0, d], box[1, d] + 1) for d in range(3))
            rv_fine = geometry_rv[window]
        rv_coarse = collection.support.volume()
    else:
        for part, info in source_rv.dict.items():
            if not copy_all_realizations and info[0] != realization:
                continue
            rv_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
            rv_coarse = _coarsening_sum(coarsening, rv_fine)
            _add_to_imported(collection, rv_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
    return rv_fine, rv_coarse
def _extend_imported_coarsen_ntg(source_ntg, other, box, collection, realization, uncache_other_arrays, coarsening,
                                 copy_all_realizations, fine_rv_array, coarse_rv_array):
    """Coarsen net to gross ratio properties as bulk rock volume weighted means; add to imported list.

    note:
       coarsened ntg values may exceed one when reference bulk rock volumes are from grid geometries
    """
    ntg_fine = ntg_coarse = None
    for part, info in source_ntg.dict.items():
        if not copy_all_realizations and info[0] != realization:
            continue
        ntg_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
        ntg_coarse = _coarsening_weighted_mean(coarsening,
                                               ntg_fine,
                                               fine_rv_array,
                                               coarse_weight = coarse_rv_array,
                                               zero_weight_result = 0.0)
        _add_to_imported(collection, ntg_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
    return ntg_fine, ntg_coarse
def _extend_imported_nrv_arrays(fine_ntg_array, coarse_ntg_array, fine_rv_array, coarse_rv_array):
"""Note: these arrays are generated only in memory for coarsening calculations for other properties. These are not added to the property collection"""
if fine_ntg_array is None:
fine_nrv_array = fine_rv_array
coarse_nrv_array = coarse_rv_array
else:
fine_nrv_array = fine_rv_array * fine_ntg_array
coarse_nrv_array = coarse_rv_array * coarse_ntg_array
return fine_nrv_array, coarse_nrv_array
def _extend_imported_coarsen_poro(source_poro, other, box, collection, realization, uncache_other_arrays, coarsening,
                                  copy_all_realizations, fine_nrv_array, coarse_nrv_array):
    """Coarsen porosity properties as net rock volume weighted means; add to collection's imported list."""
    poro_fine = poro_coarse = None
    for part, info in source_poro.dict.items():
        if not copy_all_realizations and info[0] != realization:
            continue
        poro_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
        poro_coarse = _coarsening_weighted_mean(coarsening,
                                                poro_fine,
                                                fine_nrv_array,
                                                coarse_weight = coarse_nrv_array,
                                                zero_weight_result = 0.0)
        _add_to_imported(collection, poro_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
    return poro_fine, poro_coarse
def _extend_imported_coarsen_sat(source_sat, other, box, collection, realization, uncache_other_arrays, coarsening,
copy_all_realizations, fine_nrv_array, coarse_nrv_array, fine_poro_array,
coarse_poro_array):
# saturations
fine_sat_array = coarse_sat_array = None
fine_sat_weight = fine_nrv_array
coarse_sat_weight = coarse_nrv_array
if fine_poro_array is not None:
fine_sat_weight *= fine_poro_array
coarse_sat_weight *= coarse_poro_array
for (part, info) in source_sat.dict.items():
if not copy_all_realizations and info[0] != realization:
continue
fine_sat_array = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
coarse_sat_array = _coarsening_weighted_mean(coarsening,
fine_sat_array,
fine_sat_weight,
coarse_weight = coarse_sat_weight,
zero_weight_result = 0.0)
_add_to_imported(collection, coarse_sat_array, 'coarsened from grid ' + str(other.support.uuid), info)
def _extend_imported_coarsen_perm(source_perm, other, box, collection, realization, uncache_other_arrays, coarsening,
                                  copy_all_realizations, fine_nrv_array, coarse_nrv_array):
    """Coarsen permeability properties as net rock volume weighted means; add to imported list."""
    # todo: use more harmonic, arithmetic mean instead of just bulk rock volume weighted; consider ntg
    for part, info in source_perm.dict.items():
        if not copy_all_realizations and info[0] != realization:
            continue
        perm_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
        perm_coarse = _coarsening_weighted_mean(coarsening,
                                                perm_fine,
                                                fine_nrv_array,
                                                coarse_weight = coarse_nrv_array,
                                                zero_weight_result = 0.0)
        _add_to_imported(collection, perm_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
def _extend_imported_coarsen_lengths(other, box, collection, realization, uncache_other_arrays, coarsening,
                                     copy_all_realizations):
    """Coarsen directional cell length properties by summing fine lengths along the facet direction axis."""
    length_collection = selective_version_of_collection(other,
                                                        realization = realization,
                                                        property_kind = 'cell length')
    for part, info in length_collection.dict.items():
        if not copy_all_realizations and info[0] != realization:
            continue
        length_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
        # expect a single count, direction faceted property; axis from first letter of the facet (K, J or I)
        assert info[5] == 1 and info[8] == 'direction'
        axis = 'KJI'.index(info[9][0].upper())
        length_coarse = _coarsening_sum(coarsening, length_fine, axis = axis)
        _add_to_imported(collection, length_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
def _extend_imported_coarsen_other(other, box, collection, realization, uncache_other_arrays, coarsening,
                                   copy_all_realizations, fine_rv_array, coarse_rv_array):
    """Coarsen all remaining properties: volume weighted mean for continuous data, corner sample for discrete.

    note:
       property kinds already processed by the specialised coarsening steps are skipped here
    """
    # TODO: all other supported property kinds requiring special treatment
    handled_kinds = ('rock volume', 'net to gross ratio', 'porosity', 'saturation', 'permeability rock',
                     'rock permeability', 'cell length')
    for part, info in other.dict.items():
        if not copy_all_realizations and info[0] != realization:
            continue
        if info[7] in handled_kinds:
            continue
        ordinary_fine = _array_box(other, part, box = box, uncache_other_arrays = uncache_other_arrays)
        if info[4]:
            # continuous data: bulk rock volume weighted arithmetic mean
            ordinary_coarse = _coarsening_weighted_mean(coarsening,
                                                        ordinary_fine,
                                                        fine_rv_array,
                                                        coarse_weight = coarse_rv_array)
        else:
            # discrete data: sample the value from the first fine cell in each coarse cell's box
            ordinary_coarse = _coarsening_sample(coarsening, ordinary_fine)
        _add_to_imported(collection, ordinary_coarse, 'coarsened from grid ' + str(other.support.uuid), info)
def _extend_imported_no_coarsening(collection, other, box, refinement, realization, copy_all_realizations,
                                   uncache_other_arrays):
    """Copy (and optionally refine) each property part from other into collection's imported list."""
    source_collection = (other if realization is None else
                         selective_version_of_collection(other, realization = realization))
    for part, info in source_collection.dict.items():
        _extend_imported_no_coarsening_single(source_collection, part, info, collection, other, box, refinement,
                                              realization, copy_all_realizations, uncache_other_arrays)
def _extend_imported_no_coarsening_single(source_collection, part, info, collection, other, box, refinement,
                                          realization, copy_all_realizations, uncache_other_arrays):
    """Copy one property part (optionally box trimmed and refined) into collection's imported list."""
    if not copy_all_realizations and info[0] != realization:
        return
    const_value = info[20]
    a = None
    if const_value is None:
        a = _array_box(source_collection, part, box = box, uncache_other_arrays = uncache_other_arrays)
    if refinement is not None and a is not None:
        # simple resampling of the array onto the refined grid
        a = _extend_imported_no_coarsening_single_resampling(a, info, collection, refinement)
    collection.add_cached_array_to_imported_list(
        a,
        'copied from grid ' + str(other.support.uuid),
        info[10],  # citation title
        discrete = not info[4],
        indexable_element = 'cells',
        uom = info[15],
        time_index = info[12],
        null_value = None,  # todo: extract from other's xml
        property_kind = info[7],
        local_property_kind_uuid = info[17],
        facet_type = info[8],
        facet = info[9],
        realization = info[0],
        const_value = const_value,
        points = info[21])
def _extend_imported_no_coarsening_single_resampling(a, info, collection, refinement):
    """Resample coarse array a onto the refined grid by replicating values along each axis in turn."""
    if info[6] != 'cells':
        # todo: appropriate refinement of data for other indexable elements
        # NOTE(review): this bare return yields None, so the caller imports a None array for
        # non-'cells' elements — confirm that is the intended 'skip' behaviour
        return
    # todo: dividing up of values when needed, eg. volumes, areas, lengths
    assert tuple(a.shape) == tuple(refinement.coarse_extent_kji)
    assert collection.support is not None and tuple(collection.support.extent_kji) == tuple(refinement.fine_extent_kji)
    # replicate along the K axis: each fine K layer picks its host coarse layer via the ratio vector
    k_ratio_vector = refinement.coarse_for_fine_axial_vector(0)
    a_refined_k = np.empty(
        (refinement.fine_extent_kji[0], refinement.coarse_extent_kji[1], refinement.coarse_extent_kji[2]),
        dtype = a.dtype)
    a_refined_k[:, :, :] = a[k_ratio_vector, :, :]
    # then replicate along the J axis
    j_ratio_vector = refinement.coarse_for_fine_axial_vector(1)
    a_refined_kj = np.empty(
        (refinement.fine_extent_kji[0], refinement.fine_extent_kji[1], refinement.coarse_extent_kji[2]),
        dtype = a.dtype)
    a_refined_kj[:, :, :] = a_refined_k[:, j_ratio_vector, :]
    # and finally along the I axis, yielding the fully refined array
    i_ratio_vector = refinement.coarse_for_fine_axial_vector(2)
    a = np.empty(tuple(refinement.fine_extent_kji), dtype = a.dtype)
    a[:, :, :] = a_refined_kj[:, :, i_ratio_vector]
    # for cell length properties, scale down the values in accordance with refinement
    if info[4] and info[7] == 'cell length' and info[8] == 'direction' and info[5] == 1:
        a = _extend_imported_no_coarsening_single_resampling_length(a, info, refinement)
    return a
def _extend_imported_no_coarsening_single_resampling_length(a, info, refinement):
    """Scale refined cell length values (in place) by the per-axis refinement proportions."""
    direction = info[9].upper()
    log.debug(f'refining cell lengths for axis {direction}')
    # map the facet direction letter to the axis index and the broadcast shape for the proportion vector
    axis_shapes = {'K': (0, (-1, 1, 1)), 'J': (1, (1, -1, 1)), 'I': (2, (1, 1, -1))}
    if direction in axis_shapes:
        axis, shape = axis_shapes[direction]
        a *= refinement.proportions_for_axis(axis).reshape(shape)
    return a
| [
"logging.getLogger",
"numpy.count_nonzero",
"numpy.array",
"resqpy.olio.xml_et.citation_title_for_node",
"resqpy.olio.box_utilities.extent_of_box",
"numpy.arange",
"resqpy.olio.load_data.load_array_from_file",
"resqpy.olio.xml_et.simplified_data_type",
"numpy.where",
"resqpy.olio.ab_toolbox.load_a... | [((165, 192), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'import logging\n'), ((48495, 48746), 'numpy.nansum', 'np.nansum', (['(a[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1,\n cell_box[0, 2]:cell_box[1, 2] + 1] * fine_weight[cell_box[0, 0]:\n cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1, cell_box[0, 2]:\n cell_box[1, 2] + 1])'], {}), '(a[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1\n ] + 1, cell_box[0, 2]:cell_box[1, 2] + 1] * fine_weight[cell_box[0, 0]:\n cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1, cell_box[0, 2]:\n cell_box[1, 2] + 1])\n', (48504, 48746), True, 'import numpy as np\n'), ((65118, 65244), 'numpy.empty', 'np.empty', (['(refinement.fine_extent_kji[0], refinement.coarse_extent_kji[1], refinement\n .coarse_extent_kji[2])'], {'dtype': 'a.dtype'}), '((refinement.fine_extent_kji[0], refinement.coarse_extent_kji[1],\n refinement.coarse_extent_kji[2]), dtype=a.dtype)\n', (65126, 65244), True, 'import numpy as np\n'), ((65394, 65518), 'numpy.empty', 'np.empty', (['(refinement.fine_extent_kji[0], refinement.fine_extent_kji[1], refinement.\n coarse_extent_kji[2])'], {'dtype': 'a.dtype'}), '((refinement.fine_extent_kji[0], refinement.fine_extent_kji[1],\n refinement.coarse_extent_kji[2]), dtype=a.dtype)\n', (65402, 65518), True, 'import numpy as np\n'), ((2620, 2665), 'numpy.array', 'np.array', (['[[0, 1], [2, 4], [5, 3]]'], {'dtype': 'int'}), '([[0, 1], [2, 4], [5, 3]], dtype=int)\n', (2628, 2665), True, 'import numpy as np\n'), ((2915, 2984), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]]'], {'dtype': 'int'}), '([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype=int)\n', (2923, 2984), True, 'import numpy as np\n'), ((48091, 48154), 'numpy.where', 'np.where', (['mask', 'zero_weight_result', '(a_coarsened / coarse_weight)'], {}), '(mask, zero_weight_result, a_coarsened / 
coarse_weight)\n', (48099, 48154), True, 'import numpy as np\n'), ((48816, 48948), 'numpy.nansum', 'np.nansum', (['fine_weight[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1\n ] + 1, cell_box[0, 2]:cell_box[1, 2] + 1]'], {}), '(fine_weight[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:\n cell_box[1, 1] + 1, cell_box[0, 2]:cell_box[1, 2] + 1])\n', (48825, 48948), True, 'import numpy as np\n'), ((50261, 50302), 'resqpy.olio.box_utilities.valid_box', 'bxu.valid_box', (['box', 'other.grid.extent_kji'], {}), '(box, other.grid.extent_kji)\n', (50274, 50302), True, 'import resqpy.olio.box_utilities as bxu\n'), ((12233, 12374), 'resqpy.olio.load_data.load_array_from_file', 'ld.load_array_from_file', (['file_name', 'extent_kji'], {'data_type': 'data_type', 'comment_char': '"""!"""', 'data_free_of_comments': '(False)', 'use_binary': 'use_binary'}), "(file_name, extent_kji, data_type=data_type,\n comment_char='!', data_free_of_comments=False, use_binary=use_binary)\n", (12256, 12374), True, 'import resqpy.olio.load_data as ld\n'), ((24229, 24300), 'resqpy.olio.ab_toolbox.load_array_from_ab_file', 'abt.load_array_from_ab_file', (['file_name', 'extent_kji'], {'return_64_bit': '(False)'}), '(file_name, extent_kji, return_64_bit=False)\n', (24256, 24300), True, 'import resqpy.olio.ab_toolbox as abt\n'), ((28585, 28601), 'numpy.any', 'np.any', (['kid_mask'], {}), '(kid_mask)\n', (28591, 28601), True, 'import numpy as np\n'), ((30851, 30877), 'numpy.unique', 'np.unique', (['decoarsen_array'], {}), '(decoarsen_array)\n', (30860, 30877), True, 'import numpy as np\n'), ((35232, 35268), 'numpy.count_nonzero', 'np.count_nonzero', (['self.grid.inactive'], {}), '(self.grid.inactive)\n', (35248, 35268), True, 'import numpy as np\n'), ((35403, 35439), 'numpy.count_nonzero', 'np.count_nonzero', (['self.grid.inactive'], {}), '(self.grid.inactive)\n', (35419, 35439), True, 'import numpy as np\n'), ((41748, 41778), 'os.path.join', 'os.path.join', (['directory', 'fname'], 
{}), '(directory, fname)\n', (41760, 41778), False, 'import os\n'), ((48022, 48045), 'numpy.isnan', 'np.isnan', (['coarse_weight'], {}), '(coarse_weight)\n', (48030, 48045), True, 'import numpy as np\n'), ((48994, 49010), 'numpy.isnan', 'np.isnan', (['weight'], {}), '(weight)\n', (49002, 49010), True, 'import numpy as np\n'), ((28747, 28791), 'numpy.full', 'np.full', (['self.grid.extent_kji', '(-1)'], {'dtype': 'int'}), '(self.grid.extent_kji, -1, dtype=int)\n', (28754, 28791), True, 'import numpy as np\n'), ((28820, 28861), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (28828, 28861), True, 'import numpy as np\n'), ((28890, 28931), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (28898, 28931), True, 'import numpy as np\n'), ((28960, 29001), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (28968, 29001), True, 'import numpy as np\n'), ((30693, 30721), 'numpy.all', 'np.all', (['(decoarsen_array >= 0)'], {}), '(decoarsen_array >= 0)\n', (30699, 30721), True, 'import numpy as np\n'), ((31080, 31112), 'numpy.arange', 'np.arange', (['cell_count'], {'dtype': 'int'}), '(cell_count, dtype=int)\n', (31089, 31112), True, 'import numpy as np\n'), ((31555, 31596), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (31563, 31596), True, 'import numpy as np\n'), ((31625, 31666), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (31633, 31666), True, 'import numpy as np\n'), ((31695, 31736), 'numpy.zeros', 'np.zeros', (['self.grid.extent_kji'], {'dtype': 'int'}), '(self.grid.extent_kji, dtype=int)\n', (31703, 31736), True, 'import numpy as np\n'), ((32847, 32866), 'numpy.all', 'np.all', (['(k_share > 0)'], {}), '(k_share > 0)\n', (32853, 32866), True, 'import numpy as 
np\n'), ((32871, 32890), 'numpy.all', 'np.all', (['(j_share > 0)'], {}), '(j_share > 0)\n', (32877, 32890), True, 'import numpy as np\n'), ((32895, 32914), 'numpy.all', 'np.all', (['(i_share > 0)'], {}), '(i_share > 0)\n', (32901, 32914), True, 'import numpy as np\n'), ((38841, 38883), 'resqpy.olio.xml_et.simplified_data_type', 'rqet.simplified_data_type', (['array_ref.dtype'], {}), '(array_ref.dtype)\n', (38866, 38883), True, 'import resqpy.olio.xml_et as rqet\n'), ((46527, 46649), 'numpy.nansum', 'np.nansum', (['a[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1] + 1,\n cell_box[0, 2]:cell_box[1, 2] + 1]'], {}), '(a[cell_box[0, 0]:cell_box[1, 0] + 1, cell_box[0, 1]:cell_box[1, 1\n ] + 1, cell_box[0, 2]:cell_box[1, 2] + 1])\n', (46536, 46649), True, 'import numpy as np\n'), ((50363, 50385), 'resqpy.olio.box_utilities.extent_of_box', 'bxu.extent_of_box', (['box'], {}), '(box)\n', (50380, 50385), True, 'import resqpy.olio.box_utilities as bxu\n'), ((2048, 2087), 'resqpy.olio.xml_et.citation_title_for_node', 'rqet.citation_title_for_node', (['grid.root'], {}), '(grid.root)\n', (2076, 2087), True, 'import resqpy.olio.xml_et as rqet\n'), ((50488, 50510), 'resqpy.olio.box_utilities.extent_of_box', 'bxu.extent_of_box', (['box'], {}), '(box)\n', (50505, 50510), True, 'import resqpy.olio.box_utilities as bxu\n'), ((28632, 28658), 'numpy.count_nonzero', 'np.count_nonzero', (['kid_mask'], {}), '(kid_mask)\n', (28648, 28658), True, 'import numpy as np\n')] |
#!/usr/bin/env/python
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import csv
# Set here the type of star to work with. OPTIONS= 'Cefeida', 'RR_Lyrae', 'BinariaECL'.
tipo_estrella='RR_Lyrae';
# Import the star identifiers from the csv file:
ID_estrellas=np.loadtxt('numero_estrellas.csv',delimiter=',',dtype='str', skiprows=1);
# One id column per star type: Cepheids, RR Lyrae, eclipsing binaries.
vecCep=ID_estrellas[:,0];
vecRRLyr=ID_estrellas[:,1];
vecECL=ID_estrellas[:,2];
# Folder holding the fnpeaks period-search results.
destinoMax='Parte1/fnpeaks/'
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Select the light-curve folder, fnpeaks result prefix and id list for the chosen star type.
if tipo_estrella=='Cefeida' or tipo_estrella==1:
    label_path='Datos/'+'1_Cefeidas'+'/I/OGLE-LMC-CEP-';
    nombre_res=destinoMax+'Cefeidas/OGLE-LMC-CEP-';
    numero_estrella=vecCep;
elif tipo_estrella=='RR_Lyrae' or tipo_estrella==2:
    label_path='Datos/'+'2_RR_Lyrae'+'/I/OGLE-LMC-RRLYR-';
    nombre_res=destinoMax+'RR_Lyrae/OGLE-LMC-RRLYR-';
    numero_estrella=vecRRLyr;
else:
    label_path='Datos/'+'3_BinariasEclipsantes'+'/I/OGLE-LMC-ECL-';
    nombre_res=destinoMax+'ECL/OGLE-LMC-ECL-';
    numero_estrella=vecECL;
# end if
extension='.dat';
extensionMax='.max';
# Elementwise truncation toward zero, used for phase folding below.
# NOTE(review): np.int was removed in NumPy 1.24, so np.vectorize(np.int)
# raises AttributeError on modern NumPy — consider np.trunc instead.
vint=np.vectorize(np.int);
def transformar_ts(tp, t0, t_datos):
    """Fold observation times into phases for a given period.

    :param tp: the period (same time units as the observations)
    :param t0: reference epoch subtracted from every observation time
    :param t_datos: array of observation times
    :return: array of phases; for t_datos >= t0 the values lie in [0, 1)
    """
    phi_raw = (1 / tp) * (t_datos - t0)
    # np.trunc truncates toward zero elementwise, exactly what the old
    # vint = np.vectorize(np.int) helper did; np.int was removed in
    # NumPy 1.24, so this also keeps the script working on modern NumPy.
    return phi_raw - np.trunc(phi_raw)
# Multiplicity factors used to correct eclipsing-binary periods.
N_mult=np.genfromtxt("IndicesMult.csv", delimiter=",", skip_header=1);
N_mult_fnP=N_mult[:,1];
lista_estrellas=[];
lista_periodos=[];
for k in range(len(numero_estrella)):
    # Read the periods found by fnpeaks for this star (.max file, column index 2).
    elSeniorArchivoMax=nombre_res+numero_estrella[k]+extensionMax;
    dat_tp=np.genfromtxt(elSeniorArchivoMax,delimiter=' ',skip_header=9, usecols=2);
    periodos=dat_tp;
    # Choose which detected period to adopt; a few stars are special-cased by id.
    if tipo_estrella=='BinariaECL':
        tp_estrella=N_mult_fnP[k]*periodos[0];
        if numero_estrella[k]=='01729':
            tp_estrella=periodos[3];
    elif tipo_estrella=='RR_Lyrae' and numero_estrella[k]=='00573':
        tp_estrella=periodos[1];
    else:
        tp_estrella=periodos[0];
    # end if
    # Load the light curve (time, magnitude) and fold it at the adopted period.
    elSeniorArchivo=label_path+numero_estrella[k]+extension;
    datos=np.genfromtxt(elSeniorArchivo,delimiter=' ');
    t_dat=datos[:,0];
    us=datos[:,1];
    tini=t_dat[0];
    phis=transformar_ts(tp_estrella,t_dat[0],t_dat);
    # Duplicate the cycle shifted by one phase so the plot shows two periods.
    phis1=phis+1;
    phisG=np.r_[phis,phis1];
    usG=np.r_[us,us];
    fig1=plt.figure();
    ax1=fig1.add_subplot(111);
    ax1.scatter(phisG,usG);
    # Invert the y axis: smaller magnitude means brighter.
    ax1.set_ylim(ax1.get_ylim()[::-1]);
    ax1.set_xlabel("fase");
    ax1.set_ylabel("Magnitud");
    nombreFoto2="fnP_Curva_de_luz_de_"+tipo_estrella+"-"+numero_estrella[k]+".png";
    plt.savefig(nombreFoto2);
    plt.close(fig1);
    LabelEstrella="Estella_"+tipo_estrella+"_"+numero_estrella[k]+"_fnP";
    lista_estrellas.append(LabelEstrella);
    lista_periodos.append(tp_estrella);
# end for
# Export the star label / adopted period pairs to CSV.
lista_estrellas=np.array(lista_estrellas);
lista_periodos=np.array(lista_periodos);
datos_exportacion=np.c_[lista_estrellas,lista_periodos];
encabezado=['Nombre_estrella','Periodo_fnP[dias]'];
with open('periodos_hallados_fnP.csv', 'w', encoding='UTF8', newline='') as f:
    writer=csv.writer(f);
    writer.writerow(encabezado);
    writer.writerows(datos_exportacion);
# end with
| [
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"csv.writer",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.vectorize"
] | [((319, 393), 'numpy.loadtxt', 'np.loadtxt', (['"""numero_estrellas.csv"""'], {'delimiter': '""","""', 'dtype': '"""str"""', 'skiprows': '(1)'}), "('numero_estrellas.csv', delimiter=',', dtype='str', skiprows=1)\n", (329, 393), True, 'import numpy as np\n'), ((1251, 1271), 'numpy.vectorize', 'np.vectorize', (['np.int'], {}), '(np.int)\n', (1263, 1271), True, 'import numpy as np\n'), ((1410, 1472), 'numpy.genfromtxt', 'np.genfromtxt', (['"""IndicesMult.csv"""'], {'delimiter': '""","""', 'skip_header': '(1)'}), "('IndicesMult.csv', delimiter=',', skip_header=1)\n", (1423, 1472), True, 'import numpy as np\n'), ((2752, 2777), 'numpy.array', 'np.array', (['lista_estrellas'], {}), '(lista_estrellas)\n', (2760, 2777), True, 'import numpy as np\n'), ((2794, 2818), 'numpy.array', 'np.array', (['lista_periodos'], {}), '(lista_periodos)\n', (2802, 2818), True, 'import numpy as np\n'), ((1648, 1725), 'numpy.genfromtxt', 'np.genfromtxt', (['elSeniorArchivoMax'], {'delimiter': '""" """', 'skip_header': '(9)', 'usecols': '(2)'}), "(elSeniorArchivoMax, delimiter=' ', skip_header=9, usecols=2)\n", (1661, 1725), True, 'import numpy as np\n'), ((2080, 2125), 'numpy.genfromtxt', 'np.genfromtxt', (['elSeniorArchivo'], {'delimiter': '""" """'}), "(elSeniorArchivo, delimiter=' ')\n", (2093, 2125), True, 'import numpy as np\n'), ((2293, 2305), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2557), 'matplotlib.pyplot.savefig', 'plt.savefig', (['nombreFoto2'], {}), '(nombreFoto2)\n', (2544, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2560, 2575), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (2569, 2575), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3030), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3027, 3030), False, 'import csv\n')] |
import json
import os
import subprocess
import unittest
from shutil import rmtree
from sys import platform
import numpy as np
import pandas as pd
from elf.io import open_file
from pybdv.util import get_key
from mobie import add_image
from mobie.validation import validate_source_metadata
from mobie.metadata import read_dataset_metadata
class TestSegmentation(unittest.TestCase):
    """Integration tests for adding a segmentation source to a MoBIE dataset,
    both through the Python API and through the command-line interface."""

    test_folder = './test-folder'
    root = './test-folder/data'
    shape = (128, 128, 128)
    dataset_name = 'test'

    def init_dataset(self):
        """Create a random raw volume and initialize the MoBIE dataset with it."""
        data_path = os.path.join(self.test_folder, 'data.h5')
        data_key = 'data'
        with open_file(data_path, 'a') as f:
            f.create_dataset(data_key, data=np.random.rand(*self.shape))

        tmp_folder = os.path.join(self.test_folder, 'tmp-init')
        raw_name = 'test-raw'
        scales = [[2, 2, 2]]
        add_image(data_path, data_key, self.root, self.dataset_name, raw_name,
                  resolution=(1, 1, 1), chunks=(64, 64, 64), scale_factors=scales,
                  tmp_folder=tmp_folder)

    def setUp(self):
        """Initialize the dataset and write a random segmentation volume."""
        os.makedirs(self.test_folder, exist_ok=True)
        self.init_dataset()

        self.seg_path = os.path.join(self.test_folder, 'seg.h5')
        self.seg_key = 'seg'
        self.data = np.random.randint(0, 100, size=self.shape)
        with open_file(self.seg_path, 'a') as f:
            f.create_dataset(self.seg_key, data=self.data)

    def tearDown(self):
        """Remove all test artifacts; ignore filesystem races on cleanup."""
        try:
            rmtree(self.test_folder)
        except OSError:
            pass

    def check_segmentation(self, dataset_folder, name):
        """Validate metadata, pixel data and the default table of a segmentation."""
        self.assertTrue(os.path.exists(dataset_folder))
        exp_data = self.data

        # check the segmentation metadata
        metadata = read_dataset_metadata(dataset_folder)
        self.assertIn(name, metadata['sources'])
        validate_source_metadata(name, metadata['sources'][name], dataset_folder)

        # check the segmentation data
        seg_path = os.path.join(dataset_folder, 'images', 'bdv-n5', f'{name}.n5')
        self.assertTrue(os.path.exists(seg_path))
        key = get_key(False, 0, 0, 0)
        with open_file(seg_path, 'r') as f:
            data = f[key][:]
        self.assertTrue(np.array_equal(data, exp_data))

        # check the table
        table_path = os.path.join(dataset_folder, 'tables', name, 'default.tsv')
        # bug fix: `table_path` used to sit outside the call (making a no-op
        # tuple); it is meant to be the assertion's failure message.
        self.assertTrue(os.path.exists(table_path), table_path)
        table = pd.read_csv(table_path, sep='\t')

        label_ids = table['label_id'].values
        # the table is expected to skip label 0 (treated as background)
        exp_label_ids = np.unique(data)
        if 0 in exp_label_ids:
            exp_label_ids = exp_label_ids[1:]
        self.assertTrue(np.array_equal(label_ids, exp_label_ids))

    def test_add_segmentation(self):
        """Add a segmentation via the Python API and validate the result."""
        from mobie import add_segmentation
        dataset_folder = os.path.join(self.root, self.dataset_name)

        seg_name = 'seg'
        tmp_folder = os.path.join(self.test_folder, 'tmp-seg')

        scales = [[2, 2, 2]]
        add_segmentation(self.seg_path, self.seg_key,
                         self.root, self.dataset_name, seg_name,
                         resolution=(1, 1, 1), scale_factors=scales,
                         chunks=(64, 64, 64), tmp_folder=tmp_folder)
        self.check_segmentation(dataset_folder, seg_name)

    @unittest.skipIf(platform == "win32", "CLI does not work on windows")
    def test_cli(self):
        """Add a segmentation via the command-line entry point and validate it."""
        seg_name = 'seg'
        resolution = json.dumps([1., 1., 1.])
        scales = json.dumps([[2, 2, 2]])
        chunks = json.dumps([64, 64, 64])

        tmp_folder = os.path.join(self.test_folder, 'tmp-seg')

        cmd = ['mobie.add_segmentation',
               '--input_path', self.seg_path,
               '--input_key', self.seg_key,
               '--root', self.root,
               '--dataset_name', self.dataset_name,
               '--name', seg_name,
               '--resolution', resolution,
               '--scale_factors', scales,
               '--chunks', chunks,
               '--tmp_folder', tmp_folder]
        # check=True surfaces a failing CLI immediately instead of via the
        # later, less specific, filesystem assertions.
        subprocess.run(cmd, check=True)

        dataset_folder = os.path.join(self.root, self.dataset_name)
        self.check_segmentation(dataset_folder, seg_name)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.rand",
"pandas.read_csv",
"unittest.skipIf",
"mobie.validation.validate_source_metadata",
"unittest.main",
"os.path.exists",
"mobie.metadata.read_dataset_metadata",
"pybdv.util.get_key",
"json.dumps",
"subprocess.run",
"elf.io.open_file",
"numpy.unique",
"os.makedirs",
"mobie... | [((3303, 3371), 'unittest.skipIf', 'unittest.skipIf', (["(platform == 'win32')", '"""CLI does not work on windows"""'], {}), "(platform == 'win32', 'CLI does not work on windows')\n", (3318, 3371), False, 'import unittest\n'), ((4221, 4236), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4234, 4236), False, 'import unittest\n'), ((553, 594), 'os.path.join', 'os.path.join', (['self.test_folder', '"""data.h5"""'], {}), "(self.test_folder, 'data.h5')\n", (565, 594), False, 'import os\n'), ((761, 803), 'os.path.join', 'os.path.join', (['self.test_folder', '"""tmp-init"""'], {}), "(self.test_folder, 'tmp-init')\n", (773, 803), False, 'import os\n'), ((872, 1038), 'mobie.add_image', 'add_image', (['data_path', 'data_key', 'self.root', 'self.dataset_name', 'raw_name'], {'resolution': '(1, 1, 1)', 'chunks': '(64, 64, 64)', 'scale_factors': 'scales', 'tmp_folder': 'tmp_folder'}), '(data_path, data_key, self.root, self.dataset_name, raw_name,\n resolution=(1, 1, 1), chunks=(64, 64, 64), scale_factors=scales,\n tmp_folder=tmp_folder)\n', (881, 1038), False, 'from mobie import add_image\n'), ((1097, 1141), 'os.makedirs', 'os.makedirs', (['self.test_folder'], {'exist_ok': '(True)'}), '(self.test_folder, exist_ok=True)\n', (1108, 1141), False, 'import os\n'), ((1195, 1235), 'os.path.join', 'os.path.join', (['self.test_folder', '"""seg.h5"""'], {}), "(self.test_folder, 'seg.h5')\n", (1207, 1235), False, 'import os\n'), ((1285, 1327), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'self.shape'}), '(0, 100, size=self.shape)\n', (1302, 1327), True, 'import numpy as np\n'), ((1756, 1793), 'mobie.metadata.read_dataset_metadata', 'read_dataset_metadata', (['dataset_folder'], {}), '(dataset_folder)\n', (1777, 1793), False, 'from mobie.metadata import read_dataset_metadata\n'), ((1851, 1924), 'mobie.validation.validate_source_metadata', 'validate_source_metadata', (['name', "metadata['sources'][name]", 'dataset_folder'], {}), "(name, 
metadata['sources'][name], dataset_folder)\n", (1875, 1924), False, 'from mobie.validation import validate_source_metadata\n'), ((1983, 2045), 'os.path.join', 'os.path.join', (['dataset_folder', '"""images"""', '"""bdv-n5"""', 'f"""{name}.n5"""'], {}), "(dataset_folder, 'images', 'bdv-n5', f'{name}.n5')\n", (1995, 2045), False, 'import os\n'), ((2110, 2133), 'pybdv.util.get_key', 'get_key', (['(False)', '(0)', '(0)', '(0)'], {}), '(False, 0, 0, 0)\n', (2117, 2133), False, 'from pybdv.util import get_key\n'), ((2311, 2370), 'os.path.join', 'os.path.join', (['dataset_folder', '"""tables"""', 'name', '"""default.tsv"""'], {}), "(dataset_folder, 'tables', name, 'default.tsv')\n", (2323, 2370), False, 'import os\n'), ((2451, 2484), 'pandas.read_csv', 'pd.read_csv', (['table_path'], {'sep': '"""\t"""'}), "(table_path, sep='\\t')\n", (2462, 2484), True, 'import pandas as pd\n'), ((2555, 2570), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (2564, 2570), True, 'import numpy as np\n'), ((2820, 2862), 'os.path.join', 'os.path.join', (['self.root', 'self.dataset_name'], {}), '(self.root, self.dataset_name)\n', (2832, 2862), False, 'import os\n'), ((2910, 2951), 'os.path.join', 'os.path.join', (['self.test_folder', '"""tmp-seg"""'], {}), "(self.test_folder, 'tmp-seg')\n", (2922, 2951), False, 'import os\n'), ((2990, 3172), 'mobie.add_segmentation', 'add_segmentation', (['self.seg_path', 'self.seg_key', 'self.root', 'self.dataset_name', 'seg_name'], {'resolution': '(1, 1, 1)', 'scale_factors': 'scales', 'chunks': '(64, 64, 64)', 'tmp_folder': 'tmp_folder'}), '(self.seg_path, self.seg_key, self.root, self.dataset_name,\n seg_name, resolution=(1, 1, 1), scale_factors=scales, chunks=(64, 64, \n 64), tmp_folder=tmp_folder)\n', (3006, 3172), False, 'from mobie import add_segmentation\n'), ((3443, 3470), 'json.dumps', 'json.dumps', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3453, 3470), False, 'import json\n'), ((3485, 3508), 'json.dumps', 'json.dumps', (['[[2, 2, 
2]]'], {}), '([[2, 2, 2]])\n', (3495, 3508), False, 'import json\n'), ((3526, 3550), 'json.dumps', 'json.dumps', (['[64, 64, 64]'], {}), '([64, 64, 64])\n', (3536, 3550), False, 'import json\n'), ((3573, 3614), 'os.path.join', 'os.path.join', (['self.test_folder', '"""tmp-seg"""'], {}), "(self.test_folder, 'tmp-seg')\n", (3585, 3614), False, 'import os\n'), ((4041, 4060), 'subprocess.run', 'subprocess.run', (['cmd'], {}), '(cmd)\n', (4055, 4060), False, 'import subprocess\n'), ((4087, 4129), 'os.path.join', 'os.path.join', (['self.root', 'self.dataset_name'], {}), '(self.root, self.dataset_name)\n', (4099, 4129), False, 'import os\n'), ((634, 659), 'elf.io.open_file', 'open_file', (['data_path', '"""a"""'], {}), "(data_path, 'a')\n", (643, 659), False, 'from elf.io import open_file\n'), ((1341, 1370), 'elf.io.open_file', 'open_file', (['self.seg_path', '"""a"""'], {}), "(self.seg_path, 'a')\n", (1350, 1370), False, 'from elf.io import open_file\n'), ((1486, 1510), 'shutil.rmtree', 'rmtree', (['self.test_folder'], {}), '(self.test_folder)\n', (1492, 1510), False, 'from shutil import rmtree\n'), ((1633, 1663), 'os.path.exists', 'os.path.exists', (['dataset_folder'], {}), '(dataset_folder)\n', (1647, 1663), False, 'import os\n'), ((2070, 2094), 'os.path.exists', 'os.path.exists', (['seg_path'], {}), '(seg_path)\n', (2084, 2094), False, 'import os\n'), ((2147, 2171), 'elf.io.open_file', 'open_file', (['seg_path', '"""r"""'], {}), "(seg_path, 'r')\n", (2156, 2171), False, 'from elf.io import open_file\n'), ((2231, 2261), 'numpy.array_equal', 'np.array_equal', (['data', 'exp_data'], {}), '(data, exp_data)\n', (2245, 2261), True, 'import numpy as np\n'), ((2672, 2712), 'numpy.array_equal', 'np.array_equal', (['label_ids', 'exp_label_ids'], {}), '(label_ids, exp_label_ids)\n', (2686, 2712), True, 'import numpy as np\n'), ((2395, 2421), 'os.path.exists', 'os.path.exists', (['table_path'], {}), '(table_path)\n', (2409, 2421), False, 'import os\n'), ((710, 737), 
'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (724, 737), True, 'import numpy as np\n')] |
"""Functions to create and plot outlier scores (or other) in a fixed bounded range. Intended to use to
show the results of an outlier algorithm in a user friendly UI"""
import numpy as np
def make_linear_part(max_score, min_score):
    """Build the linear middle segment of the UI score mapping.

    The returned function maps ``min_score`` to 1 and ``max_score`` to 2,
    interpolating linearly in between.

    :param max_score: the maximum score seen on train
    :param min_score: the minimum score seen on train
    :return: the linear part of the ui score mapping
    """
    slope = 1 / (max_score - min_score)

    def linear_part(x):
        # Point-slope form anchored at (min_score, 1).
        return 1 + slope * (x - min_score)

    return linear_part
def make_top_part(base, max_score, min_score):
    """Build the upper exponential segment of the UI score mapping.

    The segment has the form ``-base ** (-x + t) + c``; ``t`` and ``c`` are
    solved so that the curve meets the linear part at ``max_score`` with the
    same value (2) and the same slope, and approaches ``c`` from below as
    ``x`` grows.

    NOTE: ``base`` must be strictly greater than 1.  (The previous docstring
    said "between 0 and 1, strictly", but for such bases ``np.log(base)`` is
    negative, making ``t`` NaN, and ``base ** -x`` would diverge rather than
    decay.)

    :param base: base of the exponential, > 1; larger gives a sharper corner
    :param max_score: upper end of the linear segment (join point)
    :param min_score: lower end of the linear segment (fixes the shared slope)
    :return: tuple ``(top_part, c)``: the segment function and its supremum
    """
    slope = 1 / (max_score - min_score)
    # Smoothness: d/dx[-base**(-x+t)] = log(base) * base**(-x+t) must equal
    # `slope` at x == max_score; solving for t gives:
    t = np.log(slope / np.log(base)) / np.log(base) + max_score
    # Continuity: the linear part equals 2 at max_score, so choose c such
    # that top_part(max_score) == 2.  As x -> inf the function approaches c.
    c = 2 + base ** (-max_score + t)

    def top_part(x):
        return -(base ** (-x + t)) + c

    return top_part, c
def make_bottom_part(base, max_score, min_score):
    """Build the lower exponential segment of the UI score mapping.

    The segment has the form ``base ** (x + t) + c`` (the previous docstring
    was copy-pasted from the top part and quoted the wrong formula and join
    point); ``t`` and ``c`` are solved so that the curve meets the linear part
    at ``min_score`` with the same value (1) and the same slope, and
    approaches ``c`` from above as ``x`` decreases.

    NOTE: ``base`` must be strictly greater than 1 (see ``make_top_part``).

    :param base: base of the exponential, > 1; larger gives a sharper corner
    :param max_score: upper end of the linear segment (fixes the shared slope)
    :param min_score: lower end of the linear segment (join point)
    :return: tuple ``(bottom_part, c)``: the segment function and its infimum
    """
    slope = 1 / (max_score - min_score)
    # Smoothness: d/dx[base**(x+t)] = log(base) * base**(x+t) must equal
    # `slope` at x == min_score; solving for t gives:
    t = np.log(slope / np.log(base)) / np.log(base) - min_score
    # Continuity: the linear part equals 1 at min_score, so choose c such
    # that bottom_part(min_score) == 1.  As x -> -inf the function approaches c.
    c = 1 - base ** (min_score + t)

    def bottom_part(x):
        return base ** (x + t) + c

    return bottom_part, c
def make_ui_score_mapping(
    min_lin_score, max_lin_score, top_base=2, bottom_base=2, max_score=10, reverse=False
):
    """Build a smooth, sigmoid-like map from raw outlier scores to [0, max_score].

    The mapping is linear between ``min_lin_score`` and ``max_lin_score`` and
    bends over smoothly (exponentially) outside that range; ``top_base`` and
    ``bottom_base`` control how sharp the upper and lower corners are.

    :param min_lin_score: float, the minimum score which is mapped with a linear function
    :param max_lin_score: float, the maximum score which is mapped with a linear function
    :param top_base: float, the base of the exponential function above the linear part
    :param bottom_base: float, the base of the exponential function below the linear part
    :param max_score: float, the upper bound of the function
    :param reverse: boolean, whether to mirror the function along its center
    :return: a mapping, sigmoid like

    ------------------------ Example of use: ---------------------------
    sigmoid_map = make_ui_score_mapping(min_lin_score=1, max_lin_score=9)
    x = np.linspace(-5, 15, 100)
    plt.plot(x, [sigmoid_map(i) for i in x])
    """
    linear_part = make_linear_part(max_lin_score, min_lin_score)
    bottom_part, min_ = make_bottom_part(bottom_base, max_lin_score, min_lin_score)
    top_part, max_ = make_top_part(top_base, max_lin_score, min_lin_score)

    def _forward(x):
        # Pick the segment the raw score falls in, then rescale the raw
        # curve (which spans [min_, max_]) onto [0, max_score].
        if x < min_lin_score:
            raw = bottom_part(x)
        elif x > max_lin_score:
            raw = top_part(x)
        else:
            raw = linear_part(x)
        return max_score * (raw - min_) / (max_ - min_)

    if reverse:
        def ui_score_mapping(x):
            return max_score - _forward(x)
    else:
        ui_score_mapping = _forward

    return ui_score_mapping
def between_percentiles_mean(scores, min_percentile=0.450, max_percentile=0.55):
    """Return the mean of the scores lying between the two percentile bounds.

    :param scores: array-like of numeric scores
    :param min_percentile: float in [0, 1], lower percentile bound (inclusive)
    :param max_percentile: float in [0, 1], upper percentile bound (exclusive)
    :return: mean of the sorted scores within the percentile window
    """
    # Use the module-level `np` import; the old function-scope
    # `import numpy` was redundant, as was the extra numpy.array copy
    # (np.sort already accepts any array-like and returns a new array).
    sorted_scores = np.sort(scores)
    lo = int(min_percentile * len(sorted_scores))
    hi = int(max_percentile * len(sorted_scores))
    return np.mean(sorted_scores[lo:hi])
def tune_ui_map(
    scores,
    truth=None,
    all_normal=True,
    min_percentile_normal=0.25,
    max_percentile_normal=0.75,
    min_percentile_abnormal=0.25,
    max_percentile_abnormal=0.75,
    lower_base=10,
    upper_base=10,
    abnormal_fact=2,
):
    """
    Construct a ui scores map spreading out the scores between 0 and 10, where high means normal. Scores is
    an array of raw stroll scores. NOTE: it assumes large scores means abnormal, small means normal!! Need to adapt
    otherwise.

    LOWERING the default range for the normal scores from [0.25, 0.75] to say [0., 0.25] will DECREASE the average
    quality score of normal sounds.
    INCREASING the range for the abnormal scores from [0.25, 0.75] to say [0.5, 1.0] will DECREASE the average quality
    score of abnormal sounds.

    :param scores: array-like of raw outlier scores
    :param truth: optional array of labels aligned with scores; 0 is treated
        as normal and 1 as abnormal (only used when it contains two classes)
    :param all_normal: when no two-class truth is given, whether the scores
        are assumed to all come from normal examples
    :param min_percentile_normal: lower percentile bound for the normal mean
    :param max_percentile_normal: upper percentile bound for the normal mean
    :param min_percentile_abnormal: lower percentile bound for the abnormal mean
    :param max_percentile_abnormal: upper percentile bound for the abnormal mean
    :param lower_base: passed through unchanged (exponential base for the
        lower corner of the eventual mapping)
    :param upper_base: passed through unchanged (exponential base for the
        upper corner of the eventual mapping)
    :param abnormal_fact: multiplier used to extrapolate an abnormal level
        when only normal scores are available
    :return: tuple (median_normal, median_abnormal, lower_base, upper_base);
        note the "median" values are percentile-windowed means, not medians
    """
    scores = np.array(scores)
    # we have examples of normal and abnormal
    if truth is not None and len(set(truth)) == 2:
        truth = np.array(truth)
        median_normal = between_percentiles_mean(
            scores[truth == 0],
            min_percentile=min_percentile_normal,
            max_percentile=max_percentile_normal,
        )
        median_abnormal = between_percentiles_mean(
            scores[truth == 1],
            min_percentile=min_percentile_abnormal,
            max_percentile=max_percentile_abnormal,
        )
    # if not the scores are all normal
    elif all_normal:
        median_normal = between_percentiles_mean(
            scores,
            min_percentile=min_percentile_normal,
            max_percentile=max_percentile_normal,
        )
        # mean of the top decile of the (all-normal) scores
        normal_large = between_percentiles_mean(
            scores, min_percentile=0.9, max_percentile=1
        )
        # as an approximation of the abnormal level, scale the top-decile mean
        median_abnormal = normal_large * abnormal_fact
    # probably never useful, in case all scores are from abnormal
    else:
        median_abnormal = between_percentiles_mean(
            scores,
            min_percentile=min_percentile_abnormal,
            max_percentile=max_percentile_abnormal,
        )
        median_normal = median_abnormal / 10
    return median_normal, median_abnormal, lower_base, upper_base
| [
"numpy.log",
"numpy.array",
"numpy.sort",
"numpy.mean"
] | [((4671, 4690), 'numpy.array', 'numpy.array', (['scores'], {}), '(scores)\n', (4682, 4690), False, 'import numpy\n'), ((4711, 4729), 'numpy.sort', 'numpy.sort', (['scores'], {}), '(scores)\n', (4721, 4729), False, 'import numpy\n'), ((4894, 4917), 'numpy.mean', 'numpy.mean', (['high_scores'], {}), '(high_scores)\n', (4904, 4917), False, 'import numpy\n'), ((5751, 5767), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (5759, 5767), True, 'import numpy as np\n'), ((5881, 5896), 'numpy.array', 'np.array', (['truth'], {}), '(truth)\n', (5889, 5896), True, 'import numpy as np\n'), ((1182, 1194), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (1188, 1194), True, 'import numpy as np\n'), ((1798, 1810), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (1804, 1810), True, 'import numpy as np\n'), ((1166, 1178), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (1172, 1178), True, 'import numpy as np\n'), ((1782, 1794), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (1788, 1794), True, 'import numpy as np\n')] |
import csv
import os
import sys
import typing
import keras
import librosa
import numpy as np
sys.path.append(os.path.dirname(os.path.realpath(__file__)))  # TODO(TK): replace this with a correct import when mevonai is a package
import bulkDiarize as bk
# Path to the bundled pre-trained Keras model, resolved relative to this file.
default_model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model', 'lstm_cnn_rectangular_lowdropout_trainedoncustomdata.h5')
# Silence TensorFlow C++ logging ('3' = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Emotion labels indexed by the model-output argmax below.
# NOTE(review): order must match the model's output units — confirm against training.
classes = ['Neutral', 'Happy', 'Sad', 'Angry', 'Fearful', 'Disgusted', 'Surprised']
class EmotionRecognizer:
    """Wraps a pre-trained Keras model that predicts emotions from raw audio."""

    def __init__(self, model_file: typing.Optional[str] = None):
        """Load the model from *model_file*, or the bundled default when None."""
        path = default_model_path if model_file is None else model_file
        self._model = keras.models.load_model(path)
        self._classes = ('Neutral', 'Happy', 'Sad', 'Angry', 'Fearful', 'Disgusted', 'Surprised')

    def predict_proba(
        self,
        audio_data: typing.Any,  # TODO(TK): replace with np.typing.ArrayLike when numpy upgrades to 1.20+ (conditional on TensorFlow support)
        sample_rate: int,
    ) -> typing.Dict[str, float]:
        """Return a mapping from emotion name to the model's predicted value."""
        mfccs = librosa.feature.mfcc(y=audio_data, sr=sample_rate, n_mfcc=13)
        # Zero-pad the MFCC matrix into the fixed (13, 216) input frame.
        padded = np.zeros((13, 216))
        padded[:mfccs.shape[0], :mfccs.shape[1]] = mfccs
        # Add batch and channel axes: the model input is (1, 13, 216, 1).
        batch = padded[np.newaxis, :, :, np.newaxis]
        probs = self._model.predict(batch).flatten()
        return dict(zip(self._classes, probs))
def predict(folder, classes, model):
    """Predict one emotion label per audio file in every sub-directory of *folder*.

    :param folder: directory whose sub-directories each hold diarized audio files
    :param classes: ordered labels; index must match the model's output units
    :param model: a loaded Keras model exposing ``predict``
    :return: tuple ``(solutions, filenames)``: ``filenames`` lists the
        sub-directory names, and ``solutions`` holds per sub-directory a list
        of predicted labels, padded with 'None' to at least two entries
    """
    solutions = []
    filenames = []
    for subdir in os.listdir(folder):
        predictions = []
        filenames.append(subdir)
        for file in os.listdir(f'{folder}{"/"}{subdir}'):
            # Fixed-size input frame expected by the model: (1, 13, 216).
            temp = np.zeros((1, 13, 216))
            X, sample_rate = librosa.load(os.path.join(f'{folder}{"/"}{subdir}{"/"}', file),
                                          res_type='kaiser_fast', duration=2.5, sr=22050 * 2, offset=0.5)
            mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13)
            # Zero-pad the MFCCs into the fixed frame.
            result = np.zeros((13, 216))
            result[:mfccs.shape[0], :mfccs.shape[1]] = mfccs
            temp[0] = result
            t = np.expand_dims(temp, axis=3)
            ans = model.predict(t).flatten()
            if ans.shape[0] != len(classes):
                raise RuntimeError("Unexpected number of classes encountered")
            predictions.append(classes[np.argmax(ans)])
        # Downstream code reads predictions[i][0] and predictions[i][1], so
        # always pad up to two entries.  (The old `if` padded only once,
        # which broke on sub-directories containing no audio files.)
        while len(predictions) < 2:
            predictions.append('None')
        solutions.append(predictions)
    return solutions, filenames
if __name__ == '__main__':
    model = keras.models.load_model(default_model_path)
    INPUT_FOLDER_PATH = "input/"
    OUTPUT_FOLDER_PATH = "output/"
    # bk.diarizeFromFolder(INPUT_FOLDER_PATH,OUTPUT_FOLDER_PATH)
    # Split each input recording into per-speaker segments.
    for subdir in os.listdir(INPUT_FOLDER_PATH):
        bk.diarizeFromFolder(f'{INPUT_FOLDER_PATH}{subdir}{"/"}',(f'{OUTPUT_FOLDER_PATH}{subdir}{"/"}'))
        print("Diarized",subdir)
    folder = OUTPUT_FOLDER_PATH
    # Classify emotions for each diarized folder and write one CSV per subdir.
    for subdir in os.listdir(folder):
        predictions,filenames = predict(f'{folder}{"/"}{subdir}', classes, model)
        # print("filename:",filenames,",Predictions:",predictions)
        with open('SER_'+subdir+'.csv', 'w') as csvFile:
            writer = csv.writer(csvFile)
            for i in range(len(filenames)):
                csvData = [filenames[i], 'person01',predictions[i][0],'person02',predictions[i][1]]
                print("filename:",filenames[i],",Predicted Emotion := Person1:",predictions[i][0],",Person2:",predictions[i][1])
                writer.writerow(csvData)
        csvFile.close()  # redundant: the with-block has already closed the file
    os.remove("filterTemp.wav")  # NOTE(review): presumably a temp file left by bulkDiarize — confirm
| [
"os.listdir",
"keras.models.load_model",
"bulkDiarize.diarizeFromFolder",
"csv.writer",
"os.path.join",
"librosa.feature.mfcc",
"numpy.argmax",
"os.path.realpath",
"numpy.zeros",
"numpy.expand_dims",
"os.remove"
] | [((1679, 1697), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1689, 1697), False, 'import os\n'), ((2824, 2867), 'keras.models.load_model', 'keras.models.load_model', (['default_model_path'], {}), '(default_model_path)\n', (2847, 2867), False, 'import keras\n'), ((3019, 3048), 'os.listdir', 'os.listdir', (['INPUT_FOLDER_PATH'], {}), '(INPUT_FOLDER_PATH)\n', (3029, 3048), False, 'import os\n'), ((3239, 3257), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (3249, 3257), False, 'import os\n'), ((3848, 3875), 'os.remove', 'os.remove', (['"""filterTemp.wav"""'], {}), "('filterTemp.wav')\n", (3857, 3875), False, 'import os\n'), ((127, 153), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (143, 153), False, 'import os\n'), ((306, 332), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (322, 332), False, 'import os\n'), ((1169, 1230), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'audio_data', 'sr': 'sample_rate', 'n_mfcc': '(13)'}), '(y=audio_data, sr=sample_rate, n_mfcc=13)\n', (1189, 1230), False, 'import librosa\n'), ((1248, 1267), 'numpy.zeros', 'np.zeros', (['(13, 216)'], {}), '((13, 216))\n', (1256, 1267), True, 'import numpy as np\n'), ((1340, 1362), 'numpy.zeros', 'np.zeros', (['(1, 13, 216)'], {}), '((1, 13, 216))\n', (1348, 1362), True, 'import numpy as np\n'), ((1434, 1462), 'numpy.expand_dims', 'np.expand_dims', (['temp'], {'axis': '(3)'}), '(temp, axis=3)\n', (1448, 1462), True, 'import numpy as np\n'), ((1855, 1891), 'os.listdir', 'os.listdir', (['f"""{folder}{\'/\'}{subdir}"""'], {}), '(f"{folder}{\'/\'}{subdir}")\n', (1865, 1891), False, 'import os\n'), ((3058, 3157), 'bulkDiarize.diarizeFromFolder', 'bk.diarizeFromFolder', (['f"""{INPUT_FOLDER_PATH}{subdir}{\'/\'}"""', 'f"""{OUTPUT_FOLDER_PATH}{subdir}{\'/\'}"""'], {}), '(f"{INPUT_FOLDER_PATH}{subdir}{\'/\'}",\n f"{OUTPUT_FOLDER_PATH}{subdir}{\'/\'}")\n', (3078, 3157), True, 'import bulkDiarize as bk\n'), 
((680, 715), 'keras.models.load_model', 'keras.models.load_model', (['model_file'], {}), '(model_file)\n', (703, 715), False, 'import keras\n'), ((756, 799), 'keras.models.load_model', 'keras.models.load_model', (['default_model_path'], {}), '(default_model_path)\n', (779, 799), False, 'import keras\n'), ((1949, 1971), 'numpy.zeros', 'np.zeros', (['(1, 13, 216)'], {}), '((1, 13, 216))\n', (1957, 1971), True, 'import numpy as np\n'), ((2145, 2197), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'X', 'sr': 'sample_rate', 'n_mfcc': '(13)'}), '(y=X, sr=sample_rate, n_mfcc=13)\n', (2165, 2197), False, 'import librosa\n'), ((2219, 2238), 'numpy.zeros', 'np.zeros', (['(13, 216)'], {}), '((13, 216))\n', (2227, 2238), True, 'import numpy as np\n'), ((2343, 2371), 'numpy.expand_dims', 'np.expand_dims', (['temp'], {'axis': '(3)'}), '(temp, axis=3)\n', (2357, 2371), True, 'import numpy as np\n'), ((3486, 3505), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (3496, 3505), False, 'import csv\n'), ((2012, 2061), 'os.path.join', 'os.path.join', (['f"""{folder}{\'/\'}{subdir}{\'/\'}"""', 'file'], {}), '(f"{folder}{\'/\'}{subdir}{\'/\'}", file)\n', (2024, 2061), False, 'import os\n'), ((2620, 2634), 'numpy.argmax', 'np.argmax', (['ans'], {}), '(ans)\n', (2629, 2634), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# loader.py
# Copyright (c) 2014-?, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The module contains a layer of functionality that allows
abstract saving and loading of files.
A loader class inherits from :class:`Loader`.
A singleton class :class:`LoaderSet` is the main public interface
of this module, available as the global variable ``LOADERS``.
It keeps track of all registered loaders and takes care
after them (presents them with options, requests etc.)
Loaders are registered as classes using the decorator :func:`loader`.
The concept is that there is one loader instance per file loaded.
When we want to save a file, we use a loading loader to provide data to
save and then we instantiate a saving loader (if needed) and save the data.
Individual loaders absolutely have to implement methods :meth:`Loader._save`
and :meth:`Loader._load2reg`.
This module facilitates integration of its functionality by defining
:func:`update_parser` and :func:`settle_loaders`. While the first one can
add capabilities to a parser (or parser group), the second one updates
``LOADERS`` accordingly while given parsed arguments.
Rough edges (but not rough enough to be worth the trouble):
* You can't force different loaders for image, template and output. If you
need this, you have to rely on autodetection based on file extension.
* Similarly, there is a problem with loader options --- they are shared among
all loaders. This is both a bug and a feature though.
* To show the loaders help, you have to satisfy the parser by specifying
a template and image file strings (they don't have to be real filenames tho).
"""
import sys
def _str2nptype(stri):
import numpy as np
msg = ("The string '%s' is supposed to correspond to a "
"numpy type" % stri)
try:
typ = getattr(np, stri)
except Exception as exc:
msg += " but it is not the case at all - %s." % str(exc)
raise ValueError(msg)
typestr = type(typ).__name__
# We allow mock object for people who know what they are doing.
if typestr not in ("type", "Mock"):
msg += " but it is a different animal than 'type': '%s'" % typestr
raise ValueError(msg)
return typ
def _str2flat(stri):
assert stri in "R,G,B,V".split(","), \
"Flat value has to be one of R, G, B, V, is '%s' instead" % stri
return stri
def flatten(image, char):
    """
    Given a layered image (typically (y, x, RGB)), return a plain 2D image
    (y, x) according to a spec.

    Args:
        image (np.ndarray): The image to flatten
        char (char): One of (R, G, B, or V (=value))

    Returns:
        np.ndarray - The 2D image.
    """
    # Already flat - nothing to do.
    if image.ndim < 3:
        return image
    channel_of = dict(R=0, G=1, B=2)
    if char == "V":
        # "Value" = per-pixel average over the colour channels.
        return image.mean(axis=2)
    if char in channel_of:
        return image[:, :, channel_of[char]]
    # Shouldn't happen
    assert False, "Unhandled - invalid flat spec '%s'" % char
class LoaderSet(object):
    """
    Singleton-like registry of all registered loaders.

    It holds one instance per registered loader class (sorted by priority)
    and is the central place for loader lookup (by name or by filename
    autodetection), help printing and option distribution.
    """
    # Loader classes registered via :meth:`add_loader`
    _LOADERS = []
    # singleton-like functionality
    _we = None

    def __init__(self):
        # Only the very first instance is initialised; later constructions
        # return immediately (callers use the module-level LOADERS).
        if LoaderSet._we is not None:
            return
        loaders = [loader() for loader in LoaderSet._LOADERS]
        self.loader_dict = {}
        for loader in loaders:
            self.loader_dict[loader.name] = loader
        # Lowest priority value first
        self.loaders = sorted(loaders, key=lambda x: x.priority)
        LoaderSet._we = self

    def _choose_loader(self, fname):
        """
        Use autodetection to select a loader to use.

        Returns:
            Loader instance or None if no loader can be used.
        """
        for loader in self.loaders:
            if loader.guessCanLoad(fname):
                return loader
        # Ouch, no loader available!
        return None

    def get_loader(self, fname, lname=None):
        """
        Try to select a loader. Either we know what we want (lname given),
        or an autodetection will take place.
        Exceptions are raised when things go wrong.
        """
        if lname is None:
            ret = self._choose_loader(fname)
            if ret is None:
                msg = ("No loader wanted to load '%s' during autodetection"
                       % fname)
                raise IOError(msg)
        else:
            ret = self._get_loader(lname)
        # Make sure that we don't return the same instance multiple times
        ret = ret.spawn()
        return ret

    def _get_loader(self, lname):
        """
        Look up a loader instance by its registered name.

        Raises:
            KeyError: When no loader of that name is registered.
        """
        if lname not in self.loader_dict:
            msg = "No loader named '%s'." % lname
            msg += " Choose one of %s." % self.loader_dict.keys()
            raise KeyError(msg)
        # BUG FIX: the original called the dict -- ``self.loader_dict(lname)``
        # -- which raised TypeError; subscription is what was intended.
        return self.loader_dict[lname]

    def get_loader_names(self):
        """
        What are the names of loaders that we know.
        """
        ret = self.loader_dict.keys()
        return tuple(ret)

    @classmethod
    def add_loader(cls, loader_cls):
        """
        Use this method (at early run-time) to register a loader
        """
        cls._LOADERS.append(loader_cls)

    def print_loader_help(self, lname=None):
        """
        Print info about loaders.
        Either print short summary about all loaders, or focus just on one.
        """
        if lname is None:
            msg = "Available loaders: %s\n" % (self.get_loader_names(),)
            # Lowest priority first - they are usually the most general ones
            for loader in self.loaders[::-1]:
                msg += "\n\t%s: %s\n\tAccepts options: %s\n" % (
                    loader.name, loader.desc, tuple(loader.opts.keys()))
        else:
            loader = self.loader_dict[lname]
            msg = "Loader '%s':\n" % loader.name
            msg += "\t%s\n" % loader.desc
            msg += "Accepts options:\n"
            for opt in loader.opts:
                msg += "\t'%s' (default '%s'): %s\n" % (
                    opt, loader.defaults[opt], loader.opts[opt], )
        print(msg)

    def distribute_opts(self, opts):
        """
        Propagate loader options to all loaders.
        """
        if opts is None:
            # don't return, do something so possible problems surface.
            opts = {}
        for loader in self.loaders:
            loader.setOpts(opts)
def loader_of(lname, priority):
    """
    A decorator interconnecting an abstract loader with the rest of imreg_dft.
    It stamps the loader class with its "nickname" and its autodetection
    priority, then registers it with the global loader set.
    """
    def decorate(klass):
        klass.name = lname
        klass.priority = priority
        LoaderSet.add_loader(klass)
        return klass
    return decorate
class Loader(object):
    """
    Base class of all loaders.

    A loader knows how to read a file into a registration-ready 2D array
    (:meth:`load2reg`) and how to save a transformed result back
    (:meth:`save`). One loader instance exists per loaded file
    (see :meth:`spawn`).

    .. automethod:: _save
    .. automethod:: _load2reg
    """
    # Loader nickname; set by the :func:`loader_of` decorator
    name = None
    # Autodetection priority (also decorator-set); lower = more specific
    priority = 10
    # Human-readable description shown by the loaders help
    desc = ""
    # option name -> help string
    opts = {}
    # option name -> default (string) value
    defaults = {}
    # option name -> conversion callable applied to the string value
    str2val = {}
    def __init__(self):
        self.loaded = None
        self._opts = {}
        # First run, the second will hopefully follow later
        self.setOpts({})
        # We may record some useful stuff for saving during loading
        self.saveopts = {}
    def spawn(self):
        """
        Makes a new instance of the object's class
        BUT it conserves vital data.
        """
        cls = self.__class__
        ret = cls()
        # options passed on command-line
        ret._opts = self._opts
        return ret
    def setOpts(self, options):
        """
        Store options declared in :attr:`opts`, falling back to
        :attr:`defaults` and converting via :attr:`str2val`
        (identity when no converter is declared).
        """
        for opt in self.opts:
            stri = options.get(opt, self.defaults[opt])
            val = self.str2val.get(opt, lambda x: x)(stri)
            self._opts[opt] = val
    def guessCanLoad(self, fname):
        """
        Guess whether we can load a filename just according to the name
        (extension)
        """
        return False
    def load2reg(self, fname):
        """
        Given a filename, it loads it and returns in a form suitable for
        registration (i.e. float, flattened, ...).

        Note: an IOError terminates the whole process (exit code 1).
        """
        try:
            ret = self._load2reg(fname)
        except IOError as err:
            print("Couldn't load '%s': %s" % (fname, err.strerror))
            sys.exit(1)
        return ret
    def get2save(self):
        """
        Return the originally loaded (unflattened) data for saving.
        """
        assert self.loaded is not None, \
            "Saving without loading beforehand, which is not supported. "
        return self.loaded
    def _load2reg(self, fname):
        """
        To be implemented by derived class.
        Load data from fname in a way that they can be used in the
        registration process (so it is a 2D array).
        Possibly take into account options passed upon the class creation.
        """
        raise NotImplementedError("Use the derived class")
    def _save(self, fname, tformed):
        """
        To be implemented by derived class.
        Save data to fname, possibly taking into account previous loads
        and/or options passed upon the class creation.
        """
        raise NotImplementedError("Use the derived class")
    def save(self, fname, what, loader):
        """
        Given the registration result, save the transformed input.

        The *loader* argument is the loader that performed the load;
        its save-time hints (saveopts) are merged into ours first.
        """
        sopts = loader.saveopts
        self.saveopts.update(sopts)
        self._save(fname, what)
@loader_of("mat", 10)
class _MatLoader(Loader):
    """
    Loader of MATLAB v5 ``.mat`` binary files, backed by :mod:`scipy.io`.
    Remembers the whole loaded file so that saving can write the result
    back next to the untouched structures.
    """
    desc = "Loader of .mat (MATLAB v5) binary files"
    opts = {"in": "The structure to load (empty => autodetect)",
            "out": "The structure to save the result to (empty => the same "
            "as the 'in'",
            "type": "Name of the numpy data type for the output (such as "
            "int, uint8 etc.)",
            "flat": "How to flatten (the possibly RGB image) for the "
            "registration. Values can be R, G, B or V (V for value - "
            "a number proportional to average of R, G and B)",
            }
    defaults = {"in": "", "out": "", "type": "float", "flat": "V"}
    str2val = {"type": _str2nptype, "flat": _str2flat}
    def __init__(self):
        super(_MatLoader, self).__init__()
        # By default, we have not loaded anything
        self.saveopts["loaded_all"] = {}
    def _load2reg(self, fname):
        """
        Load the requested structure from the .mat file, remember
        everything needed for a later save, and return the array
        flattened for registration.
        """
        from scipy import io
        mat = io.loadmat(fname)
        if self._opts["in"] == "":
            # Autodetection: keys starting with '_' are .mat metadata
            valid = [key for key in mat if not key.startswith("_")]
            if len(valid) != 1:
                raise RuntimeError(
                    "You have to supply an input key, there is an ambiguity "
                    "of what to load, candidates are: %s" % (tuple(valid),))
            else:
                key = valid[0]
        else:
            key = self._opts["in"]
        keys = mat.keys()
        if key not in keys:
            raise LookupError(
                "You requested load of '{}', but you can only choose from"
                " {}".format(key, tuple(keys)))
        ret = mat[key]
        # Keep the whole file and the used key so _save can round-trip it
        self.saveopts["loaded_all"] = mat
        self.saveopts["key"] = key
        self.loaded = ret
        # flattening is a no-op on 2D images
        ret = flatten(ret, self._opts["flat"])
        return ret
    def _save(self, fname, tformed):
        """
        Write *tformed* into the remembered .mat structure (under the
        'out' option key, or the key used at load time) and save the file.
        """
        from scipy import io
        if self._opts["out"] == "":
            assert "key" in self.saveopts, \
                "Don't know how to save the output - what .mat struct?"
            key = self.saveopts["key"]
        else:
            key = self._opts["out"]
        out = self.saveopts["loaded_all"]
        out[key] = tformed.astype(self._opts["type"])
        io.savemat(fname, out)
    def guessCanLoad(self, fname):
        """Accept files with a .mat extension."""
        return fname.endswith(".mat")
@loader_of("pil", 50)
class _PILLoader(Loader):
    """
    Catch-all loader for common raster image formats, backed by imageio.
    Registered with a low priority so more specific loaders win.
    """
    desc = "Loader of image formats that Pillow (or PIL) can support"
    opts = {"flat": _MatLoader.opts["flat"]}
    defaults = {"flat": _MatLoader.defaults["flat"]}
    str2val = {"flat": _MatLoader.str2val["flat"]}

    def __init__(self):
        super(_PILLoader, self).__init__()

    def _load2reg(self, fname):
        """
        Load an image file and return it flattened to 2D for registration.
        """
        import imageio
        loaded = imageio.imread(fname)
        self.loaded = loaded
        ret = loaded
        # flattening is a no-op on 2D images
        ret = flatten(ret, self._opts["flat"])
        return ret

    def _save(self, fname, tformed):
        """
        Save the transformed image.

        BUG FIX: ``scipy.misc.toimage`` was removed in SciPy 1.3.0, so we
        replicate its default behaviour (scale min..max to the 0..255 uint8
        range) and write the result with imageio, which is already used
        for loading.
        """
        import numpy as np
        import imageio
        arr = np.asarray(tformed, dtype=np.float64)
        lo, hi = arr.min(), arr.max()
        if hi > lo:
            arr = (arr - lo) * (255.0 / (hi - lo))
        else:
            # Constant image - toimage mapped it to zeros
            arr = np.zeros_like(arr)
        imageio.imwrite(fname, arr.astype(np.uint8))

    def guessCanLoad(self, fname):
        "We think that we can do everything"
        return True
@loader_of("hdr", 10)
class _HDRLoader(Loader):
    """
    Loader for the two-file hdr/img uint8 image format.
    """
    desc = ("Loader of .hdr and .img binary files. Supply the '.hdr' as input,"
            "a '.img' with the same basename is expected.")
    opts = {"norm": "Whether to divide the value by 255.0 (0 for not to)"}
    defaults = {"norm": "1"}

    def __init__(self):
        super(_HDRLoader, self).__init__()

    def guessCanLoad(self, fname):
        """Accept files with a .hdr extension."""
        return fname.endswith(".hdr")

    def _load2reg(self, fname):
        """Return image data from img&hdr uint8 files."""
        import numpy as np
        # BUG FIX: str.rstrip strips a *character set*, not a suffix, so
        # e.g. 'foo_r.hdr' used to become 'foo_'. Strip the extension only.
        if fname.endswith(".hdr"):
            basename = fname[:-len(".hdr")]
        else:
            basename = fname
        with open(basename + '.hdr', 'r') as fh:
            hdr = fh.readlines()
        img = np.fromfile(basename + '.img', np.uint8, -1)
        # Header lines 4 and 3 carry height resp. width as their last token
        img.shape = int(hdr[4].split()[-1]), int(hdr[3].split()[-1])
        if int(self._opts["norm"]):
            # Normalize uint8 data to the 0..1 float range
            img = img.astype(np.float64)
            img /= 255.0
        return img

    def _save(self, fname, tformed):
        """Save data as raw uint8 bytes, mapping 0..1 floats to 0..255."""
        import numpy as np
        # Shouldn't happen, just to make sure
        tformed[tformed > 1.0] = 1.0
        tformed[tformed < 0.0] = 0.0
        tformed *= 255.0
        uint = tformed.astype(np.uint8)
        uint.tofile(fname)
def _parse_opts(stri):
    """
    Parse a comma-separated 'option=value' string into a dict, checking
    each option name against the options of all registered loaders.
    Intended as an argparse ``type=`` callable.
    """
    from argparse import ArgumentTypeError
    ret = {}
    for comp in stri.split(","):
        sides = comp.split("=")
        if len(sides) != 2:
            raise ArgumentTypeError(
                "The options spec has to look like 'option=value', got %s."
                % comp)
        lhs, rhs = sides
        # At least one loader has to understand the option name
        understood = any(lhs in ldr.opts for ldr in LOADERS.loaders)
        if not understood:
            raise ArgumentTypeError(
                "The option '%s' is not understood by any loader" % lhs)
        ret[lhs] = rhs
    return ret
def update_parser(parser):
    """
    Register the loader-related command-line arguments on *parser*
    (an argparse parser or argument group).
    """
    parser.add_argument(
        "--loader", choices=LOADERS.get_loader_names(), default=None,
        help=("Force usage of a concrete loader (default is autodetection). "
              "If you plan on using two types of loaders to load input, or "
              "save the output, autodetection is the only way to achieve "
              "this."))
    parser.add_argument(
        "--loader-opts", default=None, type=_parse_opts,
        help=("Options for a loader (use --loader to make sure that one is "
              "used or read the docs.)"))
    parser.add_argument(
        "--help-loader", default=False, action="store_true",
        help=("Get help on all loaders or on the current loader and its "
              "options."))
def settle_loaders(args, fnames=None):
    """
    Process parsed loader-related command-line arguments.

    It:

    #. Prints the loaders help and exits the app when requested by *args*.
    #. Distributes loader options to all registered loaders.
    #. Instantiates one loader per supplied filename, if any.

    Args:
        args (namespace): The output of :func:`argparse.parse_args`
        fnames (list, optional): List of filenames to load

    Returns:
        list - loaders for the respective fnames (empty when fnames is None).
    """
    if args.help_loader:
        LOADERS.print_loader_help(args.loader)
        sys.exit(0)
    LOADERS.distribute_opts(args.loader_opts)
    if fnames is None:
        return []
    return [LOADERS.get_loader(fname, args.loader) for fname in fnames]
# Module-level singleton registry - the public entry point for loader lookup.
LOADERS = LoaderSet()
| [
"numpy.fromfile",
"scipy.io.savemat",
"scipy.io.loadmat",
"argparse.ArgumentTypeError",
"scipy.misc.toimage",
"sys.exit",
"imageio.imread"
] | [((11653, 11670), 'scipy.io.loadmat', 'io.loadmat', (['fname'], {}), '(fname)\n', (11663, 11670), False, 'from scipy import io\n'), ((12973, 12995), 'scipy.io.savemat', 'io.savemat', (['fname', 'out'], {}), '(fname, out)\n', (12983, 12995), False, 'from scipy import io\n'), ((13551, 13572), 'imageio.imread', 'imageio.imread', (['fname'], {}), '(fname)\n', (13565, 13572), False, 'import imageio\n'), ((13817, 13838), 'scipy.misc.toimage', 'misc.toimage', (['tformed'], {}), '(tformed)\n', (13829, 13838), False, 'from scipy import misc\n'), ((14654, 14698), 'numpy.fromfile', 'np.fromfile', (["(basename + '.img')", 'np.uint8', '(-1)'], {}), "(basename + '.img', np.uint8, -1)\n", (14665, 14698), True, 'import numpy as np\n'), ((17166, 17177), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (17174, 17177), False, 'import sys\n'), ((15386, 15476), 'argparse.ArgumentTypeError', 'ArgumentTypeError', (['("The options spec has to look like \'option=value\', got %s." % comp)'], {}), '(\n "The options spec has to look like \'option=value\', got %s." % comp)\n', (15403, 15476), False, 'from argparse import ArgumentTypeError\n'), ((15741, 15815), 'argparse.ArgumentTypeError', 'ArgumentTypeError', (['("The option \'%s\' is not understood by any loader" % lhs)'], {}), '("The option \'%s\' is not understood by any loader" % lhs)\n', (15758, 15815), False, 'from argparse import ArgumentTypeError\n'), ((9588, 9599), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9596, 9599), False, 'import sys\n')] |
__author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
This is a randomly generated problem
'''
def GetCostOfMove(id, r, loc1, loc2, dist):
    """Cost of moving robot *r* from loc1 to loc2: base cost 1 plus distance."""
    return dist + 1
def GetCostOfLookup(id, item):
    """Cost of a database lookup: a Beta(2, 2) random draw, floored at 1."""
    draw = np.random.beta(2, 2)
    return max(1, draw)
def GetCostOfWrap(id, orderName, m, item):
    """Cost of wrapping an item: a Normal(5, 0.5) random draw, floored at 1."""
    draw = np.random.normal(5, .5)
    return max(1, draw)
def GetCostOfPickup(id, r, item):
    """Cost of a robot pickup: a Normal(4, 1) random draw, floored at 1."""
    draw = np.random.normal(4, 1)
    return max(1, draw)
def GetCostOfPutdown(id, r, item):
    """Cost of a robot putdown: a Normal(4, 1) random draw, floored at 1."""
    draw = np.random.normal(4, 1)
    return max(1, draw)
def GetCostOfLoad(id, orderName, r, m, item):
    """Cost of loading a machine: a Normal(3, 0.5) random draw, floored at 1."""
    draw = np.random.normal(3, .5)
    return max(1, draw)
# Action-duration model: maps each action name to either a constant cost or
# one of the cost functions defined above.
# NOTE(review): TIME and COUNTER are configured identically here - presumably
# one is wall-clock time and the other a resource counter; confirm against
# the `timer` module.
DURATION.TIME = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
DURATION.COUNTER = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Static problem layout on the planner's `rv` container: locations, factory
# zones, the (undirected) travel graph with edge weights, robots with their
# carrying capacity, machines and pallets.
# NOTE(review): `rv` comes from the project import above (domain_orderFulfillment);
# the exact meaning of each field is defined there.
rv.LOCATIONS = [0, 1, 2, 3, 4, 5, 200]
rv.FACTORY1 = frozenset({0, 1, 2, 3, 4, 5, 200})
rv.FACTORY_UNION = rv.FACTORY1
rv.SHIPPING_DOC = {rv.FACTORY1: 0}
# Adjacency list of reachable locations
rv.GROUND_EDGES = {0: [1, 5, 200, 2, 4], 1: [2, 4, 5, 0, 3, 200], 2: [0, 1, 4, 5], 3: [1], 4: [0, 1, 2, 5], 5: [1, 2, 0, 4], 200: [1, 0]}
# Edge weights keyed by (smaller, larger) location pair
rv.GROUND_WEIGHTS = {(0, 1): 6.499229647088665, (0, 5): 2.692119274311481, (0, 200): 6.2181264795712705, (0, 2): 7.121374187150064, (0, 4): 7.908766557240531, (1, 2): 10.484258297196071, (1, 4): 2.8722782433551934, (1, 5): 4.117098308924607, (1, 3): 7.129538742746605, (1, 200): 8.245597546318098, (2, 4): 9.026732394538875, (2, 5): 5.704832854262499, (4, 5): 11.599770968738499}
rv.ROBOTS = { 'r0': rv.FACTORY1, }
rv.ROBOT_CAPACITY = {'r0': 4.599413029987371}
rv.MACHINES = { 'm0': rv.FACTORY1, 'm1': rv.FACTORY1, 'm2': rv.FACTORY1, }
rv.PALLETS = { 'p0', 'p1', 'p2', }
def ResetState():
    """
    Reset the global `state` object to this problem's initial configuration:
    object weights/classes, locations of robots/machines/pallets/objects,
    robot load, busy flags and machine use counters.
    """
    # NOTE(review): `state` and NIL come from the project imports above;
    # their semantics are defined by the planner, not visible here.
    state.OBJECTS = { 'o0': True, 'o1': True, 'o2': True, 'o3': True, 'o4': True, 'o5': True, 'o6': True, }
    state.OBJ_WEIGHT = {'o0': 4.599413029987371, 'o1': 4.599413029987371, 'o2': 3.4035481992311962, 'o3': 4.599413029987371, 'o4': 4.599413029987371, 'o5': 4.599413029987371, 'o6': 4.599413029987371}
    state.OBJ_CLASS = {'type0': ['o0', 'o1'], 'type1': ['o2'], 'type2': ['o3', 'o4', 'o5', 'o6']}
    state.loc = { 'r0': 1, 'm0': 5, 'm1': 4, 'm2': 1, 'p0': 2, 'p1': 1, 'p2': 200, 'o0': 200, 'o1': 3, 'o2': 4, 'o3': 200, 'o4': 0, 'o5': 200, 'o6': 0,}
    state.load = { 'r0': NIL,}
    state.busy = {'r0': False, 'm0': False, 'm1': False, 'm2': False}
    state.numUses = {'m0': 10, 'm1': 13, 'm2': 9}
    state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}
    state.shouldRedo = {}
# Goal tasks keyed by release time: at each time point a list of
# [task-name, argument-list] entries is issued.
tasks = {
    4: [['orderStart', ['type0']]],
    6: [['orderStart', ['type0']]],
}
eventsEnv = {
} | [
"numpy.random.normal",
"numpy.random.beta"
] | [((291, 311), 'numpy.random.beta', 'np.random.beta', (['(2)', '(2)'], {}), '(2, 2)\n', (305, 311), True, 'import numpy as np\n'), ((375, 399), 'numpy.random.normal', 'np.random.normal', (['(5)', '(0.5)'], {}), '(5, 0.5)\n', (391, 399), True, 'import numpy as np\n'), ((453, 475), 'numpy.random.normal', 'np.random.normal', (['(4)', '(1)'], {}), '(4, 1)\n', (469, 475), True, 'import numpy as np\n'), ((531, 553), 'numpy.random.normal', 'np.random.normal', (['(4)', '(1)'], {}), '(4, 1)\n', (547, 553), True, 'import numpy as np\n'), ((620, 644), 'numpy.random.normal', 'np.random.normal', (['(3)', '(0.5)'], {}), '(3, 0.5)\n', (636, 644), True, 'import numpy as np\n')] |
import re
import os
import pandas as pd
import numpy as np
from .extract_tools import default_tokenizer as _default_tokenizer
def _getDictionnaryKeys(dictionnary):
"""
Function that get keys from a dict object and flatten sub dict.
"""
keys_array = []
for key in dictionnary.keys():
keys_array.append(key)
if (type(dictionnary[key]) == type({})):
keys_array = keys_array+_getDictionnaryKeys(dictionnary[key])
return(keys_array)
class pandasToBrat:
    """
    Pandas-based management of a brat annotation folder.

    There is one instance of pandasToBrat per brat folder. It supports
    import and export of entity/relation configurations, of documents,
    and of annotations.

    Inputs :
        folder, str : path of the brat folder (created if missing)
    """
    def __init__(self, folder):
        """
        Args:
            folder (str): path of the brat folder; created when missing,
                a trailing '/' is appended when absent.
        """
        self.folder = folder
        self.conf_file = 'annotation.conf'
        # Column layouts of the empty DataFrames returned by read_annotation
        self.emptyDFCols = {
            "annotations":["id","type_id", "word", "label", "start", "end"],
            "relations":["id","type_id","relation","Arg1","Arg2"]
        }

        # Adding '/' to folder path if missing
        if(self.folder[-1] != '/'):
            self.folder += '/'

        # Creating folder if do not exist
        if (os.path.isdir(self.folder)) == False:
            os.mkdir(self.folder)

        # Loading conf file if exists | creating empty conf file if not
        self.read_conf()
    def _emptyData(self):
        """
        Delete every .txt/.ann file of the brat folder.
        Interactively asks for confirmation ('y') on stdin before removing.
        """
        fileList = self._getFileList()
        nb_files = fileList.shape[0]

        confirmation = input("Deleting all data ({} files), press y to confirm :".format(nb_files))
        if confirmation == 'y':
            fileList["filename"].apply(lambda x: os.remove(self.folder+x))
            print("{} files deleted.".format(nb_files))
def _generateEntitiesStr (self, conf, data = '', level = 0):
if (type(conf) != type({})):
return data
# Parsing keys
for key in conf.keys():
value = conf[key]
if value == True:
data += '\n'+level*'\t'+key
elif value == False:
data += '\n'+level*'\t'+'!'+key
elif type(value) == type({}):
data += '\n'+level*'\t'+key
data = self._generateEntitiesStr(value, data, level+1)
return data
    def _writeEntitiesLevel (self, conf, data, last_n = -1):
        """
        Parse the textual [entities] section (a list of tab-indented lines)
        into a nested dict: True for plain entities, False for '!'-prefixed
        ones, and sub-dicts for indented children. Inverse of
        _generateEntitiesStr.

        Args:
            conf (list): lines of the section, tabs encode the nesting level
            data (dict): accumulator the parsed entries are written into
            last_n (int): index of the last line handled by the caller

        Returns:
            tuple (index of last processed line, parsed dict)
        """
        for n in range(last_n,len(conf)):
            # If empty : pass, if not the last line : pass
            if (conf[n] != '' and n > last_n):
                level = len(conf[n].split("\t"))-1
                if (n+1 <= len(conf)): # Level of next item
                    next_level = len(conf[n+1].split("\t"))-1
                else:
                    next_level = level
                splitted_str = conf[n].split("\t")
                str_clean = splitted_str[len(splitted_str)-1]

                if (level >= next_level): # Write out the same-level lines
                    if (str_clean[0] == '!'):
                        data[str_clean[1:]] = False
                    else:
                        data[str_clean] = True

                    if (level > next_level):
                        # Break out of the loop: this sub-level is finished
                        break

                elif (level < next_level): # Write the children recursively
                    splitted_str = conf[n].split("\t")
                    last_n, data[str_clean] = self._writeEntitiesLevel(conf, {}, n)

        return(n, data)
def _readRelations(self, relations, entities = []):
data = {}
for relation in relations.split("\n"):
if relation != '':
relation_data = relation.split("\t")[0]
args = list(map(lambda x: x.split(":")[1], relation.split("\t")[1].split(", ")))
args_valid = list(filter(lambda x: x in entities, args))
if (len(args_valid) > 0):
data[relation_data] = {"args":args_valid}
return data
def _writeRelations(self, relations, entities = []):
data = ''
for relation in relations:
args_array = list(filter(lambda x: x in entities, relations[relation]["args"]))
if (len(args_array) > 0):
data += '\n'+relation+'\t'
for n in range(0, len(args_array)):
data += int(bool(n))*', '+'Arg'+str(n+1)+':'+args_array[n]
return data
def read_conf (self):
"""
Get the current Brat configuration.
Output :
Dict containing "entities" and "relations" configurations.
"""
if (os.path.isfile(self.folder+self.conf_file)):
# Reading file
file = open(self.folder+self.conf_file)
conf_str = file.read()
file.close()
# Splitting conf_str
conf_data = re.split(re.compile(r"\[[a-zA-Z]+\]", re.DOTALL), conf_str)[1:]
data = {}
# Reading enteties
data["entities"] = self._writeEntitiesLevel(conf_data[0].split("\n"), {})[1]
# Reading relations
entitiesKeys = _getDictionnaryKeys(data["entities"])
data["relations"] = self._readRelations(conf_data[1], entitiesKeys)
return(data)
else:
self.write_conf()
self.read_conf()
    def write_conf(self, entities = {}, relations = {}, events = {}, attributes = {}):
        """
        Write or overwrite the configuration file.

        It actually doesn't support events and attributes configuration data
        yet; those sections are written empty.

        inputs :
            entities, dict : dict containing the entities. If an entity has children, its value is another dict, otherwise it is set to True.
            relations, dict : dict containing the relations between entities, each key is a relation name, the value is a dict with an "args" key containing the list of related entities.
        """
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here since they are only read, but None defaults would be safer.
        # TODO : Add events and attributes support.
        conf_str = ''

        # Entities
        conf_str += '\n\n[entities]'
        conf_str += self._generateEntitiesStr(entities)

        # relations
        conf_str += '\n\n[relations]'
        entitiesKeys = _getDictionnaryKeys(entities)
        conf_str += self._writeRelations(relations, entitiesKeys)

        # attributes
        conf_str += '\n\n[attributes]'
        # events
        conf_str += '\n\n[events]'

        # Write conf file
        file = open(self.folder+self.conf_file,'w')
        file.write(conf_str)
        file.close()
def _getFileList(self):
# Listing files
filesDF = pd.DataFrame({'filename':pd.Series(os.listdir(self.folder))})
filesDFSplitted = filesDF["filename"].str.split(".", expand = True)
filesDF["id"] = filesDFSplitted[0]
filesDF["filetype"] = filesDFSplitted[1]
filesDF = filesDF[filesDF["filetype"].isin(["txt","ann"])]
return(filesDF)
def _parseData(self):
# Listing files
filesDF = self._getFileList()
# Getting data from txt and ann
filesDF_txt = filesDF.rename(columns = {"filename":"text_data"}).loc[filesDF["filetype"] == "txt", ["id","text_data"]]
filesDF_ann = filesDF.rename(columns = {"filename":"annotation"}).loc[filesDF["filetype"] == "ann", ["id","annotation"]]
dataDF = filesDF_txt.join(filesDF_ann.set_index("id"), on = "id")
dataDF["text_data"] = dataDF["text_data"].apply(lambda x: open(self.folder+x).read())
dataDF["annotation"] = dataDF["annotation"].apply(lambda x: open(self.folder+x).read())
return(dataDF)
def read_text(self):
"""
read_text
Get a pandas DataFrame containing the brat documents.
Input : None
Output : Pandas dataframe
"""
dataDF = self._parseData()
return(dataDF[["id","text_data"]])
    def read_annotation(self, ids = []):
        """
        read_annotation

        Get annotations from the brat folder.
        You can get specific annotations by filtering by id.

        input :
            ids, list (optionnal) : list of id for which you want the annotation data, if empty all annotations are returned.

        output :
            dict containing annotations ('T' lines) and relations ('R' lines)
            as two DataFrames; both are empty when no .ann file has content.
        """
        data = {}
        data["annotations"] = pd.DataFrame(columns=self.emptyDFCols["annotations"])
        data["relations"] = pd.DataFrame(columns=self.emptyDFCols["relations"])

        dataDF = self._parseData()[["id","annotation"]]
        dataDF = dataDF[(dataDF["annotation"].isna() == False) & (dataDF["annotation"] != '')] # Removing empty annotation

        # Filtering by ids
        if (len(ids) > 0):
            dataDF = dataDF[dataDF["id"].isin(pd.Series(ids).astype(str))]

        if (dataDF.shape[0] > 0):
            # Ann data to pandas: one row per annotation line, then split the
            # tab-separated fields into type_id / data / word
            dataDF = dataDF.join(dataDF["annotation"].str.split("\n").apply(pd.Series).stack().reset_index(level = 0).set_index("level_0")).reset_index(drop = True).drop("annotation", axis = 1).rename(columns = {0: "annotation"})
            dataDF = dataDF[dataDF["annotation"].str.len() > 0].reset_index(drop = True)
            dataDF = dataDF.join(dataDF["annotation"].str.split("\t", expand = True).rename(columns = {0: 'type_id', 1: 'data', 2: 'word'})).drop("annotation", axis = 1)
            # First character of type_id distinguishes entities (T) from relations (R)
            dataDF["type"] = dataDF["type_id"].str.slice(0,1)

            ## Annotations
            data["annotations"] = dataDF[dataDF["type"] == 'T']
            if (data["annotations"].shape[0] > 0):
                # 'data' field of a T line is "label start end"
                data["annotations"] = data["annotations"].join(data["annotations"]["data"].str.split(" ", expand = True).rename(columns = {0: "label", 1: "start", 2: "end"})).drop(columns = ["data","type"])

            ## Relations
            data["relations"] = dataDF[dataDF["type"] == 'R']
            if (data["relations"].shape[0] > 0):
                # 'data' field of an R line is "relation Arg1:Tx Arg2:Ty ..."
                tmp_splitted = data["relations"]["data"].str.split(" ", expand = True).rename(columns = {0: "relation"})

                ### Col names: Arg1, Arg2, ... for every argument column
                rename_dict = dict(zip(list(tmp_splitted.columns.values[1:]), list("Arg"+tmp_splitted.columns.values[1:].astype(str).astype(object))))
                tmp_splitted = tmp_splitted.rename(columns = rename_dict)

                ### Merging data: keep only the target of each "ArgN:target" pair
                tmp_splitted = tmp_splitted[["relation"]].join(tmp_splitted.loc[:,tmp_splitted.columns[tmp_splitted.columns != 'relation']].applymap(lambda x: x.split(":")[1]))
                data["relations"] = data["relations"].join(tmp_splitted).drop(columns = ["data","type","word"])

        return(data)
def _write_function(self, x, filetype = "txt", overwrite = False):
filenames = []
if (filetype == 'txt' or filetype == 'both'):
filenames.append(self.folder+str(x["filename"])+'.txt')
if (filetype == 'ann' or filetype == 'both'):
filenames.append(self.folder+str(x["filename"])+'.ann')
for filename in filenames:
try:
open(str(filename), "r")
is_file = True
except FileNotFoundError:
is_file = False
if ((is_file == False) or (overwrite == True)):
file = open(str(filename), "w")
file.write(x["content"])
file.close()
    def write_text(self, text_id, text, empty = False, overWriteAnnotations = False):
        """
        write_text

        Send text data to the brat folder.

        input :
            text_id, pd.Series : pandas series containing documents ids
            text, pd.Series : pandas series containing documents text in the same order as text_id
            empty, boolean : if True the brat folder is emptied of all but configuration data (text and ann files) before writing (asks for interactive confirmation)
            overWriteAnnotations, boolean : if True, the current annotation files are replaced by blank ones
        """
        # Normalize the overwrite flag to a strict boolean
        if overWriteAnnotations == True:
            overwriteAnn = True
        else:
            overwriteAnn = False

        if (type(text) == type(pd.Series()) and type(text_id) == type(pd.Series()) and text.shape[0] == text_id.shape[0]):

            # ID check : ids should be shorter than texts - heuristic guard
            # against the two series being passed in swapped order
            if (text_id.astype(str).str.len().max() < text.astype(str).str.len().max()):

                # empty : option to erase existing data
                if (empty):
                    self._emptyData()

                # Writting data
                print("Writting data")

                df_text = pd.DataFrame({"filename":text_id, "content":text})
                # Blank .ann files are created (only) for new documents
                # unless overwriteAnn forces a reset
                df_ann = pd.DataFrame({"filename":text_id, "content":""})

                df_text.apply(lambda x: self._write_function(x, filetype = "txt", overwrite = True), axis = 1)
                df_ann.apply(lambda x: self._write_function(x, filetype = "ann", overwrite = overwriteAnn), axis = 1)

                print("data written.")
            else:
                raise ValueError('ID is larger than text, maybe you inverted them.')
        else:
            raise ValueError('Incorrect variable type, expected two Pandas Series of same shape.')
def write_annotations(self, df, text_id, word, label, start, end, overwrite = False):
"""
write_annotations
Send annotation data from the brat folder. Useful to pre-anotate some data.
input :
df, pd.Dataframe : dataframe containing annotations data, should contains the text id, the annotated word, the annotated label, the start and end offset.
text_id, str : name of the column in df which contains the document id
word, str : name of the column in df which contains the annotated word
label, str : name of the column in df which contains the label of the annotated word
start, str : name of the column in df which contains the start offset
end, str : name of the column in df which contains the end offset
overwrite, boolean : if True, the current annotation files are replaced by new data, otherwise, the new annotations are merged with existing one
"""
# Checking data types
if (type(df) == type(pd.DataFrame())):
# Loading df
df = df.rename(columns = {text_id:"id",word:"word",label:"label",start:"start",end:"end"})
df["type_id"] = df.groupby("id").cumcount()+1
# List of ids
ids = df["id"].unique()
# Loading current data
current_annotation = self.read_annotation(ids)
current_annotations = current_annotation["annotations"]
tmaxDFAnnotations = current_annotations.set_index(["id"])["type_id"].str.slice(1,).astype(int).reset_index().groupby("id").max().rename(columns = {"type_id":"Tmax"})
if (overwrite == True):
df["type_id"] = "T"+df["type_id"].astype(str)
new_annotations = df
else:
df = df.join(tmaxDFAnnotations, on = "id").fillna(0)
df["type_id"] = "T"+(df["type_id"]+df["Tmax"]).astype(int).astype(str)
df = df.drop(columns = ["Tmax"])
new_annotations = pd.concat((current_annotations, df[self.emptyDFCols["annotations"]])).reset_index(drop = True)
new_annotations.drop_duplicates() ## Removing duplicates
# Injecting new annotations
current_annotation["annotations"] = new_annotations
# Calling write function
self._write_annotation(current_annotation["annotations"], current_annotation["relations"])
else:
raise ValueError('Incorrect variable type, expected a Pandas DF.')
def write_relations(self, df, text_id, relation, overwrite = False):
"""
write_relations
Send relations data from the brat folder. Useful to pre-anotate some data.
input :
df, pd.Dataframe : dataframe containing relations data, should contains the text id, the relation name, the if of the linked annotations.
text_id, str : name of the column in df which contains the document id
relation, str : name of the column in df which contains the relation name
overwrite, boolean : if True, the current annotation files are replaced by new data, otherwise, the new annotations are merged with existing one
The other columns should contains the type_id of related entities, as outputed by the read_annotation method.
"""
# Checking data types
if (type(df) == type(pd.DataFrame())):
# Loading df
df = df.rename(columns = {text_id:"id",relation:"relation"})
df["type_id"] = df.groupby("id").cumcount()+1 # type_id
# Columns names
old_columns = df.columns[np.isin(df.columns, ["id", "relation","type_id"]) == False]
new_columns = "Arg"+np.array(list(range(1,len(old_columns)+1))).astype(str).astype(object)
df = df.rename(columns = dict(zip(old_columns, new_columns)))
# List of ids
ids = df["id"].unique()
# Loading current data
current_annotation = self.read_annotation(ids)
current_relations = current_annotation["relations"]
rmaxDFrelations = current_relations.set_index(["id"])["type_id"].str.slice(1,).astype(int).reset_index().groupby("id").max().rename(columns = {"type_id":"Rmax"})
if (overwrite == True):
df["type_id"] = "R"+df["type_id"].astype(str)
new_relations = df
else:
df = df.join(rmaxDFrelations, on = "id").fillna(0)
df["type_id"] = "R"+(df["type_id"]+df["Rmax"]).astype(int).astype(str)
df = df.drop(columns = ["Rmax"])
# Adding missing columns
if (len(df.columns) > len(current_relations.columns)):
for column in df.columns[np.isin(df.columns, current_relations.columns) == False]:
current_relations[column] = np.nan
else:
for column in current_relations.columns[np.isin(current_relations.columns, df.columns) == False]:
df[column] = np.nan
new_relations = pd.concat((current_relations, df[current_relations.columns])).reset_index(drop = True)
new_relations.drop_duplicates() ## Removing duplicates
# Injecting new annotations
current_annotation["relations"] = new_relations
# Calling write function
self._write_annotation(current_annotation["annotations"], current_annotation["relations"])
else:
raise ValueError('Incorrect variable type, expected a Pandas DF.')
def _generate_annotations_str (self, annotations):
annotations["label_span"] = annotations.apply(lambda x: " ".join(x[["label","start","end"]].astype(str).values), axis = 1)
annotations["annotation_str"] = annotations.apply(lambda x: '\t'.join(x[["type_id","label_span","word"]].astype(str).values), axis = 1)
annotations_str = annotations.groupby("id").agg(lambda x: "\n".join(x))["annotation_str"]
return(annotations_str)
    def _generate_relations_str (self, relations):
        """
        Serialize one group of relation rows into brat '.ann' relation lines.

        Args:
            relations: pd.DataFrame with columns "id", "type_id", "relation"
                plus ArgN columns holding linked entity ids (empty when unused).
        Returns:
            str, newline-joined lines of the form
            "Rn<TAB>relation<TAB>Arg1:Tx Arg2:Ty ..." (empty args contribute '').
        """
        # Replace NaN and the literal string 'nan' by empty strings so unused
        # Arg columns serialize to nothing.
        relations = relations.fillna('').applymap(lambda x: '' if x == 'nan' else x) #cleaning data
        # ArgN columns only (everything that is not id/type_id/relation)
        columns = relations.columns[np.isin(relations.columns, ["id","type_id","relation"]) == False].values.tolist()
        # 0/1 mask of which Arg cells are filled; used to emit "ArgN:" prefixes
        # only for non-empty cells.
        boolmap = relations[columns].transpose().applymap(lambda x: int(x != ''))
        rct = relations[columns].transpose()
        # Broadcast the column name ("Arg1", "Arg2", ...) + ':' onto filled
        # cells, then re-attach the cell value and flip back to row-major.
        temp_relations = (boolmap*(np.array(np.repeat(rct.index,rct.shape[1])).reshape(rct.shape)+':')+rct.astype(str)).transpose()
        relations_str = '\n'.join(relations[["type_id","relation"]].join(temp_relations[columns]).apply(lambda x: '\t'.join(x.values), axis = 1).values)
        return(relations_str)
def _write_file(self, data):
file = open(self.folder+str(data["id"])+".ann", "w")
file.write(data["str_to_write"])
file.close()
def _write_annotation(self,annotations,relations):
# Checking data types
if (type(annotations) == type(pd.DataFrame()) and type(relations) == type(pd.DataFrame())):
# Gerenating str
data_annotations = self._generate_annotations_str(annotations)
data_relations = relations.groupby("id").agg(lambda x: self._generate_relations_str(x)).iloc[:,0]
# Merging data
data = pd.DataFrame({"annotations":data_annotations, "relations":data_relations}).fillna('')
data["str_to_write"] = data.apply(lambda x : '\n'.join(x.values), axis = 1)
data = data.reset_index().rename(columns = {"index":"id"})
# Writting files
data.apply(self._write_file, axis = 1)
return(data)
else:
raise ValueError('Incorrect variable type, expected a Pandas DF.')
def _export_conll_2003 (self, data):
'''
Internal function for export in conll format.
'''
# Creating i-label
data["i-label"] = (data["label"] != "O").astype(int)*(data["i-type"]+'-')+data["label"]
# Creating string
data["str"] = data[["token","pos","chunks","i-label"]].apply(lambda x: ' '.join(x), axis = 1)
connll_str = "-DOCSTART- -X- -X- O"+"\n\n"+"\n\n".join(
data.groupby("id").agg(lambda x: "\n".join(x))["str"].values.tolist()
)
return(connll_str)
    def _get_tokenized_data(self, text_data, annotations_data, tokenizer = _default_tokenizer, keep_empty = False):
        '''
        Internal function that process text and annotation data to calculate token, pos and chunks.
        Input :
            text_data : text data exported from current class
            annotations_data : annotations data exported from current class
            tokenizer : tokenizer function from extract_tools
            keep_empty : default False, parameter boolean, if True empty token are not removed, otherwise they are removed
        Output :
            Aggreged data in Pandas DataFrame.
        '''
        # Applying tokenizer to text
        # NOTE(review): assumes the tokenizer returns, per document, an iterable
        # of (token, start_offset, end_offset, pos) tuples - grounded by the
        # positional column rename below. Confirm against extract_tools.
        text_data["tokens"] = text_data["text_data"].apply(tokenizer)
        # Exploding dataframe by tokens and rename column
        exploded_text_data = text_data[["id", "tokens"]].explode("tokens").reset_index(drop = True)
        exploded_text_data = exploded_text_data.join(
            exploded_text_data["tokens"] \
                .apply(pd.Series) \
                .rename(columns = {
                    0:'token',1:'start_offset',2:'end_offset', 3:'pos'
                })
        ) \
        .drop(columns = ["tokens"])
        # Getting entities from annotations
        ## We merge by offset
        ### Creating a word id and annotation id
        exploded_text_data = exploded_text_data \
            .reset_index(drop = True) \
            .reset_index() \
            .rename(columns = {"index":"word_id"})
        annotations_data = annotations_data \
            .reset_index() \
            .rename(columns = {"index":"ann_id"})
        ### Offset of string
        # Each token is expanded to one row per covered character offset; the
        # same is done for annotations, so the join below matches any overlap.
        # NOTE(review): memory grows with total text length because of this
        # character-level explosion.
        text_offsets = pd.DataFrame(exploded_text_data[["id","word_id","start_offset","end_offset"]])
        text_offsets["start_offset"] = text_offsets["start_offset"].astype(int)
        text_offsets["end_offset"] = text_offsets["end_offset"].astype(int)
        text_offsets["offsets"] = text_offsets \
            .apply(
                lambda x: list(range(x["start_offset"], x["end_offset"]+1)), axis = 1
            )
        text_offsets = text_offsets[["id","word_id", "offsets"]] \
            .explode("offsets")
        ### Offset of annotations
        ann_offsets = pd.DataFrame(
            annotations_data[["id", "ann_id", "start", "end"]]
        )
        ann_offsets["start"] = ann_offsets["start"].astype(int)
        ann_offsets["end"] = ann_offsets["end"].astype(int)
        if (ann_offsets.shape[0] > 0):
            ann_offsets["offsets"] = ann_offsets \
                .apply(
                    lambda x: list(range(x["start"], x["end"]+1)), axis = 1
                )
        else:
            # No annotations at all: keep an empty offsets column so the
            # explode/join below still work.
            ann_offsets["offsets"] = ''
        ann_offsets = ann_offsets[["id","ann_id", "offsets"]] \
            .explode("offsets")
        # Merging by term
        # Composite key "document id + character offset" used for the join.
        text_offsets["uid"] = text_offsets["id"].astype(str) \
            + text_offsets["offsets"].astype(str)
        ann_offsets["uid"] = ann_offsets["id"].astype(str) \
            + ann_offsets["offsets"].astype(str)
        merged_id = text_offsets \
            .join(
                ann_offsets[["ann_id","uid"]].set_index("uid"),
                on = "uid"
            ) \
            .dropna()
        merged_id["ann_id"] = merged_id["ann_id"].astype(int)
        merged_id = merged_id[["word_id", "ann_id"]] \
            .set_index("ann_id") \
            .drop_duplicates() \
            .join(annotations_data, on = "ann_id")
        # Keeping last when duplicate word_id
        merged_id = merged_id \
            .drop_duplicates("word_id", keep = "last")
        # Joining annotation with word id
        output_df = exploded_text_data \
            .join(merged_id[["label","word_id"]] \
                .set_index("word_id"),
                on = "word_id",
                how = "left") \
            .fillna("O")[["id", "token","label", "pos"]]
        # Creation of i-type : if O : i-type is B
        # NOTE(review): relies on groupby preserving row order and on the
        # exploded index aligning positionally with output_df - confirm
        # before refactoring this block.
        output_df["i-type"] = output_df \
            .groupby("id").agg(lambda x: ["B"]+["I"]*(len(x)-1))["label"].explode().reset_index()["label"]
        output_df.loc[output_df["label"] == "O", "i-type"] = 'B'
        # Empty chunks
        output_df["chunks"] = 'O'
        # Post - processing
        if (keep_empty == False):
            output_df = output_df[output_df["token"] != ''].reset_index(drop = True)
        return(output_df)
def export(self, export_format = "conll-2003", tokenizer = _default_tokenizer, keep_empty = False, entities = None):
'''
Function that generate an export file.
Supported export format are :
- conll-2003
input :
export_format : name of the export format
tokenizer : tokenizer function from extract_tools
keep_empty : default False, parameter boolean, if True empty token are not removed, otherwise they are removed
entities : if None, all entities are send to the export file, if there is the conflict the most recent is used, otherwise the entities are selected before
Output :
str : output string in selected export format
'''
supported_export_format = {
"conll-2003":self._export_conll_2003
}
# Check the export format
if (export_format not in supported_export_format.keys()):
raise Exception(str(export_format)+" format not supported. Export format should be one of these : {}".format(
", ".join(supported_export_format.keys())
))
# Create dataframe of tokenized word associated with annotations
## Getting data from brat
text_data = self.read_text()
annotations_data = self.read_annotation()["annotations"]
## Filtering entities
if entities is not None:
if type(entities) != type(list()):
raise Exception("entities should be of type list")
annotations_data = annotations_data[annotations_data["label"].isin(entities)] \
.reset_index(drop = True)
## Parsing data
data = self._get_tokenized_data(tokenizer=tokenizer,
text_data = text_data,
annotations_data = annotations_data,
keep_empty = keep_empty)
# Execute the export format associated function
data_str = supported_export_format[export_format](data = data)
return(data_str)
| [
"pandas.Series",
"os.listdir",
"numpy.repeat",
"re.compile",
"numpy.isin",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"pandas.DataFrame",
"pandas.concat",
"os.remove"
] | [((5008, 5052), 'os.path.isfile', 'os.path.isfile', (['(self.folder + self.conf_file)'], {}), '(self.folder + self.conf_file)\n', (5022, 5052), False, 'import os\n'), ((9026, 9079), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "self.emptyDFCols['annotations']"}), "(columns=self.emptyDFCols['annotations'])\n", (9038, 9079), True, 'import pandas as pd\n'), ((9108, 9159), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "self.emptyDFCols['relations']"}), "(columns=self.emptyDFCols['relations'])\n", (9120, 9159), True, 'import pandas as pd\n'), ((25042, 25127), 'pandas.DataFrame', 'pd.DataFrame', (["exploded_text_data[['id', 'word_id', 'start_offset', 'end_offset']]"], {}), "(exploded_text_data[['id', 'word_id', 'start_offset',\n 'end_offset']])\n", (25054, 25127), True, 'import pandas as pd\n'), ((25632, 25696), 'pandas.DataFrame', 'pd.DataFrame', (["annotations_data[['id', 'ann_id', 'start', 'end']]"], {}), "(annotations_data[['id', 'ann_id', 'start', 'end']])\n", (25644, 25696), True, 'import pandas as pd\n'), ((1398, 1424), 'os.path.isdir', 'os.path.isdir', (['self.folder'], {}), '(self.folder)\n', (1411, 1424), False, 'import os\n'), ((1448, 1469), 'os.mkdir', 'os.mkdir', (['self.folder'], {}), '(self.folder)\n', (1456, 1469), False, 'import os\n'), ((13510, 13562), 'pandas.DataFrame', 'pd.DataFrame', (["{'filename': text_id, 'content': text}"], {}), "({'filename': text_id, 'content': text})\n", (13522, 13562), True, 'import pandas as pd\n'), ((13586, 13636), 'pandas.DataFrame', 'pd.DataFrame', (["{'filename': text_id, 'content': ''}"], {}), "({'filename': text_id, 'content': ''})\n", (13598, 13636), True, 'import pandas as pd\n'), ((15283, 15297), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15295, 15297), True, 'import pandas as pd\n'), ((17873, 17887), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17885, 17887), True, 'import pandas as pd\n'), ((1885, 1911), 'os.remove', 'os.remove', (['(self.folder + x)'], {}), '(self.folder 
+ x)\n', (1894, 1911), False, 'import os\n'), ((5284, 5324), 're.compile', 're.compile', (['"""\\\\[[a-zA-Z]+\\\\]"""', 're.DOTALL'], {}), "('\\\\[[a-zA-Z]+\\\\]', re.DOTALL)\n", (5294, 5324), False, 'import re\n'), ((7201, 7224), 'os.listdir', 'os.listdir', (['self.folder'], {}), '(self.folder)\n', (7211, 7224), False, 'import os\n'), ((12996, 13007), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (13005, 13007), True, 'import pandas as pd\n'), ((13035, 13046), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (13044, 13046), True, 'import pandas as pd\n'), ((18148, 18198), 'numpy.isin', 'np.isin', (['df.columns', "['id', 'relation', 'type_id']"], {}), "(df.columns, ['id', 'relation', 'type_id'])\n", (18155, 18198), True, 'import numpy as np\n'), ((21762, 21776), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21774, 21776), True, 'import pandas as pd\n'), ((21806, 21820), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21818, 21820), True, 'import pandas as pd\n'), ((22098, 22174), 'pandas.DataFrame', 'pd.DataFrame', (["{'annotations': data_annotations, 'relations': data_relations}"], {}), "({'annotations': data_annotations, 'relations': data_relations})\n", (22110, 22174), True, 'import pandas as pd\n'), ((16350, 16419), 'pandas.concat', 'pd.concat', (["(current_annotations, df[self.emptyDFCols['annotations']])"], {}), "((current_annotations, df[self.emptyDFCols['annotations']]))\n", (16359, 16419), True, 'import pandas as pd\n'), ((19684, 19745), 'pandas.concat', 'pd.concat', (['(current_relations, df[current_relations.columns])'], {}), '((current_relations, df[current_relations.columns]))\n', (19693, 19745), True, 'import pandas as pd\n'), ((9457, 9471), 'pandas.Series', 'pd.Series', (['ids'], {}), '(ids)\n', (9466, 9471), True, 'import pandas as pd\n'), ((19334, 19380), 'numpy.isin', 'np.isin', (['df.columns', 'current_relations.columns'], {}), '(df.columns, current_relations.columns)\n', (19341, 19380), True, 'import numpy as np\n'), ((19533, 
19579), 'numpy.isin', 'np.isin', (['current_relations.columns', 'df.columns'], {}), '(current_relations.columns, df.columns)\n', (19540, 19579), True, 'import numpy as np\n'), ((20937, 20994), 'numpy.isin', 'np.isin', (['relations.columns', "['id', 'type_id', 'relation']"], {}), "(relations.columns, ['id', 'type_id', 'relation'])\n", (20944, 20994), True, 'import numpy as np\n'), ((21191, 21225), 'numpy.repeat', 'np.repeat', (['rct.index', 'rct.shape[1]'], {}), '(rct.index, rct.shape[1])\n', (21200, 21225), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###################### LIBRARIES #################################################
import warnings
warnings.filterwarnings("ignore")
import torch, random, itertools as it, numpy as np, faiss, random
from tqdm import tqdm
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from PIL import Image
"""================================================================================================="""
############ LOSS SELECTION FUNCTION #####################
def loss_select(loss, opt, to_optim):
    """
    Selection function which returns the respective criterion while appending to list of trainable parameters if required.
    Args:
        loss:     str, name of loss function to return.
        opt:      argparse.Namespace, contains all training-specific parameters.
        to_optim: list of trainable parameters. Is extended if the loss function contains trainable parameters as well.
    Returns:
        criterion (torch.nn.Module inherited), to_optim (optionally appended)
    """
    if loss == 'triplet':
        criterion = TripletLoss(margin=opt.margin, sampling_method=opt.sampling)
    elif loss == 'npair':
        criterion = NPairLoss(l2=opt.l2npair)
    elif loss == 'marginloss':
        criterion = MarginLoss(margin=opt.margin, nu=opt.nu, beta=opt.beta,
                               n_classes=opt.num_classes, sampling_method=opt.sampling)
        # Class margins beta are trainable -> register them with their own LR.
        to_optim += [{'params': criterion.parameters(), 'lr': opt.beta_lr, 'weight_decay': 0}]
    elif loss == 'proxynca':
        # Embedding dim differs when clustering pseudo-labels are in use.
        embedding_dim = opt.classembed if 'num_cluster' in vars(opt).keys() else opt.embed_dim
        criterion = ProxyNCALoss(num_proxies=opt.num_classes, embedding_dim=embedding_dim)
        # Proxies are trainable -> register them with their own LR.
        to_optim += [{'params': criterion.parameters(), 'lr': opt.proxy_lr}]
    elif loss == 'crossentropy':
        criterion = CEClassLoss(n_classes=opt.num_classes, inp_dim=opt.embed_dim)
        # Classifier head is trainable as well.
        to_optim += [{'params': criterion.parameters(), 'lr': opt.lr, 'weight_decay': 0}]
    else:
        raise Exception('Loss {} not available!'.format(loss))
    return criterion, to_optim
"""================================================================================================="""
######### MAIN SAMPLER CLASS #################################
class TupleSampler():
    """
    Container for all sampling methods that can be used in conjunction with the respective loss functions.
    Based on batch-wise sampling, i.e. given a batch of training data, sample useful data tuples that are
    used to train the network more efficiently.
    """
    def __init__(self, method='random'):
        """
        Args:
            method: str, name of sampling method to use
                ('semihard', 'softhard', 'distance', 'npair' or 'random').
        Returns:
            Nothing!
        """
        self.method = method
        # NOTE(review): the first branch is a plain `if` while the rest form an
        # `elif` chain. Behavior is unaffected because the method names are
        # mutually exclusive, but an unknown `method` silently leaves
        # `self.give` unset, which surfaces later as an AttributeError.
        if method=='semihard':
            self.give = self.semihardsampling
        if method=='softhard':
            self.give = self.softhardsampling
        elif method=='distance':
            self.give = self.distanceweightedsampling
        elif method=='npair':
            self.give = self.npairsampling
        elif method=='random':
            self.give = self.randomsampling
    def randomsampling(self, batch, labels):
        """
        This methods finds all available triplets in a batch given by the classes provided in labels, and randomly
        selects <len(batch)> triplets.
        Args:
            batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor): labels = labels.detach().numpy()
        unique_classes = np.unique(labels)
        indices = np.arange(len(batch))
        # Map each class label to the batch positions holding that class.
        class_dict = {i:indices[labels==i] for i in unique_classes}
        # All (anchor-class, positive-class==anchor-class, negative-class) combos...
        sampled_triplets = [list(it.product([x],[x],[y for y in unique_classes if x!=y])) for x in unique_classes]
        sampled_triplets = [x for y in sampled_triplets for x in y]
        # ...expanded to concrete index triplets, excluding anchor==positive.
        sampled_triplets = [[x for x in list(it.product(*[class_dict[j] for j in i])) if x[0]!=x[1]] for i in sampled_triplets]
        sampled_triplets = [x for y in sampled_triplets for x in y]
        #NOTE: The number of possible triplets is given by #unique_classes*(2*(samples_per_class-1)!)*(#unique_classes-1)*samples_per_class
        # NOTE(review): random.sample raises ValueError when fewer than
        # batch.shape[0] triplets exist (e.g. a batch with a single class).
        sampled_triplets = random.sample(sampled_triplets, batch.shape[0])
        return sampled_triplets
    def semihardsampling(self, batch, labels, margin=0.2):
        """
        Samples one triplet per batch element using semi-hard negative mining:
        negatives are drawn from those farther away than the positive but
        within `margin` of the anchor-positive distance; if none exist, a
        random negative of a different class is used instead.
        Args:
            batch: torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
            margin: float, width of the semi-hard window above the anchor-positive distance.
        Returns:
            list of sampled [anchor, positive, negative] index triplets (positions IN THE BATCH).
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().numpy()
        bs = batch.size(0)
        #Return distance matrix for all elements in batch (BSxBS)
        distances = self.pdist(batch.detach()).detach().cpu().numpy()
        positives, negatives = [], []
        anchors = []
        for i in range(bs):
            l, d = labels[i], distances[i]
            neg = labels!=l; pos = labels==l
            anchors.append(i)
            # Exclude the anchor itself from the positive candidates.
            pos[i] = False
            p = np.random.choice(np.where(pos)[0])
            positives.append(p)
            #Find negatives that violate tripet constraint semi-negatives
            neg_mask = np.logical_and(neg,d>d[p])
            neg_mask = np.logical_and(neg_mask,d<margin+d[p])
            if neg_mask.sum()>0:
                negatives.append(np.random.choice(np.where(neg_mask)[0]))
            else:
                # Fallback: any negative when no semi-hard candidate exists.
                negatives.append(np.random.choice(np.where(neg)[0]))
        sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
        return sampled_triplets
    def softhardsampling(self, batch, labels):
        """
        This methods finds all available triplets in a batch given by the classes provided in labels, and select
        triplets based on semihard sampling introduced in 'https://arxiv.org/pdf/1503.03832.pdf'.
        Args:
            batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor): labels = labels.detach().numpy()
        bs = batch.size(0)
        #Return distance matrix for all elements in batch (BSxBS)
        distances = self.pdist(batch.detach()).detach().cpu().numpy()
        positives, negatives = [], []
        anchors = []
        for i in range(bs):
            l, d = labels[i], distances[i]
            anchors.append(i)
            #1 for batchelements with label l
            neg = labels!=l; pos = labels==l
            #0 for current anchor
            pos[i] = False
            #Find negatives that violate triplet constraint semi-negatives
            neg_mask = np.logical_and(neg,d<d[np.where(pos)[0]].max())
            #Find positives that violate triplet constraint semi-hardly
            pos_mask = np.logical_and(pos,d>d[np.where(neg)[0]].min())
            if pos_mask.sum()>0:
                positives.append(np.random.choice(np.where(pos_mask)[0]))
            else:
                positives.append(np.random.choice(np.where(pos)[0]))
            if neg_mask.sum()>0:
                negatives.append(np.random.choice(np.where(neg_mask)[0]))
            else:
                negatives.append(np.random.choice(np.where(neg)[0]))
        sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
        return sampled_triplets
    def distanceweightedsampling(self, batch, labels, lower_cutoff=0.5, upper_cutoff=1.4):
        """
        This methods finds all available triplets in a batch given by the classes provided in labels, and select
        triplets based on distance sampling introduced in 'Sampling Matters in Deep Embedding Learning'.
        Args:
            batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
            lower_cutoff: float, lower cutoff value for negatives that are too close to anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
            upper_cutoff: float, upper cutoff value for positives that are too far away from the anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
        bs = batch.shape[0]
        distances = self.pdist(batch.detach()).clamp(min=lower_cutoff)
        positives, negatives = [],[]
        labels_visited = []  # NOTE(review): unused in this method
        anchors = []
        for i in range(bs):
            neg = labels!=labels[i]; pos = labels==labels[i]
            # Inverse-distance sampling probabilities per batch element.
            q_d_inv = self.inverse_sphere_distances(batch, distances[i], labels, labels[i])
            #Sample positives randomly
            pos[i] = 0
            positives.append(np.random.choice(np.where(pos)[0]))
            #Sample negatives by distance
            negatives.append(np.random.choice(bs,p=q_d_inv))
        sampled_triplets = [[a,p,n] for a,p,n in zip(list(range(bs)), positives, negatives)]
        return sampled_triplets
    def npairsampling(self, batch, labels):
        """
        This methods finds N-Pairs in a batch given by the classes provided in labels in the
        creation fashion proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'.
        Args:
            batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
        label_set, count = np.unique(labels, return_counts=True)
        # Only classes with at least one (anchor, positive) pair qualify.
        label_set = label_set[count>=2]
        pos_pairs = np.array([np.random.choice(np.where(labels==x)[0], 2, replace=False) for x in label_set])
        # Negatives for each pair: the positives of every OTHER pair.
        neg_tuples = []
        for idx in range(len(pos_pairs)):
            neg_tuples.append(pos_pairs[np.delete(np.arange(len(pos_pairs)),idx),1])
        neg_tuples = np.array(neg_tuples)
        sampled_npairs = [[a,p,*list(neg)] for (a,p),neg in zip(pos_pairs, neg_tuples)]
        return sampled_npairs
    def pdist(self, A):
        """
        Efficient function to compute the distance matrix for a matrix A.
        Args:
            A: Matrix/Tensor for which the distance matrix is to be computed.
        Returns:
            distance_matrix, clamped to ensure no negative squared values are passed to sqrt.
        """
        prod = torch.mm(A, A.t())
        norm = prod.diag().unsqueeze(1).expand_as(prod)
        res = (norm + norm.t() - 2 * prod).clamp(min = 0)
        return res.clamp(min = 0).sqrt()
    def inverse_sphere_distances(self, batch, dist, labels, anchor_label):
        """
        Function to utilise the distances of batch samples to compute their
        probability of occurence, and using the inverse to sample actual negatives to the resp. anchor.
        Args:
            batch: torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
            dist: torch.Tensor(), computed distances between anchor to all batch samples.
            labels: np.ndarray, labels for each sample for which distances were computed in dist.
            anchor_label: float, anchor label
        Returns:
            np.ndarray, normalized inverse sampling probabilities (zero for same-class samples).
        """
        bs,dim = len(dist),batch.shape[-1]
        #negated log-distribution of distances of unit sphere in dimension <dim>
        log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist) - (float(dim-3) / 2) * torch.log(1.0 - 0.25 * (dist.pow(2))))
        #Set sampling probabilities of positives to zero
        log_q_d_inv[np.where(labels==anchor_label)[0]] = 0
        q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv)) # - max(log) for stability
        #Set sampling probabilities of positives to zero
        q_d_inv[np.where(labels==anchor_label)[0]] = 0
        ### NOTE: Cutting of values with high distances made the results slightly worse.
        # q_d_inv[np.where(dist>upper_cutoff)[0]] = 0
        #Normalize inverted distance for probability distr.
        q_d_inv = q_d_inv/q_d_inv.sum()
        return q_d_inv.detach().cpu().numpy()
"""================================================================================================="""
### Standard Triplet Loss, finds triplets in Mini-batches.
class TripletLoss(torch.nn.Module):
    def __init__(self, margin=1, sampling_method='random'):
        """
        Standard triplet loss ('FaceNet: A Unified Embedding for Face
        Recognition and Clustering').

        Args:
            margin: float, separation enforced between the anchor-positive and
                anchor-negative squared distances.
            sampling_method: str, triplet mining strategy handed to TupleSampler.
        """
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.sampler = TupleSampler(method=sampling_method)

    def triplet_distance(self, anchor, positive, negative):
        """
        Margin-hinged squared-distance gap for a single triplet.

        Args:
            anchor, positive, negative: torch.Tensor(), embeddings of the triplet.
        Returns:
            torch.Tensor(), scalar: relu(d(a,p)^2 - d(a,n)^2 + margin).
        """
        pos_term = (anchor - positive).pow(2).sum()
        neg_term = (anchor - negative).pow(2).sum()
        return torch.nn.functional.relu(pos_term - neg_term + self.margin)

    def forward(self, batch, labels):
        """
        Args:
            batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings.
            labels: np.ndarray [(BS x 1)], class assignment per batch element.
        Returns:
            torch.Tensor(), batch-averaged triplet loss.
        """
        # Mine triplets from the current batch, then average their losses.
        triplets = self.sampler.give(batch, labels)
        per_triplet = [
            self.triplet_distance(batch[t[0], :], batch[t[1], :], batch[t[2], :])
            for t in triplets
        ]
        return torch.mean(torch.stack(per_triplet))
"""================================================================================================="""
### Standard N-Pair Loss.
class NPairLoss(torch.nn.Module):
    def __init__(self, l2=0.02):
        """
        Multi-class N-pair loss ('Improved Deep Metric Learning with
        Multi-class N-pair Loss Objective').

        Args:
            l2: float, weight of the embedding-norm penalty (embeddings are
                not L2-normalized for this loss).
        Returns:
            Nothing!
        """
        super(NPairLoss, self).__init__()
        self.sampler = TupleSampler(method='npair')
        self.l2 = l2

    def npair_distance(self, anchor, positive, negatives):
        """
        Log-sum-exp term for one n-pair tuple.

        Args:
            anchor, positive: torch.Tensor() [(1 x embed_dim)] embeddings.
            negatives: torch.Tensor() [(N x embed_dim)] negative embeddings.
        Returns:
            torch.Tensor(), scalar: log(1 + sum_i exp(a . (n_i - p))).
        """
        logits = anchor.mm((negatives - positive).transpose(0, 1))
        return torch.log(1 + torch.sum(torch.exp(logits)))

    def weightsum(self, anchor, positive):
        """
        Squared-norm penalty over anchor and positive embeddings.
        NOTE: Only anchor and positive are penalized since the negatives are
        drawn from the same set of pairs.

        Args:
            anchor, positive: torch.Tensor() embeddings.
        Returns:
            torch.Tensor(), scalar penalty value.
        """
        return torch.sum(anchor ** 2 + positive ** 2)

    def forward(self, batch, labels):
        """
        Args:
            batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings.
            labels: np.ndarray [(BS x 1)], class assignment per batch element.
        Returns:
            torch.Tensor(), batch-averaged n-pair loss.
        """
        # Sample [anchor, positive, neg_1, ..., neg_k] index tuples.
        npairs = self.sampler.give(batch, labels)
        # Base n-pair term per tuple (t[2:] are the negative indices).
        loss = torch.stack([
            self.npair_distance(batch[t[0]:t[0] + 1, :], batch[t[1]:t[1] + 1, :], batch[t[2:], :])
            for t in npairs
        ])
        # Add the embedding-norm penalty.
        penalty = torch.stack([self.weightsum(batch[t[0], :], batch[t[1], :]) for t in npairs])
        loss = loss + self.l2 * torch.mean(penalty)
        return torch.mean(loss)
"""================================================================================================="""
### MarginLoss with trainable class separation margin beta. Runs on Mini-batches as well.
class MarginLoss(torch.nn.Module):
    def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):
        """
        Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'.

        Args:
            margin:          float, fixed triplet margin (see also TripletLoss).
            nu:              float, regularisation weight for beta. Zero by default (in literature as well).
            beta:            float, initial value for trainable class margins. Set to default literature value.
            n_classes:       int, number of target classes. Dictates the number of trainable class margins.
            beta_constant:   bool, set to True if betas should not be trained.
            sampling_method: str, sampling method to use to generate training triplets.
        Returns:
            Nothing!
        """
        super(MarginLoss, self).__init__()
        self.margin = margin
        self.n_classes = n_classes
        self.beta_constant = beta_constant
        self.beta_val = beta
        # One trainable margin per class, unless betas are frozen to a scalar.
        self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes)*beta)
        self.nu = nu
        self.sampling_method = sampling_method
        self.sampler = TupleSampler(method=sampling_method)
    def forward(self, batch, labels):
        """
        Args:
            batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            margin loss (torch.Tensor(), batch-averaged)
        """
        if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
        sampled_triplets = self.sampler.give(batch, labels)
        #Compute distances between anchor-positive and anchor-negative.
        d_ap, d_an = [],[]
        for triplet in sampled_triplets:
            train_triplet = {'Anchor': batch[triplet[0],:], 'Positive':batch[triplet[1],:], 'Negative':batch[triplet[2]]}
            # Small epsilon keeps the sqrt differentiable when the distance is exactly zero.
            pos_dist = ((train_triplet['Anchor']-train_triplet['Positive']).pow(2).sum()+1e-8).pow(1/2)
            neg_dist = ((train_triplet['Anchor']-train_triplet['Negative']).pow(2).sum()+1e-8).pow(1/2)
            d_ap.append(pos_dist)
            d_an.append(neg_dist)
        d_ap, d_an = torch.stack(d_ap), torch.stack(d_an)
        #Group betas together by anchor class in sampled triplets (as each beta belongs to one class).
        if self.beta_constant:
            beta = self.beta
        else:
            beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.cuda.FloatTensor)
        #Compute actual margin positive and margin negative loss
        pos_loss = torch.nn.functional.relu(d_ap-beta+self.margin)
        neg_loss = torch.nn.functional.relu(beta-d_an+self.margin)
        #Compute normalization constant: number of triplets with non-zero loss.
        pair_count = torch.sum((pos_loss>0.)+(neg_loss>0.)).type(torch.cuda.FloatTensor)
        #Actual Margin Loss
        loss = torch.sum(pos_loss+neg_loss) if pair_count==0. else torch.sum(pos_loss+neg_loss)/pair_count
        #(Optional) Add regularization penalty on betas.
        # BUGFIX: the original referenced an undefined name `beta_regularisation_loss`,
        # raising NameError whenever nu != 0. The penalty (nu * sum of the class
        # margins used in this batch) is now computed explicitly; it discourages
        # the trainable betas from growing without bound.
        if self.nu:
            beta_for_reg = beta if torch.is_tensor(beta) else torch.tensor(beta)
            beta_regularisation_loss = self.nu * torch.sum(beta_for_reg)
            loss = loss + beta_regularisation_loss.type(torch.cuda.FloatTensor)
        return loss
"""================================================================================================="""
### ProxyNCALoss containing trainable class proxies. Works independent of batch size.
class ProxyNCALoss(torch.nn.Module):
    def __init__(self, num_proxies, embedding_dim):
        """
        Basic ProxyNCA Loss as proposed in 'No Fuss Distance Metric Learning using Proxies'.

        Args:
            num_proxies:   int, number of proxies used to represent the data groups;
                           usually set to the number of classes.
            embedding_dim: int, dimensionality of the embeddings (and hence of each proxy).
        Returns:
            Nothing!
        """
        super(ProxyNCALoss, self).__init__()
        self.num_proxies = num_proxies
        self.embedding_dim = embedding_dim
        # Trainable proxies, one per class; scaled down at initialisation.
        self.PROXIES = torch.nn.Parameter(torch.randn(num_proxies, self.embedding_dim) / 8)
        self.all_classes = torch.arange(num_proxies)

    def forward(self, batch, labels):
        """
        Args:
            batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels: np.ndarray [(BS x 1)], class assignment [0,...,C-1] per batch element
        Returns:
            proxynca loss (torch.Tensor(), batch-averaged)
        """
        # Re-normalize embeddings and proxies. The factor 3 is arbitrary but
        # helps the actual training (kept from the reference implementation).
        scaled_batch = 3 * torch.nn.functional.normalize(batch, dim=1)
        scaled_proxies = 3 * torch.nn.functional.normalize(self.PROXIES, dim=1)

        # Positive proxy per sample (length-1 slice so stacking keeps a row axis).
        pos_rows = []
        for class_label in labels:
            pos_rows.append(scaled_proxies[class_label:class_label + 1, :])
        pos_proxies = torch.stack(pos_rows)

        # For each sample: every proxy except the one of its own class.
        neg_rows = []
        for class_label in labels:
            other_classes = torch.cat([self.all_classes[:class_label], self.all_classes[class_label + 1:]])
            neg_rows.append(scaled_proxies[other_classes, :])
        neg_proxies = torch.stack(neg_rows)

        # Squared euclidean distances to positive and negative proxies.
        dist_to_neg_proxies = torch.sum((scaled_batch[:, None, :] - neg_proxies).pow(2), dim=-1)
        dist_to_pos_proxies = torch.sum((scaled_batch[:, None, :] - pos_proxies).pow(2), dim=-1)

        # NCA objective: attract each sample to its proxy, repel from the rest.
        negative_log_proxy_nca_loss = torch.mean(dist_to_pos_proxies[:, 0] + torch.logsumexp(-dist_to_neg_proxies, dim=1))
        return negative_log_proxy_nca_loss
"""================================================================================================="""
class CEClassLoss(torch.nn.Module):
    def __init__(self, inp_dim, n_classes):
        """
        Basic Cross Entropy Loss for reference. Can be useful.
        Owns its own linear classification head, so the embedding network
        itself can remain untouched.

        Args:
            inp_dim:   int, embedding dimension of the network.
            n_classes: int, number of target classes.
        Returns:
            Nothing!
        """
        super(CEClassLoss, self).__init__()
        # Dedicated classifier head mapping embeddings to class logits.
        self.mapper = torch.nn.Sequential(torch.nn.Linear(inp_dim, n_classes))
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, batch, labels):
        """
        Args:
            batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels: np.ndarray [(BS x 1)], class assignment [0,...,C-1] per batch element
        Returns:
            cross-entropy loss (torch.Tensor(), batch-averaged by default)
        """
        logits = self.mapper(batch)
        targets = labels.type(torch.cuda.LongTensor)
        return self.ce_loss(logits, targets)
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.array",
"torch.sum",
"torch.arange",
"torch.mean",
"numpy.where",
"itertools.product",
"torch.randn",
"random.sample",
"numpy.random.choice",
"torch.nn.functional.normalize",
"torch.nn.functional.relu",
"warnings.filterwarnings",
"torch.ca... | [((762, 795), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (785, 795), False, 'import warnings\n'), ((4640, 4657), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4649, 4657), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((5326, 5373), 'random.sample', 'random.sample', (['sampled_triplets', 'batch.shape[0]'], {}), '(sampled_triplets, batch.shape[0])\n', (5339, 5373), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((11018, 11055), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (11027, 11055), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((11382, 11402), 'numpy.array', 'np.array', (['neg_tuples'], {}), '(neg_tuples)\n', (11390, 11402), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((15720, 15736), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (15730, 15736), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((17144, 17182), 'torch.sum', 'torch.sum', (['(anchor ** 2 + positive ** 2)'], {}), '(anchor ** 2 + positive ** 2)\n', (17153, 17182), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((17985, 18001), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (17995, 18001), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((21035, 21086), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['(d_ap - beta + self.margin)'], {}), '(d_ap - beta + self.margin)\n', (21059, 21086), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((21102, 21153), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['(beta - d_an + self.margin)'], {}), '(beta - d_an + self.margin)\n', (21126, 21153), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((22529, 
22554), 'torch.arange', 'torch.arange', (['num_proxies'], {}), '(num_proxies)\n', (22541, 22554), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23309, 23383), 'torch.stack', 'torch.stack', (['[PROXIES[pos_label:pos_label + 1, :] for pos_label in labels]'], {}), '([PROXIES[pos_label:pos_label + 1, :] for pos_label in labels])\n', (23320, 23383), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23543, 23610), 'torch.stack', 'torch.stack', (['[PROXIES[neg_labels, :] for neg_labels in neg_proxies]'], {}), '([PROXIES[neg_labels, :] for neg_labels in neg_proxies])\n', (23554, 23610), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((24698, 24725), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (24723, 24725), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((6140, 6169), 'numpy.logical_and', 'np.logical_and', (['neg', '(d > d[p])'], {}), '(neg, d > d[p])\n', (6154, 6169), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((6190, 6233), 'numpy.logical_and', 'np.logical_and', (['neg_mask', '(d < margin + d[p])'], {}), '(neg_mask, d < margin + d[p])\n', (6204, 6233), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((20611, 20628), 'torch.stack', 'torch.stack', (['d_ap'], {}), '(d_ap)\n', (20622, 20628), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((20630, 20647), 'torch.stack', 'torch.stack', (['d_an'], {}), '(d_an)\n', (20641, 20647), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((21324, 21354), 'torch.sum', 'torch.sum', (['(pos_loss + neg_loss)'], {}), '(pos_loss + neg_loss)\n', (21333, 21354), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23136, 23179), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['batch'], {'dim': '(1)'}), '(batch, 
dim=1)\n', (23165, 23179), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23204, 23254), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['self.PROXIES'], {'dim': '(1)'}), '(self.PROXIES, dim=1)\n', (23233, 23254), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((24638, 24673), 'torch.nn.Linear', 'torch.nn.Linear', (['inp_dim', 'n_classes'], {}), '(inp_dim, n_classes)\n', (24653, 24673), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((4811, 4870), 'itertools.product', 'it.product', (['[x]', '[x]', '[y for y in unique_classes if x != y]'], {}), '([x], [x], [y for y in unique_classes if x != y])\n', (4821, 4870), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((10164, 10195), 'numpy.random.choice', 'np.random.choice', (['bs'], {'p': 'q_d_inv'}), '(bs, p=q_d_inv)\n', (10180, 10195), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((13065, 13080), 'torch.log', 'torch.log', (['dist'], {}), '(dist)\n', (13074, 13080), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((13220, 13252), 'numpy.where', 'np.where', (['(labels == anchor_label)'], {}), '(labels == anchor_label)\n', (13228, 13252), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((13306, 13328), 'torch.max', 'torch.max', (['log_q_d_inv'], {}), '(log_q_d_inv)\n', (13315, 13328), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((13430, 13462), 'numpy.where', 'np.where', (['(labels == anchor_label)'], {}), '(labels == anchor_label)\n', (13438, 13462), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((21212, 21258), 'torch.sum', 'torch.sum', (['((pos_loss > 0.0) + (neg_loss > 0.0))'], {}), '((pos_loss > 0.0) + (neg_loss > 0.0))\n', (21221, 21258), False, 'import torch, random, itertools as it, numpy as np, faiss, 
random\n'), ((21376, 21406), 'torch.sum', 'torch.sum', (['(pos_loss + neg_loss)'], {}), '(pos_loss + neg_loss)\n', (21385, 21406), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((22452, 22496), 'torch.randn', 'torch.randn', (['num_proxies', 'self.embedding_dim'], {}), '(num_proxies, self.embedding_dim)\n', (22463, 22496), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23416, 23495), 'torch.cat', 'torch.cat', (['[self.all_classes[:class_label], self.all_classes[class_label + 1:]]'], {}), '([self.all_classes[:class_label], self.all_classes[class_label + 1:]])\n', (23425, 23495), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((23933, 23977), 'torch.logsumexp', 'torch.logsumexp', (['(-dist_to_neg_proxies)'], {'dim': '(1)'}), '(-dist_to_neg_proxies, dim=1)\n', (23948, 23977), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((5992, 6005), 'numpy.where', 'np.where', (['pos'], {}), '(pos)\n', (6000, 6005), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((19386, 19407), 'torch.ones', 'torch.ones', (['n_classes'], {}), '(n_classes)\n', (19396, 19407), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((20845, 20921), 'torch.stack', 'torch.stack', (['[self.beta[labels[triplet[0]]] for triplet in sampled_triplets]'], {}), '([self.beta[labels[triplet[0]]] for triplet in sampled_triplets])\n', (20856, 20921), False, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((5007, 5046), 'itertools.product', 'it.product', (['*[class_dict[j] for j in i]'], {}), '(*[class_dict[j] for j in i])\n', (5017, 5046), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((10074, 10087), 'numpy.where', 'np.where', (['pos'], {}), '(pos)\n', (10082, 10087), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((11145, 11166), 'numpy.where', 
'np.where', (['(labels == x)'], {}), '(labels == x)\n', (11153, 11166), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((6312, 6330), 'numpy.where', 'np.where', (['neg_mask'], {}), '(neg_mask)\n', (6320, 6330), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((6404, 6417), 'numpy.where', 'np.where', (['neg'], {}), '(neg)\n', (6412, 6417), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((8065, 8083), 'numpy.where', 'np.where', (['pos_mask'], {}), '(pos_mask)\n', (8073, 8083), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((8157, 8170), 'numpy.where', 'np.where', (['pos'], {}), '(pos)\n', (8165, 8170), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((8260, 8278), 'numpy.where', 'np.where', (['neg_mask'], {}), '(neg_mask)\n', (8268, 8278), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((8352, 8365), 'numpy.where', 'np.where', (['neg'], {}), '(neg)\n', (8360, 8365), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((7813, 7826), 'numpy.where', 'np.where', (['pos'], {}), '(pos)\n', (7821, 7826), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n'), ((7956, 7969), 'numpy.where', 'np.where', (['neg'], {}), '(neg)\n', (7964, 7969), True, 'import torch, random, itertools as it, numpy as np, faiss, random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 13:33:32 2017
@author: saintlyvi
"""
import pandas as pd
import numpy as np
import os
from math import ceil
import colorlover as cl
import plotly.offline as offline
import plotly.graph_objs as go
import plotly as py
offline.init_notebook_mode(connected=True) #set for plotly offline plotting
from .support import dpet_dir, image_dir
def bmDemandSummary(model_dir = dpet_dir):
    """
    Retrieves the demand-summary expert model stored in csv files in model_dir.
    File names must end with '_summary.csv'.

    Args:
        model_dir: directory containing the expert-model csv files.
    Returns:
        pd.DataFrame with a 'class' and a 'YearsElectrified' column; empty if
        the model files cannot be read.
    """
    # BUGFIX: initialised before the try-block — previously, if os.listdir
    # raised, `summary` was never bound and the final return raised NameError.
    summary = pd.DataFrame()
    try:
        files = os.listdir(model_dir) #get data files containing expert model
        summaryfiles = [f for f in files if "summary" in f]
        frames = []
        for s in summaryfiles:
            name = s.split('_summary.csv')[0]
            data = pd.read_csv(os.path.join(model_dir, s))
            data['class'] = name
            frames.append(data)
        # pd.concat replaces DataFrame.append (deprecated, removed in pandas 2.0);
        # ignore_index=True also covers the old reset_index step.
        if frames:
            summary = pd.concat(frames, ignore_index=True)
        summary.rename(columns={'Year':'YearsElectrified'}, inplace=True)
    except Exception:
        ## TODO: get data from energydata.uct.ac.za
        pass
    return summary
def bmHourlyProfiles(model_dir = dpet_dir):
    """
    Retrieves the hourly-profiles expert model stored in csv files in model_dir.
    File names must end with '_hourly.csv'.

    Args:
        model_dir: directory containing the expert-model csv files.
    Returns:
        pd.DataFrame with 'class', 'YearsElectrified', 'month' (categorical,
        1-12), 'daytype' and 'hour' columns; empty if the files cannot be read.
    """
    # BUGFIX: initialised before the try-block — previously, if os.listdir
    # raised, `hourlyprofiles` was never bound and the return raised NameError.
    hourlyprofiles = pd.DataFrame()
    try:
        files = os.listdir(model_dir) #get data files containing expert model
        hourlyfiles = [f for f in files if "hourly" in f]
        frames = []
        for h in hourlyfiles:
            name = h.split('_hourly.csv')[0]
            data = pd.read_csv(os.path.join(model_dir, h))
            data['class'] = name
            frames.append(data)
        # pd.concat replaces DataFrame.append (deprecated, removed in pandas 2.0);
        # ignore_index=True also covers the old reset_index step.
        if frames:
            hourlyprofiles = pd.concat(frames, ignore_index=True)
        hourlyprofiles.rename(columns={'Year':'YearsElectrified',
                              'Month':'month',
                              'Day Type':'daytype',
                              'Time of day [hour]':'hour',}, inplace=True)
        hourlyprofiles.month = hourlyprofiles.month.astype('category')
        # Map month names to calendar numbers 1-12. Assignment replaces the
        # deprecated inplace=True form of Series.cat.rename_categories.
        hourlyprofiles.month = hourlyprofiles.month.cat.rename_categories({
                'April':4, 'August':8, 'December':12, 'February':2, 'January':1,
                'July':7, 'June':6, 'March':3, 'May':5, 'November':11,
                'October':10, 'September':9})
    except Exception:
        ## TODO: get data from energydata.uct.ac.za
        pass
    return hourlyprofiles
def benchmarkModel():
    """
    Fetch data for the existing/expert DPET model.

    Returns:
        (demand_summary, hourly_profiles, summary_value_column, hourly_value_column)
    """
    demand_summary = bmDemandSummary()
    hourly_profiles = bmHourlyProfiles()
    summary_col = 'Energy [kWh]'
    hourly_col = 'Mean [kVA]'
    return demand_summary, hourly_profiles, summary_col, hourly_col
def plotBmDemandSummary(customer_class, model_dir = dpet_dir):
    """
    Plot the average monthly energy consumption for one customer class from
    1 to 15 years since electrification. Data is based on the DPET model.

    Args:
        customer_class: str, class name as used in the expert-model csv files.
        model_dir: directory containing the expert-model csv files.
    Returns:
        plotly offline iplot handle.
    """
    summary = bmDemandSummary(model_dir)
    class_rows = summary[summary['class'] == customer_class][['YearsElectrified', 'Energy [kWh]']]
    bars = [go.Bar(x=class_rows['YearsElectrified'],
                   y=class_rows['Energy [kWh]'],
                   name=customer_class)]
    x_axis = dict(title='years since electrification',
                  tickfont=dict(size=14, color='rgb(107, 107, 107)'))
    y_axis = dict(title='average annual kWh/month',
                  titlefont=dict(size=16, color='rgb(107, 107, 107)'))
    layout = go.Layout(
        title='Annualised Monthly Energy Consumption for "' + customer_class + '" Customer Class',
        xaxis=x_axis,
        yaxis=y_axis)
    out_file = os.path.join(image_dir, 'demand_summary_' + customer_class + '.png')
    return offline.iplot({"data": bars, "layout": layout}, filename=out_file)
def plot15YearBmDemandSummary(model_dir = dpet_dir):
    """
    Plot the average monthly energy consumption for all customer classes from
    1 to 15 years since electrification. Data is based on the DPET model.

    Args:
        model_dir: directory containing the expert-model csv files.
    Returns:
        plotly offline iplot handle.
    """
    # One colorlover sequential palette per class, assigned in order of
    # increasing energy consumption.
    clrs = ['Greens','RdPu','Blues','YlOrRd','Purples','Reds', 'Greys']
    summary = bmDemandSummary(model_dir)
    df = summary[['class','YearsElectrified','Energy [kWh]']].sort_values(by='Energy [kWh]')
    traces = []
    for count, c in enumerate(df['class'].unique()):
        line_colour = cl.flipper()['seq']['3'][clrs[count]][1]
        traces.append(go.Scatter(
                x=df.loc[df['class'] == c, 'YearsElectrified'],
                y=df.loc[df['class'] == c, 'Energy [kWh]'],
                name=c,
                fill='tonexty',
                mode='lines',
                line=dict(color=line_colour, width=3)))
    layout = go.Layout(
        title='Annualised Monthly Energy Consumption for Domestic Energy Consumers',
        xaxis=dict(title='years since electrification',
                   tickfont=dict(size=14, color='rgb(107, 107, 107)')),
        yaxis=dict(title='average annual kWh/month',
                   titlefont=dict(size=16, color='rgb(107, 107, 107)')))
    out_file = os.path.join(image_dir, '15year_demand_summary' + '.png')
    return offline.iplot({"data": traces, "layout": layout}, filename=out_file)
def plotBmHourlyHeatmap(customer_class, year_list, daytype='Weekday', model_dir=dpet_dir):
    """
    Plot hourly load-profile heatmaps (month x hour) for a specified customer
    class, day type and list of years since electrification, one subplot per
    year. Data is based on the DPET model.

    Args:
        customer_class: str, class name as used in the expert-model csv files.
        year_list: list of int, years since electrification (one subplot each).
        daytype: str, day type to plot (e.g. 'Weekday').
        model_dir: directory containing the expert-model csv files.
    Returns:
        plotly offline iplot handle.
    """
    df = bmHourlyProfiles(model_dir)
    maxdemand = df['Mean [kVA]'].max() #get consistent max demand & color scale across classes
    df = df[(df['daytype']==daytype) & (df['class']==customer_class)]
    #set heatmap colours
    colors = cl.flipper()['div']['5']['RdYlBu']
    scl = [[0,colors[0]],[0.25,colors[1]],[0.5,colors[2]],[0.75,colors[3]],[1,colors[4]]]
    #set subplot parameters: at most 3 columns, as many rows as needed
    if len(year_list) < 3:
        ncol = len(year_list)
    else:
        ncol = 3
    nrow = ceil(len(year_list)/ncol)
    fig = py.tools.make_subplots(rows=nrow, cols=ncol,
            subplot_titles=['Year ' + str(x) for x in year_list],
            horizontal_spacing = 0.1, print_grid=False)
    r = 1 #initiate row
    c = 1 #initiate column
    for yr in year_list:
        # wrap back to the first column once a row of subplots is full
        if c == ncol + 1:
            c = 1
        # subplot row for this year; r counts years processed so far (1-based)
        ro = ceil(r/ncol)
        #set colorbar parameters
        if nrow == 1:
            cblen = 1
            yanc = 'middle'
        else:
            cblen = 0.5
            yanc = 'bottom'
        if r == 1: #toggle colorscale: only the first heatmap shows the shared colorbar
            scl_switch=True
        else:
            scl_switch=False
        #generate trace
        # NOTE(review): the bare except silently drops any year whose data is
        # missing or malformed — that subplot simply stays empty.
        try:
            data = df[df['YearsElectrified']==yr]
            z = data['Mean [kVA]'].reset_index(drop=True)
            x = data['hour']
            y = data.month
            # hover text per (month, hour) cell; assumes z is laid out as 24
            # consecutive hours per month — TODO confirm against bmHourlyProfiles
            hovertext = list()
            for yi, yy in enumerate(y.unique()):
                hovertext.append(list())
                for xi, xx in enumerate(x.unique()):
                    hovertext[-1].append('hour: {}<br />month: {}<br />{:.3f} kVA'.format(xx, yy, z[24 * yi + xi]))
            trace = go.Heatmap(z = z,
                        x = x,
                        y = y,
                        zmin = 0,
                        zmax = maxdemand,
                        text = hovertext,
                        hoverinfo="text",
                        colorscale=scl,
                        reversescale=True,
                        showscale=scl_switch,
                        colorbar=dict(
                                title='kVA',
                                len=cblen,
                                yanchor=yanc))
            fig.append_trace(trace, ro, c)
        except:
            pass
        c += 1
        r += 1
    fig['layout'].update(showlegend=False,
        title='<b>'+customer_class+'</b> mean estimated <b>'+daytype+'</b> energy demand (kVA) <br />' + ', '.join(map(str, year_list[:-1])) + ' and ' + str(year_list[-1]) + ' years after electrification',
        height=350+300*(nrow-1))
    # Relabel every subplot's axes: months as category names on y, hours on x.
    # NOTE(review): the range runs to len(year_list)+1 inclusive, i.e. one axis
    # index beyond the number of subplots — presumably harmless; confirm.
    for k in range(1, len(year_list)+2):
        fig['layout'].update({'yaxis{}'.format(k): go.YAxis(type = 'category',
                             ticktext = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'],#data.month.unique(),
                             tickvals = np.arange(1, 13, 1),
                             tickangle = -15,
                             tickwidth = 0.5
                            ),
               'xaxis{}'.format(k): go.XAxis(title = 'Time of day (hours)',
                                     tickvals = np.arange(0, 24, 2))
                 })
    return offline.iplot(fig, filename='testagain')
def plotBmHourlyProfiles(electrified_min, electrified_max, customer_class, model_dir=dpet_dir, title=''):
    """
    Plot mean hourly demand profiles (in ampere) per season and day type for
    one customer class, averaged over a range of years since electrification.
    Writes the figure to an html file under img/benchmark/bm0/.

    Args:
        electrified_min, electrified_max: int, bounds (exclusive/inclusive) on
            years since electrification to average over.
        customer_class: str, class name as used in the expert-model csv files.
        model_dir: directory containing the expert-model csv files (unused by
            this function; kept for interface consistency).
        title: str, optional plot title; defaults to the experiment name.
    """
    hourlyprofiles = bmHourlyProfiles()
    # Southern-hemisphere winter: June-September.
    hourlyprofiles['season'] = hourlyprofiles['month'].apply(lambda x: 'winter' if x in [6, 7, 8, 9] else 'summer')
    electrified_range = range(electrified_min+1, electrified_max+1)
    selected = (hourlyprofiles['class'] == customer_class) & (hourlyprofiles['YearsElectrified'].isin(electrified_range))
    # Mean profile per (season, daytype, hour); divide by 0.23 to convert kVA to A.
    df = hourlyprofiles[selected].groupby(['season','daytype','hour'])['Mean [kVA]'].mean().unstack()/0.23
    experiment_name = 'benchmark_'+customer_class+'_'+str(electrified_min)+'-'+str(electrified_max)+'yrs_electrified'
    plot_title = experiment_name if title == '' else title
    #Create colour scale: three warm hues for one season, three cool for the other.
    spectral = cl.scales['11']['div']['Spectral']
    colours = [spectral[c] for c in [0,2,3]] + [spectral[c] for c in [8,9,10]]
    data = []
    for i, col in enumerate(df.T.columns):
        data.append(go.Scatter(
            x = df.T.index.map(lambda x: str(x)+'h00'),
            y = df.T.iloc[:,i],
            line = {'color':colours[i],'width':3},
            mode = 'lines',
            name = col[0]+': '+col[1]))
    fig = go.Figure(data=data, layout= go.Layout(title=plot_title, height=400, font=dict(size=20)))
    fig['layout']['xaxis'].update(title='time of day', dtick=2, titlefont=dict(size=18), tickfont=dict(size=16))
    fig['layout']['yaxis'].update(title='hourly electricity demand (A)', titlefont=dict(size=18), tickfont=dict(size=16))
    fig['layout']['margin'].update(t=50,r=80,b=100,l=90,pad=10)
    offline.plot(fig, filename='img/benchmark/bm0/'+experiment_name+'.html')
| [
"os.listdir",
"math.ceil",
"plotly.offline.iplot",
"plotly.offline.plot",
"plotly.offline.init_notebook_mode",
"os.path.join",
"colorlover.flipper",
"plotly.graph_objs.Bar",
"pandas.DataFrame",
"numpy.arange"
] | [((296, 338), 'plotly.offline.init_notebook_mode', 'offline.init_notebook_mode', ([], {'connected': '(True)'}), '(connected=True)\n', (322, 338), True, 'import plotly.offline as offline\n'), ((9800, 9840), 'plotly.offline.iplot', 'offline.iplot', (['fig'], {'filename': '"""testagain"""'}), "(fig, filename='testagain')\n", (9813, 9840), True, 'import plotly.offline as offline\n'), ((11692, 11768), 'plotly.offline.plot', 'offline.plot', (['fig'], {'filename': "('img/benchmark/bm0/' + experiment_name + '.html')"}), "(fig, filename='img/benchmark/bm0/' + experiment_name + '.html')\n", (11704, 11768), True, 'import plotly.offline as offline\n'), ((617, 638), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (627, 638), False, 'import os\n'), ((766, 780), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (778, 780), True, 'import pandas as pd\n'), ((1438, 1459), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (1448, 1459), False, 'import os\n'), ((1592, 1606), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1604, 1606), True, 'import pandas as pd\n'), ((3294, 3369), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': "df['YearsElectrified']", 'y': "df['Energy [kWh]']", 'name': 'customer_class'}), "(x=df['YearsElectrified'], y=df['Energy [kWh]'], name=customer_class)\n", (3300, 3369), True, 'import plotly.graph_objs as go\n'), ((6998, 7012), 'math.ceil', 'ceil', (['(r / ncol)'], {}), '(r / ncol)\n', (7002, 7012), False, 'from math import ceil\n'), ((4126, 4194), 'os.path.join', 'os.path.join', (['image_dir', "('demand_summary_' + customer_class + '.png')"], {}), "(image_dir, 'demand_summary_' + customer_class + '.png')\n", (4138, 4194), False, 'import os\n'), ((5807, 5864), 'os.path.join', 'os.path.join', (['image_dir', "('15year_demand_summary' + '.png')"], {}), "(image_dir, '15year_demand_summary' + '.png')\n", (5819, 5864), False, 'import os\n'), ((889, 915), 'os.path.join', 'os.path.join', (['model_dir', 's'], {}), 
'(model_dir, s)\n', (901, 915), False, 'import os\n'), ((1713, 1739), 'os.path.join', 'os.path.join', (['model_dir', 'h'], {}), '(model_dir, h)\n', (1725, 1739), False, 'import os\n'), ((6385, 6397), 'colorlover.flipper', 'cl.flipper', ([], {}), '()\n', (6395, 6397), True, 'import colorlover as cl\n'), ((9254, 9273), 'numpy.arange', 'np.arange', (['(1)', '(13)', '(1)'], {}), '(1, 13, 1)\n', (9263, 9273), True, 'import numpy as np\n'), ((9696, 9715), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(2)'], {}), '(0, 24, 2)\n', (9705, 9715), True, 'import numpy as np\n'), ((4981, 4993), 'colorlover.flipper', 'cl.flipper', ([], {}), '()\n', (4991, 4993), True, 'import colorlover as cl\n')] |
#!/usr/bin/python3
"""
tango colors
"""
import matplotlib.pyplot as plt
import numpy as np
import pdb
def colormap(rgb: bool=False):
    """
    Create an array of visually distinctive RGB values (Tango palette subset).

    Args:
        rgb: boolean, whether to return rows in RGB or BGR order. BGR
             corresponds to the OpenCV default.
    Returns:
        Numpy array of dtype uint8, shape (N, 3), representing the palette.
    """
    palette = [
        [252, 233, 79],
        [196, 160, 0],
        [252, 175, 62],
        [206, 92, 0],
        [233, 185, 110],
        [193, 125, 17],
        [143, 89, 2],
        [138, 226, 52],
        [78, 154, 6],
        [114, 159, 207],
        [32, 74, 135],
        [173, 127, 168],
        [92, 53, 102],
        [239, 41, 41],
        [164, 0, 0],
        [238, 238, 236],
        [136, 138, 133],
        [46, 52, 54],
    ]
    color_list = np.array(palette).astype(np.uint8)
    assert color_list.shape[1] == 3
    assert color_list.ndim == 2
    # Reverse each row to turn RGB into BGR when requested.
    return color_list if rgb else color_list[:, ::-1]
if __name__ == '__main__':
    """ Make sure things work as expected"""
    # Build a (num_colors, 10) label image whose rows step through the
    # palette, then render it so the palette can be inspected visually.
    colors = colormap(rgb=True)
    num_colors = colors.shape[0]
    # BUGFIX: removed the stray pdb.set_trace() debugging breakpoint, which
    # halted the script on every run.
    semantic_img = np.arange(num_colors)
    semantic_img = np.tile(semantic_img, (10, 1))
    semantic_img = semantic_img.T
    img = colors[semantic_img]
    plt.imshow(img)
    plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.tile",
"numpy.array",
"pdb.set_trace",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1532, 1547), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1545, 1547), False, 'import pdb\n'), ((1567, 1588), 'numpy.arange', 'np.arange', (['num_colors'], {}), '(num_colors)\n', (1576, 1588), True, 'import numpy as np\n'), ((1608, 1638), 'numpy.tile', 'np.tile', (['semantic_img', '(10, 1)'], {}), '(semantic_img, (10, 1))\n', (1615, 1638), True, 'import numpy as np\n'), ((1708, 1723), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1718, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1738), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1736, 1738), True, 'import matplotlib.pyplot as plt\n'), ((429, 730), 'numpy.array', 'np.array', (['[[252, 233, 79], [196, 160, 0], [252, 175, 62], [206, 92, 0], [233, 185, \n 110], [193, 125, 17], [143, 89, 2], [138, 226, 52], [78, 154, 6], [114,\n 159, 207], [32, 74, 135], [173, 127, 168], [92, 53, 102], [239, 41, 41],\n [164, 0, 0], [238, 238, 236], [136, 138, 133], [46, 52, 54]]'], {}), '([[252, 233, 79], [196, 160, 0], [252, 175, 62], [206, 92, 0], [233,\n 185, 110], [193, 125, 17], [143, 89, 2], [138, 226, 52], [78, 154, 6],\n [114, 159, 207], [32, 74, 135], [173, 127, 168], [92, 53, 102], [239, \n 41, 41], [164, 0, 0], [238, 238, 236], [136, 138, 133], [46, 52, 54]])\n', (437, 730), True, 'import numpy as np\n')] |
from handy import read
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from itertools import combinations
# Puzzle input: one integer per line (Advent of Code 2020, day 9).
lines = [int(x) for x in read(9)]
def validate(n, last):
    """Return True if two distinct values in `last` sum to n, else False.

    Duplicate values are collapsed, so a pair of equal numbers never
    counts (the puzzle guarantees the two addends differ).
    """
    # Values above n cannot take part in a pair (inputs are assumed positive).
    candidates = {value for value in last if value <= n}
    return any(a + b == n for a, b in combinations(candidates, 2))
invalid = 0
# Part 1: find the value that is not the sum of two of the 25 numbers before it.
for idx in range(25, len(lines)):
    preceding = lines[idx - 25:idx]
    if not validate(lines[idx], preceding):
        print('Not valid:', lines[idx])
        invalid = lines[idx]
# Part 2: find a contiguous run of numbers summing to the invalid value, then
# report the sum of that run's smallest and largest members.
for size in range(2, 100):
    window_sums = np.sum(sliding_window_view(lines, window_shape=size), axis=1)
    if invalid in window_sums:
        print('Located, window size', size)
        start = np.where(window_sums == invalid)[0][0]
        run = sliding_window_view(lines, window_shape=size)[start]
        print(min(run) + max(run))
| [
"itertools.combinations",
"handy.read",
"numpy.lib.stride_tricks.sliding_window_view",
"numpy.where"
] | [((253, 274), 'itertools.combinations', 'combinations', (['last', '(2)'], {}), '(last, 2)\n', (265, 274), False, 'from itertools import combinations\n'), ((159, 166), 'handy.read', 'read', (['(9)'], {}), '(9)\n', (163, 166), False, 'from handy import read\n'), ((558, 611), 'numpy.lib.stride_tricks.sliding_window_view', 'sliding_window_view', (['lines'], {'window_shape': 'window_shape'}), '(lines, window_shape=window_shape)\n', (577, 611), False, 'from numpy.lib.stride_tricks import sliding_window_view\n'), ((767, 820), 'numpy.lib.stride_tricks.sliding_window_view', 'sliding_window_view', (['lines'], {'window_shape': 'window_shape'}), '(lines, window_shape=window_shape)\n', (786, 820), False, 'from numpy.lib.stride_tricks import sliding_window_view\n'), ((715, 747), 'numpy.where', 'np.where', (['(window_sums == invalid)'], {}), '(window_sums == invalid)\n', (723, 747), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from timeit import default_timer as timer
#from matplotlib.pylab import imshow, jet, show, ion
import numpy as np
from numba import jit, int32, float64, njit, prange
import cv2
from numba import jit
# Color map: flat list of byte triples, one triple per escape-iteration bucket.
# create_fractal reads cmap[3*val], cmap[3*val+1], cmap[3*val+2] and writes them
# to image channels 2, 1, 0 respectively (i.e. triples are consumed as R, G, B
# into a BGR image).
cmap = [ 66, 30, 15, 25, 7, 26, 9, 1, 47, 4, 4, 73, 0, 7, 100, 12, 44, 138,
        24, 82, 177, 57, 125, 209, 134, 181, 229, 211, 236, 248, 241, 233, 191,
        248, 201, 95, 255, 170, 0, 204, 128, 0, 153, 87, 0, 106, 52, 3, 106, 52, 3 ]
#@jit(int32(float64,float64,int32), nopython=True)
@jit
def mandel(x, y, max_iters):
    """
    Given the real and imaginary parts of a complex number, determine whether
    it is a candidate for membership in the Mandelbrot set within a fixed
    number of iterations.

    Returns the iteration at which the orbit first escapes, or 255 if it does
    not escape within max_iters iterations.
    """
    c = complex(x, y)
    z = 0.0j
    for step in range(max_iters):
        z = z * z + c
        # |z|^2 >= 4 means |z| >= 2: the orbit is guaranteed to diverge.
        if z.real * z.real + z.imag * z.imag >= 4:
            return step
    return 255
@njit(parallel=True)
def create_fractal(min_x, max_x, min_y, max_y, image, cmap, iters):
    """Render the Mandelbrot set into `image`, colouring by escape time.

    Args:
        min_x, max_x, min_y, max_y: bounds of the complex-plane window.
        image: HxWx3 uint8 array, written in place (channels 2,1,0 get
            cmap[index], cmap[index+1], cmap[index+2]).
        cmap: flat sequence of byte triples used as the palette.
        iters: maximum escape-time iterations passed to mandel().

    Returns:
        The same `image` array, for convenience.
    """
    height = image.shape[0]
    width = image.shape[1]
    pixel_size_x = (max_x - min_x) / width
    pixel_size_y = (max_y - min_y) / height
    l = len(cmap)
    for x in prange(width):  # outer loop parallelised by numba
        real = min_x + x * pixel_size_x
        for y in range(height):
            imag = min_y + y * pixel_size_y
            val = mandel(real, imag, iters)
            # lookup color from val; clamp so that index+2 stays in range.
            # BUG FIX: the old test was `index > l`, which let index == l (and
            # l-1, l-2) through and read cmap[l..l+2] out of bounds.
            index = 3 * val
            if index > l - 3:
                index = l - 3
            image[y, x][2] = cmap[index]
            image[y, x][1] = cmap[index + 1]
            image[y, x][0] = cmap[index + 2]
    return image
image = np.zeros((2048, 4096, 3), dtype=np.uint8)
# first call: also triggers numba JIT compilation, so it is kept out of the timed loop
create_fractal(-2.0, 1.0, -1.0, 1.0, image, cmap, 20)
n = 100
s = timer()
for i in range(n):
    create_fractal(-2.0, 1.0, -1.0, 1.0, image, cmap, 20)
e = timer()
print(e - s, (e-s)/n)  # total and per-iteration wall time
#imshow(image)
#jet()
#ion()
#show()
# despite the old variable name, this encodes a PNG (Huffman-only, fixed strategy)
png_bytes = cv2.imencode('.png', image, [cv2.IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, 1, cv2.IMWRITE_PNG_STRATEGY_FIXED, 1])[1].tobytes()
# use a context manager instead of manual open/close so the file is always closed
with open('mandelbrot.png', 'wb') as fp:
    fp.write(png_bytes)
| [
"cv2.imencode",
"timeit.default_timer",
"numba.njit",
"numpy.zeros",
"numba.prange"
] | [((1053, 1072), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (1057, 1072), False, 'from numba import jit, int32, float64, njit, prange\n'), ((1753, 1794), 'numpy.zeros', 'np.zeros', (['(2048, 4096, 3)'], {'dtype': 'np.uint8'}), '((2048, 4096, 3), dtype=np.uint8)\n', (1761, 1794), True, 'import numpy as np\n'), ((1861, 1868), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1866, 1868), True, 'from timeit import default_timer as timer\n'), ((1948, 1955), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1953, 1955), True, 'from timeit import default_timer as timer\n'), ((1315, 1328), 'numba.prange', 'prange', (['width'], {}), '(width)\n', (1321, 1328), False, 'from numba import jit, int32, float64, njit, prange\n'), ((2027, 2138), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'image', '[cv2.IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, 1, cv2.IMWRITE_PNG_STRATEGY_FIXED, 1]'], {}), "('.png', image, [cv2.IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, 1, cv2.\n IMWRITE_PNG_STRATEGY_FIXED, 1])\n", (2039, 2138), False, 'import cv2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:40:08 2020
plot composite plots sea level & barotropic currents for ROMS sensitivity experiments with uniform wind speed change and closed english channel,
and difference in responses w.r.t. same experiments with an open english channel
@author: thermans
"""
import xarray as xr
import matplotlib.ticker as mticker
import matplotlib.pyplot as plt
import os
import numpy as np
import fnmatch
import cmocean
import cartopy.crs as ccrs
plt.close('all')
out_dir = '/Users/thermans/Documents/PhD/Phase4_seasonal/Figures' #where to store the figure
#initialize figure
my_cmap = cmocean.cm.balance
my_cmap.set_bad('grey')  # masked (land) cells are drawn grey
fig=plt.figure(figsize=(6, 7.5))
gs = fig.add_gridspec(2,2)  # 2x2 panel layout
gs.update(hspace = .1,wspace=.2,bottom=0.1,top=.98)
plot_titles = ['(a) Exp_SW_cc, DJF','(b) Exp_NE_cc, JJA','(c) Closed minus open','(d) Closed minus open']
exps_dir = '/Users/thermans/Documents/Modeling/ROMS/northsea8/'
exps = ['swWindUniform_sq2ms_chclosed','neWindUniform_sq2ms_chclosed','swWindUniform_sq2ms','neWindUniform_sq2ms'] #experiments to plot
#reference experiments (with open and closed channels)
org_dir = '/Users/thermans/Documents/Modeling/ROMS/northsea8/'
org_exps = ['Exp70_1993_1995_era5_gl12v1_v3e10_rx013_chclosed','Exp70_1993_1995_era5_gl12v1_v3e10_rx013_chclosed',
            'Exp39_1993_2018_era5_gl12v1_v3e10_rx013','Exp39_1993_2018_era5_gl12v1_v3e10_rx013']
# bathymetry, used to overlay the 200 m depth contour on each panel
bathy = xr.open_dataset('/Users/thermans/Documents/Modeling/ROMS/preprocessing/bathymetry/etopo1_bedrock_bigNorthSea1.nc')
# one panel per experiment: (a,b) closed-channel responses, (c,d) closed-minus-open differences
for e,exp in enumerate(exps):
    dns = fnmatch.filter(os.listdir(exps_dir),"*"+exp)
    ds = xr.open_dataset(os.path.join(exps_dir,dns[0],'NorthSea8_avg_timeseries_monthly.nc'))
    org_ds = xr.open_dataset(os.path.join(org_dir,org_exps[e],'NorthSea8_avg_timeseries_monthly.nc'))
    if np.mod(e,2)==0: #select months from desired season
        season='DJF'
    else:
        season='JJA'
    # monthly time indices belonging to each season (MAM/SON kept for completeness;
    # unreachable here because season is only ever set to DJF or JJA above)
    if season == 'DJF':
        time_i = [1,11,12,13,23,24,25,35]
    elif season == 'MAM':
        time_i = [2,3,4,14,15,16,26,27,28]
    elif season == 'JJA':
        time_i = [5,6,7,17,18,19,29,30,31]
    elif season == 'SON':
        time_i = [8,9,10,20,21,22,32,33,34]
    # seasonal-mean response: perturbed minus reference run
    diff_zeta = (ds-org_ds).zeta.isel(ocean_time=time_i).mean(dim='ocean_time') #calculate diff w.r.t. reference
    diff_vbar = (ds-org_ds).vbar.isel(ocean_time=time_i).mean(dim='ocean_time')
    diff_ubar = (ds-org_ds).ubar.isel(ocean_time=time_i).mean(dim='ocean_time')
    #interpolate u,v points to rho points
    diff_vbar_rho = np.empty((218,242))
    diff_vbar_rho[:] = np.nan
    diff_vbar_rho[1:-1,:] = (diff_vbar[0:-1,:] + diff_vbar[1:,:])/2
    diff_ubar_rho = np.empty((218,242))
    diff_ubar_rho[:] = np.nan
    diff_ubar_rho[:,1:-1] = (diff_ubar[:,0:-1] + diff_ubar[:,1:])/2
    if e==0: #store outcomes for closed channel to subtract outcomes from open channel
        diff_zeta_sw_cc = diff_zeta
        diff_ubar_rho_sw_cc = diff_ubar_rho
        diff_vbar_rho_sw_cc = diff_vbar_rho
    elif e==1:
        diff_zeta_ne_cc = diff_zeta
        diff_ubar_rho_ne_cc = diff_ubar_rho
        diff_vbar_rho_ne_cc = diff_vbar_rho
    if e<2:
        ax = fig.add_subplot(gs[0,e], projection=ccrs.Orthographic(0, 50))
    else:
        ax = fig.add_subplot(gs[1,e-2], projection=ccrs.Orthographic(0, 50))
    # sea level in cm; subsample velocity every 3rd grid point for the quiver plot
    sl = 100*diff_zeta
    u = diff_ubar_rho[::3,::3]
    v = diff_vbar_rho[::3,::3]
    if e==2:
        sl = 100*diff_zeta_sw_cc - sl
        u = diff_ubar_rho_sw_cc[::3,::3]-u
        v = diff_vbar_rho_sw_cc[::3,::3]-v
    elif e==3:
        sl = 100*diff_zeta_ne_cc - sl
        u = diff_ubar_rho_ne_cc[::3,::3]-u
        v = diff_vbar_rho_ne_cc[::3,::3]-v
    # difference panels use a tighter colour range than the response panels
    if e>1:
        im=sl.plot.pcolormesh("lon_rho", "lat_rho", ax=ax,vmin=-4,vmax=4,cmap=my_cmap,label='dSSH [m]',transform=ccrs.PlateCarree(),add_colorbar=False,rasterized=True)
    else:
        im=sl.plot.pcolormesh("lon_rho", "lat_rho", ax=ax,vmin=-12,vmax=12,cmap=my_cmap,label='dSSH [m]',transform=ccrs.PlateCarree(),add_colorbar=False,rasterized=True)
    bathy.Band1.plot.contour(transform=ccrs.PlateCarree(),ax=ax,levels=[-200],colors=['white'],add_colorbar=False)
    plt.scatter(1.55,51.05,edgecolor='lightgreen',facecolor='none',transform=ccrs.PlateCarree(),marker='o',s=250,linewidth=1.5,zorder=5) #mark closed channel with green circle
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,linewidth=1, color='lightgrey', alpha=0, linestyle='-')
    gl.xlocator = mticker.FixedLocator([-5, 0, 5])
    gl.top_labels = gl.right_labels = gl.left_labels = gl.bottom_labels = False
    if np.mod(e,2)==0:
        gl.left_labels = True #don't label top and right axes
    if e>1:
        gl.bottom_labels = True
    gl.xlabel_style = {'color': 'black','rotation':0}
    gl.ylabel_style = {'color': 'black','rotation':0}
    ax.coastlines(resolution='10m',color='black',zorder=4)
    ax.set_extent([-8.5,7.5,47,59.5])
    q=ax.quiver(ds.lon_rho.values[::3,::3],ds.lat_rho.values[::3,::3],u,v,
              scale=.5,color='black',width=.005,edgecolors='k',transform=ccrs.PlateCarree())
    ax.set_title(plot_titles[e])
    # shared colorbars / quiver keys, one per row
    if e==1:
        cax = fig.add_axes([0.26, .57, .5, .02])
        fig.colorbar(im, cax=cax,orientation='horizontal',label='Sea-level response [cm]',extend='neither')
        qk = ax.quiverkey(q, 0.82, 0.59, 0.05, label='5 cm/s', labelpos='E',coordinates='figure')
    if e==3:
        cax = fig.add_axes([0.26, .09, .5, .02])
        fig.colorbar(im, cax=cax,orientation='horizontal',label='Sea-level difference [cm]',extend='neither')
        qk = ax.quiverkey(q, 0.82, 0.11, 0.05, label='5 cm/s', labelpos='E',coordinates='figure')
fig.savefig(os.path.join(out_dir,'Figure7_uniform_wind_cc_roms.pdf'),dpi=300)
"os.listdir",
"matplotlib.ticker.FixedLocator",
"cartopy.crs.Orthographic",
"os.path.join",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.empty",
"xarray.open_dataset",
"numpy.mod"
] | [((511, 527), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (520, 527), True, 'import matplotlib.pyplot as plt\n'), ((699, 727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (709, 727), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1579), 'xarray.open_dataset', 'xr.open_dataset', (['"""/Users/thermans/Documents/Modeling/ROMS/preprocessing/bathymetry/etopo1_bedrock_bigNorthSea1.nc"""'], {}), "(\n '/Users/thermans/Documents/Modeling/ROMS/preprocessing/bathymetry/etopo1_bedrock_bigNorthSea1.nc'\n )\n", (1470, 1579), True, 'import xarray as xr\n'), ((2600, 2620), 'numpy.empty', 'np.empty', (['(218, 242)'], {}), '((218, 242))\n', (2608, 2620), True, 'import numpy as np\n'), ((2738, 2758), 'numpy.empty', 'np.empty', (['(218, 242)'], {}), '((218, 242))\n', (2746, 2758), True, 'import numpy as np\n'), ((4578, 4610), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-5, 0, 5]'], {}), '([-5, 0, 5])\n', (4598, 4610), True, 'import matplotlib.ticker as mticker\n'), ((5861, 5918), 'os.path.join', 'os.path.join', (['out_dir', '"""Figure7_uniform_wind_cc_roms.pdf"""'], {}), "(out_dir, 'Figure7_uniform_wind_cc_roms.pdf')\n", (5873, 5918), False, 'import os\n'), ((1626, 1646), 'os.listdir', 'os.listdir', (['exps_dir'], {}), '(exps_dir)\n', (1636, 1646), False, 'import os\n'), ((1681, 1750), 'os.path.join', 'os.path.join', (['exps_dir', 'dns[0]', '"""NorthSea8_avg_timeseries_monthly.nc"""'], {}), "(exps_dir, dns[0], 'NorthSea8_avg_timeseries_monthly.nc')\n", (1693, 1750), False, 'import os\n'), ((1779, 1852), 'os.path.join', 'os.path.join', (['org_dir', 'org_exps[e]', '"""NorthSea8_avg_timeseries_monthly.nc"""'], {}), "(org_dir, org_exps[e], 'NorthSea8_avg_timeseries_monthly.nc')\n", (1791, 1852), False, 'import os\n'), ((1864, 1876), 'numpy.mod', 'np.mod', (['e', '(2)'], {}), '(e, 2)\n', (1870, 1876), True, 'import numpy as np\n'), ((4703, 4715), 'numpy.mod', 'np.mod', (['e', 
'(2)'], {}), '(e, 2)\n', (4709, 4715), True, 'import numpy as np\n'), ((4183, 4201), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4199, 4201), True, 'import cartopy.crs as ccrs\n'), ((4337, 4355), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4353, 4355), True, 'import cartopy.crs as ccrs\n'), ((4467, 4485), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4483, 4485), True, 'import cartopy.crs as ccrs\n'), ((5247, 5265), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5263, 5265), True, 'import cartopy.crs as ccrs\n'), ((3285, 3309), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', (['(0)', '(50)'], {}), '(0, 50)\n', (3302, 3309), True, 'import cartopy.crs as ccrs\n'), ((3372, 3396), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', (['(0)', '(50)'], {}), '(0, 50)\n', (3389, 3396), True, 'import cartopy.crs as ccrs\n'), ((3909, 3927), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3925, 3927), True, 'import cartopy.crs as ccrs\n'), ((4089, 4107), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4105, 4107), True, 'import cartopy.crs as ccrs\n')] |
import matplotlib
matplotlib.use('tkagg')  # must select the GUI backend before pyplot is imported
import os
import subprocess
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
import tensorflow as tf
from ampligraph.common.aux import rel_rank_stat, load_data, eigengap
from ampligraph.common.aux_play import get_model, viz_distm
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score
from sacred import Experiment
from sacred.observers import MongoObserver
import numpy as np
import warnings
from pymongo import MongoClient
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
# suppress TensorFlow C++-level and Python-level log noise
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR) # https://stackoverflow.com/questions/48608776/how-to-suppress-tensorflow-warning-displayed-in-result
# CLI arguments; main() overrides several of these with Sacred config values
parser = ArgumentParser("scoring", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--depth", default=8, type=int, help='method')
parser.add_argument('--rb', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--fix_rel', action='store_true')
parser.add_argument("--viz", action='store_true')
parser.add_argument('--data', type = str, default='single_fam_tree')
parser.add_argument('--n_epoch', type=int, default=200)
parser.add_argument('--prob', type=float, default=0)
parser.add_argument("--seed", default=42, type=int, help='random seed')
parser.add_argument("--add", default=1, type=int, help='additional relation')
parser.add_argument("--extra_rel", default=30, type=int, help='extra relation')
parser.add_argument("--noise_rel", default=1, type=int, help='noise relation (half noise)')
parser.add_argument("--n_node", default=1000, type=int, help='n_nodes')
parser.add_argument("--model", default='ComplEx', type=str, help='model name')
parser.add_argument("--period", default=3, type=int, help='the period of relations')
# Sacred experiment results are logged to a local MongoDB instance
client = MongoClient('localhost', 27017)
EXPERIMENT_NAME = 'KG_corrupt'
YOUR_CPU = None
DATABASE_NAME = 'KG_corrupt'
ex = Experiment(EXPERIMENT_NAME)
ex.observers.append(MongoObserver.create(url=YOUR_CPU, db_name=DATABASE_NAME))
warnings.filterwarnings("ignore", category=DeprecationWarning)
from ampligraph.common.aux import timefunction
# @timefunction
def topk_tails(model, hr=None, top_k=5, print_f=False):
    """
    Score every candidate tail for the query (h, r, ?) and return the two
    best-scoring tail entities.

    :param model: trained ampligraph model exposing ``ent_to_idx`` and ``predict``
    :param hr: array-like of two strings (head id, relation id);
        defaults to ('101', '1')
    :param top_k: number of top candidates to print when ``print_f`` is True
    :param print_f: if True, print the top_k candidate tails with their scores
    :return: tuple ``(best_tail, second_best_tail)`` as ints
    """
    # BUG FIX: the default used to be a module-level np.array — a mutable
    # default argument.  Build the default lazily instead.
    if hr is None:
        hr = np.array(['101', '1'])
    # hr = np.array([['101', '1']])
    hr = hr.reshape(1, 2)
    # one candidate triple (h, r, t) per known entity
    C = np.array([np.append(hr, x) for x in list(model.ent_to_idx.keys())])
    tmp = np.array(model.predict(C)).reshape(C.shape[0], 1)  # np.array of shape (n,1)
    df = pd.DataFrame(np.hstack([C, tmp]), columns=['head', 'relation', 'tail_', 'score'])
    def filter_score(row):
        # zero out scores below the threshold (note: this also zeroes negative scores)
        if float(row.score) < 1e-4:
            return 0
        else:
            return float(row.score)
    df['score_filter'] = df.apply(filter_score, axis=1)
    df = df.sort_values('score_filter', ascending=False, inplace=False)
    if print_f:
        print('Top %s solutions for query (%s, %s, ?)' % (top_k, hr[0, 0], hr[0, 1]))
        print(df[:top_k])
        print('-' * 50)
    # print(df.tail_)
    best_tails = list(df.tail_)  # return the best tail
    return int(best_tails[0]), int(best_tails[1])
def ranks_summary(ranks):
    """Print MRR and Hits@{1,3,10,20,50} for an array of ranks."""
    mrr = mrr_score(ranks)
    # compute the five Hits@N metrics in one pass instead of a long tuple unpack
    hits = [hits_at_n_score(ranks, n=k) for k in (1, 3, 10, 20, 50)]
    print("MRR: %f, Hits@1: %f, Hits@3: %f, Hits@10: %f, Hits@20: %f, Hits@50: %f" % (
        mrr, hits[0], hits[1], hits[2], hits[3], hits[4]))
    print('-' * 150)
def eval_corrution(model, filter, args, x, noise = 1, noise_loc = 't'):
    """Corrupt the test triples, rank them with the model, and print a summary.

    NOTE: the parameter name ``filter`` shadows the builtin, and
    ``corrput_location`` is a (pre-existing) typo in corrupt()'s signature;
    both are kept for interface compatibility.
    """
    corrupt_test = corrupt(x, period=args.n_node, noise=noise, corrput_location= noise_loc)
    eval_kwargs = {'strict': False,
                   'model': model,
                   'filter_triples': filter,
                   'verbose': args.verbose,
                   'use_default_protocol': True}
    ranks_corrupt = evaluate_performance(corrupt_test, **eval_kwargs)
    # show the first 10 corrupted triples with their subject/object ranks
    for idx in range(10):
        print('noise ' + str(noise) + ' at ' + noise_loc, corrupt_test[idx], ranks_corrupt[2*idx], ranks_corrupt[2*idx+1])
    ranks_summary(ranks_corrupt)
    print('-'*150)
def corrupt(x, period = 1000, noise = 1, corrput_location = 'h'):
    """Return a copy of the (n, 3) string-triple array with one column shifted.

    The chosen column ('h', 'r' or 't') is replaced by
    ``str((int(value) + noise) % period)``; the other two are unchanged.
    """
    # array of shape (n, 3)
    n_rows, n_cols = x.shape
    assert n_cols == 3
    corrupted = []
    for h, r, t in x:
        if corrput_location == 't':
            t = str((int(t) + noise) % period)  # 1 is noise here
        elif corrput_location == 'r':
            # todo make sure no new relation is introduced
            r = str((int(r) + noise) % period)
        elif corrput_location == 'h':
            h = str((int(h) + noise) % period)
        else:
            raise Exception('No such corruption location %s' % corrput_location)
        corrupted.append([h, r, t])
    return np.array(corrupted)
def traj_graph(lis1, lis2, name='', viz= False, lis1_only = False):
    """Draw a trajectory graph of consecutive tails and save it under ./img/.

    :param lis1: sequence of best-tail node ids; consecutive entries are linked
    :param lis2: sequence of second-best tail ids; adds cross edges
        (lis1[i], lis2[i+1]) unless ``lis1_only`` is True
    :param name: suffix appended to the output file name
    :param viz: if True, also display the figure interactively
    :param lis1_only: if True, omit the lis1 -> lis2 cross edges
    """
    # lis = range(2, 20)
    n = len(lis1)
    assert len(lis1) == len(lis2)
    edges = [(lis1[0], lis1[1])]
    for i in range(1, n-1):
        edge1 = (lis1[i], lis1[i + 1])
        edges.append(edge1)
        if not lis1_only:
            edge2 = (lis1[i], lis2[i + 1])
            edges.append(edge2)
    g = nx.Graph()
    g.add_edges_from(edges)
    pos = nx.spring_layout(g)
    # pos = nx.circular_layout(g)
    nx.draw(g, pos, node_color='b', node_size=5, with_labels=True, width=1)
    # exist_ok=True replaces the old try/except FileExistsError dance
    os.makedirs('./img', exist_ok=True)
    # BUG FIX: save BEFORE show().  With a GUI backend show() blocks, and once
    # the window is closed a later savefig() can write an empty figure.
    plt.savefig('./img/traj_graph' + name)
    if viz: plt.show()
# lis1 = [2, 3, 4, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 35, 36, 37, 38, 39, 40, 41, 42]
# lis2 = [34, 74, 35, 89, 30, 14, 37, 15, 32, 40, 61, 0, 1, 16, 61, 25, 95, 88, 30, 31, 29, 79, 58, 68, 41, 64, 85, 36, 70, 68, 30, 14, 37, 15, 32, 40, 61, 0, 1, 16, 61, 25, 95, 88, 30, 31, 29, 79, 58, 68, 41, 64, 85, 36, 70, 68, 30, 14, 37, 15, 32, 40, 61, 0, 1, 16, 61, 25, 95, 88, 30, 31, 29, 79, 58, 68, 41, 64, 85, 36, 70, 68, 30, 14, 37, 15, 32, 40, 61, 0, 1, 16, 61, 25, 95, 88, 30, 31, 29, 79]
# traj_graph(lis1, lis2, name='', viz=True, lis1_only=True)
# sys.exit()
@ex.config
def get_config():
    """Sacred config function: every local variable assigned below becomes a
    tunable experiment parameter and is injected into main() by name."""
    # unused params
    depth = 8
    rb = True
    # param
    verbose = True
    data = 'single_fam_tree'
    seed = 42
    noise_rel = 8
    n_node = 1000
    model = 'ComplEx'
    rels = '1 2 3 4 5 6 7 8 9 10 11 12'#'1 2 3 4 5 6 7 8 9 10 11 12' #'1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16'
    # other param
    period = 3
@ex.main
def main(model, n_node, noise_rel, seed, data, verbose, rels, period):
    """Sacred entry point: generate the synthetic dataset, train a KG
    embedding model, evaluate it on corrupted triples, and plot a graph of
    second-best tail predictions.  Parameters are injected from get_config().
    """
    # rebuild an args namespace from the Sacred config (ignore the real CLI)
    sys.argv = []
    args = parser.parse_args()
    args.model = model
    args.n_node = n_node
    args.noise_rel = noise_rel
    args.seed = seed
    args.data = data
    args.verbose = verbose
    args.rels = rels
    args.period = period
    print(args)
    # generate the synthetic dataset by shelling out to a generator script;
    # server python path is tried first, local path as fallback
    if args.data == 'single_fam_tree':
        py_server = ['/home/cai.507/anaconda3/envs/ampligraph/bin/python']
        py_local = ['~/anaconda3/bin/python']
        tmp_command = ['../ampligraph/datasets/single_fam_tree.py',
                       '--seed', str(args.seed), '--add', str(args.add),
                       '--extra_rel', str(args.extra_rel),
                       '--noise_rel', str(args.noise_rel),
                       '--rels', str(args.rels),
                       '--n_node', str(args.n_node)]
        command_server = py_server + tmp_command
        command_local = py_local + tmp_command
    else:
        # NOTE(review): this raises a plain string, which is itself a
        # TypeError at runtime — should be e.g. ValueError(...)
        raise ('No such data %s'%args.data)
    try:
        print(command_server)
        print(subprocess.check_output(command_server))
    except FileNotFoundError:
        print(command_local)
        print(subprocess.check_output(command_local))
    X, _ = load_data(args)
    for key, val in X.items():
        print(key, len(val),)
    # get model
    model = get_model(args)
    # Fit the model on training and validation set
    # NOTE(review): `filter` shadows the builtin of the same name
    filter = np.concatenate((X['train'], X['valid'], X['test']))
    print(model,'\n')
    model.fit(X['train'], early_stopping=True,
              early_stopping_params= \
              { 'x_valid': X['valid'], # validation set
                'criteria': 'hits10', # Uses hits10 criteria for early stopping
                'burn_in': 100, # early stopping kicks in after 100 epochs
                'check_interval': 20, # validates every 20th epoch
                'stop_interval': 5, # stops if 5 successive validation checks are bad.
                'x_filter': filter, # Use filter for filtering out positives
                'corruption_entities': 'all', # corrupt using all entities
                'corrupt_side': 's+o' # corrupt subject and object (but not at once)
              })
    # Run the evaluation procedure on the test set (with filtering). Usually, we corrupt subject and object sides separately and compute ranks
    # ranks = evaluate_performance(X['test'], model=model, filter_triples=filter, verbose=args.verbose, use_default_protocol=True) # corrupt subj and obj separately while evaluating
    eval_corrution(model, filter, args, X['test'], noise=0, noise_loc='t')
    eval_corrution(model, filter, args, X['test'], noise=1, noise_loc='t')
    # eval_corrution(model, filter, args, X['test'], noise=1, noise_loc='h')
    # for every node, query (node, step, ?) and link the node to the
    # second-best predicted tail; draw the resulting graph
    queries = []
    best_tails, second_best_tails = [], []
    step = 2
    edges = []
    for i in range(n_node):
        query = [str(i), str(step)]
        best_tail, second_tail = topk_tails(model, hr = np.array(query), top_k=3, print_f=True)
        edge = (i, second_tail)
        edges.append(edge)
    print(edges)
    g = nx.Graph()
    g.add_edges_from(edges)
    # pos = nx.spring_layout(g, seed=42)
    pos = nx.circular_layout(g)
    nx.draw(g, pos, node_color='b', node_size=5, with_labels=True, width=1)
    name = './img/circular_layout_second/rel_' + str(step) + '_' + str(n_node) + '_' + str(rels) # TODO change circular layout
    title = f'{n_node} nodes. Rels {rels}. relation step {step}'
    plt.title(title)
    plt.savefig(name)
    sys.exit()
    # NOTE(review): everything below is dead code (after sys.exit()) and
    # references undefined names n_step and h — remove or restore properly
    for i in range(n_step):
        query = [str(h), str(step)]
        print('iter %s: head %s'%(i, query[0]))
        queries.append(query)
        best_tail, second_best_tail = topk_tails(model, hr=np.array(query), top_k=5, print_f=False)
        best_tails.append(best_tail)
        second_best_tails.append(second_best_tail)
        h = best_tail
    print(best_tails)
    print(second_best_tails)
    traj_graph(best_tails, second_best_tails, name='_' + str(n_step))
if __name__ == "__main__":
    # example of a direct (non-Sacred) invocation:
    # main('ComplEx', 1000, 1, 42, 'single_fam_tree', True)
    ex.run_commandline() # SACRED: this allows you to run Sacred not only from your terminal,
| [
"logging.getLogger",
"numpy.hstack",
"tensorflow.logging.set_verbosity",
"numpy.array",
"sys.exit",
"sacred.observers.MongoObserver.create",
"pymongo.MongoClient",
"argparse.ArgumentParser",
"ampligraph.evaluation.hits_at_n_score",
"networkx.spring_layout",
"numpy.concatenate",
"ampligraph.eva... | [((18, 41), 'matplotlib.use', 'matplotlib.use', (['"""tkagg"""'], {}), "('tkagg')\n", (32, 41), False, 'import matplotlib\n'), ((172, 199), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (189, 199), False, 'import logging\n'), ((725, 767), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (749, 767), True, 'import tensorflow as tf\n'), ((880, 984), 'argparse.ArgumentParser', 'ArgumentParser', (['"""scoring"""'], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'conflict_handler': '"""resolve"""'}), "('scoring', formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n", (894, 984), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((2002, 2033), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (2013, 2033), False, 'from pymongo import MongoClient\n'), ((2115, 2142), 'sacred.Experiment', 'Experiment', (['EXPERIMENT_NAME'], {}), '(EXPERIMENT_NAME)\n', (2125, 2142), False, 'from sacred import Experiment\n'), ((2222, 2284), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (2245, 2284), False, 'import warnings\n'), ((2163, 2220), 'sacred.observers.MongoObserver.create', 'MongoObserver.create', ([], {'url': 'YOUR_CPU', 'db_name': 'DATABASE_NAME'}), '(url=YOUR_CPU, db_name=DATABASE_NAME)\n', (2183, 2220), False, 'from sacred.observers import MongoObserver\n'), ((2374, 2396), 'numpy.array', 'np.array', (["['101', '1']"], {}), "(['101', '1'])\n", (2382, 2396), True, 'import numpy as np\n'), ((3411, 3427), 'ampligraph.evaluation.mrr_score', 'mrr_score', (['ranks'], {}), '(ranks)\n', (3420, 3427), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((4201, 4244), 
'ampligraph.evaluation.evaluate_performance', 'evaluate_performance', (['corrupt_test'], {}), '(corrupt_test, **args_)\n', (4221, 4244), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((5090, 5103), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (5098, 5103), True, 'import numpy as np\n'), ((5488, 5498), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5496, 5498), True, 'import networkx as nx\n'), ((5537, 5556), 'networkx.spring_layout', 'nx.spring_layout', (['g'], {}), '(g)\n', (5553, 5556), True, 'import networkx as nx\n'), ((5595, 5666), 'networkx.draw', 'nx.draw', (['g', 'pos'], {'node_color': '"""b"""', 'node_size': '(5)', 'with_labels': '(True)', 'width': '(1)'}), "(g, pos, node_color='b', node_size=5, with_labels=True, width=1)\n", (5602, 5666), True, 'import networkx as nx\n'), ((5773, 5811), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./img/traj_graph' + name)"], {}), "('./img/traj_graph' + name)\n", (5784, 5811), True, 'import matplotlib.pyplot as plt\n'), ((8363, 8378), 'ampligraph.common.aux.load_data', 'load_data', (['args'], {}), '(args)\n', (8372, 8378), False, 'from ampligraph.common.aux import rel_rank_stat, load_data, eigengap\n'), ((8469, 8484), 'ampligraph.common.aux_play.get_model', 'get_model', (['args'], {}), '(args)\n', (8478, 8484), False, 'from ampligraph.common.aux_play import get_model, viz_distm\n'), ((8551, 8602), 'numpy.concatenate', 'np.concatenate', (["(X['train'], X['valid'], X['test'])"], {}), "((X['train'], X['valid'], X['test']))\n", (8565, 8602), True, 'import numpy as np\n'), ((10278, 10288), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10286, 10288), True, 'import networkx as nx\n'), ((10368, 10389), 'networkx.circular_layout', 'nx.circular_layout', (['g'], {}), '(g)\n', (10386, 10389), True, 'import networkx as nx\n'), ((10394, 10465), 'networkx.draw', 'nx.draw', (['g', 'pos'], {'node_color': '"""b"""', 'node_size': '(5)', 'with_labels': '(True)', 'width': 
'(1)'}), "(g, pos, node_color='b', node_size=5, with_labels=True, width=1)\n", (10401, 10465), True, 'import networkx as nx\n'), ((10662, 10678), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10671, 10678), True, 'import matplotlib.pyplot as plt\n'), ((10683, 10700), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (10694, 10700), True, 'import matplotlib.pyplot as plt\n'), ((10705, 10715), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10713, 10715), False, 'import sys\n'), ((2759, 2778), 'numpy.hstack', 'np.hstack', (['[C, tmp]'], {}), '([C, tmp])\n', (2768, 2778), True, 'import numpy as np\n'), ((3476, 3503), 'ampligraph.evaluation.hits_at_n_score', 'hits_at_n_score', (['ranks'], {'n': '(1)'}), '(ranks, n=1)\n', (3491, 3503), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((3505, 3532), 'ampligraph.evaluation.hits_at_n_score', 'hits_at_n_score', (['ranks'], {'n': '(3)'}), '(ranks, n=3)\n', (3520, 3532), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((3627, 3655), 'ampligraph.evaluation.hits_at_n_score', 'hits_at_n_score', (['ranks'], {'n': '(10)'}), '(ranks, n=10)\n', (3642, 3655), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((3666, 3694), 'ampligraph.evaluation.hits_at_n_score', 'hits_at_n_score', (['ranks'], {'n': '(20)'}), '(ranks, n=20)\n', (3681, 3694), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((3696, 3724), 'ampligraph.evaluation.hits_at_n_score', 'hits_at_n_score', (['ranks'], {'n': '(50)'}), '(ranks, n=50)\n', (3711, 3724), False, 'from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score\n'), ((5684, 5704), 'os.makedirs', 'os.makedirs', (['"""./img"""'], {}), "('./img')\n", (5695, 5704), False, 'import os\n'), ((5758, 5768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(5766, 5768), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2609), 'numpy.append', 'np.append', (['hr', 'x'], {}), '(hr, x)\n', (2602, 2609), True, 'import numpy as np\n'), ((8197, 8236), 'subprocess.check_output', 'subprocess.check_output', (['command_server'], {}), '(command_server)\n', (8220, 8236), False, 'import subprocess\n'), ((8311, 8349), 'subprocess.check_output', 'subprocess.check_output', (['command_local'], {}), '(command_local)\n', (8334, 8349), False, 'import subprocess\n'), ((10154, 10169), 'numpy.array', 'np.array', (['query'], {}), '(query)\n', (10162, 10169), True, 'import numpy as np\n'), ((10918, 10933), 'numpy.array', 'np.array', (['query'], {}), '(query)\n', (10926, 10933), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""This module provides the base classes for pyflamestk."""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2016,2017"
__license__ = "Simplified BSD License"
__version__ = "1.0"
import copy, subprocess
import numpy as np
class Atom(object):
    """Description of an atom: chemical symbol, position and magnetic moment."""

    def __init__(self, symbol, position, magmom = 0):
        """constructor

        Args:
            symbol (str): the standard ISO symbol for an element
            position (list of float): the position of the atom; the units
                are dependent upon use.
            magmom (float): the magnetic moment of the atom"""
        self._symbol = symbol
        self._position = position
        self._magnetic_moment = magmom

    @property
    def symbol(self):
        """str: the standard ISO symbol for an element"""
        return self._symbol

    @symbol.setter
    def symbol(self,s):
        self._symbol = s

    @property
    def position(self):
        """numpy.ndarray: the position of the atom."""
        return np.array(self._position)

    @position.setter
    def position(self,p):
        # BUG FIX: was np.aray(p), which raised AttributeError at runtime
        self._position = np.array(p)

    @property
    def magnetic_moment(self):
        """float: the magnetic moment of the atom."""
        return self._magnetic_moment

    @magnetic_moment.setter
    def magnetic_moment(self,m):
        self._magnetic_moment = m
class CrystalStructure(object):
"""A structural representation of a material system
Note:
The position of the atom must be the same as the basis vectors
defined by the H-matrix.
Todo:
This module is only written to deal with cells with orthogonal
unit vectors.
"""
def __init__(self, obj=None):
"""__init__
Args:
obj (pyflamestk.base.Structure,optional): if this argument is set
then the this constructor acts as a copy constructor.
"""
if obj is None:
assert type(object),pyflamestk.base.Structure
self._noncopy_init()
else:
self._copy_init(obj)
self._ptol = 1.e-5 #tolerance in which to find an atom
self._vacancies = []
self._interstitials = []
def _noncopy_init(self):
self._atoms = []
self._structure_comment = ""
self._lattice_parameter = 1.0
self._h_matrix = np.zeros(shape=[3,3])
def _copy_init(self, obj):
self._structure_comment = obj.structure_comment
self._lattice_parameter = obj.lattice_parameter
self._h_matrix = np.array(obj.h_matrix)
self._atoms = copy.deepcopy(obj.atoms)
@property
def a0(self):
"""float: the lattice parameter of this structure"""
return self._lattice_parameter
@property
def a1(self):
"""float: the length of h1 lattice vector"""
a0 = self.lattice_parameter
h1 = self.h1
a1 = a0 * h1.dot(h1)**0.5
return a1
@property
def a2(self):
"""float: the length of the h2 lattice vector"""
a0 = self.lattice_parameter
h2 = self.h2
a2 = a0 * h2.dot(h2)**0.5
return a2
@property
def a3(self):
"""float: the length of the h3 lattice vector"""
a0 = self.lattice_parameter
h3 = self.h3
a3 = a0*h3.dot(h3)**0.5
return a3
@property
def lattice_parameter(self):
"""float: the length of the lattice parameter, usually in Angstroms."""
return self._lattice_parameter
@lattice_parameter.setter
def lattice_parameter(self, a):
if a > 0:
self._lattice_parameter = a
else:
raise ValueError
@property
def structure_comment(self):
"""str: a comment about the structure"""
return self._structure_comment
@structure_comment.setter
def structure_comment(self, s):
self._structure_comment = s
@property
def h_matrix(self):
"""numpy.array: this is a 3x3 numpy array"""
return self._h_matrix
@h_matrix.setter
def h_matrix(self,h):
self._h_matrix = np.array(h)
@property
def h1(self):
"""numpy.array: this is a 3x1 numpy array"""
return np.array(self._h_matrix[0,:])
@h1.setter
def h1(self, h1):
self._h_matrix[0,:] = np.array(h1)
@property
def h2(self):
"""numpy.array: this is a 3x1 numpy array"""
return np.array(self._h_matrix[1,:])
@h2.setter
def h2(self,h2):
self._h_matrix[1,:] = np.array(h2)
@property
def h3(self):
"""numpy.array: this is a 3x1 numpy array"""
return np.array(self._h_matrix[2,:])
@h3.setter
def h3(self,h3):
self._h_matrix[2,:] = np.array(h3)
@property
def b1(self):
"""numpy.array: this is a 3x1 numpy array, in reciprocal space"""
a1 = np.array(self.h1)
a2 = np.array(self.h2)
a3 = np.array(self.h3)
b1 = 2 * np.pi * np.cross(a2,a3) / np.dot(a1, np.cross(a2,a3))
return b1
@property
def b2(self):
"""numpy.array: this is a 3x1 numpy array, in reciprocal space"""
a1 = np.array(self.h1)
a2 = np.array(self.h2)
a3 = np.array(self.h3)
b2 = 2 * np.pi * np.cross(a3,a1) / np.dot(a2, np.cross(a3,a1))
return b2
@property
def b3(self):
"""numpy.array: this is a 3x1 numpy array, in reciprocal space"""
a1 = np.array(self.h1)
a2 = np.array(self.h2)
a3 = np.array(self.h3)
b3 = 2 * np.pi * np.cross(a1,a2) / np.dot(a3, np.cross(a1,a2))
return b3
@property
def n_atoms(self):
"""float: the number of atoms in the structure"""
n_atoms = len(self._atoms)
return n_atoms
@property
def symbols(self):
"""list of str: a list of the symbols in the structure"""
symbols = []
for a in self._atoms:
if not(a.symbol in symbols):
symbols.append(a.symbol)
return symbols
@property
def atoms(self):
"""list of pyflamestk.base.Atom: a list of the atoms in the structure"""
return self._atoms
@atoms.setter
def atoms(self, atoms):
# this does a deep copy of the atoms
self._atoms = copy.deepcopy(atoms)
@property
def vacancies(self):
"""list of pyflamestk.base.Atom: a list of vacancies in the structure"""
return self._vacancies
@property
def interstitials(self):
return self._interstitials
def check_if_atom_exists_at_position(self,symbol,position):
is_atom_exists = False # initialize return variable
# check to see if atom exists
for a in self._atoms:
diff = [abs(position[i]-a.position[i]) for i in range(3)]
if max(diff) < self._ptol:
print('new: ',symbol,position)
print('exist:',a.symbol,a.position)
is_atom_exists = True
return is_atom_exists # = True
return is_atom_exists # = False
def add_atom(self, symbol, position):
"""add an atom to the structure
Checks to see if an existing atom exists at the position we are trying
to add an atom, then if the position is empty. The atom is added then
added to the list of interstitials.
Args:
symbol (str): the symbol of the atom to be added
position (str): the position of the atom to be added
Raises:
ValueError: If an atom already exists in the position.
"""
is_atom_exists = self.check_if_atom_exists_at_position(symbol,position)
if is_atom_exists is True:
err_msg = "Tried to add {} @ {} an atom already there"
err_msg = err_msg.format(symbol,position)
raise ValueError(err_msg)
else:
self._atoms.append(Atom(symbol,position))
def remove_atom(self,symbol, position):
""" remove an atom from the structure
This method checks for atom at the position, if an atom exists. It is
removed from the structure then the position is recorded as a vacancy.
Args:
symbol (str): the symbol of the atom
position (list of float): the position of the atom
"""
for i,a in enumerate(self._atoms):
if (a.symbol == symbol):
diff = [abs(position[j]-a.position[j]) for j in range(3)]
print(diff)
is_atom = True
for j in range(3):
if diff[j] >= self._ptol:
is_atom = False
if is_atom:
self._atoms.remove(self._atoms[i])
return
# no atom found
err_msg = "Tried to remove {} @ {}, no atom found"
err_msg = err_msg.format(symbol,position)
raise ValueError(err_msg)
def add_interstitial(self,symbol,position):
self.add_atom(symbol,position)
self._interstitiials.append([symbol,position])
def add_vacancy(self,symbol,position):
self.remove_atom(symbol,position)
self._vacancy.append([symbol,position])
def get_number_of_atoms(self, symbol=None):
if symbol is None:
n_atoms = len(self._atoms)
else:
n_atoms = 0
for atom in self._atoms:
if (atom.symbol == symbol):
n_atoms += 1
return n_atoms
def normalize_h_matrix(self):
# change the h_matrix where the lattice parameter is 1.0
for i in range(3):
self._h_matrix[i,:] = self._h_matrix[i,:] * self._lattice_parameter
self._lattice_parameter = 1.0
# calculate the norm of the h1 vector
norm_h1 = np.sqrt(self._h_matrix[i,:].dot(self._h_matrix[i,:]))
# the norm of the h1 vector is the lattice parameter
self._lattice_parameter = norm_h1
# normalize the h-matrix by the norm of h1
for i in range(3):
self._h_matrix[i,:] = self._h_matrix[i,:] / norm_h1
def set_lattice_parameter(self, a1):
# change the h_matrix where the lattice parameter is 1.0
for i in range(3):
self._h_matrix[i,:] = self._h_matrix[i,:] * self._lattice_parameter
self._lattice_parameter = a1
for i in range(3):
self._h_matrix[i,:] = self._h_matrix[i,:] / self._lattice_parameter
def __str__(self):
str_out = "a = {}\n".format(self._lattice_parameter)
str_out += "atom list:\n"
for a in self._atoms:
str_t = "{} {:10.6f} {:10.6f} {:10.6f} {:10.6f}"
str_out += str_t.format(a.symbol,
a.position[0],
a.position[1],
a.position[2],
a.magnetic_moment)
def make_super_cell(structure, sc):
"""makes a supercell from a given cell
Args:
structure (pyflamestk.base.Structure): the base structure from which
the supercell will be made from.
sc (list of int): the number of repeat units in the h1, h2, and h3
directions
"""
supercell = Structure()
supercell.structure_comment = "{}x{}x{}".format(sc[0],sc[1],sc[2])
# set lattice parameter
supercell.lattice_parameter = structure.lattice_parameter
# set h_matrix
h = np.zeros(shape=[3,3])
for i in range(3):
h[i,:] = structure.h_matrix[i,:] * sc[i]
supercell.h_matrix = h
# add supercell atoms
for i in range(sc[0]):
for j in range(sc[1]):
for k in range(sc[2]):
for atom in structure.atoms:
symbol = atom.symbol
position = atom.position
position = [(i+position[0])/sc[0],\
(j+position[1])/sc[1],\
(k+position[2])/sc[2]]
supercell.add_atom(symbol,position)
# return a copy of the supercell
return copy.deepcopy(supercell)
class Simulation(object):
""" an abstract class for simulations
not currently using this yet
"""
def __init__(self):
raise NotImplementedError
def run(self):
raise NotImplementedError
def tail(fname, n_lines, offset=None):
""" replicates the tail command from unix like operations systems
Args:
fname (str): filename
n_lines (int): the number of lines
Note:
this is dependent upon the tail command in the unx operating system
this should actually be rewritten as to be OS agnostic.
"""
cmd_str = "/usr/bin/tail -n {} {}".format(str(n_lines), fname)
p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
lines = stdout.decode('ascii').splitlines()
return lines
| [
"numpy.cross",
"subprocess.Popen",
"numpy.array",
"numpy.zeros",
"copy.deepcopy",
"numpy.aray"
] | [((11638, 11660), 'numpy.zeros', 'np.zeros', ([], {'shape': '[3, 3]'}), '(shape=[3, 3])\n', (11646, 11660), True, 'import numpy as np\n'), ((12282, 12306), 'copy.deepcopy', 'copy.deepcopy', (['supercell'], {}), '(supercell)\n', (12295, 12306), False, 'import copy, subprocess\n'), ((12961, 13051), 'subprocess.Popen', 'subprocess.Popen', (['cmd_str'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (12977, 13051), False, 'import copy, subprocess\n'), ((1047, 1071), 'numpy.array', 'np.array', (['self._position'], {}), '(self._position)\n', (1055, 1071), True, 'import numpy as np\n'), ((1152, 1162), 'numpy.aray', 'np.aray', (['p'], {}), '(p)\n', (1159, 1162), True, 'import numpy as np\n'), ((2425, 2447), 'numpy.zeros', 'np.zeros', ([], {'shape': '[3, 3]'}), '(shape=[3, 3])\n', (2433, 2447), True, 'import numpy as np\n'), ((2624, 2646), 'numpy.array', 'np.array', (['obj.h_matrix'], {}), '(obj.h_matrix)\n', (2632, 2646), True, 'import numpy as np\n'), ((2678, 2702), 'copy.deepcopy', 'copy.deepcopy', (['obj.atoms'], {}), '(obj.atoms)\n', (2691, 2702), False, 'import copy, subprocess\n'), ((4211, 4222), 'numpy.array', 'np.array', (['h'], {}), '(h)\n', (4219, 4222), True, 'import numpy as np\n'), ((4324, 4354), 'numpy.array', 'np.array', (['self._h_matrix[0, :]'], {}), '(self._h_matrix[0, :])\n', (4332, 4354), True, 'import numpy as np\n'), ((4422, 4434), 'numpy.array', 'np.array', (['h1'], {}), '(h1)\n', (4430, 4434), True, 'import numpy as np\n'), ((4536, 4566), 'numpy.array', 'np.array', (['self._h_matrix[1, :]'], {}), '(self._h_matrix[1, :])\n', (4544, 4566), True, 'import numpy as np\n'), ((4633, 4645), 'numpy.array', 'np.array', (['h2'], {}), '(h2)\n', (4641, 4645), True, 'import numpy as np\n'), ((4748, 4778), 'numpy.array', 'np.array', (['self._h_matrix[2, :]'], {}), '(self._h_matrix[2, :])\n', (4756, 4778), True, 'import numpy as np\n'), ((4845, 4857), 
'numpy.array', 'np.array', (['h3'], {}), '(h3)\n', (4853, 4857), True, 'import numpy as np\n'), ((4978, 4995), 'numpy.array', 'np.array', (['self.h1'], {}), '(self.h1)\n', (4986, 4995), True, 'import numpy as np\n'), ((5009, 5026), 'numpy.array', 'np.array', (['self.h2'], {}), '(self.h2)\n', (5017, 5026), True, 'import numpy as np\n'), ((5040, 5057), 'numpy.array', 'np.array', (['self.h3'], {}), '(self.h3)\n', (5048, 5057), True, 'import numpy as np\n'), ((5268, 5285), 'numpy.array', 'np.array', (['self.h1'], {}), '(self.h1)\n', (5276, 5285), True, 'import numpy as np\n'), ((5299, 5316), 'numpy.array', 'np.array', (['self.h2'], {}), '(self.h2)\n', (5307, 5316), True, 'import numpy as np\n'), ((5330, 5347), 'numpy.array', 'np.array', (['self.h3'], {}), '(self.h3)\n', (5338, 5347), True, 'import numpy as np\n'), ((5559, 5576), 'numpy.array', 'np.array', (['self.h1'], {}), '(self.h1)\n', (5567, 5576), True, 'import numpy as np\n'), ((5590, 5607), 'numpy.array', 'np.array', (['self.h2'], {}), '(self.h2)\n', (5598, 5607), True, 'import numpy as np\n'), ((5621, 5638), 'numpy.array', 'np.array', (['self.h3'], {}), '(self.h3)\n', (5629, 5638), True, 'import numpy as np\n'), ((6419, 6439), 'copy.deepcopy', 'copy.deepcopy', (['atoms'], {}), '(atoms)\n', (6432, 6439), False, 'import copy, subprocess\n'), ((5084, 5100), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (5092, 5100), True, 'import numpy as np\n'), ((5113, 5129), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (5121, 5129), True, 'import numpy as np\n'), ((5375, 5391), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (5383, 5391), True, 'import numpy as np\n'), ((5404, 5420), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (5412, 5420), True, 'import numpy as np\n'), ((5665, 5681), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (5673, 5681), True, 'import numpy as np\n'), ((5694, 5710), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', 
(5702, 5710), True, 'import numpy as np\n')] |
import time
from garage.misc import logger
from garage.misc import ext
from garage.misc.overrides import overrides
from garage.tf.algos import BatchPolopt
from garage.tf.optimizers.cg_optimizer import CGOptimizer
from garage.tf.misc import tensor_utils
from garage.core.serializable import Serializable
import tensorflow as tf
import numpy as np
import copy
class CATRPO(BatchPolopt, Serializable):
    """
    Curvature-aided Trust Region Policy Gradient.

    Maintains five policy copies: the live policy, a backup of the previous
    iterate, a random convex mixture of the two, and +/- epsilon
    perturbations of the mixture used for finite-difference
    Hessian-vector products (see ``inner_optimize``).
    """
    def __init__(
            self,
            env,
            policy,
            backup_policy,
            mix_policy,
            pos_eps_policy,
            neg_eps_policy,
            baseline,
            minibatch_size=500,
            n_sub_itr=10,
            optimizer=None,
            optimizer_args=None,
            delta=0.01,
            **kwargs):
        """
        Args:
            env: environment to train in.
            policy: the policy being optimized.
            backup_policy: holds the previous iterate's parameters.
            mix_policy: evaluated at a random convex combination of the
                current and backup parameters.
            pos_eps_policy: mix_policy shifted by +eps*(theta_t - theta_{t-1}).
            neg_eps_policy: mix_policy shifted by -eps*(theta_t - theta_{t-1}).
            baseline: value baseline used for advantage estimation.
            minibatch_size (int): minimum number of timesteps gathered per
                inner update.
            n_sub_itr (int): number of inner (minibatch) updates per
                outer iteration.
            optimizer: trust-region optimizer; defaults to a new CGOptimizer.
            optimizer_args (dict): kwargs for the default CGOptimizer.
            delta (float): mean-KL trust-region radius.
        """
        Serializable.quick_init(self, locals())
        self.optimizer = optimizer
        if optimizer is None:
            if optimizer_args is None:
                optimizer_args = dict()
            self.optimizer = CGOptimizer(**optimizer_args)
        self.opt_info = None
        self.backup_policy = backup_policy
        self.mix_policy = mix_policy
        self.pos_eps_policy = pos_eps_policy
        self.neg_eps_policy = neg_eps_policy
        self.minibatch_size = minibatch_size
        self.n_sub_itr = n_sub_itr
        self.delta = delta
        super(CATRPO, self).__init__(
            env=env, policy=policy, baseline=baseline, **kwargs)
    def generate_mix_policy(self):
        """Set mix_policy's parameters to a uniformly random convex
        combination of the current policy and the backup policy."""
        a = np.random.uniform(0.0, 1.0)
        mix = a * self.policy.get_param_values() + (1 - a) * self.backup_policy.get_param_values()
        self.mix_policy.set_param_values(mix, trainable=True)
    def sample_paths(self, traj_num, sample_policy):
        """Roll out ``traj_num`` trajectories with ``sample_policy``.

        Returns:
            list of dict: one path per trajectory with keys ``observations``,
            ``actions``, ``rewards``, ``advantages`` (baseline-subtracted and
            normalized) and ``returns`` (discounted).
        """
        paths = []
        # Sample Trajectories
        for _ in range(traj_num):
            observations = []
            actions = []
            rewards = []
            observation = self.env.reset()
            for _ in range(self.max_path_length):
                # policy.get_action() returns a pair of values. The second
                # one returns a dictionary, whose values contains
                # sufficient statistics for the action distribution. It
                # should at least contain entries that would be returned
                # by calling policy.dist_info(), which is the non-symbolic
                # analog of policy.dist_info_sym(). Storing these
                # statistics is useful, e.g., when forming importance
                # sampling ratios. In our case it is not needed.
                action, _ = sample_policy.get_action(observation)
                # Recall that the last entry of the tuple stores diagnostic
                # information about the environment. In our case it is not needed.
                next_observation, reward, terminal, _ = self.env.step(action)
                observations.append(observation)
                actions.append(action)
                rewards.append(reward)
                observation = next_observation
                if terminal:
                    # Finish rollout if terminal state reached
                    break
            # We need to compute the empirical return for each time step along the
            # trajectory
            path = dict(
                observations=np.array(observations),
                actions=np.array(actions),
                rewards=np.array(rewards),
            )
            path_baseline = self.baseline.predict(path)
            advantages = []
            returns = []
            return_so_far = 0
            # walk the trajectory backwards to accumulate discounted returns
            for t in range(len(rewards) - 1, -1, -1):
                return_so_far = rewards[t] + self.discount * return_so_far
                returns.append(return_so_far)
                advantage = return_so_far - path_baseline[t]
                advantages.append(advantage)
            # The advantages are stored backwards in time, so we need to revert it
            advantages = np.array(advantages[::-1])
            # And we need to do the same thing for the list of returns
            returns = np.array(returns[::-1])
            # standardize advantages (1e-8 guards against zero variance)
            advantages = (advantages - np.mean(advantages)) / (
                np.std(advantages) + 1e-8)
            path["advantages"] = advantages
            path["returns"] = returns
            paths.append(path)
        return paths
    @staticmethod
    def grad_norm(s_g):
        """Return the L2 norm of a list of gradient arrays, treated as one
        flat concatenated vector."""
        res = s_g[0].flatten()
        for i in range(1,len(s_g)):
            res = np.concatenate((res, s_g[i].flatten()))
        l2_norm = np.linalg.norm(res)
        return l2_norm
    @staticmethod
    def normalize_gradient(s_g):
        """Divide each array in ``s_g`` by the L2 norm of the whole
        concatenated gradient and return the scaled list."""
        res = s_g[0].flatten()
        for i in range(1, len(s_g)):
            res = np.concatenate((res, s_g[i].flatten()))
        l2_norm = np.linalg.norm(res)
        return [x/l2_norm for x in s_g]
    @staticmethod
    def flatten_parameters(params):
        """Concatenate a list of parameter arrays into one 1-D vector."""
        return np.concatenate([p.flatten() for p in params])
    @overrides
    def init_opt(self):
        """Build the TF computation graph.

        Creates surrogate losses for the current, mixture and +/- epsilon
        policies, the KL constraint against the backup policy, and compiles
        the gradient/loss functions into ``self._opt_fun``.
        """
        observations_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1,
        )
        actions_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1,
        )
        advantages_var = tensor_utils.new_tensor(
            name='advantage',
            ndim=1,
            dtype=tf.float32,
        )
        dist = self.policy.distribution
        # KL is measured from the backup (previous) policy to the live one
        old_dist_info_vars = self.backup_policy.dist_info_sym(observations_var)
        dist_info_vars = self.policy.dist_info_sym(observations_var)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        mean_kl = tf.reduce_mean(kl)
        max_kl = tf.reduce_max(kl)
        pos_eps_dist_info_vars = self.pos_eps_policy.dist_info_sym(observations_var)
        neg_eps_dist_info_vars = self.neg_eps_policy.dist_info_sym(observations_var)
        mix_dist_info_vars = self.mix_policy.dist_info_sym(observations_var)
        # formulate as a minimization problem
        # The gradient of the surrogate objective is the policy gradient
        surr = -tf.reduce_mean(dist.log_likelihood_sym(actions_var, dist_info_vars) * advantages_var)
        surr_pos_eps = -tf.reduce_mean(dist.log_likelihood_sym(actions_var, pos_eps_dist_info_vars) * advantages_var)
        surr_neg_eps = -tf.reduce_mean(dist.log_likelihood_sym(actions_var, neg_eps_dist_info_vars) * advantages_var)
        surr_mix = -tf.reduce_mean(dist.log_likelihood_sym(actions_var, mix_dist_info_vars) * advantages_var)
        surr_loglikelihood = tf.reduce_sum(dist.log_likelihood_sym(actions_var, mix_dist_info_vars))
        params = self.policy.get_params(trainable=True)
        mix_params = self.mix_policy.get_params(trainable=True)
        pos_eps_params = self.pos_eps_policy.get_params(trainable=True)
        neg_eps_params = self.neg_eps_policy.get_params(trainable=True)
        grads = tf.gradients(surr, params)
        grad_pos_eps = tf.gradients(surr_pos_eps, pos_eps_params)
        grad_neg_eps = tf.gradients(surr_neg_eps, neg_eps_params)
        grad_mix = tf.gradients(surr_mix, mix_params)
        grad_mix_lh = tf.gradients(surr_loglikelihood, mix_params)
        inputs_list = [observations_var, actions_var, advantages_var]
        self.optimizer.update_opt(loss=surr, target=self.policy,
                                  leq_constraint=(mean_kl, self.delta),
                                  inputs=inputs_list)
        # lazily-compiled TF functions, keyed by role
        self._opt_fun = ext.LazyDict(
            f_loss=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=surr,
                log_name="f_loss",
            ),
            f_train=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=grads,
                log_name="f_grad"
            ),
            f_mix_grad=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=grad_mix,
                log_name="f_mix_grad"
            ),
            f_pos_grad=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=grad_pos_eps
            ),
            f_neg_grad=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=grad_neg_eps
            ),
            f_mix_lh=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=grad_mix_lh
            ),
            f_kl=lambda: tensor_utils.compile_function(
                inputs=inputs_list,
                outputs=[mean_kl, max_kl],
            )
        )
    @overrides
    def train(self):
        """Main training loop: one outer trust-region step per iteration,
        followed by ``n_sub_itr`` curvature-aided inner updates."""
        with tf.Session() as sess:
            # NOTE(review): tf.initialize_all_variables() is the deprecated
            # TF1 spelling of tf.global_variables_initializer()
            sess.run(tf.initialize_all_variables())
            self.start_worker(sess)
            start_time = time.time()
            self.num_samples = 0
            for itr in range(self.start_itr, self.n_itr):
                itr_start_time = time.time()
                with logger.prefix('itr #%d | ' % itr):
                    logger.log("Obtaining new samples...")
                    paths = self.obtain_samples(itr)
                    for path in paths:
                        self.num_samples += len(path["rewards"])
                    logger.log("total num samples..." + str(self.num_samples))
                    logger.log("Processing samples...")
                    samples_data = self.process_samples(itr, paths)
                    logger.log("Logging diagnostics...")
                    self.log_diagnostics(paths)
                    logger.log("Optimizing policy...")
                    self.outer_optimize(samples_data)
                    for sub_itr in range(self.n_sub_itr):
                        logger.log("Minibatch Optimizing...")
                        self.inner_optimize(samples_data)
                    logger.log("Saving snapshot...")
                    params = self.get_itr_snapshot(
                        itr, samples_data)  # , **kwargs)
                    if self.store_paths:
                        params["paths"] = samples_data["paths"]
                    logger.save_itr_params(itr, params)
                    logger.log("Saved")
                    logger.record_tabular('Time', time.time() - start_time)
                    logger.record_tabular(
                        'ItrTime', time.time() - itr_start_time)
                    logger.dump_tabular(with_prefix=False)
                    #if self.plot:
                    #    self.update_plot()
                    #   if self.pause_for_plot:
                    #        input("Plotting evaluation run: Press Enter to "
                    #              "continue...")
            self.shutdown_worker()
    def outer_optimize(self, samples_data):
        """One full-batch trust-region step.

        Computes the policy gradient on the outer batch, stores it in
        ``self.gradient_backup``, backs up the current parameters into
        ``backup_policy`` and then takes the constrained optimizer step.
        """
        logger.log("optimizing policy")
        observations = ext.extract(samples_data, "observations")
        actions = ext.extract(samples_data, "actions")
        advantages = ext.extract(samples_data, "advantages")
        num_traj = len(samples_data["paths"])
        observations = observations[0].reshape(-1, self.env.spec.observation_space.shape[0])
        actions = actions[0].reshape(-1,self.env.spec.action_space.shape[0])
        advantages = advantages[0].reshape(-1)
        inputs = tuple([observations, actions, advantages])
        s_g = self._opt_fun["f_train"](*(list(inputs)))
        #s_g = [x / num_traj for x in s_g]
        # keep a copy of the full-batch gradient for the inner recursion
        self.gradient_backup = copy.deepcopy(s_g)
        g_flat = self.flatten_parameters(s_g)
        loss_before = self._opt_fun["f_loss"](*(list(inputs)))
        # snapshot the pre-update parameters before stepping
        self.backup_policy.set_param_values(self.policy.get_param_values(trainable=True), trainable=True)
        self.optimizer.optimize(inputs, g_flat)
        loss_after = self._opt_fun["f_loss"](*(list(inputs)))
        logger.record_tabular("LossBefore", loss_before)
        logger.record_tabular("LossAfter", loss_after)
        mean_kl, max_kl = self._opt_fun['f_kl'](*(list(inputs)))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)
    def inner_optimize(self, outer_sample):
        """One curvature-aided inner update.

        Samples at least ``minibatch_size`` timesteps from randomly mixed
        policies, estimates the gradient correction from (a) a
        likelihood/parameter-shift term and (b) a finite-difference
        Hessian-vector product, adds both to the previous gradient estimate,
        and takes a trust-region step with the result.
        """
        observations = ext.extract(outer_sample, "observations")
        actions = ext.extract(outer_sample, "actions")
        advantages = ext.extract(outer_sample, "advantages")
        outer_observations = observations[0].reshape(-1, self.env.spec.observation_space.shape[0])
        outer_actions = actions[0].reshape(-1,self.env.spec.action_space.shape[0])
        outer_advantages = advantages[0].reshape(-1)
        n_sub = 0
        sub_paths_all = []
        self.generate_mix_policy()
        sub_paths = self.sample_paths(1, self.mix_policy)
        sub_paths_all.append(sub_paths[0])
        n_sub += len(sub_paths[0]["rewards"])
        self.num_samples += len(sub_paths[0]["rewards"])
        sub_observations = [p["observations"] for p in sub_paths]
        sub_actions = [p["actions"] for p in sub_paths]
        sub_advantages = [p["advantages"] for p in sub_paths]
        eps = 1e-6
        # displacement between the current and previous parameter vectors
        d_vector = self.policy.get_param_values() - self.backup_policy.get_param_values()
        pos_params = self.mix_policy.get_param_values() + d_vector * eps
        neg_params = self.mix_policy.get_param_values() - d_vector * eps
        self.pos_eps_policy.set_param_values(pos_params, trainable=True)
        self.neg_eps_policy.set_param_values(neg_params, trainable=True)
        # first component: dot(likelihood, theta_t - theta_t-1) * policy gradient
        g_mix = self._opt_fun["f_mix_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
        g_lh = self._opt_fun["f_mix_lh"](sub_observations[0], sub_actions[0], sub_advantages[0])
        g_lh = self.flatten_parameters(g_lh)
        inner_product = np.dot(g_lh, d_vector)
        fst = [inner_product * g for g in g_mix]
        # second component: dot(Hessian, theta_t - theta_t-1)
        g_pos = self._opt_fun["f_pos_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
        g_neg = self._opt_fun["f_neg_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
        hv = [(pos - neg) / (2 * eps) for pos, neg in zip(g_pos, g_neg)]
        # keep sampling single trajectories until the minibatch is filled,
        # accumulating both correction terms
        while (n_sub < self.minibatch_size):
            self.generate_mix_policy()
            sub_paths = self.sample_paths(1, self.mix_policy)
            n_sub += len(sub_paths[0]["rewards"])
            self.num_samples += len(sub_paths[0]["rewards"])
            sub_paths_all.append(sub_paths[0])
            sub_observations = [p["observations"] for p in sub_paths]
            sub_actions = [p["actions"] for p in sub_paths]
            sub_advantages = [p["advantages"] for p in sub_paths]
            pos_params = self.mix_policy.get_param_values() + d_vector * eps
            neg_params = self.mix_policy.get_param_values() - d_vector * eps
            self.pos_eps_policy.set_param_values(pos_params, trainable=True)
            self.neg_eps_policy.set_param_values(neg_params, trainable=True)
            # first component: dot(likelihood, theta_t - theta_t-1) * policy gradient
            g_mix = self._opt_fun["f_mix_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
            g_lh = self._opt_fun["f_mix_lh"](sub_observations[0], sub_actions[0], sub_advantages[0])
            g_lh = self.flatten_parameters(g_lh)
            inner_product = np.dot(g_lh, d_vector)
            fst_i = [inner_product * g for g in g_mix]
            fst = [sum(x) for x in zip(fst, fst_i)]
            # second component: dot(Hessian, theta_t - theta_t-1)
            g_pos = self._opt_fun["f_pos_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
            g_neg = self._opt_fun["f_neg_grad"](sub_observations[0], sub_actions[0], sub_advantages[0])
            hv_i = [(pos - neg) / (2 * eps) for pos, neg in zip(g_pos, g_neg)]
            hv = [sum(x) for x in zip(hv, hv_i)]
        # average both terms over the number of sampled trajectories
        fst = [x / len(sub_paths_all) for x in fst]
        hv = [x / len(sub_paths_all) for x in hv]
        fst = [x/10 for x in fst]
        # gradient as sum
        fst_norm = self.grad_norm(fst)
        hv_norm = self.grad_norm(hv)
        backup_gradient_norm = self.grad_norm(self.gradient_backup)
        #self.writer.add_scalar("first_component_norm", fst_norm, j)
        #self.writer.add_scalar("hv_norm", hv_norm, j)
        #self.writer.add_scalar("back_gradient_norm", backup_gradient_norm, j)
        # recursive gradient estimate: g_t = fst + Hv + g_{t-1}
        g_d = [sum(x) for x in zip(fst, hv, self.gradient_backup)]
        self.gradient_backup = copy.deepcopy(g_d)
        avg_returns = np.mean([sum(p["rewards"]) for p in sub_paths_all])
        #self.writer.add_scalar("AverageReturn", avg_returns, j)
        #self.writer.add_scalar("Gradient norm", self.grad_norm(g_d), j)
        print("timesteps: " + str(self.num_samples) + " average return: " + str(avg_returns))
        sub_observations = np.concatenate([p["observations"] for p in sub_paths_all])
        sub_actions = np.concatenate([p["actions"] for p in sub_paths_all])
        sub_advantages = np.concatenate([p["advantages"] for p in sub_paths_all])
        sub_observations = sub_observations.reshape(-1, self.env.spec.observation_space.shape[0])
        sub_actions = sub_actions.reshape(-1, self.env.spec.action_space.shape[0])
        sub_advantages = sub_advantages.reshape(-1)
        #sub_observations = np.concatenate((sub_observations, outer_observations))
        #sub_actions = np.concatenate((sub_actions, outer_actions))
        #sub_advantages = np.concatenate((sub_advantages, outer_advantages))
        print(sub_observations.shape)
        inputs = tuple([sub_observations, sub_actions, sub_advantages])
        # snapshot the current parameters before taking the inner step
        self.backup_policy.set_param_values(self.policy.get_param_values(trainable=True), trainable=True)
        flat_g_d = self.flatten_parameters(g_d)
        self.optimizer.optimize(inputs, flat_g_d)
        # Compute KL divergence after updated
        #sub_observations = [p["observations"] for p in sub_paths]
        #mean_kl, max_kl = self.f_kl(sub_observations[0])
        #self.writer.add_scalar("MeanKL", mean_kl, j)
        #self.writer.add_scalar("MaxKL", max_kl, j)
    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Return the per-iteration snapshot dict that the logger persists."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| [
"garage.misc.logger.dump_tabular",
"tensorflow.gradients",
"garage.tf.misc.tensor_utils.new_tensor",
"numpy.array",
"garage.tf.optimizers.cg_optimizer.CGOptimizer",
"copy.deepcopy",
"numpy.linalg.norm",
"tensorflow.reduce_mean",
"garage.misc.logger.record_tabular",
"numpy.mean",
"tensorflow.Sess... | [((1545, 1572), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1562, 1572), True, 'import numpy as np\n'), ((4591, 4610), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (4605, 4610), True, 'import numpy as np\n'), ((4829, 4848), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (4843, 4848), True, 'import numpy as np\n'), ((5322, 5389), 'garage.tf.misc.tensor_utils.new_tensor', 'tensor_utils.new_tensor', ([], {'name': '"""advantage"""', 'ndim': '(1)', 'dtype': 'tf.float32'}), "(name='advantage', ndim=1, dtype=tf.float32)\n", (5345, 5389), False, 'from garage.tf.misc import tensor_utils\n'), ((5707, 5725), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl'], {}), '(kl)\n', (5721, 5725), True, 'import tensorflow as tf\n'), ((5743, 5760), 'tensorflow.reduce_max', 'tf.reduce_max', (['kl'], {}), '(kl)\n', (5756, 5760), True, 'import tensorflow as tf\n'), ((6960, 6986), 'tensorflow.gradients', 'tf.gradients', (['surr', 'params'], {}), '(surr, params)\n', (6972, 6986), True, 'import tensorflow as tf\n'), ((7010, 7052), 'tensorflow.gradients', 'tf.gradients', (['surr_pos_eps', 'pos_eps_params'], {}), '(surr_pos_eps, pos_eps_params)\n', (7022, 7052), True, 'import tensorflow as tf\n'), ((7076, 7118), 'tensorflow.gradients', 'tf.gradients', (['surr_neg_eps', 'neg_eps_params'], {}), '(surr_neg_eps, neg_eps_params)\n', (7088, 7118), True, 'import tensorflow as tf\n'), ((7138, 7172), 'tensorflow.gradients', 'tf.gradients', (['surr_mix', 'mix_params'], {}), '(surr_mix, mix_params)\n', (7150, 7172), True, 'import tensorflow as tf\n'), ((7195, 7239), 'tensorflow.gradients', 'tf.gradients', (['surr_loglikelihood', 'mix_params'], {}), '(surr_loglikelihood, mix_params)\n', (7207, 7239), True, 'import tensorflow as tf\n'), ((10830, 10861), 'garage.misc.logger.log', 'logger.log', (['"""optimizing policy"""'], {}), "('optimizing policy')\n", (10840, 10861), False, 'from garage.misc import 
logger\n'), ((10885, 10926), 'garage.misc.ext.extract', 'ext.extract', (['samples_data', '"""observations"""'], {}), "(samples_data, 'observations')\n", (10896, 10926), False, 'from garage.misc import ext\n'), ((10945, 10981), 'garage.misc.ext.extract', 'ext.extract', (['samples_data', '"""actions"""'], {}), "(samples_data, 'actions')\n", (10956, 10981), False, 'from garage.misc import ext\n'), ((11003, 11042), 'garage.misc.ext.extract', 'ext.extract', (['samples_data', '"""advantages"""'], {}), "(samples_data, 'advantages')\n", (11014, 11042), False, 'from garage.misc import ext\n'), ((11499, 11517), 'copy.deepcopy', 'copy.deepcopy', (['s_g'], {}), '(s_g)\n', (11512, 11517), False, 'import copy\n'), ((11852, 11900), 'garage.misc.logger.record_tabular', 'logger.record_tabular', (['"""LossBefore"""', 'loss_before'], {}), "('LossBefore', loss_before)\n", (11873, 11900), False, 'from garage.misc import logger\n'), ((11909, 11955), 'garage.misc.logger.record_tabular', 'logger.record_tabular', (['"""LossAfter"""', 'loss_after'], {}), "('LossAfter', loss_after)\n", (11930, 11955), False, 'from garage.misc import logger\n'), ((12030, 12070), 'garage.misc.logger.record_tabular', 'logger.record_tabular', (['"""MeanKL"""', 'mean_kl'], {}), "('MeanKL', mean_kl)\n", (12051, 12070), False, 'from garage.misc import logger\n'), ((12079, 12117), 'garage.misc.logger.record_tabular', 'logger.record_tabular', (['"""MaxKL"""', 'max_kl'], {}), "('MaxKL', max_kl)\n", (12100, 12117), False, 'from garage.misc import logger\n'), ((12187, 12228), 'garage.misc.ext.extract', 'ext.extract', (['outer_sample', '"""observations"""'], {}), "(outer_sample, 'observations')\n", (12198, 12228), False, 'from garage.misc import ext\n'), ((12247, 12283), 'garage.misc.ext.extract', 'ext.extract', (['outer_sample', '"""actions"""'], {}), "(outer_sample, 'actions')\n", (12258, 12283), False, 'from garage.misc import ext\n'), ((12305, 12344), 'garage.misc.ext.extract', 'ext.extract', (['outer_sample', 
'"""advantages"""'], {}), "(outer_sample, 'advantages')\n", (12316, 12344), False, 'from garage.misc import ext\n'), ((13782, 13804), 'numpy.dot', 'np.dot', (['g_lh', 'd_vector'], {}), '(g_lh, d_vector)\n', (13788, 13804), True, 'import numpy as np\n'), ((16492, 16510), 'copy.deepcopy', 'copy.deepcopy', (['g_d'], {}), '(g_d)\n', (16505, 16510), False, 'import copy\n'), ((16845, 16903), 'numpy.concatenate', 'np.concatenate', (["[p['observations'] for p in sub_paths_all]"], {}), "([p['observations'] for p in sub_paths_all])\n", (16859, 16903), True, 'import numpy as np\n'), ((16926, 16979), 'numpy.concatenate', 'np.concatenate', (["[p['actions'] for p in sub_paths_all]"], {}), "([p['actions'] for p in sub_paths_all])\n", (16940, 16979), True, 'import numpy as np\n'), ((17005, 17061), 'numpy.concatenate', 'np.concatenate', (["[p['advantages'] for p in sub_paths_all]"], {}), "([p['advantages'] for p in sub_paths_all])\n", (17019, 17061), True, 'import numpy as np\n'), ((1057, 1086), 'garage.tf.optimizers.cg_optimizer.CGOptimizer', 'CGOptimizer', ([], {}), '(**optimizer_args)\n', (1068, 1086), False, 'from garage.tf.optimizers.cg_optimizer import CGOptimizer\n'), ((4018, 4044), 'numpy.array', 'np.array', (['advantages[::-1]'], {}), '(advantages[::-1])\n', (4026, 4044), True, 'import numpy as np\n'), ((4138, 4161), 'numpy.array', 'np.array', (['returns[::-1]'], {}), '(returns[::-1])\n', (4146, 4161), True, 'import numpy as np\n'), ((8733, 8745), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8743, 8745), True, 'import tensorflow as tf\n'), ((8868, 8879), 'time.time', 'time.time', ([], {}), '()\n', (8877, 8879), False, 'import time\n'), ((15351, 15373), 'numpy.dot', 'np.dot', (['g_lh', 'd_vector'], {}), '(g_lh, d_vector)\n', (15357, 15373), True, 'import numpy as np\n'), ((8776, 8805), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (8803, 8805), True, 'import tensorflow as tf\n'), ((9004, 9015), 'time.time', 'time.time', 
([], {}), '()\n', (9013, 9015), False, 'import time\n'), ((3366, 3388), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (3374, 3388), True, 'import numpy as np\n'), ((3414, 3431), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (3422, 3431), True, 'import numpy as np\n'), ((3457, 3474), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (3465, 3474), True, 'import numpy as np\n'), ((4202, 4221), 'numpy.mean', 'np.mean', (['advantages'], {}), '(advantages)\n', (4209, 4221), True, 'import numpy as np\n'), ((4243, 4261), 'numpy.std', 'np.std', (['advantages'], {}), '(advantages)\n', (4249, 4261), True, 'import numpy as np\n'), ((7569, 7656), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': 'surr', 'log_name': '"""f_loss"""'}), "(inputs=inputs_list, outputs=surr, log_name=\n 'f_loss')\n", (7598, 7656), False, 'from garage.tf.misc import tensor_utils\n'), ((7744, 7832), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': 'grads', 'log_name': '"""f_grad"""'}), "(inputs=inputs_list, outputs=grads, log_name=\n 'f_grad')\n", (7773, 7832), False, 'from garage.tf.misc import tensor_utils\n'), ((7922, 8016), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': 'grad_mix', 'log_name': '"""f_mix_grad"""'}), "(inputs=inputs_list, outputs=grad_mix,\n log_name='f_mix_grad')\n", (7951, 8016), False, 'from garage.tf.misc import tensor_utils\n'), ((8107, 8178), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': 'grad_pos_eps'}), '(inputs=inputs_list, outputs=grad_pos_eps)\n', (8136, 8178), False, 'from garage.tf.misc import tensor_utils\n'), ((8257, 8328), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 
'inputs_list', 'outputs': 'grad_neg_eps'}), '(inputs=inputs_list, outputs=grad_neg_eps)\n', (8286, 8328), False, 'from garage.tf.misc import tensor_utils\n'), ((8405, 8475), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': 'grad_mix_lh'}), '(inputs=inputs_list, outputs=grad_mix_lh)\n', (8434, 8475), False, 'from garage.tf.misc import tensor_utils\n'), ((8548, 8624), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', ([], {'inputs': 'inputs_list', 'outputs': '[mean_kl, max_kl]'}), '(inputs=inputs_list, outputs=[mean_kl, max_kl])\n', (8577, 8624), False, 'from garage.tf.misc import tensor_utils\n'), ((9037, 9070), 'garage.misc.logger.prefix', 'logger.prefix', (["('itr #%d | ' % itr)"], {}), "('itr #%d | ' % itr)\n", (9050, 9070), False, 'from garage.misc import logger\n'), ((9092, 9130), 'garage.misc.logger.log', 'logger.log', (['"""Obtaining new samples..."""'], {}), "('Obtaining new samples...')\n", (9102, 9130), False, 'from garage.misc import logger\n'), ((9387, 9422), 'garage.misc.logger.log', 'logger.log', (['"""Processing samples..."""'], {}), "('Processing samples...')\n", (9397, 9422), False, 'from garage.misc import logger\n'), ((9511, 9547), 'garage.misc.logger.log', 'logger.log', (['"""Logging diagnostics..."""'], {}), "('Logging diagnostics...')\n", (9521, 9547), False, 'from garage.misc import logger\n'), ((9616, 9650), 'garage.misc.logger.log', 'logger.log', (['"""Optimizing policy..."""'], {}), "('Optimizing policy...')\n", (9626, 9650), False, 'from garage.misc import logger\n'), ((9903, 9935), 'garage.misc.logger.log', 'logger.log', (['"""Saving snapshot..."""'], {}), "('Saving snapshot...')\n", (9913, 9935), False, 'from garage.misc import logger\n'), ((10171, 10206), 'garage.misc.logger.save_itr_params', 'logger.save_itr_params', (['itr', 'params'], {}), '(itr, params)\n', (10193, 10206), False, 'from garage.misc import logger\n'), ((10227, 
10246), 'garage.misc.logger.log', 'logger.log', (['"""Saved"""'], {}), "('Saved')\n", (10237, 10246), False, 'from garage.misc import logger\n'), ((10451, 10489), 'garage.misc.logger.dump_tabular', 'logger.dump_tabular', ([], {'with_prefix': '(False)'}), '(with_prefix=False)\n', (10470, 10489), False, 'from garage.misc import logger\n'), ((9787, 9824), 'garage.misc.logger.log', 'logger.log', (['"""Minibatch Optimizing..."""'], {}), "('Minibatch Optimizing...')\n", (9797, 9824), False, 'from garage.misc import logger\n'), ((10297, 10308), 'time.time', 'time.time', ([], {}), '()\n', (10306, 10308), False, 'import time\n'), ((10401, 10412), 'time.time', 'time.time', ([], {}), '()\n', (10410, 10412), False, 'import time\n')] |
import numpy as np
import csv
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import os

###### Read in the data
raw = []
with open('../data/spambase.data') as cf:
    readcsv = csv.reader(cf, delimiter=',')
    for row in readcsv:
        raw.append(row)
# NOTE: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the drop-in replacement.
data = np.array(raw).astype(float)
x = data[:, :-1]   # 57 input features
y = data[:, -1]    # spam / not-spam label

# plot the tree##
ctree = tree.DecisionTreeClassifier().fit(x, y)
plt.figure()
tree.plot_tree(ctree, max_depth=3, filled=True)

# save outputs into an 'output' folder under the current working directory
output_path = os.path.join(os.getcwd(), 'output')
os.makedirs(output_path, exist_ok=True)  # no-op when the folder exists
plt.savefig(os.path.join(output_path, "ctree.pdf"))
plt.show()

score_forest = []
# training both tree and forest with different number of trees
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2)
for ntree in range(1, 60):
    cforest = RandomForestClassifier(n_estimators=ntree, max_depth=20).fit(xtrain, ytrain)
    ypre_forest = cforest.predict(xtest)
    # fraction of correctly classified test samples
    acc = float((ytest == ypre_forest).sum()) / len(ytest)
    score_forest.append(acc)
ctree2 = tree.DecisionTreeClassifier(max_depth=20).fit(xtrain, ytrain)
ypre_tree = ctree2.predict(xtest)
acc2 = float((ytest == ypre_tree).sum()) / len(ytest)

plt.figure()
plt.plot(score_forest, label='Random Forest')
plt.plot([1, 60], [acc2, acc2], label='decision tree')
plt.title("test accuracy vs number of trees")
plt.xlabel("number of trees")
plt.ylabel("test accuracy")  # fixed typo ("arrucary") in the axis label
plt.xlim([1, 60])
plt.legend()
# saves image to the same output folder as above
plt.savefig(os.path.join(output_path, "forest.pdf"))
plt.show()
print('accuracy for decision tree: {:.3}'.format(acc2))
print('accuracy for decision random forest: {:.3}'.format(max(score_forest)))

##### Gets Test Error
with open("../data/spambase.data", "r") as f:
    data = np.array([[float(d) for d in line.strip("\n").split(",")] for line in f.readlines()])
label = data[:, 57]
# map {0 -> +1 (inlier / ham), 1 -> -1 (outlier / spam)} for the one-class SVM
label = label * -2 + 1
xtrain, xtest, ytrain, ytest = train_test_split(data[:, 0:57], label, test_size=0.2)
# train the one-class SVM on "normal" (ham) samples only
idx_normal = np.array(np.where(ytrain == 1)).reshape(-1)
xtrain_normal = xtrain[idx_normal, :]
mdl = OneClassSVM(gamma='auto').fit(xtrain_normal)
ypred = mdl.predict(xtest)
matched = ypred == ytest
acc = matched.sum() / len(matched)
print('The test error: {:.2%}'.format(1-acc)) | [
"matplotlib.pyplot.ylabel",
"numpy.array",
"os.path.exists",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.svm.OneClassSVM",
"csv.reader",
"matplotlib.pyplot.savefig",
"sklearn.model_selection.train_test_split",
"sklearn.en... | [((555, 567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (565, 567), True, 'import matplotlib.pyplot as plt\n'), ((569, 616), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['ctree'], {'max_depth': '(3)', 'filled': '(True)'}), '(ctree, max_depth=3, filled=True)\n', (583, 616), False, 'from sklearn import tree\n'), ((640, 651), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (649, 651), False, 'import os\n'), ((666, 693), 'os.path.join', 'os.path.join', (['cwd', '"""output"""'], {}), "(cwd, 'output')\n", (678, 693), False, 'import os\n'), ((816, 848), 'os.path.join', 'os.path.join', (['output_path', 'fname'], {}), '(output_path, fname)\n', (828, 848), False, 'import os\n'), ((848, 865), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pout'], {}), '(pout)\n', (859, 865), True, 'import matplotlib.pyplot as plt\n'), ((866, 876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (874, 876), True, 'import matplotlib.pyplot as plt\n'), ((991, 1028), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)'}), '(x, y, test_size=0.2)\n', (1007, 1028), False, 'from sklearn.model_selection import train_test_split\n'), ((1445, 1457), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1455, 1457), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1503), 'matplotlib.pyplot.plot', 'plt.plot', (['score_forest'], {'label': '"""Random Forest"""'}), "(score_forest, label='Random Forest')\n", (1466, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1558), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 60]', '[acc2, acc2]'], {'label': '"""decision tree"""'}), "([1, 60], [acc2, acc2], label='decision tree')\n", (1512, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1602), 'matplotlib.pyplot.title', 'plt.title', (['"""test accuracy vs number of trees"""'], {}), "('test accuracy vs number of trees')\n", (1566, 1602), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1632), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of trees"""'], {}), "('number of trees')\n", (1613, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1633, 1660), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""test arrucary"""'], {}), "('test arrucary')\n", (1643, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1678), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[1, 60]'], {}), '([1, 60])\n', (1669, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1690), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1688, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1724), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1722, 1724), False, 'import os\n'), ((1739, 1766), 'os.path.join', 'os.path.join', (['cwd', '"""output"""'], {}), "(cwd, 'output')\n", (1751, 1766), False, 'import os\n'), ((1890, 1922), 'os.path.join', 'os.path.join', (['output_path', 'fname'], {}), '(output_path, fname)\n', (1902, 1922), False, 'import os\n'), ((1922, 1939), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pout'], {}), '(pout)\n', (1933, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1948, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2386), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data[:, 0:57]', 'label'], {'test_size': '(0.2)'}), '(data[:, 0:57], label, test_size=0.2)\n', (2349, 2386), False, 'from sklearn.model_selection import train_test_split\n'), ((328, 357), 'csv.reader', 'csv.reader', (['cf'], {'delimiter': '""","""'}), "(cf, delimiter=',')\n", (338, 357), False, 'import csv\n'), ((700, 727), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (714, 727), False, 'import os\n'), ((733, 757), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (744, 757), False, 'import os\n'), ((1773, 1800), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1787, 1800), 
False, 'import os\n'), ((1806, 1830), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (1817, 1830), False, 'import os\n'), ((420, 433), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (428, 433), True, 'import numpy as np\n'), ((515, 544), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (542, 544), False, 'from sklearn import tree\n'), ((1291, 1332), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': '(20)'}), '(max_depth=20)\n', (1318, 1332), False, 'from sklearn import tree\n'), ((2489, 2514), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (2500, 2514), False, 'from sklearn.svm import OneClassSVM\n'), ((1076, 1132), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'ntree', 'max_depth': '(20)'}), '(n_estimators=ntree, max_depth=20)\n', (1098, 1132), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2411, 2432), 'numpy.where', 'np.where', (['(ytrain == 1)'], {}), '(ytrain == 1)\n', (2419, 2432), True, 'import numpy as np\n')] |
import os
import os.path as osp
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import normalized_cut
from torch_geometric.nn import (NNConv, graclus, max_pool, max_pool_x,
global_mean_pool)
# Let cuDNN autotune convolution algorithms for fixed-size inputs.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
from datasets.hitgraphs import HitGraphDataset
import tqdm
import argparse
# Global experiment configuration / default hyperparameters.
# NOTE(review): several of these (lr, hidden_dim, n_iters) are shadowed by
# the command-line arguments parsed in __main__ — confirm which take effect.
directed = False
sig_weight = 1.0
bkg_weight = 1.0
train_batch_size = 1
valid_batch_size = 1
n_epochs = 1
lr = 0.01
hidden_dim = 64
n_iters = 6
from training.gnn import GNNTrainer
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('using device %s'%device)
import logging
def main(args):
    """Train a GNN hit-graph classifier on the dataset named by ``args.dataset``.

    Reads graphs from ``$GNN_TRAINING_DATA_ROOT/<dataset>``, carves off a 20%
    validation split, builds a GNNTrainer with the CLI-selected model/loss/
    optimizer, and runs the training loop.
    """
    input_dir = osp.join(os.environ['GNN_TRAINING_DATA_ROOT'], args.dataset)
    print(input_dir)
    dataset = HitGraphDataset(input_dir, directed=directed, categorical=args.categorized)

    # Split: first (1 - holdout_frac) graphs for training, the rest for validation.
    n_graphs = len(dataset)
    holdout_frac = 0.20
    n_holdout = math.ceil(n_graphs * holdout_frac)
    boundaries = np.cumsum([n_graphs - n_holdout, 0, n_holdout])
    print(n_graphs, boundaries)
    train_set = torch.utils.data.Subset(dataset, np.arange(start=0, stop=boundaries[0]))
    valid_set = torch.utils.data.Subset(dataset, np.arange(start=boundaries[1], stop=boundaries[2]))
    train_loader = DataLoader(train_set, batch_size=train_batch_size, pin_memory=True)
    valid_loader = DataLoader(valid_set, batch_size=valid_batch_size, shuffle=False)
    train_samples = len(train_set)
    valid_samples = len(valid_set)

    # Infer input/output dimensionality from the first graph's label tensor.
    num_features = dataset.num_features
    num_classes = dataset[0].y.dim() if dataset[0].y.dim() == 1 else dataset[0].y.size(1)
    if args.categorized:
        if not args.forcecats:
            num_classes = int(dataset[0].y.max().item()) + 1 if dataset[0].y.dim() == 1 else dataset[0].y.size(1)
        else:
            num_classes = args.cats

    # the_weights = np.array([1., 1., 1., 1.]) #[0.017, 1., 1., 10.]
    the_weights = np.array([1., 1.])
    trainer = GNNTrainer(category_weights = the_weights,
                         output_dir='/data/gnn_code/hgcal_ldrd/output/'+args.dataset, device=device)
    trainer.logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(log_format)
    trainer.logger.addHandler(console_handler)

    def lr_scaling(optimizer):
        # Shrink the learning rate when the validation loss plateaus.
        from torch.optim.lr_scheduler import ReduceLROnPlateau
        return ReduceLROnPlateau(optimizer, mode='min', verbose=True,
                                 min_lr=5e-7, factor=0.2,
                                 threshold=0.01, patience=5)

    trainer.build_model(name=args.model, loss_func=args.loss,
                        optimizer=args.optimizer, learning_rate=args.lr, lr_scaling=lr_scaling,
                        input_dim=num_features, hidden_dim=args.hidden_dim, n_iters=args.n_iters,
                        output_dim=num_classes)
    trainer.print_model_summary()
    train_summary = trainer.train(train_loader, n_epochs, valid_data_loader=valid_loader)
    print(train_summary)
if __name__ == "__main__":
    # Command-line interface: model/loss/optimizer selection plus the
    # architecture hyperparameters forwarded to trainer.build_model in main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--categorized', '-c', action='store_true', default=False, help='Does the model you want to train have explicit categories?')
    parser.add_argument('--forcecats', action='store_true', default=False, help='Do we want to force the number of categories?')
    parser.add_argument('--cats', default=1, type=int, help='Number of categories to force')
    parser.add_argument('--optimizer', '-o', default='Adam', help='Optimizer to use for training.')
    parser.add_argument('--model', '-m', default='EdgeNet2', help='The model to train.')
    parser.add_argument('--loss', '-l', default='binary_cross_entropy', help='Loss function to use in training.')
    parser.add_argument('--lr', default=0.001, type=float, help='The starting learning rate.')
    parser.add_argument('--hidden_dim', default=64, type=int, help='Latent space size.')
    parser.add_argument('--n_iters', default=6, type=int, help='Number of times to iterate the graph.')
    # Dataset folder name under $GNN_TRAINING_DATA_ROOT.
    parser.add_argument('--dataset', '-d', default='single_photon')
    args = parser.parse_args()
    main(args)
| [
"logging.StreamHandler",
"math.ceil",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"argparse.ArgumentParser",
"torch_geometric.data.DataLoader",
"logging.Formatter",
"os.path.join",
"numpy.array",
"training.gnn.GNNTrainer",
"torch.cuda.is_available",
"numpy.cumsum",
"datasets.hitgraphs.HitGra... | [((865, 925), 'os.path.join', 'osp.join', (["os.environ['GNN_TRAINING_DATA_ROOT']", 'args.dataset'], {}), "(os.environ['GNN_TRAINING_DATA_ROOT'], args.dataset)\n", (873, 925), True, 'import os.path as osp\n'), ((961, 1031), 'datasets.hitgraphs.HitGraphDataset', 'HitGraphDataset', (['path'], {'directed': 'directed', 'categorical': 'args.categorized'}), '(path, directed=directed, categorical=args.categorized)\n', (976, 1031), False, 'from datasets.hitgraphs import HitGraphDataset\n'), ((1096, 1124), 'math.ceil', 'math.ceil', (['(fulllen * tv_frac)'], {}), '(fulllen * tv_frac)\n', (1105, 1124), False, 'import math\n'), ((1136, 1176), 'numpy.cumsum', 'np.cumsum', (['[fulllen - tv_num, 0, tv_num]'], {}), '([fulllen - tv_num, 0, tv_num])\n', (1145, 1176), True, 'import numpy as np\n'), ((1412, 1483), 'torch_geometric.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'train_batch_size', 'pin_memory': '(True)'}), '(train_dataset, batch_size=train_batch_size, pin_memory=True)\n', (1422, 1483), False, 'from torch_geometric.data import DataLoader\n'), ((1503, 1572), 'torch_geometric.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'valid_batch_size', 'shuffle': '(False)'}), '(valid_dataset, batch_size=valid_batch_size, shuffle=False)\n', (1513, 1572), False, 'from torch_geometric.data import DataLoader\n'), ((2071, 2091), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2079, 2091), True, 'import numpy as np\n'), ((2104, 2227), 'training.gnn.GNNTrainer', 'GNNTrainer', ([], {'category_weights': 'the_weights', 'output_dir': "('/data/gnn_code/hgcal_ldrd/output/' + args.dataset)", 'device': 'device'}), "(category_weights=the_weights, output_dir=\n '/data/gnn_code/hgcal_ldrd/output/' + args.dataset, device=device)\n", (2114, 2227), False, 'from training.gnn import GNNTrainer\n'), ((2304, 2327), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2325, 2327), False, 'import 
logging\n'), ((2344, 2417), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (2361, 2417), False, 'import logging\n'), ((3312, 3337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3335, 3337), False, 'import argparse\n'), ((750, 775), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (773, 775), False, 'import torch\n'), ((1258, 1292), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'splits[0]'}), '(start=0, stop=splits[0])\n', (1267, 1292), True, 'import numpy as np\n'), ((1350, 1392), 'numpy.arange', 'np.arange', ([], {'start': 'splits[1]', 'stop': 'splits[2]'}), '(start=splits[1], stop=splits[2])\n', (1359, 1392), True, 'import numpy as np\n'), ((2637, 2750), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'verbose': '(True)', 'min_lr': '(5e-07)', 'factor': '(0.2)', 'threshold': '(0.01)', 'patience': '(5)'}), "(optimizer, mode='min', verbose=True, min_lr=5e-07, factor\n =0.2, threshold=0.01, patience=5)\n", (2654, 2750), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n')] |
import pandas as pd
import numpy as np
from .basecomparison import BaseTwoSorterComparison
from .comparisontools import (do_score_labels, make_possible_match,
make_best_match, make_hungarian_match, do_confusion_matrix, do_count_score,
compute_performance)
class GroundTruthComparison(BaseTwoSorterComparison):
    """
    Compares a sorter to a ground truth.

    This class can:
      * compute a "match" between gt_sorting and tested_sorting
      * optionally compute the score label (TP, FN, CL, FP) for each spike
      * count, for each unit of the GT, the total of each (TP, FN, CL, FP)
        into a DataFrame (GroundTruthComparison.count)
      * compute the confusion matrix (.get_confusion_matrix())
      * compute some performance metrics with several strategies based on
        the count score by unit
      * count well detected units
      * count false positive detected units
      * count redundant units
      * count overmerged units
      * summarize all this

    Parameters
    ----------
    gt_sorting: SortingExtractor
        The first sorting for the comparison
    tested_sorting: SortingExtractor
        The second sorting for the comparison
    gt_name: str
        The name of sorter 1
    tested_name: : str
        The name of sorter 2
    delta_time: float
        Number of ms to consider coincident spikes (default 0.4 ms)
    match_score: float
        Minimum agreement score to match units (default 0.5)
    chance_score: float
        Minimum agreement score for a possible match (default 0.1)
    redundant_score: float
        Agreement score above which units are redundant (default 0.2)
    overmerged_score: float
        Agreement score above which units can be overmerged (default 0.2)
    well_detected_score: float
        Agreement score above which units are well detected (default 0.8)
    exhaustive_gt: bool (default True)
        Tell if the ground truth is "exhaustive" or not. In other words,
        whether the GT has all possible units. It allows more performance
        measurements. For instance, MEArec simulated datasets have
        exhaustive_gt=True
    match_mode: 'hungarian', or 'best'
        Which match is used for counting: 'hungarian' or 'best match'.
    n_jobs: int
        Number of cores to use in parallel. Uses all available if -1
    compute_labels: bool
        If True, labels are computed at instantiation (default False)
    compute_misclassifications: bool
        If True, misclassifications are computed at instantiation (default False)
    verbose: bool
        If True, output is verbose

    Returns
    -------
    sorting_comparison: SortingComparison
        The SortingComparison object
    """

    def __init__(self, gt_sorting, tested_sorting, gt_name=None, tested_name=None,
                 delta_time=0.4, sampling_frequency=None, match_score=0.5, well_detected_score=0.8,
                 redundant_score=0.2, overmerged_score=0.2, chance_score=0.1, exhaustive_gt=False, n_jobs=-1,
                 match_mode='hungarian', compute_labels=False, compute_misclassifications=False, verbose=False):
        if gt_name is None:
            gt_name = 'ground truth'
        if tested_name is None:
            tested_name = 'tested'
        BaseTwoSorterComparison.__init__(self, gt_sorting, tested_sorting, sorting1_name=gt_name,
                                         sorting2_name=tested_name, delta_time=delta_time,
                                         match_score=match_score,  # sampling_frequency=sampling_frequency,
                                         chance_score=chance_score, n_jobs=n_jobs,
                                         verbose=verbose)
        self.exhaustive_gt = exhaustive_gt
        self._compute_misclassifications = compute_misclassifications
        self.redundant_score = redundant_score
        self.overmerged_score = overmerged_score
        self.well_detected_score = well_detected_score
        # Counting is based either on the hungarian match or the best match.
        assert match_mode in ['hungarian', 'best']
        self.match_mode = match_mode
        self._compute_labels = compute_labels
        self._do_count()
        self._labels_st1 = None
        self._labels_st2 = None
        if self._compute_labels:
            self._do_score_labels()
        # confusion matrix is computed on demand
        self._confusion_matrix = None

    def get_labels1(self, unit_id):
        # Lazily compute spike labels (TP/FN/CL/FP) on first access.
        if self._labels_st1 is None:
            self._do_score_labels()
        if unit_id in self.sorting1.get_unit_ids():
            return self._labels_st1[unit_id]
        else:
            raise Exception("Unit_id is not a valid unit")

    def get_labels2(self, unit_id):
        # Lazily compute spike labels (TP/FN/CL/FP) on first access.
        # Fixed: guard on _labels_st2 (the dict returned here), not _labels_st1.
        if self._labels_st2 is None:
            self._do_score_labels()
        if unit_id in self.sorting2.get_unit_ids():
            return self._labels_st2[unit_id]
        else:
            raise Exception("Unit_id is not a valid unit")

    def _do_matching(self):
        """Compute possible, best, and hungarian matches from agreement scores."""
        if self._verbose:
            print("Matching...")
        self.possible_match_12, self.possible_match_21 = make_possible_match(self.agreement_scores, self.chance_score)
        self.best_match_12, self.best_match_21 = make_best_match(self.agreement_scores, self.chance_score)
        self.hungarian_match_12, self.hungarian_match_21 = make_hungarian_match(self.agreement_scores,
                                                                                self.match_score)

    def _do_count(self):
        """
        Do raw count into a dataframe.

        Internally uses the hungarian match or the best match depending on
        self.match_mode (validated in __init__).
        """
        if self.match_mode == 'hungarian':
            match_12 = self.hungarian_match_12
        elif self.match_mode == 'best':
            match_12 = self.best_match_12
        self.count_score = do_count_score(self.event_counts1, self.event_counts2,
                                          match_12, self.match_event_count)

    def _do_confusion_matrix(self):
        """Compute and cache the confusion matrix for the selected match mode."""
        if self._verbose:
            print("Computing confusion matrix...")
        if self.match_mode == 'hungarian':
            match_12 = self.hungarian_match_12
        elif self.match_mode == 'best':
            match_12 = self.best_match_12
        self._confusion_matrix = do_confusion_matrix(self.event_counts1, self.event_counts2, match_12,
                                                     self.match_event_count)

    def get_confusion_matrix(self):
        """
        Computes the confusion matrix.

        Returns
        -------
        confusion_matrix: pandas.DataFrame
            The confusion matrix
        """
        if self._confusion_matrix is None:
            self._do_confusion_matrix()
        return self._confusion_matrix

    def _do_score_labels(self):
        """Label every spike (TP/FN/CL/FP) — hungarian match only."""
        assert self.match_mode == 'hungarian', \
            'Labels (TP, FP, FN) can be computed only with hungarian match'
        if self._verbose:
            print("Adding labels...")
        self._labels_st1, self._labels_st2 = do_score_labels(self.sorting1, self.sorting2,
                                                             self.delta_frames, self.hungarian_match_12,
                                                             self._compute_misclassifications)

    def get_performance(self, method='by_unit', output='pandas'):
        """
        Get performance rate with several methods:
          * 'raw_count' : just render the raw count table
          * 'by_unit' : render perf as rate unit by unit of the GT
          * 'pooled_with_average' : compute rate unit by unit and average

        Parameters
        ----------
        method: str
            'raw_count', 'by_unit', or 'pooled_with_average'
        output: str
            'pandas' or 'dict'

        Returns
        -------
        perf: pandas dataframe/series (or dict)
            dataframe/series (based on 'output') with performance entries
        """
        possibles = ('raw_count', 'by_unit', 'pooled_with_average')
        if method not in possibles:
            raise Exception("'method' can be " + ' or '.join(possibles))
        if method == 'raw_count':
            perf = self.count_score
        elif method == 'by_unit':
            perf = compute_performance(self.count_score)
        elif method == 'pooled_with_average':
            perf = self.get_performance(method='by_unit').mean(axis=0)
        if output == 'dict' and isinstance(perf, pd.Series):
            perf = perf.to_dict()
        return perf

    def print_performance(self, method='pooled_with_average'):
        """
        Print performance with the selected method ('by_unit' or
        'pooled_with_average'); values are shown in percent.
        """
        template_txt_performance = _template_txt_performance
        if method == 'by_unit':
            perf = self.get_performance(method=method, output='pandas')
            perf = perf * 100
            d = {k: perf[k].tolist() for k in perf.columns}
            txt = template_txt_performance.format(method=method, **d)
            print(txt)
        elif method == 'pooled_with_average':
            perf = self.get_performance(method=method, output='pandas')
            perf = perf * 100
            txt = template_txt_performance.format(method=method, **perf.to_dict())
            print(txt)

    def print_summary(self, well_detected_score=None, redundant_score=None, overmerged_score=None):
        """
        Print a global performance summary that depends on the context:
          * exhaustive = True/False
          * how many gt units (one or several)

        This summary mixes several performance metrics.
        """
        txt = _template_summary_part1
        d = dict(
            num_gt=len(self.unit1_ids),
            num_tested=len(self.unit2_ids),
            num_well_detected=self.count_well_detected_units(well_detected_score),
            num_redundant=self.count_redundant_units(redundant_score),
            num_overmerged=self.count_overmerged_units(overmerged_score),
        )
        # The false-positive / bad counts only make sense for exhaustive GT.
        if self.exhaustive_gt:
            txt = txt + _template_summary_part2
            d['num_false_positive_units'] = self.count_false_positive_units()
            d['num_bad'] = self.count_bad_units()
        txt = txt.format(**d)
        print(txt)

    def get_well_detected_units(self, well_detected_score=None):
        """
        Return units list of "well detected units" from tested_sorting.

        "well detected units" are defined as units in tested that
        are well matched to GT units.

        Parameters
        ----------
        well_detected_score: float (default 0.8)
            The agreement score above which tested units
            are counted as "well detected".
        """
        if well_detected_score is not None:
            self.well_detected_score = well_detected_score
        matched_units2 = self.hungarian_match_12
        well_detected_ids = []
        for u2 in self.unit2_ids:
            if u2 in list(matched_units2.values):
                u1 = self.hungarian_match_21[u2]
                score = self.agreement_scores.at[u1, u2]
                if score >= self.well_detected_score:
                    well_detected_ids.append(u2)
        return well_detected_ids

    def count_well_detected_units(self, well_detected_score=None):
        """
        Count how many well detected units.
        kwargs are the same as get_well_detected_units.
        """
        # Fixed: default None (keep the stored threshold), consistent with
        # the other count_* methods and with print_summary's optional arg.
        return len(self.get_well_detected_units(well_detected_score=well_detected_score))

    def get_false_positive_units(self, redundant_score=None):
        """
        Return units list of "false positive units" from tested_sorting.

        "false positive units" are defined as units in tested that
        are not matched at all in GT units.

        Need exhaustive_gt=True

        Parameters
        ----------
        redundant_score: float (default 0.2)
            The agreement score below which tested units
            are counted as "false positive" (and not "redundant").
        """
        assert self.exhaustive_gt, 'false_positive_units list is valid only if exhaustive_gt=True'
        if redundant_score is not None:
            self.redundant_score = redundant_score
        matched_units2 = list(self.hungarian_match_12.values)
        false_positive_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2:
                if self.best_match_21[u2] == -1:
                    false_positive_ids.append(u2)
                else:
                    u1 = self.best_match_21[u2]
                    score = self.agreement_scores.at[u1, u2]
                    if score < self.redundant_score:
                        false_positive_ids.append(u2)
        return false_positive_ids

    def count_false_positive_units(self, redundant_score=None):
        """
        See get_false_positive_units().
        """
        return len(self.get_false_positive_units(redundant_score))

    def get_redundant_units(self, redundant_score=None):
        """
        Return "redundant units"

        "redundant units" are defined as units in tested
        that match a GT unit with a big agreement score
        but are not the best match. In other words, units
        in GT that are detected twice or more.

        Parameters
        ----------
        redundant_score: float (default 0.2)
            The agreement score above which tested units
            are counted as "redundant" (and not "false positive").
        """
        assert self.exhaustive_gt, 'redundant_units list is valid only if exhaustive_gt=True'
        if redundant_score is not None:
            self.redundant_score = redundant_score
        matched_units2 = list(self.hungarian_match_12.values)
        redundant_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2 and self.best_match_21[u2] != -1:
                u1 = self.best_match_21[u2]
                if u2 != self.best_match_12[u1]:
                    score = self.agreement_scores.at[u1, u2]
                    if score >= self.redundant_score:
                        redundant_ids.append(u2)
        return redundant_ids

    def count_redundant_units(self, redundant_score=None):
        """
        See get_redundant_units().
        """
        return len(self.get_redundant_units(redundant_score=redundant_score))

    def get_overmerged_units(self, overmerged_score=None):
        """
        Return "overmerged units"

        "overmerged units" are defined as units in tested
        that match more than one GT unit with an agreement score larger than overmerged_score.

        Parameters
        ----------
        overmerged_score: float (default 0.2)
            Tested units with 2 or more agreement scores above 'overmerged_score'
            are counted as "overmerged".
        """
        assert self.exhaustive_gt, 'overmerged_units list is valid only if exhaustive_gt=True'
        if overmerged_score is not None:
            self.overmerged_score = overmerged_score
        overmerged_ids = []
        for u2 in self.unit2_ids:
            scores = self.agreement_scores.loc[:, u2]
            if len(np.where(scores > self.overmerged_score)[0]) > 1:
                overmerged_ids.append(u2)
        return overmerged_ids

    def count_overmerged_units(self, overmerged_score=None):
        """
        See get_overmerged_units().
        """
        return len(self.get_overmerged_units(overmerged_score=overmerged_score))

    def get_bad_units(self):
        """
        Return units list of "bad units".

        "bad units" are defined as units in tested that are not
        in the best match list of GT units.

        So it is the union of "false positive units" + "redundant units".

        Need exhaustive_gt=True
        """
        assert self.exhaustive_gt, 'bad_units list is valid only if exhaustive_gt=True'
        matched_units2 = list(self.hungarian_match_12.values)
        bad_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2:
                bad_ids.append(u2)
        return bad_ids

    def count_bad_units(self):
        """
        See get_bad_units
        """
        return len(self.get_bad_units())
# Useful also for gathercomparison.
# Text template for per-method performance metrics; values are pre-scaled
# to percent by print_performance before formatting.
_template_txt_performance = """PERFORMANCE ({method})
-----------
ACCURACY: {accuracy}
RECALL: {recall}
PRECISION: {precision}
FALSE DISCOVERY RATE: {false_discovery_rate}
MISS RATE: {miss_rate}
"""
# Summary header printed for every comparison.
_template_summary_part1 = """SUMMARY
-------
GT num_units: {num_gt}
TESTED num_units: {num_tested}
num_well_detected: {num_well_detected}
num_redundant: {num_redundant}
num_overmerged: {num_overmerged}
"""
# Extra summary lines, appended only when exhaustive_gt is True.
_template_summary_part2 = """num_false_positive_units {num_false_positive_units}
num_bad: {num_bad}
"""
def compare_sorter_to_ground_truth(*args, **kwargs):
    # Thin functional wrapper: forwards every argument to the class constructor.
    return GroundTruthComparison(*args, **kwargs)
# Reuse the class docstring so help() on the function shows the full parameter list.
compare_sorter_to_ground_truth.__doc__ = GroundTruthComparison.__doc__
| [
"numpy.where"
] | [((15093, 15133), 'numpy.where', 'np.where', (['(scores > self.overmerged_score)'], {}), '(scores > self.overmerged_score)\n', (15101, 15133), True, 'import numpy as np\n')] |
import numpy as np
from scipy.integrate import quad
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

# Example for the Monte Carlo method: estimate pi by drawing uniform points
# in the unit square and counting how many fall under the quarter-circle arc.
hits = 0
misses = 0
for _ in range(10000):
    u = random.uniform(0, 1)
    v = random.uniform(0, 1)
    if v <= np.sqrt(1 - u ** 2):
        hits += 1
    else:
        misses += 1
# Area ratio (quarter circle / square) is pi/4, so scale the hit rate by 4.
pi = 4 * hits / (misses + hits)
print(pi)
| [
"random.uniform",
"numpy.sqrt"
] | [((223, 243), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (237, 243), False, 'import random\n'), ((249, 269), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (263, 269), False, 'import random\n'), ((279, 298), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (286, 298), True, 'import numpy as np\n')] |
import simulation.quadrotor3 as quad
import simulation.config as cfg
import simulation.animation as ani
import matplotlib.pyplot as pl
import numpy as np
import random
from math import pi, sin, cos
import gym
from gym import error, spaces, utils
from gym.utils import seeding
"""
Environment wrapper for a climb & hover task. The goal of this task is for the agent to climb from [0, 0, 0]^T
to [0, 0, 1.5]^T, and to remain at that altitude until the the episode terminates at T=15s.
"""
class StaticWaypointEnv(gym.Env):
    def __init__(self):
        """Build the static-waypoint environment: goal state, spaces, and quadrotor sim."""
        # NOTE(review): this is a dead local — gym expects `metadata` as a class
        # attribute, so this assignment has no effect. Confirm and hoist if needed.
        metadata = {'render.modes': ['human']}

        # environment parameters
        # Goal position (3x1 column vector). NOTE(review): the code targets
        # [1.5, 0, 0]^T while the module docstring says [0, 0, 1.5]^T — confirm
        # which axis is intended.
        self.goal_xyz = np.array([[1.5],
                                [0.],
                                [0.]])
        # Goal attitude stored as sin/cos of the Euler angles (all zero => level).
        self.goal_zeta_sin = np.sin(np.array([[0.],
                                        [0.],
                                        [0.]]))
        self.goal_zeta_cos = np.cos(np.array([[0.],
                                        [0.],
                                        [0.]]))
        # Goal body rates (3x1, zero => hover).
        self.goal_pqr = np.array([[0.],
                                [0.],
                                [0.]])
        # Position tolerance (m) inside which the distance reward is capped.
        self.goal_thresh = 0.05
        self.t = 0      # elapsed sim time (s)
        self.T = 5      # episode horizon (s)
        # Placeholder arrays giving the action/observation sizes (4 rotors, 31 obs).
        self.action_space = np.zeros((4,))
        self.observation_space = np.zeros((31,))

        # simulation parameters
        self.params = cfg.params
        self.iris = quad.Quadrotor(self.params)
        self.sim_dt = self.params["dt"]                    # inner physics step (s)
        self.ctrl_dt = 0.05                                # control interval (s)
        self.steps = range(int(self.ctrl_dt/self.sim_dt)) # sim substeps per control step
        self.action_bound = [0, self.iris.max_rpm]
        self.H = int(self.T/self.ctrl_dt)                  # horizon in control steps
        self.hov_rpm = self.iris.hov_rpm
        self.trim = [self.hov_rpm, self.hov_rpm,self.hov_rpm, self.hov_rpm]
        self.trim_np = np.array(self.trim)
        self.bandwidth = 25.

        # NOTE(review): the quad is initialized AT the goal position with level
        # attitude and zero rates — confirm this is the intended start state.
        self.iris.set_state(self.goal_xyz, np.arcsin(self.goal_zeta_sin), np.array([[0.],[0.],[0.]]), np.array([[0.],[0.],[0.]]))
        xyz, zeta, _, pqr = self.iris.get_state()

        # Cached error vectors and their norms, updated each step by reward().
        self.vec_xyz = xyz-self.goal_xyz
        self.vec_zeta_sin = np.sin(zeta)-self.goal_zeta_sin
        self.vec_zeta_cos = np.cos(zeta)-self.goal_zeta_cos
        self.vec_pqr = pqr-self.goal_pqr
        self.dist_norm = np.linalg.norm(self.vec_xyz)
        self.att_norm_sin = np.linalg.norm(self.vec_zeta_sin)
        self.att_norm_cos = np.linalg.norm(self.vec_zeta_cos)
        self.ang_norm = np.linalg.norm(self.vec_pqr)

        # Rendering handles (created lazily by the animation code).
        self.fig = None
        self.axis3d = None
        self.v = None
def reward(self, state, action):
xyz, zeta, _, pqr = state
s_zeta = np.sin(zeta)
c_zeta = np.cos(zeta)
curr_dist = xyz-self.goal_xyz
curr_att_sin = s_zeta-self.goal_zeta_sin
curr_att_cos = c_zeta-self.goal_zeta_cos
curr_ang = pqr-self.goal_pqr
dist_hat = np.linalg.norm(curr_dist)
att_hat_sin = np.linalg.norm(curr_att_sin)
att_hat_cos = np.linalg.norm(curr_att_cos)
ang_hat = np.linalg.norm(curr_ang)
# agent gets a negative reward based on how far away it is from the desired goal state
if dist_hat > self.goal_thresh:
dist_rew = 1/dist_hat
else:
dist_rew = 1/self.goal_thresh
att_rew = 0*((self.att_norm_sin-att_hat_sin)+(self.att_norm_cos-att_hat_cos))
ang_rew = 0*(self.ang_norm-ang_hat)
if dist_hat < 0.05:
dist_rew += 0
self.dist_norm = dist_hat
self.att_norm_sin = att_hat_sin
self.att_norm_cos = att_hat_cos
self.ang_norm = ang_hat
self.vec_xyz = curr_dist
self.vec_zeta_sin = curr_att_sin
self.vec_zeta_cos = curr_att_cos
self.vec_pqr = curr_ang
ctrl_rew = 0#-np.sum(((action/self.action_bound[1])**2))
time_rew = 0#1.
return dist_rew, att_rew, ang_rew, ctrl_rew, time_rew
def terminal(self, pos):
xyz, zeta = pos
mask1 = 0#zeta[0:2] > pi/2
mask2 = 0#zeta[0:2] < -pi/2
mask3 = self.dist_norm > 2
if np.sum(mask1) > 0 or np.sum(mask2) > 0 or np.sum(mask3) > 0:
return True
#elif self.goal_achieved:
#print("Goal Achieved!")
# return True
elif self.t >= self.T:
print("Sim time reached")
return True
else:
return False
def step(self, action):
"""
Parameters
----------
action :
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
an environment-specific object representing your observation of
the environment.
reward (float) :
amount of reward achieved by the previous action. The scale
varies between environments, but the goal is always to increase
your total reward.
episode_over (bool) :
whether it's time to reset the environment again. Most (but not
all) tasks are divided up into well-defined episodes, and done
being True indicates the episode has terminated. (For example,
perhaps the pole tipped too far, or you lost your last life.)
info (dict) :
diagnostic information useful for debugging. It can sometimes
be useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change).
However, official evaluations of your agent are not allowed to
use this for learning.
"""
for _ in self.steps:
xyz, zeta, uvw, pqr = self.iris.step(self.trim_np+action*self.bandwidth)
sin_zeta = np.sin(zeta)
cos_zeta = np.cos(zeta)
a = (action/self.action_bound[1]).tolist()
next_state = xyz.T.tolist()[0]+sin_zeta.T.tolist()[0]+cos_zeta.T.tolist()[0]+uvw.T.tolist()[0]+pqr.T.tolist()[0]
info = self.reward((xyz, zeta, uvw, pqr), action)
done = self.terminal((xyz, zeta))
reward = sum(info)
goals = self.vec_xyz.T.tolist()[0]+self.vec_zeta_sin.T.tolist()[0]+self.vec_zeta_cos.T.tolist()[0]+self.vec_pqr.T.tolist()[0]
next_state = next_state+a+goals
self.t += self.ctrl_dt
return next_state, reward, done, info
def reset(self):
self.t = 0.
self.iris.set_state(np.array([[0.],[0.],[0.]]), np.sin(self.goal_zeta_sin), np.array([[0.],[0.],[0.]]), np.array([[0.],[0.],[0.]]))
xyz, zeta, uvw, pqr = self.iris.get_state()
self.iris.set_rpm(np.array(self.trim))
sin_zeta = np.sin(zeta)
cos_zeta = np.cos(zeta)
self.vec_xyz = xyz-self.goal_xyz
self.vec_zeta_sin = sin_zeta-self.goal_zeta_sin
self.vec_zeta_cos = cos_zeta-self.goal_zeta_cos
a = [x/self.action_bound[1] for x in self.trim]
goals = self.vec_xyz.T.tolist()[0]+self.vec_zeta_sin.T.tolist()[0]+self.vec_zeta_cos.T.tolist()[0]+self.vec_pqr.T.tolist()[0]
state = xyz.T.tolist()[0]+sin_zeta.T.tolist()[0]+cos_zeta.T.tolist()[0]+uvw.T.tolist()[0]+pqr.T.tolist()[0]+a+goals
return state
def render(self, mode='human', close=False):
if self.fig is None:
# rendering parameters
pl.close("all")
pl.ion()
self.fig = pl.figure("Hover")
self.axis3d = self.fig.add_subplot(111, projection='3d')
self.vis = ani.Visualization(self.iris, 6, quaternion=True)
pl.figure("Hover")
self.axis3d.cla()
self.vis.draw3d_quat(self.axis3d)
self.vis.draw_goal(self.axis3d, self.goal_xyz)
self.axis3d.set_xlim(-3, 3)
self.axis3d.set_ylim(-3, 3)
self.axis3d.set_zlim(-3, 3)
self.axis3d.set_xlabel('West/East [m]')
self.axis3d.set_ylabel('South/North [m]')
self.axis3d.set_zlabel('Down/Up [m]')
self.axis3d.set_title("Time %.3f s" %(self.t))
pl.pause(0.001)
pl.draw()
| [
"simulation.animation.Visualization",
"matplotlib.pyplot.ion",
"numpy.arcsin",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.sum",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"simulation.q... | [((669, 700), 'numpy.array', 'np.array', (['[[1.5], [0.0], [0.0]]'], {}), '([[1.5], [0.0], [0.0]])\n', (677, 700), True, 'import numpy as np\n'), ((1095, 1126), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (1103, 1126), True, 'import numpy as np\n'), ((1286, 1300), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (1294, 1300), True, 'import numpy as np\n'), ((1334, 1349), 'numpy.zeros', 'np.zeros', (['(31,)'], {}), '((31,))\n', (1342, 1349), True, 'import numpy as np\n'), ((1436, 1463), 'simulation.quadrotor3.Quadrotor', 'quad.Quadrotor', (['self.params'], {}), '(self.params)\n', (1450, 1463), True, 'import simulation.quadrotor3 as quad\n'), ((1823, 1842), 'numpy.array', 'np.array', (['self.trim'], {}), '(self.trim)\n', (1831, 1842), True, 'import numpy as np\n'), ((2282, 2310), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vec_xyz'], {}), '(self.vec_xyz)\n', (2296, 2310), True, 'import numpy as np\n'), ((2339, 2372), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vec_zeta_sin'], {}), '(self.vec_zeta_sin)\n', (2353, 2372), True, 'import numpy as np\n'), ((2401, 2434), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vec_zeta_cos'], {}), '(self.vec_zeta_cos)\n', (2415, 2434), True, 'import numpy as np\n'), ((2459, 2487), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vec_pqr'], {}), '(self.vec_pqr)\n', (2473, 2487), True, 'import numpy as np\n'), ((2660, 2672), 'numpy.sin', 'np.sin', (['zeta'], {}), '(zeta)\n', (2666, 2672), True, 'import numpy as np\n'), ((2690, 2702), 'numpy.cos', 'np.cos', (['zeta'], {}), '(zeta)\n', (2696, 2702), True, 'import numpy as np\n'), ((2905, 2930), 'numpy.linalg.norm', 'np.linalg.norm', (['curr_dist'], {}), '(curr_dist)\n', (2919, 2930), True, 'import numpy as np\n'), ((2953, 2981), 'numpy.linalg.norm', 'np.linalg.norm', (['curr_att_sin'], {}), '(curr_att_sin)\n', (2967, 2981), True, 'import numpy as np\n'), ((3004, 3032), 'numpy.linalg.norm', 'np.linalg.norm', 
(['curr_att_cos'], {}), '(curr_att_cos)\n', (3018, 3032), True, 'import numpy as np\n'), ((3051, 3075), 'numpy.linalg.norm', 'np.linalg.norm', (['curr_ang'], {}), '(curr_ang)\n', (3065, 3075), True, 'import numpy as np\n'), ((5836, 5848), 'numpy.sin', 'np.sin', (['zeta'], {}), '(zeta)\n', (5842, 5848), True, 'import numpy as np\n'), ((5868, 5880), 'numpy.cos', 'np.cos', (['zeta'], {}), '(zeta)\n', (5874, 5880), True, 'import numpy as np\n'), ((6731, 6743), 'numpy.sin', 'np.sin', (['zeta'], {}), '(zeta)\n', (6737, 6743), True, 'import numpy as np\n'), ((6763, 6775), 'numpy.cos', 'np.cos', (['zeta'], {}), '(zeta)\n', (6769, 6775), True, 'import numpy as np\n'), ((7635, 7653), 'matplotlib.pyplot.figure', 'pl.figure', (['"""Hover"""'], {}), "('Hover')\n", (7644, 7653), True, 'import matplotlib.pyplot as pl\n'), ((8092, 8107), 'matplotlib.pyplot.pause', 'pl.pause', (['(0.001)'], {}), '(0.001)\n', (8100, 8107), True, 'import matplotlib.pyplot as pl\n'), ((8116, 8125), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (8123, 8125), True, 'import matplotlib.pyplot as pl\n'), ((799, 830), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (807, 830), True, 'import numpy as np\n'), ((953, 984), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (961, 984), True, 'import numpy as np\n'), ((1916, 1945), 'numpy.arcsin', 'np.arcsin', (['self.goal_zeta_sin'], {}), '(self.goal_zeta_sin)\n', (1925, 1945), True, 'import numpy as np\n'), ((1947, 1978), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (1955, 1978), True, 'import numpy as np\n'), ((1975, 2006), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (1983, 2006), True, 'import numpy as np\n'), ((2123, 2135), 'numpy.sin', 'np.sin', (['zeta'], {}), '(zeta)\n', (2129, 2135), True, 'import numpy as np\n'), ((2183, 2195), 'numpy.cos', 'np.cos', (['zeta'], {}), '(zeta)\n', 
(2189, 2195), True, 'import numpy as np\n'), ((6501, 6532), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (6509, 6532), True, 'import numpy as np\n'), ((6529, 6555), 'numpy.sin', 'np.sin', (['self.goal_zeta_sin'], {}), '(self.goal_zeta_sin)\n', (6535, 6555), True, 'import numpy as np\n'), ((6557, 6588), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (6565, 6588), True, 'import numpy as np\n'), ((6585, 6616), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (6593, 6616), True, 'import numpy as np\n'), ((6691, 6710), 'numpy.array', 'np.array', (['self.trim'], {}), '(self.trim)\n', (6699, 6710), True, 'import numpy as np\n'), ((7394, 7409), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (7402, 7409), True, 'import matplotlib.pyplot as pl\n'), ((7422, 7430), 'matplotlib.pyplot.ion', 'pl.ion', ([], {}), '()\n', (7428, 7430), True, 'import matplotlib.pyplot as pl\n'), ((7454, 7472), 'matplotlib.pyplot.figure', 'pl.figure', (['"""Hover"""'], {}), "('Hover')\n", (7463, 7472), True, 'import matplotlib.pyplot as pl\n'), ((7565, 7613), 'simulation.animation.Visualization', 'ani.Visualization', (['self.iris', '(6)'], {'quaternion': '(True)'}), '(self.iris, 6, quaternion=True)\n', (7582, 7613), True, 'import simulation.animation as ani\n'), ((4112, 4125), 'numpy.sum', 'np.sum', (['mask1'], {}), '(mask1)\n', (4118, 4125), True, 'import numpy as np\n'), ((4133, 4146), 'numpy.sum', 'np.sum', (['mask2'], {}), '(mask2)\n', (4139, 4146), True, 'import numpy as np\n'), ((4154, 4167), 'numpy.sum', 'np.sum', (['mask3'], {}), '(mask3)\n', (4160, 4167), True, 'import numpy as np\n')] |
import os
import time
import random
import torch
import logging
import numpy as np
import torch.nn as nn
from pathlib import Path
from args import get_parser
from models.model import MLMBaseline
from data.data_loader import MLMLoader
from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify
# define criteria
criteria = {
'ir': IRLoss,
'le': LELoss,
'mtl': MTLLoss
}
ROOT_PATH = Path(os.path.dirname(__file__))
# read parser
parser = get_parser()
args = parser.parse_args()
# create directories for train experiments
logging_path = f'{args.path_results}/{args.data_path.split("/")[-1]}/{args.task}'
Path(logging_path).mkdir(parents=True, exist_ok=True)
# set logger
logging.basicConfig(format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p',
level=logging.INFO,
handlers=[
logging.FileHandler(f'{logging_path}/test.log', 'w'),
logging.StreamHandler()
])
logger = logging.getLogger(__name__)
# set a seed value
random.seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# define device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
# set model
model = MLMBaseline()
model.to(device)
# define loss function
criterion = criteria[args.task]()
model_path = f'{ROOT_PATH}/{args.snapshots}/{args.data_path.split("/")[-1]}/{args.task}/{args.model_name}'
logger.info(f"=> loading checkpoint '{model_path}'")
if device.type == 'cpu':
checkpoint = torch.load(model_path, encoding='latin1', map_location='cpu')
else:
checkpoint = torch.load(model_path, encoding='latin1')
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
logger.info(f"=> loaded checkpoint '{model_path}' (epoch {checkpoint['epoch']})")
# prepare test loader
test_loader = torch.utils.data.DataLoader(
MLMLoader(data_path=f'{ROOT_PATH}/{args.data_path}', partition='test'),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True)
logger.info('Test loader prepared.')
# run test
test(test_loader, model, criterion)
def test(val_loader, model, criterion):
losses = {
'ir': AverageMeter(),
'le': AverageMeter(),
'mtl': AverageMeter()
}
le_img = []
le_txt = []
# switch to evaluate mode
model.eval()
for i, val_input in enumerate(val_loader):
# inputs
images = torch.stack([val_input['image'][j].to(device) for j in range(len(val_input['image']))])
summaries = torch.stack([val_input['summary'][j].to(device) for j in range(len(val_input['summary']))])
classes = torch.stack([val_input['classes'][j].to(device) for j in range(len(val_input['classes']))])
# target
target = {
'ir': torch.stack([val_input['target_ir'][j].to(device) for j in range(len(val_input['target_ir']))]),
'le': torch.stack([val_input['target_le'][j].to(device) for j in range(len(val_input['target_le']))]),
'ids': torch.stack([val_input['id'][j].to(device) for j in range(len(val_input['id']))])
}
# compute output
output = model(images, summaries, classes)
# compute loss
loss = criterion(output, target)
# measure performance and record loss
if args.task == 'mtl':
losses['mtl'].update(loss['mtl'].data, args.batch_size)
losses['ir'].update(loss['ir'].data, args.batch_size)
losses['le'].update(loss['le'].data, args.batch_size)
log_loss = f'IR: {losses["ir"].val:.4f} ({losses["ir"].avg:.4f}) - LE: {losses["le"].val:.4f} ({losses["le"].avg:.4f})'
else:
losses[args.task].update(loss.data, args.batch_size)
log_loss = f'{losses[args.task].val:.4f} ({losses[args.task].avg:.4f})'
if args.task in ['ir', 'mtl']:
if i==0:
data0 = output['ir'][0].data.cpu().numpy()
data1 = output['ir'][1].data.cpu().numpy()
data2 = target['ids'].data.cpu().numpy()
else:
data0 = np.concatenate((data0, output['ir'][0].data.cpu().numpy()), axis=0)
data1 = np.concatenate((data1, output['ir'][1].data.cpu().numpy()), axis=0)
data2 = np.concatenate((data2, target['ids'].data.cpu().numpy()), axis=0)
if args.task in ['le', 'mtl']:
le_img.append([[t, torch.topk(o, k=1)[1], torch.topk(o, k=5)[1], torch.topk(o, k=10)[1]] for o, t in zip(output['le'][0], target['le'])])
le_txt.append([[t, torch.topk(o, k=1)[1], torch.topk(o, k=5)[1], torch.topk(o, k=10)[1]] for o, t in zip(output['le'][1], target['le'])])
results = {
'log': {
'Loss': log_loss
}
}
if args.task in ['ir', 'mtl']:
rank_results = rank(data0, data1, data2)
results['log']['IR Median Rank'] = rank_results['median_rank']
results['log']['IR Recall'] = ' - '.join([f'{k}: {v}' for k, v in rank_results['recall'].items()])
if args.task in ['le', 'mtl']:
classify_results = classify(le_img, le_txt)
results['log']['LE Image'] = ' - '.join([f'{k}: {v}' for k, v in classify_results['image'].items()])
results['log']['LE Text'] = ' - '.join([f'{k}: {v}' for k, v in classify_results['text'].items()])
# log results
for k, v in results['log'].items():
logger.info(f'** Test {k} - {v}')
if __name__ == '__main__':
main() | [
"logging.getLogger",
"logging.StreamHandler",
"models.model.MLMBaseline",
"torch.cuda.is_available",
"pathlib.Path",
"logging.FileHandler",
"numpy.random.seed",
"torch.topk",
"os.path.dirname",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.load",
"random.seed",
"args.get_parser... | [((464, 476), 'args.get_parser', 'get_parser', ([], {}), '()\n', (474, 476), False, 'from args import get_parser\n'), ((1061, 1088), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1078, 1088), False, 'import logging\n'), ((1109, 1131), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1120, 1131), False, 'import random\n'), ((1132, 1157), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1146, 1157), True, 'import numpy as np\n'), ((1161, 1186), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1184, 1186), False, 'import torch\n'), ((413, 438), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (428, 438), False, 'import os\n'), ((1192, 1220), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1209, 1220), False, 'import torch\n'), ((1225, 1258), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1247, 1258), False, 'import torch\n'), ((1263, 1300), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (1289, 1300), False, 'import torch\n'), ((1429, 1442), 'models.model.MLMBaseline', 'MLMBaseline', ([], {}), '()\n', (1440, 1442), False, 'from models.model import MLMBaseline\n'), ((630, 648), 'pathlib.Path', 'Path', (['logging_path'], {}), '(logging_path)\n', (634, 648), False, 'from pathlib import Path\n'), ((1350, 1375), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1373, 1375), False, 'import torch\n'), ((1749, 1810), 'torch.load', 'torch.load', (['model_path'], {'encoding': '"""latin1"""', 'map_location': '"""cpu"""'}), "(model_path, encoding='latin1', map_location='cpu')\n", (1759, 1810), False, 'import torch\n'), ((1842, 1883), 'torch.load', 'torch.load', (['model_path'], {'encoding': '"""latin1"""'}), "(model_path, encoding='latin1')\n", (1852, 1883), False, 'import 
torch\n'), ((2147, 2217), 'data.data_loader.MLMLoader', 'MLMLoader', ([], {'data_path': 'f"""{ROOT_PATH}/{args.data_path}"""', 'partition': '"""test"""'}), "(data_path=f'{ROOT_PATH}/{args.data_path}', partition='test')\n", (2156, 2217), False, 'from data.data_loader import MLMLoader\n'), ((2505, 2519), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2517, 2519), False, 'from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify\n'), ((2535, 2549), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2547, 2549), False, 'from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify\n'), ((2566, 2580), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2578, 2580), False, 'from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify\n'), ((5159, 5184), 'utils.rank', 'rank', (['data0', 'data1', 'data2'], {}), '(data0, data1, data2)\n', (5163, 5184), False, 'from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify\n'), ((5426, 5450), 'utils.classify', 'classify', (['le_img', 'le_txt'], {}), '(le_img, le_txt)\n', (5434, 5450), False, 'from utils import IRLoss, LELoss, MTLLoss, AverageMeter, rank, classify\n'), ((927, 979), 'logging.FileHandler', 'logging.FileHandler', (['f"""{logging_path}/test.log"""', '"""w"""'], {}), "(f'{logging_path}/test.log', 'w')\n", (946, 979), False, 'import logging\n'), ((1005, 1028), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1026, 1028), False, 'import logging\n'), ((4751, 4769), 'torch.topk', 'torch.topk', (['o'], {'k': '(1)'}), '(o, k=1)\n', (4761, 4769), False, 'import torch\n'), ((4774, 4792), 'torch.topk', 'torch.topk', (['o'], {'k': '(5)'}), '(o, k=5)\n', (4784, 4792), False, 'import torch\n'), ((4797, 4816), 'torch.topk', 'torch.topk', (['o'], {'k': '(10)'}), '(o, k=10)\n', (4807, 4816), False, 'import torch\n'), ((4901, 4919), 'torch.topk', 'torch.topk', (['o'], {'k': '(1)'}), '(o, k=1)\n', (4911, 4919), False, 'import torch\n'), ((4924, 
4942), 'torch.topk', 'torch.topk', (['o'], {'k': '(5)'}), '(o, k=5)\n', (4934, 4942), False, 'import torch\n'), ((4947, 4966), 'torch.topk', 'torch.topk', (['o'], {'k': '(10)'}), '(o, k=10)\n', (4957, 4966), False, 'import torch\n')] |
import os
import tarfile
import numpy as np
import pandas as pd
from catalyst.data.bundles.core import download_without_progress
from catalyst.exchange.utils.exchange_utils import get_exchange_bundles_folder
EXCHANGE_NAMES = ['bitfinex', 'bittrex', 'poloniex', 'binance']
API_URL = 'http://data.enigma.co/api/v1'
def get_bcolz_chunk(exchange_name, symbol, data_frequency, period):
"""
Download and extract a bcolz bundle.
Parameters
----------
exchange_name: str
symbol: str
data_frequency: str
period: str
Returns
-------
str
Filename: bitfinex-daily-neo_eth-2017-10.tar.gz
"""
root = get_exchange_bundles_folder(exchange_name)
name = '{exchange}-{frequency}-{symbol}-{period}'.format(
exchange=exchange_name,
frequency=data_frequency,
symbol=symbol,
period=period
)
path = os.path.join(root, name)
if not os.path.isdir(path):
url = 'https://s3.amazonaws.com/enigmaco/catalyst-bundles/' \
'exchange-{exchange}/{name}.tar.gz'.format(
exchange=exchange_name,
name=name)
bytes = download_without_progress(url)
with tarfile.open('r', fileobj=bytes) as tar:
tar.extractall(path)
return path
def get_df_from_arrays(arrays, periods):
"""
A DataFrame from the specified OHCLV arrays.
Parameters
----------
arrays: Object
periods: DateTimeIndex
Returns
-------
DataFrame
"""
ohlcv = dict()
for index, field in enumerate(
['open', 'high', 'low', 'close', 'volume']):
ohlcv[field] = arrays[index].flatten()
df = pd.DataFrame(
data=ohlcv,
index=periods
)
return df
def range_in_bundle(asset, start_dt, end_dt, reader):
"""
Evaluate whether price data of an asset is included has been ingested in
the exchange bundle for the given date range.
Parameters
----------
asset: TradingPair
start_dt: datetime
end_dt: datetime
reader: BcolzBarMinuteReader
Returns
-------
bool
"""
has_data = True
dates = [start_dt, end_dt]
while dates and has_data:
try:
dt = dates.pop(0)
close = reader.get_value(asset.sid, dt, 'close')
if np.isnan(close):
has_data = False
except Exception:
has_data = False
return has_data
def get_assets(exchange, include_symbols, exclude_symbols):
"""
Get assets from an exchange, including or excluding the specified
symbols.
Parameters
----------
exchange: Exchange
include_symbols: str
exclude_symbols: str
Returns
-------
list[TradingPair]
"""
if include_symbols is not None:
include_symbols_list = include_symbols.split(',')
return exchange.get_assets(include_symbols_list)
else:
all_assets = exchange.get_assets()
if exclude_symbols is not None:
exclude_symbols_list = exclude_symbols.split(',')
assets = []
for asset in all_assets:
if asset.symbol not in exclude_symbols_list:
assets.append(asset)
return assets
else:
return all_assets
| [
"tarfile.open",
"catalyst.data.bundles.core.download_without_progress",
"os.path.join",
"catalyst.exchange.utils.exchange_utils.get_exchange_bundles_folder",
"os.path.isdir",
"numpy.isnan",
"pandas.DataFrame"
] | [((686, 728), 'catalyst.exchange.utils.exchange_utils.get_exchange_bundles_folder', 'get_exchange_bundles_folder', (['exchange_name'], {}), '(exchange_name)\n', (713, 728), False, 'from catalyst.exchange.utils.exchange_utils import get_exchange_bundles_folder\n'), ((926, 950), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (938, 950), False, 'import os\n'), ((1758, 1797), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ohlcv', 'index': 'periods'}), '(data=ohlcv, index=periods)\n', (1770, 1797), True, 'import pandas as pd\n'), ((965, 984), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (978, 984), False, 'import os\n'), ((1204, 1234), 'catalyst.data.bundles.core.download_without_progress', 'download_without_progress', (['url'], {}), '(url)\n', (1229, 1234), False, 'from catalyst.data.bundles.core import download_without_progress\n'), ((1249, 1281), 'tarfile.open', 'tarfile.open', (['"""r"""'], {'fileobj': 'bytes'}), "('r', fileobj=bytes)\n", (1261, 1281), False, 'import tarfile\n'), ((2433, 2448), 'numpy.isnan', 'np.isnan', (['close'], {}), '(close)\n', (2441, 2448), True, 'import numpy as np\n')] |
import numpy as np
def Ent_MS_Plus20201001(x, tau, m, r):
"""
(RCMSE, CMSE, MSE, MSFE) = RCMS_Ent( x, tau, m, r )
inputs - x, single column time seres
- tau, greatest scale factor
- m, length of vectors to be compared
- R, radius for accepting matches (as a proportion of the
standard deviation)
output - RCMSE, Refined Composite Multiscale Entropy
- CMSE, Composite Multiscale Entropy
- MSE, Multiscale Entropy
- MSFE, Multiscale Fuzzy Entropy
- GMSE, Generalized Multiscale Entropy
Remarks
- This code finds the Refined Composite Multiscale Sample Entropy,
Composite Multiscale Entropy, Multiscale Entropy, Multiscale Fuzzy
Entropy and Generalized Multiscale Entropy of a data series using the
methods described by - Wu, Shuen-De, et al. 2014. "Analysis of complex
time series using refined composite multiscale entropy." Physics
Letters A. 378, 1369-1374.
- Each of these methods calculates entropy at different scales. These
scales range from 1 to tau in increments of 1.
- The Complexity Index (CI) is not calculated by this code. Because the scales
are incremented by 1 the C is the summation of all the elements in each
array. For example the CI of MSE would be sum(MSE).
20170828 Created by <NAME>, <EMAIL>
20201001 Modified by <NAME>, <EMAIL>
- Modifed to calculate all scales in this single code instead of
needing to be in an external for loop.
"""
R = r*np.std(x)
N = len(x)
GMSE = np.zeros(tau, dtype=object)
MSE = np.zeros(tau, dtype=object)
MSFE = np.zeros(tau, dtype=object)
CMSE = np.zeros(tau, dtype=object)
RCMSE = np.zeros(tau, dtype=object)
for i in range(tau):
# Coarse-graining for GMSE
Ndivi = int(N/i) # defining this now because we use it a lot later on.
o2 = np.zeros((i, Ndivi))
for j in range(Ndivi):
for k in range(i):
try:
# NOTE: May have a one off issue.
o2[k,j] = np.var(x[(j-1)*i+k:j*i+k])
except:
o2[k,j] = np.nan
GMSE[i] = Samp_Ent(o2[0,:],m,r)
# Coarse-graining for MSE and derivatives
y_tau_kj = np.zeros((i,Ndivi))
for j in range(Ndivi):
for k in range(i):
try:
y_tau_kj[k,j] = 1/i*np.sum(x[(j-1)*i+k:j*i+k])
except:
y_tau_kj[k,j] = np.nan
# Multiscale Entropy (MSE)
MSE[i] = Samp_Ent(y_tau_kj[0, not np.isnan(y_tau_kj[0,:])],m,R)
#Multiscale Fuzzy Entropy (MFE)
MSFE[i] = Fuzzy_Ent(y_tau_kj[0, not np.isnan(y_tau_kj[0,:])],m,R,2)
# Composite Multiscale Entropy (CMSE)
CMSE[i] = 0
nm = np.zeros(i)
nm1 = np.zeros(i)
for k in range(i):
_, nm[k], nm1[k] = Samp_Ent(y_tau_kj[k, not np.isnan(y_tau_kj[k,:])],m,R)
CMSE[i] = CMSE[i]+1/i*-np.log(nm1[k]/nm[k])
# Refined Composite Multiscale Entropy (RCMSE)
n_m1_ktau = 1/i*np.sum(nm1)
n_m_ktau = 1/i*np.sum(nm)
RCMSE[i] = -np.log(n_m1_ktau/n_m_ktau)
return RCMSE, CMSE, MSE, MSFE, GMSE
def Samp_Ent(data, m, r):
"""
[SE,sum_nm,sum_nm1] = Samp_Ent(data,m,r)
This is a faster version of the previous code - Samp_En.m
inputs - data, single column time seres
- m, length of vectors to be compared
- R, radius for accepting matches (as a proportion of the
standard deviation)
output - SE, sample entropy
- sum_nm, total number of matches for vector length m
- sum_nm1, total number of matches for vector length m+1
Remarks
This code finds the sample entropy of a data series using the method
described by - <NAME>., <NAME>., 2000. "Physiological
time-series analysis using approximate entropy and sample entropy."
Am. J. Physiol. Heart Circ. Physiol. 278, H2039–H2049.
<NAME>, 2016
<NAME>, 2017 (Made count total number of matches for each vector length, necessary for CMSE and RCMSE)
"""
R = r * np.std(data)
N = len(data)
data = np.array(data)
dij = np.zeros((N-m,m+1))
dj = np.zeros((N-m,1))
dj1 = np.zeros((N-m,1))
Bm = np.zeros((N-m,1))
Am = np.zeros((N-m,1))
for i in range(N-m):
for k in range(m+1):
dij[:,k] = np.abs(data[k:N-m+k]-data[i+k])
dj = np.max(dij[:,0:m],axis=1)
dj1 = np.max(dij,axis=1)
d = np.where(dj <= R)
d1 = np.where(dj1 <= R)
nm = d[0].shape[0]
sum_nm = sum_nm + nm
Bm[i] = nm/(N-m)
nm1 = d1[0].shape[0]
sum_nm1 = sum_nm1 + nm1
Am[i] = nm1/(N-m)
Bmr = np.sum(Bm)/(N-m)
Amr = np.sum(Am)/(N-m)
return (-np.log(Amr/Bmr), sum_nm, sum_nm1)
def Fuzzy_Ent(series, dim, r, n):
"""
Function which computes the Fuzzy Entropy (FuzzyEn) of a time series. The
algorithm presented by Chen et al. at "Charactirization of surface EMG
signal based on fuzzy entropy" (DOI: 10.1109/TNSRE.2007.897025) has been
followed.
INPUT:
series: the time series.
dim: the embedding dimesion employed in the SampEn algorithm.
r: the width of the fuzzy exponential function.
n: the step of the fuzzy exponential function.
OUTPUT:
FuzzyEn: the FuzzyEn value.
PROJECT: Research Master in signal theory and bioengineering - University of Valladolid
DATE: 11/10/2014
VERSION: 1
AUTHOR: <NAME>
"""
# Checking the input parameters:
# Processing:
# Normalization of the input time series:
# series = (series-mean(series))/std(series);
N = len(series)
phi = np.zeros((1,2))
# Value of 'r' in case of not normalized time series:
r = r*np.std(series)
for j in range(0,2):
m = dim+j-1 # 'm' is the embbeding dimension used each iteration
# Pre-definition of the varialbes for computational efficiency:
patterns = np.zeros((m,N-m+1))
aux = np.zeros((1,N-m+1))
# First, we compose the patterns
# The columns of the matrix 'patterns' will be the (N-m+1) patterns of 'm' length:
if m == 1: # If the embedding dimension is 1, each sample is a pattern
patterns = series
else: # Otherwise, we build the patterns of length 'm':
for i in range(m):
patterns[i,:] = series[i:N-m+i]
# We substract the baseline of each pattern to itself:
for i in range(N-m+1):
patterns[:,i] = patterns[:,i] - (np.mean(patterns[:,i]))
# This loop goes over the columns of matrix 'patterns':
# NOTE: May need to swap out these regular python math functions for the Numpy functions
# With input from NumPy arrays, the python math functions may be slower than Numpy's
for i in range(N-m):
if m == 1:
dist = np.abs(patterns - np.tile(patterns[:,i],(1,N-m+1)))
else:
dist = np.max(np.abs(patterns - np.tile(patterns[:,i],(1,N-m+1))))
# Second, we compute the maximum absolut distance between the
# scalar components of the current pattern and the rest:
# Third, we get the degree of similarity:
simi = np.exp(((-1)*((dist)**n))/r)
# We average all the degrees of similarity for the current pattern:
aux[i] = (np.sum(simi)-1)/(N-m-1) # We substract 1 to the sum to avoid the self-comparison
# Finally, we get the 'phy' parameter as the as the mean of the first
# 'N-m' averaged drgees of similarity:
phi[j] = np.sum(aux)/(N-m)
# This is our FuzzyEn
return np.log(phi[0]) - np.log(phi[1])
| [
"numpy.abs",
"numpy.mean",
"numpy.tile",
"numpy.where",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isnan",
"numpy.std",
"numpy.var"
] | [((1622, 1649), 'numpy.zeros', 'np.zeros', (['tau'], {'dtype': 'object'}), '(tau, dtype=object)\n', (1630, 1649), True, 'import numpy as np\n'), ((1660, 1687), 'numpy.zeros', 'np.zeros', (['tau'], {'dtype': 'object'}), '(tau, dtype=object)\n', (1668, 1687), True, 'import numpy as np\n'), ((1699, 1726), 'numpy.zeros', 'np.zeros', (['tau'], {'dtype': 'object'}), '(tau, dtype=object)\n', (1707, 1726), True, 'import numpy as np\n'), ((1738, 1765), 'numpy.zeros', 'np.zeros', (['tau'], {'dtype': 'object'}), '(tau, dtype=object)\n', (1746, 1765), True, 'import numpy as np\n'), ((1778, 1805), 'numpy.zeros', 'np.zeros', (['tau'], {'dtype': 'object'}), '(tau, dtype=object)\n', (1786, 1805), True, 'import numpy as np\n'), ((4353, 4367), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4361, 4367), True, 'import numpy as np\n'), ((4379, 4403), 'numpy.zeros', 'np.zeros', (['(N - m, m + 1)'], {}), '((N - m, m + 1))\n', (4387, 4403), True, 'import numpy as np\n'), ((4409, 4429), 'numpy.zeros', 'np.zeros', (['(N - m, 1)'], {}), '((N - m, 1))\n', (4417, 4429), True, 'import numpy as np\n'), ((4437, 4457), 'numpy.zeros', 'np.zeros', (['(N - m, 1)'], {}), '((N - m, 1))\n', (4445, 4457), True, 'import numpy as np\n'), ((4464, 4484), 'numpy.zeros', 'np.zeros', (['(N - m, 1)'], {}), '((N - m, 1))\n', (4472, 4484), True, 'import numpy as np\n'), ((4491, 4511), 'numpy.zeros', 'np.zeros', (['(N - m, 1)'], {}), '((N - m, 1))\n', (4499, 4511), True, 'import numpy as np\n'), ((5953, 5969), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (5961, 5969), True, 'import numpy as np\n'), ((1585, 1594), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1591, 1594), True, 'import numpy as np\n'), ((1959, 1979), 'numpy.zeros', 'np.zeros', (['(i, Ndivi)'], {}), '((i, Ndivi))\n', (1967, 1979), True, 'import numpy as np\n'), ((2353, 2373), 'numpy.zeros', 'np.zeros', (['(i, Ndivi)'], {}), '((i, Ndivi))\n', (2361, 2373), True, 'import numpy as np\n'), ((2920, 2931), 'numpy.zeros', 
'np.zeros', (['i'], {}), '(i)\n', (2928, 2931), True, 'import numpy as np\n'), ((2946, 2957), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (2954, 2957), True, 'import numpy as np\n'), ((4310, 4322), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (4316, 4322), True, 'import numpy as np\n'), ((4633, 4660), 'numpy.max', 'np.max', (['dij[:, 0:m]'], {'axis': '(1)'}), '(dij[:, 0:m], axis=1)\n', (4639, 4660), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.max', 'np.max', (['dij'], {'axis': '(1)'}), '(dij, axis=1)\n', (4679, 4692), True, 'import numpy as np\n'), ((4704, 4721), 'numpy.where', 'np.where', (['(dj <= R)'], {}), '(dj <= R)\n', (4712, 4721), True, 'import numpy as np\n'), ((4736, 4754), 'numpy.where', 'np.where', (['(dj1 <= R)'], {}), '(dj1 <= R)\n', (4744, 4754), True, 'import numpy as np\n'), ((4938, 4948), 'numpy.sum', 'np.sum', (['Bm'], {}), '(Bm)\n', (4944, 4948), True, 'import numpy as np\n'), ((4965, 4975), 'numpy.sum', 'np.sum', (['Am'], {}), '(Am)\n', (4971, 4975), True, 'import numpy as np\n'), ((6037, 6051), 'numpy.std', 'np.std', (['series'], {}), '(series)\n', (6043, 6051), True, 'import numpy as np\n'), ((6242, 6266), 'numpy.zeros', 'np.zeros', (['(m, N - m + 1)'], {}), '((m, N - m + 1))\n', (6250, 6266), True, 'import numpy as np\n'), ((6276, 6300), 'numpy.zeros', 'np.zeros', (['(1, N - m + 1)'], {}), '((1, N - m + 1))\n', (6284, 6300), True, 'import numpy as np\n'), ((7984, 7998), 'numpy.log', 'np.log', (['phi[0]'], {}), '(phi[0])\n', (7990, 7998), True, 'import numpy as np\n'), ((8001, 8015), 'numpy.log', 'np.log', (['phi[1]'], {}), '(phi[1])\n', (8007, 8015), True, 'import numpy as np\n'), ((3215, 3226), 'numpy.sum', 'np.sum', (['nm1'], {}), '(nm1)\n', (3221, 3226), True, 'import numpy as np\n'), ((3250, 3260), 'numpy.sum', 'np.sum', (['nm'], {}), '(nm)\n', (3256, 3260), True, 'import numpy as np\n'), ((3281, 3309), 'numpy.log', 'np.log', (['(n_m1_ktau / n_m_ktau)'], {}), '(n_m1_ktau / n_m_ktau)\n', (3287, 3309), True, 'import 
numpy as np\n'), ((4587, 4626), 'numpy.abs', 'np.abs', (['(data[k:N - m + k] - data[i + k])'], {}), '(data[k:N - m + k] - data[i + k])\n', (4593, 4626), True, 'import numpy as np\n'), ((4996, 5013), 'numpy.log', 'np.log', (['(Amr / Bmr)'], {}), '(Amr / Bmr)\n', (5002, 5013), True, 'import numpy as np\n'), ((7565, 7591), 'numpy.exp', 'np.exp', (['(-1 * dist ** n / r)'], {}), '(-1 * dist ** n / r)\n', (7571, 7591), True, 'import numpy as np\n'), ((7928, 7939), 'numpy.sum', 'np.sum', (['aux'], {}), '(aux)\n', (7934, 7939), True, 'import numpy as np\n'), ((6828, 6851), 'numpy.mean', 'np.mean', (['patterns[:, i]'], {}), '(patterns[:, i])\n', (6835, 6851), True, 'import numpy as np\n'), ((2147, 2183), 'numpy.var', 'np.var', (['x[(j - 1) * i + k:j * i + k]'], {}), '(x[(j - 1) * i + k:j * i + k])\n', (2153, 2183), True, 'import numpy as np\n'), ((7696, 7708), 'numpy.sum', 'np.sum', (['simi'], {}), '(simi)\n', (7702, 7708), True, 'import numpy as np\n'), ((2496, 2532), 'numpy.sum', 'np.sum', (['x[(j - 1) * i + k:j * i + k]'], {}), '(x[(j - 1) * i + k:j * i + k])\n', (2502, 2532), True, 'import numpy as np\n'), ((2676, 2700), 'numpy.isnan', 'np.isnan', (['y_tau_kj[0, :]'], {}), '(y_tau_kj[0, :])\n', (2684, 2700), True, 'import numpy as np\n'), ((2799, 2823), 'numpy.isnan', 'np.isnan', (['y_tau_kj[0, :]'], {}), '(y_tau_kj[0, :])\n', (2807, 2823), True, 'import numpy as np\n'), ((3106, 3128), 'numpy.log', 'np.log', (['(nm1[k] / nm[k])'], {}), '(nm1[k] / nm[k])\n', (3112, 3128), True, 'import numpy as np\n'), ((7214, 7253), 'numpy.tile', 'np.tile', (['patterns[:, i]', '(1, N - m + 1)'], {}), '(patterns[:, i], (1, N - m + 1))\n', (7221, 7253), True, 'import numpy as np\n'), ((3041, 3065), 'numpy.isnan', 'np.isnan', (['y_tau_kj[k, :]'], {}), '(y_tau_kj[k, :])\n', (3049, 3065), True, 'import numpy as np\n'), ((7314, 7353), 'numpy.tile', 'np.tile', (['patterns[:, i]', '(1, N - m + 1)'], {}), '(patterns[:, i], (1, N - m + 1))\n', (7321, 7353), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Following along with: https://www.learnopencv.com/pytorch-for-beginners-semantic-segmentation-using-torchvision/
# Generated by https://traingenerator.jrieke.com/
# Before running, install required packages:
# pip install numpy torch torchvision pytorch-ignite
from pathlib import Path
import shutil
import urllib
import urllib.request  # `import urllib` alone does not guarantee the request submodule is loaded
import zipfile

import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # duplicate retained from the generated template
import torch
from PIL import Image
from torch import optim, nn
from torch.utils.data import DataLoader, TensorDataset
from torchvision import models, datasets, transforms
DATA_DIR = "../data/external/image-data"
# ----------------------------------- Setup -----------------------------------
# INSERT YOUR DATA HERE
# Expected format: One folder per class, e.g.
# train
# --- dogs
# | +-- lassie.jpg
# | +-- komissar-rex.png
# --- cats
# | +-- garfield.png
# | +-- smelly-cat.png
#
# Example: https://github.com/jrieke/traingenerator/tree/main/data/image-data
example_url = "https://github.com/jrieke/traingenerator/raw/main/data/fake-image-data.zip"
train_data = DATA_DIR # required
val_data = DATA_DIR # optional
test_data = None # optional
use_cuda = torch.cuda.is_available()
def download_data():
    """Fetch the example image data set on first use.

    Downloads and unpacks the demo archive into ``DATA_DIR`` unless that
    directory already exists, then removes the ``__MACOSX`` junk folder
    that the zip archive may contain.
    """
    target = Path(DATA_DIR)
    if target.exists():
        return
    archive, _ = urllib.request.urlretrieve(example_url)
    with zipfile.ZipFile(archive, "r") as zf:
        zf.extractall(DATA_DIR)
    # Manual cleanup of macOS metadata shipped inside the zip.
    junk = target / "__MACOSX"
    if junk.exists():
        shutil.rmtree(junk)
def preprocess(data, name, batch_size):
    """Build a DataLoader over an image folder.

    Args:
        data: Root directory in torchvision ImageFolder layout, or None.
        name: Split name; shuffling is enabled only for "train".
        batch_size: Batch size of the returned loader.

    Returns:
        A DataLoader, or None when `data` is None (optional val/test split).
    """
    if data is None:  # val/test can be empty
        return None
    # Read image files into a pytorch dataset with ImageNet normalization.
    pipeline = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image_set = datasets.ImageFolder(data, transform=pipeline)
    # Wrap in a data loader; pin memory only when a GPU is available.
    loader_kwargs = {"pin_memory": True, "num_workers": 1} if use_cuda else {}
    return DataLoader(image_set, batch_size=batch_size,
                      shuffle=(name == "train"), **loader_kwargs)
def log_results(name, metrics, epoch):
    """Print one line of loss/accuracy metrics for a data split.

    Args:
        name: Split label ("train", "val" or "test").
        metrics: Mapping with 'loss' and 'accuracy' float entries.
        epoch: Epoch number (accepted for interface compatibility; unused).
    """
    label = f"{name + ':':6}"
    print(f"{label} loss: {metrics['loss']:.3f}, "
          f"accuracy: {metrics['accuracy']:.3f}")
def main():
    """Main entry point.

    Downloads the example data, builds data loaders, loads a pretrained
    DeepLabV3 segmentation model and visualizes its prediction for a
    sample image as a colour-coded segmentation map.
    """
    download_data()
    # Set up hyperparameters (only batch_size is consumed; the generated
    # training scaffold that used lr/num_epochs was disabled and removed).
    batch_size = 128
    # ------------------------------- Preprocessing -------------------------------
    # Loaders are created for template completeness; the inference-only
    # flow below does not consume them.
    train_loader = preprocess(train_data, "train", batch_size)
    val_loader = preprocess(val_data, "val", batch_size)
    test_loader = preprocess(test_data, "test", batch_size)
    # ----------------------------------- Model -----------------------------------
    # Load the pretrained model in eval mode. The original template first
    # loaded fcn_resnet101 and immediately overwrote it with deeplabv3 —
    # that dead load (and its wasted weight download) has been removed.
    model = models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
    # Sample test images; edit the selection below to try another one:
    #   '/home/aardvark/best_cat.jpg', '/home/aardvark/dog.jpg',
    #   '/home/aardvark/dishwasher.jpg', '../data/pexels-photo-1485799.jpeg',
    #   '../data/pexels-photo-5648380.jpeg', '../data/pexels-photo-4127449.jpeg'
    woman_in_supermarket = '../data/pexels-photo-4177708.jpeg'
    img = Image.open(woman_in_supermarket)
    plt.imshow(img)
    plt.show()
    # ImageNet-style preprocessing for the forward pass.
    trf = transforms.Compose([transforms.Resize(256),
                        # transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])])
    inp = trf(img).unsqueeze(0)  # add batch dimension
    # Forward pass: 'out' holds per-class scores of shape (1, classes, H, W).
    out = model(inp)['out']
    print(out.shape)
    # Per-pixel argmax over the class axis -> (H, W) integer label map.
    om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
    print(om.shape)
    print(np.unique(om))

    # Define helper function
    def decode_segmap(image, nc=21):
        """Map a (H, W) label image to RGB using the Pascal VOC palette.

        Args:
            image: 2-D integer array of class labels in [0, nc).
            nc: Number of classes; must not exceed the 21-entry palette.

        Returns:
            (H, W, 3) uint8 RGB image.
        """
        label_colors = np.array([(0, 0, 0),  # 0=background
                       # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
                       (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
                       # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
                       (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
                       # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
                       (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                       # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
                       (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)
                       ])
        r = np.zeros_like(image).astype(np.uint8)
        g = np.zeros_like(image).astype(np.uint8)
        b = np.zeros_like(image).astype(np.uint8)
        for l in range(0, nc):
            idx = image == l
            r[idx] = label_colors[l, 0]
            g[idx] = label_colors[l, 1]
            b[idx] = label_colors[l, 2]
        rgb = np.stack([r, g, b], axis=2)
        return rgb

    rgb = decode_segmap(om)
    plt.imshow(rgb)
    plt.show()
    # The commented-out supervised training scaffold generated by
    # traingenerator (trainer/evaluator/epoch logging) was removed; this
    # script only runs inference with pretrained weights.
# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"zipfile.ZipFile",
"torchvision.models.segmentation.fcn_resnet101",
"torch.utils.data.DataLoader",
"numpy.array",
"torch.cuda.is_available",
"matplotlib.pyplot.imshow",
"pathlib.Path",
"urllib.request.urlretrieve",
"torchvision.datasets.ImageFolder",
"numpy.stack",
"torchvision.transforms.ToTens... | [((1198, 1223), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1221, 1223), False, 'import torch\n'), ((1440, 1454), 'pathlib.Path', 'Path', (['DATA_DIR'], {}), '(DATA_DIR)\n', (1444, 1454), False, 'from pathlib import Path\n'), ((2147, 2194), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['data'], {'transform': 'transform'}), '(data, transform=transform)\n', (2167, 2194), False, 'from torchvision import models, datasets, transforms\n'), ((2339, 2416), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': "(name == 'train')"}), "(dataset, batch_size=batch_size, shuffle=name == 'train', **kwargs)\n", (2349, 2416), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2832, 2875), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2844, 2875), False, 'import torch\n'), ((3963, 3995), 'PIL.Image.open', 'Image.open', (['woman_in_supermarket'], {}), '(woman_in_supermarket)\n', (3973, 3995), False, 'from PIL import Image\n'), ((4001, 4016), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4011, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4021, 4031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4029, 4031), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6014), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (6009, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6027, 6029), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1545), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['example_url'], {}), '(example_url)\n', (1532, 1545), False, 'import urllib\n'), ((1728, 1754), 'shutil.rmtree', 'shutil.rmtree', (['osx_junkdir'], {}), '(osx_junkdir)\n', (1741, 1754), False, 'import shutil\n'), ((4575, 4588), 'numpy.unique', 
'np.unique', (['om'], {}), '(om)\n', (4584, 4588), True, 'import numpy as np\n'), ((4742, 5066), 'numpy.array', 'np.array', (['[(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, \n 128), (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128,\n 0), (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, \n 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64,\n 128)]'], {}), '([(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128),\n (128, 0, 128), (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0),\n (64, 128, 0), (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128\n ), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0\n ), (0, 64, 128)])\n', (4750, 5066), True, 'import numpy as np\n'), ((5919, 5946), 'numpy.stack', 'np.stack', (['[r, g, b]'], {'axis': '(2)'}), '([r, g, b], axis=2)\n', (5927, 5946), True, 'import numpy as np\n'), ((1559, 1589), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (1574, 1589), False, 'import zipfile\n'), ((1983, 2009), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2004, 2009), False, 'from torchvision import models, datasets, transforms\n'), ((2019, 2040), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2038, 2040), False, 'from torchvision import models, datasets, transforms\n'), ((2050, 2125), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2070, 2125), False, 'from torchvision import models, datasets, transforms\n'), ((3328, 3378), 'torchvision.models.segmentation.fcn_resnet101', 'models.segmentation.fcn_resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3361, 3378), False, 'from torchvision import models, datasets, transforms\n'), ((3398, 3454), 
'torchvision.models.segmentation.deeplabv3_resnet101', 'models.segmentation.deeplabv3_resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3437, 3454), False, 'from torchvision import models, datasets, transforms\n'), ((4063, 4085), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (4080, 4085), False, 'from torchvision import models, datasets, transforms\n'), ((4177, 4198), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4196, 4198), False, 'from torchvision import models, datasets, transforms\n'), ((4230, 4305), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (4250, 4305), False, 'from torchvision import models, datasets, transforms\n'), ((5585, 5605), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (5598, 5605), True, 'import numpy as np\n'), ((5635, 5655), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (5648, 5655), True, 'import numpy as np\n'), ((5685, 5705), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (5698, 5705), True, 'import numpy as np\n')] |
'''
Description: multi object tracking
Author: <EMAIL>
FilePath: /obj_evaluation/measure_judge/common/mot.py
Date: 2021-09-24 19:37:53
'''
import numpy as np
class Munkres:
    """Hungarian (Munkres) algorithm for the minimum-cost assignment problem.

    Follows the step-by-step formulation from
    https://brc2.com/the-algorithm-workshop/ ; ``self.step`` selects the
    current step and ``run()`` iterates the dispatch table until step 7
    signals completion.
    """

    def __init__(self, cost: list, inv_eps=1000) -> None:
        """Initialize the solver.

        Args:
            cost (list): 2-D square cost matrix (list of equal-length rows).
            inv_eps (int): Scale factor used to convert costs to integers;
                costs are resolved up to 1 / inv_eps.
        """
        # Scale to integers. The original code called
        # `self.cost.astype(np.int32)` without assigning the result, so the
        # intended integer conversion never took effect. We also round to
        # nearest (instead of truncating) to avoid float artifacts such as
        # 0.29 * 1000 == 289.999..., and use int64 to avoid overflow.
        self.cost = np.rint(np.array(cost) * inv_eps).astype(np.int64)
        # Work on a copy so the scaled input in `self.cost` survives run()
        # (the original aliased the two arrays, destroying the costs).
        self.run_cost = self.cost.copy()
        self.rows = len(cost)
        self.cols = len(cost[0])
        self.step = 1
        self.running = True
        assert(self.rows == self.cols)
        # Dispatch table: step number -> handler method.
        self.mp = {
            1: self.step_one,
            2: self.step_two,
            3: self.step_three,
            4: self.step_four,
            5: self.step_five,
            6: self.step_six,
            7: self.step_seven
        }
        # mask entries: 0 = plain, 1 = starred zero, 2 = primed zero
        self.mask = np.zeros((self.rows, self.cols))
        self.row_cover = np.zeros(self.rows)
        self.col_cover = np.zeros(self.cols)
        self.paths = []  # alternating star/prime path consumed by step 5

    def step_one(self):
        """Subtract each row's minimum from the row; go to step 2."""
        for i in range(self.rows):
            self.run_cost[i] -= min(self.run_cost[i])
        self.step = 2

    def step_two(self):
        """Star each zero that has no starred zero in its row or column; go to step 3."""
        for i in range(self.rows):
            for j in range(self.cols):
                if self.run_cost[i][j] == 0 and self.row_cover[i] == 0 and self.col_cover[j] == 0:
                    self.mask[i][j] = 1
                    self.row_cover[i] = 1
                    self.col_cover[j] = 1
        # Reset the covers used as scratch space during the greedy starring.
        for i in range(self.rows):
            self.row_cover[i] = 0
        for j in range(self.cols):
            self.col_cover[j] = 0
        self.step = 3

    def step_three(self):
        """Cover every column containing a starred zero.

        If all columns are covered, the stars form a complete assignment
        (go to step 7 = DONE); otherwise go to step 4.
        """
        for i in range(self.rows):
            for j in range(self.cols):
                if self.mask[i][j] == 1:
                    self.col_cover[j] = 1
        colcount = np.sum(self.col_cover)
        if colcount >= self.rows or colcount >= self.cols:
            self.step = 7
        else:
            self.step = 4

    def __find_a_zero(self):
        """Find a non-covered zero.

        Returns:
            (row, col) of the first uncovered zero, or (-1, -1) if none.
        """
        r, c = -1, -1
        for i in range(self.rows):
            for j in range(self.cols):
                if self.run_cost[i][j] == 0 and self.row_cover[i] == 0 and self.col_cover[j] == 0:
                    return i, j
        return r, c

    def __find_star_in_row(self, row):
        """Return the column of the starred zero in `row`, or -1 if none."""
        for j in range(self.cols):
            if self.mask[row][j] == 1:
                return j
        return -1

    def step_four(self):
        """Prime uncovered zeros until none remain or an augmenting start is found.

        Prime an uncovered zero. If its row has no starred zero, remember it
        as the start of the augmenting path and go to step 5. Otherwise cover
        the row and uncover the starred zero's column. When no uncovered
        zeros are left, go to step 6.
        """
        done = False
        while not done:
            noncover_r, noncover_c = self.__find_a_zero()
            if noncover_r == -1:
                done = True
                self.step = 6
            else:
                self.mask[noncover_r][noncover_c] = 2
                star_col = self.__find_star_in_row(noncover_r)
                if star_col != -1:
                    self.row_cover[noncover_r] = 1
                    self.col_cover[star_col] = 0
                else:
                    done = True
                    self.step = 5
                    # Z0: the uncovered primed zero that starts the path.
                    self.paths.append((noncover_r, noncover_c))

    def __find_star_in_col(self, col):
        """Return the row of the starred zero in `col`, or -1 if none."""
        for i in range(self.rows):
            if self.mask[i][col] == 1:
                return i
        return -1

    def __find_prime_in_row(self, row):
        """Return the column of the primed zero in `row`, or -1 if none."""
        for j in range(self.cols):
            if self.mask[row][j] == 2:
                return j
        return -1

    def step_five(self):
        """Augment along the alternating prime/star path starting at Z0.

        Build the series Z0 (primed), Z1 (star in Z0's column), Z2 (prime in
        Z1's row), ... until a primed zero with no star in its column.
        Unstar the stars, star the primes, erase all primes, clear all
        covers, and return to step 3.
        """
        done = False
        while not done:
            star_r = self.__find_star_in_col(self.paths[-1][1])
            if star_r > -1:
                self.paths.append( (star_r, self.paths[-1][1]) )
            else:
                done = True
            if not done:
                prime_c = self.__find_prime_in_row( self.paths[-1][0] )
                self.paths.append( (self.paths[-1][0], prime_c))
        # Augment path: flip star <-> prime along the series.
        for i, j in self.paths:
            if self.mask[i][j] == 1:
                self.mask[i][j] = 0
            else:
                self.mask[i][j] = 1
        # Clear covers.
        for i in range(self.rows):
            self.row_cover[i] = 0
        for j in range(self.cols):
            self.col_cover[j] = 0
        # Erase all remaining primes.
        for i in range(self.rows):
            for j in range(self.cols):
                if self.mask[i][j] == 2:
                    self.mask[i][j] = 0
        self.paths.clear()
        self.step = 3

    def step_six(self):
        """Adjust the matrix by the smallest uncovered value.

        Add it to every covered row and subtract it from every uncovered
        column, then return to step 4 (stars, primes and covers unchanged).
        """
        minval = 1 << 31
        for i in range(self.rows):
            for j in range(self.cols):
                if self.row_cover[i] == 0 and self.col_cover[j] == 0:
                    minval = min(self.run_cost[i][j], minval)
        for i in range(self.rows):
            for j in range(self.cols):
                if self.row_cover[i] == 1:
                    self.run_cost[i][j] += minval
                if self.col_cover[j] == 0:
                    self.run_cost[i][j] -= minval
        self.step = 4

    def step_seven(self):
        """Terminal step: stop the run loop (assignment complete)."""
        self.running = False

    def run(self):
        """Iterate the algorithm steps until step 7 marks completion."""
        while self.running:
            self.mp[self.step]()

    def get_result(self):
        """Return the assignment as a list: res[i] is the column chosen for row i.

        Returns None (after printing a diagnostic) if the starred zeros do
        not form a complete assignment.
        """
        res = []
        vis = [0] * self.cols
        for i in range(self.rows):
            for j in range(self.cols):
                if self.mask[i][j] == 1 and vis[j] == 0:
                    res.append(j)
                    vis[j] = 1
                    break
        if len(res) != self.rows:
            print("algorithm error ...")
            return None
        return res
if __name__ == "__main__":
    # Smoke test: square cost matrix whose cheapest assignment avoids the diagonal.
    demo_cost = [[1.2, 1.0, 1.0], [1.0, 1.2, 1.0], [1.0, 1.0, 1.2]]
    solver = Munkres(demo_cost)
    solver.run()
    print(solver.get_result())
| [
"numpy.array",
"numpy.sum",
"numpy.zeros"
] | [((898, 930), 'numpy.zeros', 'np.zeros', (['(self.rows, self.cols)'], {}), '((self.rows, self.cols))\n', (906, 930), True, 'import numpy as np\n'), ((956, 975), 'numpy.zeros', 'np.zeros', (['self.rows'], {}), '(self.rows)\n', (964, 975), True, 'import numpy as np\n'), ((1001, 1020), 'numpy.zeros', 'np.zeros', (['self.cols'], {}), '(self.cols)\n', (1009, 1020), True, 'import numpy as np\n'), ((2482, 2504), 'numpy.sum', 'np.sum', (['self.col_cover'], {}), '(self.col_cover)\n', (2488, 2504), True, 'import numpy as np\n'), ((385, 399), 'numpy.array', 'np.array', (['cost'], {}), '(cost)\n', (393, 399), True, 'import numpy as np\n')] |
"""
Module: utils.c3.c3s1_post_processing
Author: <NAME>
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
from scipy import signal
from numba import jit
@jit(nopython=True)
def log_compression(v, gamma=1):
    """Apply logarithmic compression to a value or array.

    Notebook: C3/C3S1_LogCompression.ipynb

    Args:
        v: Value or array to be compressed
        gamma: Compression factor (larger values compress more strongly)

    Returns:
        v_compressed: The compressed value or array, log(1 + gamma * v)
    """
    scaled = gamma * v
    return np.log(1 + scaled)
@jit(nopython=True)
def normalize_feature_sequence(X, norm='2', threshold=0.0001, v=None):
    """Normalizes the columns of a feature sequence

    Notebook: C3/C3S1_FeatureNormalization.ipynb

    Args:
        X: Feature sequence of shape (K, N); each of the N columns is
            normalized independently
        norm: The norm to be applied: '1', '2', 'max' or 'z'
        threshold: A threshold below which the vector `v` is used instead of
            normalization (avoids division by a (near-)zero norm)
        v: Used instead of normalization below `threshold`. If None, uses a
            unit vector for the given norm

    Returns:
        X_norm: Normalized feature sequence of shape (K, N)
    """
    assert norm in ['1', '2', 'max', 'z']
    K, N = X.shape
    X_norm = np.zeros((K, N))
    if norm == '1':
        if v is None:
            # default fallback: uniform vector with unit 1-norm
            v = np.ones(K, dtype=np.float64) / K
        for n in range(N):
            s = np.sum(np.abs(X[:, n]))
            if s > threshold:
                X_norm[:, n] = X[:, n] / s
            else:
                X_norm[:, n] = v
    if norm == '2':
        if v is None:
            # default fallback: uniform vector with unit 2-norm
            v = np.ones(K, dtype=np.float64) / np.sqrt(K)
        for n in range(N):
            s = np.sqrt(np.sum(X[:, n] ** 2))
            if s > threshold:
                X_norm[:, n] = X[:, n] / s
            else:
                X_norm[:, n] = v
    if norm == 'max':
        if v is None:
            # default fallback: all-ones vector (unit max-norm)
            v = np.ones(K, dtype=np.float64)
        for n in range(N):
            s = np.max(np.abs(X[:, n]))
            if s > threshold:
                X_norm[:, n] = X[:, n] / s
            else:
                X_norm[:, n] = v
    if norm == 'z':
        if v is None:
            # default fallback: all-zeros vector (the mean of a z-scored column)
            v = np.zeros(K, dtype=np.float64)
        for n in range(N):
            mu = np.sum(X[:, n]) / K
            # sample standard deviation (ddof = 1)
            sigma = np.sqrt(np.sum((X[:, n] - mu) ** 2) / (K - 1))
            if sigma > threshold:
                X_norm[:, n] = (X[:, n] - mu) / sigma
            else:
                X_norm[:, n] = v
    return X_norm
def smooth_downsample_feature_sequence(X, Fs, filt_len=41, down_sampling=10, w_type='boxcar'):
    """Smooth a feature sequence by filter-kernel convolution, then downsample.

    Notebook: C3/C3S1_FeatureSmoothing.ipynb

    Args:
        X: Feature sequence (features x frames)
        Fs: Frame rate of `X`
        filt_len: Length of the smoothing window
        down_sampling: Downsampling factor
        w_type: Window type of the smoothing filter

    Returns:
        X_smooth: Smoothed and downsampled feature sequence
        Fs_feature: Frame rate of `X_smooth`
    """
    # 1 x filt_len kernel so the convolution acts along the time axis only.
    kernel = signal.get_window(w_type, filt_len)[np.newaxis, :]
    smoothed = signal.convolve(X, kernel, mode='same') / filt_len
    X_smooth = smoothed[:, ::down_sampling]
    Fs_feature = Fs / down_sampling
    return X_smooth, Fs_feature
def median_downsample_feature_sequence(X, Fs, filt_len=41, down_sampling=10):
    """Smooth a feature sequence by median filtering, then downsample.

    Notebook: C3/C3S1_FeatureSmoothing.ipynb

    Args:
        X: Feature sequence (features x frames)
        Fs: Frame rate of `X`
        filt_len: Length of the median filter (must be odd)
        down_sampling: Downsampling factor

    Returns:
        X_smooth: Smoothed and downsampled feature sequence
        Fs_feature: Frame rate of `X_smooth`
    """
    assert filt_len % 2 == 1  # medfilt2d needs an odd kernel length
    # 1 x filt_len kernel: median filter along the time axis only.
    kernel_size = [1, filt_len]
    X_smooth = signal.medfilt2d(X, kernel_size)[:, ::down_sampling]
    Fs_feature = Fs / down_sampling
    return X_smooth, Fs_feature
| [
"numpy.abs",
"scipy.signal.medfilt2d",
"scipy.signal.convolve",
"numpy.sqrt",
"numpy.ones",
"numpy.log",
"numpy.sum",
"numpy.zeros",
"numba.jit",
"scipy.signal.get_window"
] | [((286, 304), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (289, 304), False, 'from numba import jit\n'), ((626, 644), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (629, 644), False, 'from numba import jit\n'), ((598, 619), 'numpy.log', 'np.log', (['(1 + gamma * v)'], {}), '(1 + gamma * v)\n', (604, 619), True, 'import numpy as np\n'), ((1267, 1283), 'numpy.zeros', 'np.zeros', (['(K, N)'], {}), '((K, N))\n', (1275, 1283), True, 'import numpy as np\n'), ((4054, 4083), 'scipy.signal.medfilt2d', 'signal.medfilt2d', (['X', 'filt_len'], {}), '(X, filt_len)\n', (4070, 4083), False, 'from scipy import signal\n'), ((3204, 3239), 'scipy.signal.get_window', 'signal.get_window', (['w_type', 'filt_len'], {}), '(w_type, filt_len)\n', (3221, 3239), False, 'from scipy import signal\n'), ((3265, 3309), 'scipy.signal.convolve', 'signal.convolve', (['X', 'filt_kernel'], {'mode': '"""same"""'}), "(X, filt_kernel, mode='same')\n", (3280, 3309), False, 'from scipy import signal\n'), ((1950, 1978), 'numpy.ones', 'np.ones', (['K'], {'dtype': 'np.float64'}), '(K, dtype=np.float64)\n', (1957, 1978), True, 'import numpy as np\n'), ((2239, 2268), 'numpy.zeros', 'np.zeros', (['K'], {'dtype': 'np.float64'}), '(K, dtype=np.float64)\n', (2247, 2268), True, 'import numpy as np\n'), ((1347, 1375), 'numpy.ones', 'np.ones', (['K'], {'dtype': 'np.float64'}), '(K, dtype=np.float64)\n', (1354, 1375), True, 'import numpy as np\n'), ((1432, 1447), 'numpy.abs', 'np.abs', (['X[:, n]'], {}), '(X[:, n])\n', (1438, 1447), True, 'import numpy as np\n'), ((1640, 1668), 'numpy.ones', 'np.ones', (['K'], {'dtype': 'np.float64'}), '(K, dtype=np.float64)\n', (1647, 1668), True, 'import numpy as np\n'), ((1671, 1681), 'numpy.sqrt', 'np.sqrt', (['K'], {}), '(K)\n', (1678, 1681), True, 'import numpy as np\n'), ((1735, 1755), 'numpy.sum', 'np.sum', (['(X[:, n] ** 2)'], {}), '(X[:, n] ** 2)\n', (1741, 1755), True, 'import numpy as np\n'), ((2031, 2046), 'numpy.abs', 
'np.abs', (['X[:, n]'], {}), '(X[:, n])\n', (2037, 2046), True, 'import numpy as np\n'), ((2315, 2330), 'numpy.sum', 'np.sum', (['X[:, n]'], {}), '(X[:, n])\n', (2321, 2330), True, 'import numpy as np\n'), ((2364, 2391), 'numpy.sum', 'np.sum', (['((X[:, n] - mu) ** 2)'], {}), '((X[:, n] - mu) ** 2)\n', (2370, 2391), True, 'import numpy as np\n')] |
from ir_sim.world import obs_circle
from math import pi, cos, sin
import numpy as np
from collections import namedtuple
from ir_sim.util import collision_cir_cir, collision_cir_matrix, collision_cir_seg, reciprocal_vel_obs
class env_obs_cir:
def __init__(self, obs_cir_class=obs_circle, obs_model='static', obs_cir_num=1, dist_mode = 0, step_time=0.1, components=[], **kwargs):
self.obs_cir_class = obs_cir_class
self.obs_num = obs_cir_num
self.dist_mode = dist_mode
self.obs_cir_list = []
self.components = components
self.obs_model = obs_model # 'static' 'dynamic'
self.obs_square = kwargs.get('obs_square', [0, 0, 10, 10])
self.obs_interval = kwargs.get('obs_interval', 1)
if self.obs_num > 0:
if self.dist_mode == 0:
assert 'obs_radius_list' and 'obs_state_list' in kwargs.keys()
obs_radius_list = kwargs['obs_radius_list']
obs_state_list = kwargs['obs_state_list']
obs_goal_list = kwargs.get('obs_goal_list', [0]*self.obs_num)
if len(obs_radius_list) < self.obs_num:
temp_end = obs_radius_list[-1]
obs_radius_list += [temp_end for i in range(self.obs_num - len(obs_radius_list))]
else:
obs_radius_list = kwargs.get('obs_radius_list', [0.2])
obs_state_list, obs_goal_list, obs_radius_list = self.obs_state_dis(obs_init_mode=self.dist_mode, radius=obs_radius_list[0], **kwargs)
if self.obs_model == 'dynamic':
self.rvo = reciprocal_vel_obs(vxmax = 1.5, vymax = 1.5, **kwargs)
for i in range(self.obs_num):
obs_cir = self.obs_cir_class(id=i, state=obs_state_list[i], radius=obs_radius_list[i], step_time=step_time, obs_model=obs_model, goal=obs_goal_list[i], **kwargs)
self.obs_cir_list.append(obs_cir)
def step_wander(self, **kwargs):
ts = self.obs_total_states()
rvo_vel_list = list(map(lambda agent_s: self.rvo.cal_vel(agent_s, nei_state_list=ts[1]), ts[0]))
arrive_flag = False
for i, obs_cir in enumerate(self.obs_cir_list):
obs_cir.move_forward(rvo_vel_list[i], **kwargs)
if obs_cir.arrive():
arrive_flag = True
if arrive_flag:
goal_list = self.random_goal(**kwargs)
for i, obs_cir in enumerate(self.obs_cir_list):
obs_cir.goal = goal_list[i]
def obs_state_dis(self, obs_init_mode=1, radius=0.2, circular=[5, 5, 4], min_radius=0.2, max_radius=1, **kwargs):
# init_mode: 1 single row
# 2 random
# 3 circular
# square area: x_min, y_min, x_max, y_max
# circular area: x, y, radius
self.random_bear = kwargs.get('random_bear', False)
random_radius = kwargs.get('random_radius', False)
num = self.obs_num
state_list, goal_list = [], []
if obs_init_mode == 1:
# single row
state_list = [np.array([ [i * self.obs_interval], [self.obs_square[1]]]) for i in range(int(self.obs_square[0]), int(self.obs_square[0])+num)]
goal_list = [np.array([ [i * self.obs_interval], [self.obs_square[3]] ]) for i in range(int(self.obs_square[0]), int(self.obs_square[0])+num)]
goal_list.reverse()
elif obs_init_mode == 2:
# random
state_list, goal_list = self.random_start_goal(**kwargs)
elif obs_init_mode == 3:
# circular
circle_point = np.array(circular)
theta_step = 2*pi / num
theta = 0
while theta < 2*pi:
state = circle_point + np.array([ cos(theta) * circular[2], sin(theta) * circular[2], theta + pi- circular[2] ])
goal = circle_point[0:2] + np.array([cos(theta+pi), sin(theta+pi)]) * circular[2]
theta = theta + theta_step
state_list.append(state[:, np.newaxis])
goal_list.append(goal[:, np.newaxis])
if random_radius:
radius_list = np.random.uniform(low = min_radius, high = max_radius, size = (num,))
else:
radius_list = [radius for i in range(num)]
return state_list, goal_list, radius_list
def random_start_goal(self, **kwargs):
num = self.obs_num
random_list = []
goal_list = []
while len(random_list) < 2*num:
new_point = np.random.uniform(low = self.obs_square[0:2], high = self.obs_square[2:4], size = (1, 2)).T
if not self.check_collision(new_point, random_list, self.components, self.obs_interval):
random_list.append(new_point)
start_list = random_list[0 : num]
goal_list = random_list[num : 2 * num]
return start_list, goal_list
def random_goal(self, **kwargs):
num = self.obs_num
random_list = []
while len(random_list) < num:
new_point = np.random.uniform(low = self.obs_square[0:2], high = self.obs_square[2:4], size = (1, 2)).T
if not self.check_collision(new_point, random_list, self.components, self.obs_interval):
random_list.append(new_point)
return random_list
def check_collision(self, check_point, point_list, components, range):
circle = namedtuple('circle', 'x y r')
point = namedtuple('point', 'x y')
self_circle = circle(check_point[0, 0], check_point[1, 0], range/2)
# check collision with map
if collision_cir_matrix(self_circle, components['map_matrix'], components['xy_reso'], components['offset']):
return True
# check collision with line obstacles
for line in components['obs_lines'].obs_line_states:
segment = [point(line[0], line[1]), point(line[2], line[3])]
if collision_cir_seg(self_circle, segment):
return True
for point in point_list:
if self.distance(check_point, point) < range:
return True
return False
def distance(self, point1, point2):
diff = point2[0:2] - point1[0:2]
return np.linalg.norm(diff)
    def obs_total_states(self):
        # Collect flattened (squeezed) state vectors of every obstacle agent:
        # omni_state() for the agents themselves, omni_obs_state() for how they
        # appear as neighbours. Presumably 1-D arrays after np.squeeze -- TODO
        # confirm against the obs_cir agent class.
        agent_state_list = list(map(lambda a: np.squeeze( a.omni_state()), self.obs_cir_list))
        nei_state_list = list(map(lambda a: np.squeeze( a.omni_obs_state()), self.obs_cir_list))
        return agent_state_list, nei_state_list | [
"collections.namedtuple",
"ir_sim.util.collision_cir_matrix",
"numpy.linalg.norm",
"ir_sim.util.reciprocal_vel_obs",
"math.cos",
"numpy.array",
"ir_sim.util.collision_cir_seg",
"numpy.random.uniform",
"math.sin"
] | [((5447, 5476), 'collections.namedtuple', 'namedtuple', (['"""circle"""', '"""x y r"""'], {}), "('circle', 'x y r')\n", (5457, 5476), False, 'from collections import namedtuple\n'), ((5493, 5519), 'collections.namedtuple', 'namedtuple', (['"""point"""', '"""x y"""'], {}), "('point', 'x y')\n", (5503, 5519), False, 'from collections import namedtuple\n'), ((5644, 5753), 'ir_sim.util.collision_cir_matrix', 'collision_cir_matrix', (['self_circle', "components['map_matrix']", "components['xy_reso']", "components['offset']"], {}), "(self_circle, components['map_matrix'], components[\n 'xy_reso'], components['offset'])\n", (5664, 5753), False, 'from ir_sim.util import collision_cir_cir, collision_cir_matrix, collision_cir_seg, reciprocal_vel_obs\n'), ((6298, 6318), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (6312, 6318), True, 'import numpy as np\n'), ((1618, 1668), 'ir_sim.util.reciprocal_vel_obs', 'reciprocal_vel_obs', ([], {'vxmax': '(1.5)', 'vymax': '(1.5)'}), '(vxmax=1.5, vymax=1.5, **kwargs)\n', (1636, 1668), False, 'from ir_sim.util import collision_cir_cir, collision_cir_matrix, collision_cir_seg, reciprocal_vel_obs\n'), ((4167, 4230), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_radius', 'high': 'max_radius', 'size': '(num,)'}), '(low=min_radius, high=max_radius, size=(num,))\n', (4184, 4230), True, 'import numpy as np\n'), ((5970, 6009), 'ir_sim.util.collision_cir_seg', 'collision_cir_seg', (['self_circle', 'segment'], {}), '(self_circle, segment)\n', (5987, 6009), False, 'from ir_sim.util import collision_cir_cir, collision_cir_matrix, collision_cir_seg, reciprocal_vel_obs\n'), ((3100, 3157), 'numpy.array', 'np.array', (['[[i * self.obs_interval], [self.obs_square[1]]]'], {}), '([[i * self.obs_interval], [self.obs_square[1]]])\n', (3108, 3157), True, 'import numpy as np\n'), ((3254, 3311), 'numpy.array', 'np.array', (['[[i * self.obs_interval], [self.obs_square[3]]]'], {}), '([[i * self.obs_interval], 
[self.obs_square[3]]])\n', (3262, 3311), True, 'import numpy as np\n'), ((4542, 4630), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.obs_square[0:2]', 'high': 'self.obs_square[2:4]', 'size': '(1, 2)'}), '(low=self.obs_square[0:2], high=self.obs_square[2:4], size\n =(1, 2))\n', (4559, 4630), True, 'import numpy as np\n'), ((5085, 5173), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.obs_square[0:2]', 'high': 'self.obs_square[2:4]', 'size': '(1, 2)'}), '(low=self.obs_square[0:2], high=self.obs_square[2:4], size\n =(1, 2))\n', (5102, 5173), True, 'import numpy as np\n'), ((3624, 3642), 'numpy.array', 'np.array', (['circular'], {}), '(circular)\n', (3632, 3642), True, 'import numpy as np\n'), ((3784, 3794), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (3787, 3794), False, 'from math import pi, cos, sin\n'), ((3810, 3820), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (3813, 3820), False, 'from math import pi, cos, sin\n'), ((3916, 3931), 'math.cos', 'cos', (['(theta + pi)'], {}), '(theta + pi)\n', (3919, 3931), False, 'from math import pi, cos, sin\n'), ((3931, 3946), 'math.sin', 'sin', (['(theta + pi)'], {}), '(theta + pi)\n', (3934, 3946), False, 'from math import pi, cos, sin\n')] |
from math import sqrt, atan
import pytest
from pytest import approx
import ts2vg
import numpy as np
@pytest.fixture
def empty_ts():
    """Fixture: an empty time series."""
    return list()


@pytest.fixture
def sample_ts():
    """Fixture: a short sample time series shared by the tests below."""
    return [3.0, 4.0, 2.0, 1.0]
def test_basic(sample_ts):
    """The undirected NVG on the sample series has the expected edge set."""
    graph = ts2vg.NaturalVG()
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1),
        (1, 2),
        (1, 3),
        (2, 3),
    ]
    assert sorted(sorted(e) for e in got) == sorted(sorted(e) for e in expected)


def test_left_to_right(sample_ts):
    """Left-to-right directed NVG orders each edge by time index."""
    graph = ts2vg.NaturalVG(directed='left_to_right')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1),
        (1, 2),
        (1, 3),
        (2, 3),
    ]
    assert sorted(got) == sorted(expected)
def test_left_to_right_distance(sample_ts):
    """Weights: Euclidean distance between the joined samples."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(sqrt(2.0))),
        (1, 2, approx(sqrt(5.0))),
        (1, 3, approx(sqrt(13.0))),
        (2, 3, approx(sqrt(2.0))),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_sq_distance(sample_ts):
    """Weights: squared Euclidean distance."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='sq_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(2.0)),
        (1, 2, approx(5.0)),
        (1, 3, approx(13.0)),
        (2, 3, approx(2.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_v_distance(sample_ts):
    """Weights: signed vertical (value) difference."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='v_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(-2.0)),
        (1, 3, approx(-3.0)),
        (2, 3, approx(-1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_abs_v_distance(sample_ts):
    """Weights: absolute vertical (value) difference."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_v_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(2.0)),
        (1, 3, approx(3.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_h_distance(sample_ts):
    """Weights: signed horizontal (time) difference."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='h_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(1.0)),
        (1, 3, approx(2.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_abs_h_distance(sample_ts):
    """Weights: absolute horizontal (time) difference."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_h_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(1.0)),
        (1, 3, approx(2.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)
def test_left_to_right_slope(sample_ts):
    """Weights: signed slope of the connecting segment."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='slope')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(-2.0)),
        (1, 3, approx(-1.5)),
        (2, 3, approx(-1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_abs_slope(sample_ts):
    """Weights: absolute slope of the connecting segment."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_slope')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(1.0)),
        (1, 2, approx(2.0)),
        (1, 3, approx(1.5)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_angle(sample_ts):
    """Weights: signed angle (arctan of the slope)."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='angle')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(atan(1.0))),
        (1, 2, approx(atan(-2.0))),
        (1, 3, approx(atan(-1.5))),
        (2, 3, approx(atan(-1.0))),
    ]
    assert sorted(got) == sorted(expected)


def test_left_to_right_abs_angle(sample_ts):
    """Weights: absolute angle (arctan of the absolute slope)."""
    graph = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_angle')
    got = graph.build(sample_ts).edges
    expected = [
        (0, 1, approx(atan(1.0))),
        (1, 2, approx(atan(2.0))),
        (1, 3, approx(atan(1.5))),
        (2, 3, approx(atan(1.0))),
    ]
    assert sorted(got) == sorted(expected)
def test_top_to_bottom(sample_ts):
    """Top-to-bottom directed NVG orders each edge by decreasing value."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0),
        (1, 2),
        (1, 3),
        (2, 3),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_distance(sample_ts):
    """Weights: Euclidean distance, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(sqrt(2.0))),
        (1, 2, approx(sqrt(5.0))),
        (1, 3, approx(sqrt(13.0))),
        (2, 3, approx(sqrt(2.0))),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_sq_distance(sample_ts):
    """Weights: squared Euclidean distance, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='sq_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(2.0)),
        (1, 2, approx(5.0)),
        (1, 3, approx(13.0)),
        (2, 3, approx(2.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_v_distance(sample_ts):
    """Weights: signed vertical difference, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='v_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(-1.0)),
        (1, 2, approx(-2.0)),
        (1, 3, approx(-3.0)),
        (2, 3, approx(-1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_abs_v_distance(sample_ts):
    """Weights: absolute vertical difference, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_v_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(1.0)),
        (1, 2, approx(2.0)),
        (1, 3, approx(3.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_h_distance(sample_ts):
    """Weights: signed horizontal difference, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='h_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(-1.0)),
        (1, 2, approx(1.0)),
        (1, 3, approx(2.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_abs_h_distance(sample_ts):
    """Weights: absolute horizontal difference, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_h_distance')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(1.0)),
        (1, 2, approx(1.0)),
        (1, 3, approx(2.0)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)
def test_top_to_bottom_slope(sample_ts):
    """Weights: signed slope, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='slope')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(1.0)),
        (1, 2, approx(-2.0)),
        (1, 3, approx(-1.5)),
        (2, 3, approx(-1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_abs_slope(sample_ts):
    """Weights: absolute slope, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_slope')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(1.0)),
        (1, 2, approx(2.0)),
        (1, 3, approx(1.5)),
        (2, 3, approx(1.0)),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_angle(sample_ts):
    """Weights: signed angle, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='angle')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(atan(1.0))),
        (1, 2, approx(atan(-2.0))),
        (1, 3, approx(atan(-1.5))),
        (2, 3, approx(atan(-1.0))),
    ]
    assert sorted(got) == sorted(expected)


def test_top_to_bottom_abs_angle(sample_ts):
    """Weights: absolute angle, top-to-bottom direction."""
    graph = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_angle')
    got = graph.build(sample_ts).edges
    expected = [
        (1, 0, approx(atan(1.0))),
        (1, 2, approx(atan(2.0))),
        (1, 3, approx(atan(1.5))),
        (2, 3, approx(atan(1.0))),
    ]
    assert sorted(got) == sorted(expected)
def test_adjacency_matrix(sample_ts):
    """Upper-triangular adjacency matrix of the undirected NVG."""
    got = ts2vg.NaturalVG().build(sample_ts).adjacency_matrix(triangle='upper')
    expected = [
        [0, 1, 0, 0],
        [0, 0, 1, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 0],
    ]
    np.testing.assert_array_equal(got, expected)


def test_degrees(sample_ts):
    """Per-node degrees of the undirected NVG."""
    got = ts2vg.NaturalVG().build(sample_ts).degrees
    np.testing.assert_array_equal(got, [1, 3, 2, 2])
def test_not_built():
    """Accessing edges before build() raises NotBuiltError."""
    with pytest.raises(ts2vg.graph.base.NotBuiltError):
        ts2vg.NaturalVG().edges


def test_empty_ts(empty_ts):
    """An empty series yields a graph with no edges."""
    assert ts2vg.NaturalVG().build(empty_ts).edges == []
def test_with_xs(sample_ts):
    """Explicit, unevenly spaced x coordinates change visibility."""
    xs = [0., 1., 2., 2.1]
    got = ts2vg.NaturalVG().build(sample_ts, xs=xs).edges
    expected = [
        (0, 1),
        (1, 2),
        (2, 3),
    ]
    assert sorted(sorted(e) for e in got) == sorted(sorted(e) for e in expected)


def test_with_incompatible_xs(sample_ts):
    """An xs of different length than the series is rejected."""
    xs = [0., 1., 2., 3., 4., 5., 6.]
    with pytest.raises(ValueError):
        ts2vg.NaturalVG().build(sample_ts, xs=xs)


def test_with_non_monotonic_increasing_xs(sample_ts):
    """xs must be monotonically increasing."""
    xs = [0., 4., 2., 3.]
    with pytest.raises(ValueError):
        ts2vg.NaturalVG().build(sample_ts, xs=xs)
| [
"pytest.approx",
"ts2vg.NaturalVG",
"math.sqrt",
"pytest.raises",
"math.atan",
"numpy.testing.assert_array_equal"
] | [((8103, 8152), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out_got', 'out_truth'], {}), '(out_got, out_truth)\n', (8132, 8152), True, 'import numpy as np\n'), ((8276, 8325), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out_got', 'out_truth'], {}), '(out_got, out_truth)\n', (8305, 8325), True, 'import numpy as np\n'), ((8359, 8404), 'pytest.raises', 'pytest.raises', (['ts2vg.graph.base.NotBuiltError'], {}), '(ts2vg.graph.base.NotBuiltError)\n', (8372, 8404), False, 'import pytest\n'), ((8949, 8974), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8962, 8974), False, 'import pytest\n'), ((9118, 9143), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9131, 9143), False, 'import pytest\n'), ((1274, 1285), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (1280, 1285), False, 'from pytest import approx\n'), ((1302, 1313), 'pytest.approx', 'approx', (['(5.0)'], {}), '(5.0)\n', (1308, 1313), False, 'from pytest import approx\n'), ((1330, 1342), 'pytest.approx', 'approx', (['(13.0)'], {}), '(13.0)\n', (1336, 1342), False, 'from pytest import approx\n'), ((1359, 1370), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (1365, 1370), False, 'from pytest import approx\n'), ((1611, 1622), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (1617, 1622), False, 'from pytest import approx\n'), ((1639, 1651), 'pytest.approx', 'approx', (['(-2.0)'], {}), '(-2.0)\n', (1645, 1651), False, 'from pytest import approx\n'), ((1668, 1680), 'pytest.approx', 'approx', (['(-3.0)'], {}), '(-3.0)\n', (1674, 1680), False, 'from pytest import approx\n'), ((1697, 1709), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (1703, 1709), False, 'from pytest import approx\n'), ((1958, 1969), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (1964, 1969), False, 'from pytest import approx\n'), ((1986, 1997), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', 
(1992, 1997), False, 'from pytest import approx\n'), ((2014, 2025), 'pytest.approx', 'approx', (['(3.0)'], {}), '(3.0)\n', (2020, 2025), False, 'from pytest import approx\n'), ((2042, 2053), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2048, 2053), False, 'from pytest import approx\n'), ((2294, 2305), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2300, 2305), False, 'from pytest import approx\n'), ((2322, 2333), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2328, 2333), False, 'from pytest import approx\n'), ((2350, 2361), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (2356, 2361), False, 'from pytest import approx\n'), ((2378, 2389), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2384, 2389), False, 'from pytest import approx\n'), ((2638, 2649), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2644, 2649), False, 'from pytest import approx\n'), ((2666, 2677), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2672, 2677), False, 'from pytest import approx\n'), ((2694, 2705), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (2700, 2705), False, 'from pytest import approx\n'), ((2722, 2733), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2728, 2733), False, 'from pytest import approx\n'), ((2964, 2975), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2970, 2975), False, 'from pytest import approx\n'), ((2992, 3004), 'pytest.approx', 'approx', (['(-2.0)'], {}), '(-2.0)\n', (2998, 3004), False, 'from pytest import approx\n'), ((3021, 3033), 'pytest.approx', 'approx', (['(-1.5)'], {}), '(-1.5)\n', (3027, 3033), False, 'from pytest import approx\n'), ((3051, 3063), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (3057, 3063), False, 'from pytest import approx\n'), ((3302, 3313), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (3308, 3313), False, 'from pytest import approx\n'), ((3330, 3341), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (3336, 3341), False, 
'from pytest import approx\n'), ((3358, 3369), 'pytest.approx', 'approx', (['(1.5)'], {}), '(1.5)\n', (3364, 3369), False, 'from pytest import approx\n'), ((3387, 3398), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (3393, 3398), False, 'from pytest import approx\n'), ((4965, 4976), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (4971, 4976), False, 'from pytest import approx\n'), ((4993, 5004), 'pytest.approx', 'approx', (['(5.0)'], {}), '(5.0)\n', (4999, 5004), False, 'from pytest import approx\n'), ((5021, 5033), 'pytest.approx', 'approx', (['(13.0)'], {}), '(13.0)\n', (5027, 5033), False, 'from pytest import approx\n'), ((5050, 5061), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (5056, 5061), False, 'from pytest import approx\n'), ((5302, 5314), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (5308, 5314), False, 'from pytest import approx\n'), ((5331, 5343), 'pytest.approx', 'approx', (['(-2.0)'], {}), '(-2.0)\n', (5337, 5343), False, 'from pytest import approx\n'), ((5360, 5372), 'pytest.approx', 'approx', (['(-3.0)'], {}), '(-3.0)\n', (5366, 5372), False, 'from pytest import approx\n'), ((5389, 5401), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (5395, 5401), False, 'from pytest import approx\n'), ((5650, 5661), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (5656, 5661), False, 'from pytest import approx\n'), ((5678, 5689), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (5684, 5689), False, 'from pytest import approx\n'), ((5706, 5717), 'pytest.approx', 'approx', (['(3.0)'], {}), '(3.0)\n', (5712, 5717), False, 'from pytest import approx\n'), ((5734, 5745), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (5740, 5745), False, 'from pytest import approx\n'), ((5986, 5998), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (5992, 5998), False, 'from pytest import approx\n'), ((6015, 6026), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6021, 6026), False, 'from pytest import 
approx\n'), ((6043, 6054), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (6049, 6054), False, 'from pytest import approx\n'), ((6071, 6082), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6077, 6082), False, 'from pytest import approx\n'), ((6331, 6342), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6337, 6342), False, 'from pytest import approx\n'), ((6359, 6370), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6365, 6370), False, 'from pytest import approx\n'), ((6387, 6398), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (6393, 6398), False, 'from pytest import approx\n'), ((6415, 6426), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6421, 6426), False, 'from pytest import approx\n'), ((6657, 6668), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (6663, 6668), False, 'from pytest import approx\n'), ((6685, 6697), 'pytest.approx', 'approx', (['(-2.0)'], {}), '(-2.0)\n', (6691, 6697), False, 'from pytest import approx\n'), ((6714, 6726), 'pytest.approx', 'approx', (['(-1.5)'], {}), '(-1.5)\n', (6720, 6726), False, 'from pytest import approx\n'), ((6744, 6756), 'pytest.approx', 'approx', (['(-1.0)'], {}), '(-1.0)\n', (6750, 6756), False, 'from pytest import approx\n'), ((6995, 7006), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (7001, 7006), False, 'from pytest import approx\n'), ((7023, 7034), 'pytest.approx', 'approx', (['(2.0)'], {}), '(2.0)\n', (7029, 7034), False, 'from pytest import approx\n'), ((7051, 7062), 'pytest.approx', 'approx', (['(1.5)'], {}), '(1.5)\n', (7057, 7062), False, 'from pytest import approx\n'), ((7080, 7091), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (7086, 7091), False, 'from pytest import approx\n'), ((8414, 8431), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (8429, 8431), False, 'import ts2vg\n'), ((260, 277), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (275, 277), False, 'import ts2vg\n'), ((528, 569), 'ts2vg.NaturalVG', 
'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""'}), "(directed='left_to_right')\n", (543, 569), False, 'import ts2vg\n'), ((791, 853), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""distance"""'}), "(directed='left_to_right', weighted='distance')\n", (806, 853), False, 'import ts2vg\n'), ((918, 927), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (922, 927), False, 'from math import sqrt, atan\n'), ((952, 961), 'math.sqrt', 'sqrt', (['(5.0)'], {}), '(5.0)\n', (956, 961), False, 'from math import sqrt, atan\n'), ((986, 996), 'math.sqrt', 'sqrt', (['(13.0)'], {}), '(13.0)\n', (990, 996), False, 'from math import sqrt, atan\n'), ((1021, 1030), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (1025, 1030), False, 'from math import sqrt, atan\n'), ((1151, 1216), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""sq_distance"""'}), "(directed='left_to_right', weighted='sq_distance')\n", (1166, 1216), False, 'import ts2vg\n'), ((1489, 1553), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""v_distance"""'}), "(directed='left_to_right', weighted='v_distance')\n", (1504, 1553), False, 'import ts2vg\n'), ((1832, 1900), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""abs_v_distance"""'}), "(directed='left_to_right', weighted='abs_v_distance')\n", (1847, 1900), False, 'import ts2vg\n'), ((2172, 2236), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""h_distance"""'}), "(directed='left_to_right', weighted='h_distance')\n", (2187, 2236), False, 'import ts2vg\n'), ((2512, 2580), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""abs_h_distance"""'}), "(directed='left_to_right', weighted='abs_h_distance')\n", (2527, 2580), False, 'import ts2vg\n'), ((2847, 2906), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', 
([], {'directed': '"""left_to_right"""', 'weighted': '"""slope"""'}), "(directed='left_to_right', weighted='slope')\n", (2862, 2906), False, 'import ts2vg\n'), ((3181, 3244), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""abs_slope"""'}), "(directed='left_to_right', weighted='abs_slope')\n", (3196, 3244), False, 'import ts2vg\n'), ((3512, 3571), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""angle"""'}), "(directed='left_to_right', weighted='angle')\n", (3527, 3571), False, 'import ts2vg\n'), ((3636, 3645), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (3640, 3645), False, 'from math import sqrt, atan\n'), ((3670, 3680), 'math.atan', 'atan', (['(-2.0)'], {}), '(-2.0)\n', (3674, 3680), False, 'from math import sqrt, atan\n'), ((3705, 3715), 'math.atan', 'atan', (['(-1.5)'], {}), '(-1.5)\n', (3709, 3715), False, 'from math import sqrt, atan\n'), ((3741, 3751), 'math.atan', 'atan', (['(-1.0)'], {}), '(-1.0)\n', (3745, 3751), False, 'from math import sqrt, atan\n'), ((3870, 3933), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""left_to_right"""', 'weighted': '"""abs_angle"""'}), "(directed='left_to_right', weighted='abs_angle')\n", (3885, 3933), False, 'import ts2vg\n'), ((3998, 4007), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (4002, 4007), False, 'from math import sqrt, atan\n'), ((4032, 4041), 'math.atan', 'atan', (['(2.0)'], {}), '(2.0)\n', (4036, 4041), False, 'from math import sqrt, atan\n'), ((4066, 4075), 'math.atan', 'atan', (['(1.5)'], {}), '(1.5)\n', (4070, 4075), False, 'from math import sqrt, atan\n'), ((4101, 4110), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (4105, 4110), False, 'from math import sqrt, atan\n'), ((4219, 4260), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""'}), "(directed='top_to_bottom')\n", (4234, 4260), False, 'import ts2vg\n'), ((4482, 4544), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], 
{'directed': '"""top_to_bottom"""', 'weighted': '"""distance"""'}), "(directed='top_to_bottom', weighted='distance')\n", (4497, 4544), False, 'import ts2vg\n'), ((4609, 4618), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4613, 4618), False, 'from math import sqrt, atan\n'), ((4643, 4652), 'math.sqrt', 'sqrt', (['(5.0)'], {}), '(5.0)\n', (4647, 4652), False, 'from math import sqrt, atan\n'), ((4677, 4687), 'math.sqrt', 'sqrt', (['(13.0)'], {}), '(13.0)\n', (4681, 4687), False, 'from math import sqrt, atan\n'), ((4712, 4721), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4716, 4721), False, 'from math import sqrt, atan\n'), ((4842, 4907), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""sq_distance"""'}), "(directed='top_to_bottom', weighted='sq_distance')\n", (4857, 4907), False, 'import ts2vg\n'), ((5180, 5244), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""v_distance"""'}), "(directed='top_to_bottom', weighted='v_distance')\n", (5195, 5244), False, 'import ts2vg\n'), ((5524, 5592), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""abs_v_distance"""'}), "(directed='top_to_bottom', weighted='abs_v_distance')\n", (5539, 5592), False, 'import ts2vg\n'), ((5864, 5928), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""h_distance"""'}), "(directed='top_to_bottom', weighted='h_distance')\n", (5879, 5928), False, 'import ts2vg\n'), ((6205, 6273), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""abs_h_distance"""'}), "(directed='top_to_bottom', weighted='abs_h_distance')\n", (6220, 6273), False, 'import ts2vg\n'), ((6540, 6599), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""slope"""'}), "(directed='top_to_bottom', weighted='slope')\n", (6555, 6599), False, 'import ts2vg\n'), ((6874, 6937), 
'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""abs_slope"""'}), "(directed='top_to_bottom', weighted='abs_slope')\n", (6889, 6937), False, 'import ts2vg\n'), ((7205, 7264), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""angle"""'}), "(directed='top_to_bottom', weighted='angle')\n", (7220, 7264), False, 'import ts2vg\n'), ((7329, 7338), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (7333, 7338), False, 'from math import sqrt, atan\n'), ((7363, 7373), 'math.atan', 'atan', (['(-2.0)'], {}), '(-2.0)\n', (7367, 7373), False, 'from math import sqrt, atan\n'), ((7398, 7408), 'math.atan', 'atan', (['(-1.5)'], {}), '(-1.5)\n', (7402, 7408), False, 'from math import sqrt, atan\n'), ((7434, 7444), 'math.atan', 'atan', (['(-1.0)'], {}), '(-1.0)\n', (7438, 7444), False, 'from math import sqrt, atan\n'), ((7563, 7626), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {'directed': '"""top_to_bottom"""', 'weighted': '"""abs_angle"""'}), "(directed='top_to_bottom', weighted='abs_angle')\n", (7578, 7626), False, 'import ts2vg\n'), ((7691, 7700), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (7695, 7700), False, 'from math import sqrt, atan\n'), ((7725, 7734), 'math.atan', 'atan', (['(2.0)'], {}), '(2.0)\n', (7729, 7734), False, 'from math import sqrt, atan\n'), ((7759, 7768), 'math.atan', 'atan', (['(1.5)'], {}), '(1.5)\n', (7763, 7768), False, 'from math import sqrt, atan\n'), ((7794, 7803), 'math.atan', 'atan', (['(1.0)'], {}), '(1.0)\n', (7798, 7803), False, 'from math import sqrt, atan\n'), ((8198, 8215), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (8213, 8215), False, 'import ts2vg\n'), ((8483, 8500), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (8498, 8500), False, 'import ts2vg\n'), ((8649, 8666), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (8664, 8666), False, 'import ts2vg\n'), ((8984, 9001), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), 
'()\n', (8999, 9001), False, 'import ts2vg\n'), ((9153, 9170), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (9168, 9170), False, 'import ts2vg\n'), ((7915, 7932), 'ts2vg.NaturalVG', 'ts2vg.NaturalVG', ([], {}), '()\n', (7930, 7932), False, 'import ts2vg\n')] |
#!/usr/bin/env python
"""
Custom functions for identifiability analysis to calculate
and plot confidence intervals based on a profile-likelihood analysis. Adapted
from lmfit, with custom functions to select the range for parameter scanning and
for plotting the profile likelihood.
"""
from collections import OrderedDict
from lmfit.minimizer import Minimizer, MinimizerResult, MinimizerException
from lmfit.model import ModelResult
import numpy as np
import scipy as sp
import math
from matplotlib import pyplot as plt
from multiprocessing import Pool
__version__ = '0.3.3dev1'
CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
class ConfidenceInterval:
"""Class used to calculate the confidence interval."""
    def __init__(self, minimizer, result, p_names=None, log=False):
        """Set up a profile-likelihood CI calculation for a fitted result.

        Args:
            minimizer: the ``lmfit`` Minimizer (or ModelResult) used for the fit.
            result: the best-fit MinimizerResult (or ModelResult).
            p_names: parameter names to profile; defaults to every varying
                parameter in ``result.params``.
            log: if True, scan parameter values on a logarithmic grid.

        Raises:
            AssertionError: if minimizer/result have the wrong type.
                NOTE(review): ``assert`` is stripped under ``python -O``.
            MinimizerException: if fewer than 2 parameters vary.
        """
        assert isinstance(minimizer, Minimizer) or isinstance(
            minimizer, ModelResult
        ), 'minimizer must be instance of `lmfit.minimizer.Minimizer` or `lmfit.model.ModelResult`'
        assert isinstance(result, MinimizerResult) or isinstance(
            result, ModelResult
        ), 'result must be instance of `lmfit.minimizer.MinimizerResult` or `lmfit.model.ModelResult`'
        self.minimizer = minimizer
        self.result = result
        self.params = result.params.copy()
        # Remember each parameter's best-fit value/stderr so _reset_vals()
        # can restore them after the profile scans.
        self.org = {}
        for para_key in self.params:
            self.org[para_key] = (
                self.params[para_key].value,
                self.params[para_key].stderr,
            )
        self.best_chi = result.chisqr
        if not p_names:
            p_names = [i for i in self.params if self.params[i].vary]
        self.p_names = p_names
        self.fit_params = [self.params[p] for p in self.p_names]
        self.log = log
        self._traces_calculated = False  # traces are computed lazily by calc_all_ci
        self._k = 2  # degree of smoothing spline
        # check that there are at least 2 true variables!
        nvars = len([p for p in self.params.values() if p.vary])
        if nvars < 2:
            raise MinimizerException(CONF_ERR_NVARS)
        # per-parameter storage for scan values, dchi and fit results
        self.trace_dict = {i: {} for i in self.p_names}
def calc_all_ci(self, limits=0.5, points=11, prob=0.95, method='leastsq', mp=True):
"""Calculate all confidence intervals."""
assert (
(type(prob) == float) & (prob > 0) & (prob < 1)
), 'Please provide a probability value between 0 and 1.'
self.prob = prob
self.method = method
self.ci_values = OrderedDict()
self.threshold = self._calc_threshold()
if not self._traces_calculated:
self._populate_traces(limits, points, mp)
for p in self.p_names:
self.ci_values[p] = self._process_ci(p)
return self.ci_values
    def _populate_traces(self, limits, points, mp):
        """Profile each parameter: fix it on a value grid and re-fit the rest.

        For every parameter in ``self.p_names`` a grid of ``points`` values is
        scanned -- linearly in ``[limits*best, (2-limits)*best]``, or
        geometrically in ``[best*limits, best/limits]`` when ``self.log`` is
        set -- and the normalised delta chi-square of each constrained re-fit
        is stored in ``self.trace_dict``.

        Args:
            limits: scan range factor around the best-fit value.
            points: number of grid points per parameter.
            mp: if True, run the constrained re-fits in a multiprocessing pool.
        """
        if mp:
            proc_pool = Pool()
            arl = []
        results = []
        for para in self.p_names:
            if isinstance(para, str):
                para = self.params[para]
            if self.log:
                para_vals = np.logspace(
                    np.log10(para.value * limits), np.log10(para.value / limits), points,
                )
            else:
                para_vals = np.linspace(limits * para.value, (2 - limits) * para.value, points)
            para.vary = False  # fix this parameter while the others are re-fitted
            self.trace_dict[para.name]['value'] = []
            self.trace_dict[para.name]['dchi'] = []
            self.trace_dict[para.name]['results'] = []
            for val in para_vals:
                self.trace_dict[para.name]['value'].append(val)
                if mp:
                    # NOTE(review): apply_async pickles self for the worker;
                    # assumes minimizer/result are picklable -- TODO confirm.
                    arl.append(proc_pool.apply_async(self._calc_dchi, args=(self, para, val)))
                else:
                    results.append(self.calc_dchi(para, val))
            para.vary = True  # release the parameter again
        self._reset_vals()
        if mp:
            arl[-1].wait()  # wait for the last submitted task ...
            for ar in arl:
                results.append(ar.get())  # ... then collect all (get() blocks anyway)
            proc_pool.close()
        # fill the traces in task order
        for (para, dchi, opt_res) in results:
            self.trace_dict[para.name]['dchi'].append(dchi)
            self.trace_dict[para.name]['results'].append(opt_res)
        self._traces_calculated = True
def _process_ci(self, p_name):
xx = self.trace_dict[p_name]['value']
yy = self.trace_dict[p_name]['dchi']
t = self.threshold
spl = sp.interpolate.UnivariateSpline(xx, yy, k=self._k, s=0)
if self.log:
allx = np.logspace(np.log10(xx[0]), np.log10(xx[-1]), 20000)
else:
allx = np.linspace(xx[0], xx[-1], 20000)
lo = allx[spl(allx) <= t][0]
hi = allx[spl(allx) <= t][-1]
# catch non-identifiable cases
if lo == xx[0]:
lo = np.nan
if hi == xx[-1]:
hi = np.nan
return lo, hi
def _reset_vals(self):
"""Reset parameter values to best-fit values."""
for para_key in self.params:
(self.params[para_key].value, self.params[para_key].stderr,) = self.org[
para_key
]
    @staticmethod
    def _calc_dchi(ci_instance, para, val):
        """
        Static method to calculate the normalised delta chi-squared
        using multiprocessing.

        Mirrors :meth:`calc_dchi`, but as a staticmethod it can be
        dispatched by ``multiprocessing``; the owning instance is passed
        explicitly as ``ci_instance``.
        """
        para.vary = False
        para.value = val
        # Swap the profiled Parameter into the instance, fit, then restore.
        save_para = ci_instance.params[para.name]
        ci_instance.params[para.name] = para
        ci_instance.minimizer.prepare_fit(ci_instance.params)
        out = ci_instance.minimizer.minimize(method=ci_instance.method)
        dchi = ci_instance._dchi(ci_instance.result, out)
        ci_instance.params[para.name] = save_para
        para.vary = True
        return para, dchi, out
    def calc_dchi(self, para, val, restore=False):
        """
        Calculate the normalised delta chi-squared for
        a given parameter value.

        ``para`` is fixed at ``val`` (its ``vary`` flag is expected to be
        False already, see ``_populate_traces``), the remaining free
        parameters are re-optimized, and the relative chi-squared increase
        over the best fit is returned.

        Parameters
        ----------
        para : Parameter
            Parameter to profile; temporarily swapped into ``self.params``.
        val : float
            Value at which the parameter is fixed.
        restore : bool, optional
            If True, reset all parameters to best-fit values first.

        Returns
        -------
        tuple
            ``(para, dchi, out)`` -- the parameter, the normalised delta
            chi-squared, and the result of the re-optimization.
        """
        if restore:
            self._reset_vals()
        para.value = val
        save_para = self.params[para.name]
        self.params[para.name] = para
        self.minimizer.prepare_fit(self.params)
        out = self.minimizer.minimize(method=self.method)
        dchi = self._dchi(self.result, out)
        # Restore the original Parameter object after the profile evaluation.
        self.params[para.name] = save_para
        return para, dchi, out
def _dchi(self, best_fit, new_fit):
"""
Return the normalised delta chi-squared between the best fit
and the new fit.
"""
dchi = new_fit.chisqr / best_fit.chisqr - 1.0
return dchi
def _calc_threshold(self):
"""
Return the threshold of the normalised chi-squared for
the given probability.
"""
nfree = self.result.nfree
nfix = 1
threshold_scaled = sp.stats.chi2.ppf(self.prob, nfix)
threshold = threshold_scaled * nfix / nfree
return threshold
    def plot_ci(self, para, ax=None):
        """Plot the profile-likelihood trace and confidence band for ``para``.

        Parameters
        ----------
        para : str
            Parameter name; must be one of ``self.p_names``.
        ax : matplotlib Axes, optional
            Axes to draw into; a new figure is created when omitted.
        """
        assert para in self.p_names, 'para must be one of ' + str(self.p_names)
        if not ax:
            f, ax = plt.subplots()
        xx = self.trace_dict[para]['value']
        yy = self.trace_dict[para]['dchi']
        t = self.threshold
        # Same interpolating spline as in _process_ci, drawn on a dense grid.
        spl = sp.interpolate.UnivariateSpline(xx, yy, k=self._k, s=0)
        allx = np.linspace(xx[0], xx[-1], 20000)
        ax.plot(xx, yy, '+')
        ax.plot(allx, spl(allx), '-', lw=1)
        ax.axhline(t, color='k', ls='--', lw=0.5)
        ax.axvline(self.params[para].value, color='k', ls='-', lw=0.5)
        lo, hi = self.ci_values[para]
        # NaN bounds mark non-identifiable directions; extend the shaded
        # band to the axis limits in that case.
        if np.isnan(lo):
            lo = ax.get_xlim()[0]
        if np.isnan(hi):
            hi = ax.get_xlim()[1]
        ax.axvspan(lo, hi, alpha=0.1, color='b')
        if self.log:
            ax.semilogx()
        ax.set_xlabel('Parameter value')
        ax.set_ylabel(r'$\chi^2\left/\chi^2_0\right. - 1$')
        ax.set_title(para)
    def plot_all_ci(self):
        """Plot every parameter's trace in a grid with at most 3 columns."""
        num = len(self.p_names)
        numcols = 3
        numrows = math.ceil(num / numcols)
        f, ax = plt.subplots(nrows=numrows, ncols=numcols, figsize=(9, 2.5 * numrows))
        for i in range(num):
            # With a single row `ax` is 1-D; otherwise it is a 2-D grid.
            if num <= numcols:
                theax = ax[i]
            else:
                theax = ax[i // numcols, i % numcols]
            self.plot_ci(self.p_names[i], ax=theax)
        # remove empty axes
        if num % numcols != 0:
            empty = numcols - num % numcols
            for i in range(-empty, 0):
                if num <= numcols:
                    ax[i].set_visible(False)
                else:
                    ax[num // numcols, i].set_visible(False)
        f.tight_layout()
def conf_interval(
    minimizer,
    result,
    p_names=None,
    prob=0.95,
    limits=0.5,
    log=False,
    points=11,
    method='leastsq',
    return_CIclass=False,
    mp=True,
):
    """
    Calculate the confidence interval (CI) for parameters.
    The parameter for which the CI is calculated will be varied, while the
    remaining parameters are re-optimized to minimize the chi-square. The
    resulting chi-square is used to calculate the probability with a given
    statistic, i.e. chi-squared test.
    Parameters
    ----------
    minimizer : Minimizer or ModelResult
        The minimizer to use, holding objective function.
    result : MinimizerResult or ModelResult
        The result of running Minimizer.minimize() or Model.fit().
    p_names : list, optional
        Names of the parameters for which the CI is calculated. If None
        (default), the CI is calculated for every parameter.
    prob : float, optional
        The probability for the confidence interval (<1). If None,
        the default is 0.95 (95 % confidence interval).
    limits : float, optional
        The limits (as a fraction of the original parameter value) within which
        to vary the parameters for identifiability analysis (default is 0.5).
        If ``log=False``, the parameter is varied from p*limits to p*(2 - limits),
        where p is the original value.
        If ``log=True``, the parameter is varied from p*limits to p/limits.
    log : bool, optional
        Whether to vary the parameter in a log (True) or a linear (False,
        default) scale.
    points : int, optional
        The number of points for which to calculate the profile likelihood over
        the given parameter range.
    method : str, optional
        The lmfit mimimize() method to use (default='leastsq')
    return_CIclass : bool, optional
        When true, return the instantiated ``ConfidenceInterval`` class to
        access its methods directly (default=False).
    mp : bool, optional
        Run the optimization in parallel using ``multiprocessing`` (default=True)
    Returns
    -------
    output : dict
        A dictionary containing a list of ``(lower, upper)``-tuples containing
        the confidence bounds for each parameter.
    ci : ``ConfidenceInterval`` instance, optional
        Instantiated ``ConfidenceInterval`` class to access the attached methods.
    Raises
    ------
    ValueError
        If ``limits`` does not lie strictly between 0 and 1.
    """
    # Validate explicitly rather than with `assert`, which is stripped when
    # Python runs with optimizations (-O).
    if not (0 < limits < 1):
        raise ValueError('Please select a limits value between 0 and 1.')
    ci = ConfidenceInterval(minimizer, result, p_names, log)
    output = ci.calc_all_ci(limits, points, prob, method=method, mp=mp)
    if return_CIclass:
        return output, ci
    return output
| [
"collections.OrderedDict",
"lmfit.minimizer.MinimizerException",
"math.ceil",
"numpy.log10",
"numpy.linspace",
"scipy.stats.chi2.ppf",
"numpy.isnan",
"multiprocessing.Pool",
"scipy.interpolate.UnivariateSpline",
"matplotlib.pyplot.subplots"
] | [((2517, 2530), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2528, 2530), False, 'from collections import OrderedDict\n'), ((4410, 4465), 'scipy.interpolate.UnivariateSpline', 'sp.interpolate.UnivariateSpline', (['xx', 'yy'], {'k': 'self._k', 's': '(0)'}), '(xx, yy, k=self._k, s=0)\n', (4441, 4465), True, 'import scipy as sp\n'), ((6746, 6780), 'scipy.stats.chi2.ppf', 'sp.stats.chi2.ppf', (['self.prob', 'nfix'], {}), '(self.prob, nfix)\n', (6763, 6780), True, 'import scipy as sp\n'), ((7159, 7214), 'scipy.interpolate.UnivariateSpline', 'sp.interpolate.UnivariateSpline', (['xx', 'yy'], {'k': 'self._k', 's': '(0)'}), '(xx, yy, k=self._k, s=0)\n', (7190, 7214), True, 'import scipy as sp\n'), ((7230, 7263), 'numpy.linspace', 'np.linspace', (['xx[0]', 'xx[-1]', '(20000)'], {}), '(xx[0], xx[-1], 20000)\n', (7241, 7263), True, 'import numpy as np\n'), ((7507, 7519), 'numpy.isnan', 'np.isnan', (['lo'], {}), '(lo)\n', (7515, 7519), True, 'import numpy as np\n'), ((7566, 7578), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (7574, 7578), True, 'import numpy as np\n'), ((7936, 7960), 'math.ceil', 'math.ceil', (['(num / numcols)'], {}), '(num / numcols)\n', (7945, 7960), False, 'import math\n'), ((7977, 8047), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'numrows', 'ncols': 'numcols', 'figsize': '(9, 2.5 * numrows)'}), '(nrows=numrows, ncols=numcols, figsize=(9, 2.5 * numrows))\n', (7989, 8047), True, 'from matplotlib import pyplot as plt\n'), ((2065, 2099), 'lmfit.minimizer.MinimizerException', 'MinimizerException', (['CONF_ERR_NVARS'], {}), '(CONF_ERR_NVARS)\n', (2083, 2099), False, 'from lmfit.minimizer import Minimizer, MinimizerResult, MinimizerException\n'), ((2882, 2888), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (2886, 2888), False, 'from multiprocessing import Pool\n'), ((4593, 4626), 'numpy.linspace', 'np.linspace', (['xx[0]', 'xx[-1]', '(20000)'], {}), '(xx[0], xx[-1], 20000)\n', (4604, 4626), True, 'import numpy as 
np\n'), ((7016, 7030), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7028, 7030), True, 'from matplotlib import pyplot as plt\n'), ((3267, 3334), 'numpy.linspace', 'np.linspace', (['(limits * para.value)', '((2 - limits) * para.value)', 'points'], {}), '(limits * para.value, (2 - limits) * para.value, points)\n', (3278, 3334), True, 'import numpy as np\n'), ((4518, 4533), 'numpy.log10', 'np.log10', (['xx[0]'], {}), '(xx[0])\n', (4526, 4533), True, 'import numpy as np\n'), ((4535, 4551), 'numpy.log10', 'np.log10', (['xx[-1]'], {}), '(xx[-1])\n', (4543, 4551), True, 'import numpy as np\n'), ((3133, 3162), 'numpy.log10', 'np.log10', (['(para.value * limits)'], {}), '(para.value * limits)\n', (3141, 3162), True, 'import numpy as np\n'), ((3164, 3193), 'numpy.log10', 'np.log10', (['(para.value / limits)'], {}), '(para.value / limits)\n', (3172, 3193), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pdb
class Model(nn.Module):
    r"""Spatial temporal graph convolutional networks.
    Args:
        in_channels (int): Number of channels in the input data
        num_class (int): Number of classes for the classification task
        A (ndarray): Adjacency matrix of the graph, shape (V, V);
            symmetrically normalised internally
        edge_importance_weighting (bool): If ``True``, adds a learnable
            importance weighting to the edges of the graph
        **kwargs (optional): Other parameters for graph convolution units
    Shape:
        - Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})`
        - Output: :math:`(N, num_class)` where
            :math:`N` is a batch size,
            :math:`T_{in}` is a length of input sequence,
            :math:`V_{in}` is the number of graph nodes,
            :math:`M_{in}` is the number of instance in a frame.
    """
    def __init__(self, in_channels, num_class, A,
                 edge_importance_weighting, **kwargs):
        super().__init__()
        # load graph
        # **this is the adj matrix Soham produced that computes correlation based on raw data **
        #A = np.load('../cs230/adj/adj_matrix.npy')
        # **this is the adj matrix that computes correlation based on z-score of data for all 1200 timesteps**
        # Symmetric normalisation DAD = D^(-1/2) A D^(-1/2); rows/cols of
        # isolated nodes (zero degree) stay zero.
        Dl = np.sum(A, 0)
        num_node = A.shape[0]
        Dn = np.zeros((num_node, num_node))
        for i in range(num_node):
            if Dl[i] > 0:
                Dn[i, i] = Dl[i] ** (-0.5)
        DAD = np.dot(np.dot(Dn, A), Dn)
        temp_matrix = np.zeros((1, A.shape[0], A.shape[0]))
        temp_matrix[0] = DAD
        # Stored as a (1, V, V) non-trainable buffer: one spatial partition.
        A = torch.tensor(temp_matrix, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        # build networks (**number of layers, final output features, kernel size**)
        spatial_kernel_size = A.size(0)
        temporal_kernel_size = 11 # update temporal kernel size
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
        # The first layer gets no dropout (kwargs0 strips it).
        kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
        self.st_gcn_networks = nn.ModuleList((
            st_gcn(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
            st_gcn(64, 64, kernel_size, 1, residual=False, **kwargs),
            st_gcn(64, 64, kernel_size, 1, residual=False, **kwargs),
            st_gcn(64, 64, kernel_size, 1, residual=False, **kwargs),
            #st_gcn(64, 128, kernel_size, 2, **kwargs),
            #st_gcn(128, 128, kernel_size, 1, **kwargs),
            #st_gcn(128, 128, kernel_size, 1, **kwargs),
            #st_gcn(128, 256, kernel_size, 2, **kwargs),
            #st_gcn(256, 256, kernel_size, 1, **kwargs),
            #st_gcn(256, 256, kernel_size, 1, **kwargs),
        ))
        # initialize parameters for edge importance weighting
        if edge_importance_weighting:
            # self.edge_importance = nn.ParameterList([
            #     nn.Parameter(torch.ones(self.A.size()))
            #     for i in self.st_gcn_networks
            # ])
            # A single importance tensor shared by all st-gcn layers.
            self.edge_importance = nn.Parameter(torch.ones(self.A.size()))
        else:
            # NOTE(review): forward() multiplies edge_importance as a tensor;
            # this plain-list branch would fail there -- confirm it is unused.
            self.edge_importance = [1] * len(self.st_gcn_networks)
        # fcn for prediction (**number of fully connected layers**)
        self.fcn = nn.Conv2d(64, num_class, kernel_size=1)
        self.sig = nn.Sigmoid()
    def forward(self, x):
        # data normalization: fold persons (M) into the batch so BatchNorm1d
        # normalises each (node, channel) feature over time.
        N, C, T, V, M = x.size()
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view(N * M, V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view(N * M, C, T, V)
        # forwad
        # for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
        #     x, _ = gcn(x, self.A * (importance + torch.transpose(importance,1,2)))
        #print(self.edge_importance.shape)
        # Symmetrised non-negative edge weighting: W*W + (W*W)^T applied to A.
        # NOTE(review): assumes self.edge_importance is a tensor (the
        # edge_importance_weighting=True branch of __init__).
        for gcn in self.st_gcn_networks:
            x, _ = gcn(x, self.A * (self.edge_importance*self.edge_importance+torch.transpose(self.edge_importance*self.edge_importance,1,2)))
        # global pooling
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(N, M, -1, 1, 1).mean(dim=1)
        # prediction
        # pdb.set_trace()
        x = self.fcn(x)
        x = self.sig(x)
        x = x.view(x.size(0), -1)
        return x
    def extract_feature(self, x):
        """Return per-node class scores and intermediate features; unlike
        ``forward``, no pooling is applied, so resolution is preserved.
        """
        # data normalization (same folding as in forward)
        N, C, T, V, M = x.size()
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view(N * M, V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view(N * M, C, T, V)
        # forwad
        # NOTE(review): iterating edge_importance per layer matches the
        # commented-out ParameterList variant rather than the single shared
        # Parameter created in __init__ -- confirm before use.
        for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
            x, _ = gcn(x, self.A * importance)
        _, c, t, v = x.size()
        feature = x.view(N, M, c, t, v).permute(0, 2, 3, 4, 1)
        # prediction
        x = self.fcn(x)
        output = x.view(N, M, -1, t, v).permute(0, 2, 3, 4, 1)
        # pdb.set_trace()
        return output, feature
class st_gcn(nn.Module):
    r"""Applies a spatial temporal graph convolution over an input graph sequence.
    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (tuple): Size of the temporal convolving kernel and graph convolving kernel
        stride (int, optional): Stride of the temporal convolution. Default: 1
        dropout (float, optional): Dropout rate of the final output. Default: 0.5
        residual (bool, optional): If ``True``, applies a residual mechanism. Default: ``True``
    Shape:
        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
        where
            :math:`N` is a batch size,
            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V` is the number of graph nodes.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 dropout=0.5,
                 residual=True):
        super().__init__()
        print("Dropout={}".format(dropout))
        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        # 'same' temporal padding; requires an odd temporal kernel (asserted).
        padding = ((kernel_size[0] - 1) // 2, 0)
        self.gcn = ConvTemporalGraphical(in_channels, out_channels,
                                         kernel_size[1])
        # Temporal convolution block applied after the graph convolution.
        self.tcn = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                out_channels,
                out_channels,
                (kernel_size[0], 1),
                (stride, 1),
                padding,
            ),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True),
        )
        # Residual branch: zero, identity, or a 1x1 conv when channels or
        # temporal stride change.
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=(stride, 1)),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x, A):
        # Graph convolution, temporal convolution, then the residual add.
        res = self.residual(x)
        x, A = self.gcn(x, A)
        x = self.tcn(x) + res
        return self.relu(x), A
class ConvTemporalGraphical(nn.Module):
    r"""Basic graph-convolution unit.

    A single 2-D convolution produces ``kernel_size`` feature groups at
    once; the groups are then aggregated over the graph with one adjacency
    matrix per group via an einsum contraction.

    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int): Size of the graph convolving kernel
        t_kernel_size (int): Size of the temporal convolving kernel
        t_stride (int, optional): Stride of the temporal convolution. Default: 1
        t_padding (int, optional): Temporal zero-padding added to both sides
            of the input. Default: 0
        t_dilation (int, optional): Spacing between temporal kernel elements.
            Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    Shape:
        - Input[0]: graph sequence, :math:`(N, in_channels, T_{in}, V)`
        - Input[1]: adjacency matrices, :math:`(K, V, V)` with
          :math:`K == kernel_size`
        - Output[0]: graph sequence, :math:`(N, out_channels, T_{out}, V)`
        - Output[1]: the adjacency matrices, unchanged
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 t_kernel_size=1,
                 t_stride=1,
                 t_padding=0,
                 t_dilation=1,
                 bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        # One conv emits all K spatial-kernel feature groups in one pass.
        self.conv = nn.Conv2d(in_channels,
                              out_channels * kernel_size,
                              kernel_size=(t_kernel_size, 1),
                              padding=(t_padding, 0),
                              stride=(t_stride, 1),
                              dilation=(t_dilation, 1),
                              bias=bias)

    def forward(self, x, A):
        assert A.size(0) == self.kernel_size
        features = self.conv(x)
        batch, kc, frames, nodes = features.size()
        # Split channels into (K, C) groups, then contract the node axis
        # against the K adjacency matrices.
        features = features.view(batch, self.kernel_size,
                                 kc // self.kernel_size, frames, nodes)
        out = torch.einsum('nkctv,kvw->nctw', (features, A))
        return out.contiguous(), A
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.BatchNorm2d",
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.transpose",
"numpy.sum",
"numpy.zeros",
"torch.tensor",
"torch.einsum",
"numpy.dot"
] | [((1411, 1423), 'numpy.sum', 'np.sum', (['A', '(0)'], {}), '(A, 0)\n', (1417, 1423), True, 'import numpy as np\n'), ((1467, 1497), 'numpy.zeros', 'np.zeros', (['(num_node, num_node)'], {}), '((num_node, num_node))\n', (1475, 1497), True, 'import numpy as np\n'), ((1664, 1701), 'numpy.zeros', 'np.zeros', (['(1, A.shape[0], A.shape[0])'], {}), '((1, A.shape[0], A.shape[0]))\n', (1672, 1701), True, 'import numpy as np\n'), ((1743, 1810), 'torch.tensor', 'torch.tensor', (['temp_matrix'], {'dtype': 'torch.float32', 'requires_grad': '(False)'}), '(temp_matrix, dtype=torch.float32, requires_grad=False)\n', (1755, 1810), False, 'import torch\n'), ((3449, 3488), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'num_class'], {'kernel_size': '(1)'}), '(64, num_class, kernel_size=1)\n', (3458, 3488), True, 'import torch.nn as nn\n'), ((3508, 3520), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3518, 3520), True, 'import torch.nn as nn\n'), ((7902, 7923), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7909, 7923), True, 'import torch.nn as nn\n'), ((9855, 10029), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(out_channels * kernel_size)'], {'kernel_size': '(t_kernel_size, 1)', 'padding': '(t_padding, 0)', 'stride': '(t_stride, 1)', 'dilation': '(t_dilation, 1)', 'bias': 'bias'}), '(in_channels, out_channels * kernel_size, kernel_size=(\n t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1),\n dilation=(t_dilation, 1), bias=bias)\n', (9864, 10029), True, 'import torch.nn as nn\n'), ((10319, 10358), 'torch.einsum', 'torch.einsum', (['"""nkctv,kvw->nctw"""', '(x, A)'], {}), "('nkctv,kvw->nctw', (x, A))\n", (10331, 10358), False, 'import torch\n'), ((1622, 1635), 'numpy.dot', 'np.dot', (['Dn', 'A'], {}), '(Dn, A)\n', (1628, 1635), True, 'import numpy as np\n'), ((7071, 7099), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (7085, 7099), True, 'import torch.nn as nn\n'), ((7113, 7134), 
'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7120, 7134), True, 'import torch.nn as nn\n'), ((7148, 7233), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(kernel_size[0], 1)', '(stride, 1)', 'padding'], {}), '(out_channels, out_channels, (kernel_size[0], 1), (stride, 1), padding\n )\n', (7157, 7233), True, 'import torch.nn as nn\n'), ((7337, 7365), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (7351, 7365), True, 'import torch.nn as nn\n'), ((7379, 7412), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {'inplace': '(True)'}), '(dropout, inplace=True)\n', (7389, 7412), True, 'import torch.nn as nn\n'), ((7667, 7738), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(stride, 1)'}), '(in_channels, out_channels, kernel_size=1, stride=(stride, 1))\n', (7676, 7738), True, 'import torch.nn as nn\n'), ((7837, 7865), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (7851, 7865), True, 'import torch.nn as nn\n'), ((4191, 4257), 'torch.transpose', 'torch.transpose', (['(self.edge_importance * self.edge_importance)', '(1)', '(2)'], {}), '(self.edge_importance * self.edge_importance, 1, 2)\n', (4206, 4257), False, 'import torch\n')] |
import numpy as np
from scipy.signal import stft, istft
from scipy.io import wavfile
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.Util.Padding import pad, unpad
from Crypto.Util.strxor import strxor
import random
import struct
from reedsolo import RSCodec
import warnings
import os
warnings.filterwarnings('ignore')  # silence scipy/numpy warning chatter
# Reed-Solomon parameters: 32 parity bytes per 255-byte codeword,
# leaving 223 data bytes per block.
_check = 32
_rsc_block = 255 - _check
# Size in bytes of the RS-protected header prepended by Cryp._ehead.
_header_size = 32
# Encryption modes accepted by Cryp.
MODE_PLAIN = 0
MODE_AES = 1
__all__ = ['Cryp', 'EMess', 'embeed', 'extract', 'estimate']
class EMess():
    """Bit-level iterator over a byte string.

    Iterating an ``EMess`` yields the bits of ``text`` one at a time as the
    characters '0'/'1', most-significant bit of each byte first.
    """

    def __init__(self, text: bytes) -> None:
        self.text = text
        self.nn = 0  # index of the current byte
        self.bn = 0  # index of the current bit inside that byte (0 = MSB)
        self.blen = len(text) * 8  # total number of bits

    def __iter__(self):
        return self

    def __next__(self):
        # BUG FIX: the original tested `> self.blen`, which let one extra
        # call through after exhaustion (nn == len(text), bn == 0) and
        # raised IndexError instead of StopIteration.
        if self.nn * 8 + self.bn >= self.blen:
            raise StopIteration
        cc = bin(self.text[self.nn])[2:].zfill(8)[self.bn]
        self.bn += 1
        if self.bn == 8:
            self.nn += 1
            self.bn = 0
        return cc

    def __len__(self):
        # Length in BYTES (not bits), matching the original API.
        return len(self.text)

    def __eq__(self, tt: object) -> bool:
        return self.text == tt
class Cryp():
    """Payload codec used by the steganography pipeline.

    Wraps optional AES-CBC encryption (when ``password`` is given),
    optional RSA signing/verification (``verify=True`` with a PEM ``key``
    file), and Reed-Solomon error correction with a fixed-size,
    RS-protected header.

    Parameters
    ----------
    mode : int, optional
        MODE_PLAIN or MODE_AES; forced to MODE_AES when a password is
        supplied, and defaults to MODE_PLAIN when omitted.
    debug : bool
        Print Reed-Solomon check information while decrypting.
    password : bytes or str, optional
        AES key material; padded to 16 bytes.
    verify : bool
        Sign on encrypt / verify on decrypt using ``key``.
    key : str, optional
        Path to a PEM RSA key (private for signing, public for verifying).
    """

    def __init__(self, mode=None, debug=False, password=None, verify=False, key=None, **kwargs):
        # Resolve the operating mode.
        # BUG FIX: the original left ``self.mode`` unset when both ``mode``
        # and ``password`` were None (AttributeError a few lines below) and
        # silently discarded an explicit ``mode`` argument otherwise.
        if password is not None:
            self.mode = MODE_AES
        elif mode is None:
            self.mode = MODE_PLAIN
        else:
            self.mode = mode
        self.block_size = 16
        if self.mode == MODE_AES:
            # NOTE(review): a constant all-zero IV with a fixed key makes
            # identical plaintexts encrypt identically; consider a random IV.
            if isinstance(password, bytes):
                self.cipher = AES.new(
                    pad(password, 16), AES.MODE_CBC, bytes(16))
            elif isinstance(password, str):
                self.cipher = AES.new(
                    pad(password.encode(), 16), AES.MODE_CBC, bytes(16))
            else:
                raise TypeError('Unknown key type')
            self.block_size = self.cipher.block_size
        if self.mode not in [0, 1]:
            raise TypeError('Unknown encryption mode')
        self.verify = verify
        self.debug = debug
        if self.verify:
            if key is None:
                raise Exception('no key provided')
            self.key = RSA.import_key(open(key).read())

    def decrypt(self, data: bytes, hashalg=SHA256):
        """Decode ``data``: RS-correct, strip padding, decrypt, verify.

        Returns
        -------
        tuple
            ``(plaintext, broken)`` where ``broken`` flags uncorrectable
            Reed-Solomon blocks or padding damage.
        """
        broken = False
        try:
            bb = self._unhead(data[:_header_size])
        except:
            # Header beyond repair -- nothing sensible can be decoded.
            print(data)
            return b'\x00', True
        _rrs = RSCodec(_check)
        dd = data[_header_size:bb * 255 + _header_size]
        if self.debug:
            print(_rrs.check(dd))
        kd = b''
        for i in range(bb):
            try:
                kd += _rrs.decode(dd[i * 255:(i + 1) * 255])[0]
            except:
                # Block uncorrectable: keep the data bytes, flag as broken.
                kd += dd[i * 255:(i + 1) * 255 - _check]
                broken = True
        try:
            dd = unpad(kd, _rsc_block)
        except:
            # Best-effort unpad of damaged data.
            dd = kd[:-kd[-1]]
            broken = True
        if self.mode == MODE_AES:
            dd = self._aes_dec(dd)
        else:
            dd = dd
        try:
            dd = unpad(dd, self.block_size)
        except:
            dd = dd[:-dd[-1]]
            broken = True
        if self.verify:
            # The RSA signature occupies the trailing key-size bytes.
            sz = self.key.size_in_bytes()
            dd, sig = dd[:-sz], dd[-sz:]
            hs = hashalg.new(dd)
            try:
                pkcs1_15.new(self.key).verify(hs, sig)
            except:
                print('signature not valid')
        if broken:
            print('data may be broken during the process')
        return dd, broken

    def encrypt(self, data: bytes, hashalg=SHA256):
        """Encode ``data``: (sign,) pad, encrypt, then add RS parity + header."""
        if self.verify:
            if not self.key.has_private():
                raise AttributeError('not a private key')
            hs = hashalg.new(data)
            sig = pkcs1_15.new(self.key).sign(hs)
            ee = pad(data + sig, self.block_size)
        else:
            ee = pad(data, self.block_size)
        if self.mode == MODE_AES:
            ee = self._aes_enc(ee)
        else:
            pass
        return self._ehead(ee)

    def _ehead(self, data: bytes):
        """Prepend the RS-protected header (magic + length) and add RS parity."""
        header = b'FsTeg\x01\x02\x03'
        ee = pad(data, self.block_size)
        # Payload length in cipher blocks, little-endian in the header.
        ll = struct.pack('i', 1 + len(ee) // self.block_size)
        _rrs1 = RSCodec()
        _rrs2 = RSCodec(_check)
        return _rrs1.encode(pad(header + ll, _header_size - 10)) + _rrs2.encode(pad(data, _rsc_block))

    def _unhead(self, hh: bytes):
        """Decode the header; return the number of 255-byte RS codewords."""
        _rrs = RSCodec()
        try:
            hd = _rrs.decode(hh)[0]
        except:
            print('broken header')
            hd = hh[:-10]
        if hd[:5] != b'FsTeg':
            raise Exception('Unknown header type')
        ll = 1 + int.from_bytes(hd[8:12], 'little') * \
            self.block_size // _rsc_block
        return ll

    def _aes_enc(self, data: bytes):
        # Thin wrapper so mode handling stays in encrypt()/decrypt().
        return self.cipher.encrypt(data)

    def _aes_dec(self, data: bytes):
        return self.cipher.decrypt(data)
def extract(audiofile: str, ths=2048, perseg=441, overlap=0):
    """Recover an embedded bit stream from a stego WAV file.

    The file is analysed with a rectangular-window STFT per channel; every
    coefficient whose magnitude exceeds ``ths`` carries one hidden bit
    (amplitude-band parity, see ``_gp_by_amp``).

    Parameters
    ----------
    audiofile : str
        Path of the stereo WAV file to read.
    ths : float
        Magnitude threshold selecting carrier coefficients; must match the
        value used by ``embeed``.
    perseg, overlap : int
        STFT segment length and overlap; must match ``embeed``.

    Returns
    -------
    bytes
        The raw extracted payload (whole bytes only).
    """
    srt, sig = wavfile.read(audiofile)
    # Number of whole STFT frames that fit in the signal.
    bb = sig.shape[0] // (perseg - overlap) - 1
    f, t, zxxl = stft(sig[:bb * (perseg - overlap), 0], srt,
                      'rect', perseg, overlap)
    f, t, zxxr = stft(sig[:bb * (perseg - overlap), 1], srt,
                      'rect', perseg, overlap)
    i = 0
    d = ''
    # of = open('ex.log', 'w')
    while i < bb:
        cl = zxxl[:, i]
        cr = zxxr[:, i]
        # Carrier bins: magnitude above threshold, left channel first.
        idxl = np.argwhere(np.abs(cl) > ths)
        idxr = np.argwhere(np.abs(cr) > ths)
        for j in idxl:
            d += _gp_by_amp(cl[j])
        for j in idxr:
            d += _gp_by_amp(cr[j])
        # of.write('L: {} {}, R: {} {} @{}\n'.format(list(f[idxl].T[0]), list(np.angle(
        #     cl[idxl].T[0], 1).astype(int)), list(f[idxr].T[0]), list(np.angle(cr[idxr].T[0], 1).astype(int)), str(t[i])[:str(t[i]).index('.') + 3]))
        i += 1
    # of.close()
    return _bin2byt(d)
def embeed(mess: EMess, infile: str, outfile: str, ths=2048, perseg=441, overlap=0):
    """Hide the bits of ``mess`` in the STFT coefficients of a stereo WAV.

    Every STFT coefficient with magnitude above ``ths`` becomes a carrier:
    its amplitude-band parity is set to the next message bit (see
    ``_hb_by_amp``).  Frames are consumed until the whole message is
    embedded; the rest of the signal is copied through unchanged.

    Parameters
    ----------
    mess : EMess
        Bit iterator over the payload bytes.
    infile, outfile : str
        Input WAV path and stego output WAV path.
    ths, perseg, overlap
        Carrier threshold and STFT parameters; ``extract`` must be called
        with the same values.
    """
    srt, sig = wavfile.read(infile)
    sig = np.nan_to_num(sig)
    bb = sig.shape[0] // (perseg - overlap) - 1
    f, t, zxxl = stft(sig[:bb * (perseg - overlap), 0], srt,
                      'rect', perseg, overlap)
    f, t, zxxr = stft(sig[:bb * (perseg - overlap), 1], srt,
                      'rect', perseg, overlap)
    sl = []
    sr = []
    ll = 0  # number of bits embedded so far
    i = 0  # current STFT frame index
    lm = len(mess) * 8  # total number of payload bits
    # of = open('em.log', 'w')
    while ll < lm:
        cpl = zxxl[:, i].copy()
        cpr = zxxr[:, i].copy()
        idxl = np.argwhere(np.abs(cpl) > ths)
        idxr = np.argwhere(np.abs(cpr) > ths)
        # if len(idxl.tolist()) + len(idxr.tolist()) == 0:
        #     i += 1
        #     continue
        # of.write('L: {} {}, R: {} {} @{}\n'.format(list(f[idxl].T[0]), list(np.angle(
        #     cpl[idxl].T[0], 1).astype(int)), list(f[idxr].T[0]), list(np.angle(cpr[idxr].T[0], 1).astype(int)), str(t[i])[:str(t[i]).index('.') + 3]))
        # min(..., lm - ll) stops exactly at the last message bit.
        for j in range(min(len(idxl), lm - ll)):
            cpl[idxl[j]] = _hb_by_amp(cpl[idxl[j]], next(mess))
            ll += 1
        for j in range(min(len(idxr), lm - ll)):
            cpr[idxr[j]] = _hb_by_amp(cpr[idxr[j]], next(mess))
            ll += 1
        sl.append(cpl)
        sr.append(cpr)
        i += 1
    # of.close()
    print('blocks used:', i, 'total:', bb)
    # Inverse STFT of the modified frames, then append the untouched tail.
    _, rsl = istft(np.array(sl).T, srt, 'rect', perseg, overlap)
    _, rsr = istft(np.array(sr).T, srt, 'rect', perseg, overlap)
    fsg = list(np.array(np.around([rsl, rsr]), np.int16).T)
    fsg += list(sig[i * (perseg - overlap):])
    fsg = np.array(fsg, np.int16)
    wavfile.write(outfile, srt, fsg)
def _hb_by_amp(nn: complex, b: str, delt=16):
i = nn.real
j = nn.imag
mm = np.abs(nn)
if (int(mm // delt) % 2) ^ int(b):
i += np.cos(np.angle(nn)) * delt
j += np.sin(np.angle(nn)) * delt
return np.complex(i, j)
def _gp_by_amp(tt: complex, delt=16):
bb = '1' if int(np.abs(tt) // delt) % 2 else '0'
return bb
def _hb_by_ang(nn: complex, b: str, delt=4):
ll = np.abs(nn)
aa = np.angle(nn, True)
gg = int(aa) // delt
if (gg % 2) ^ int(b):
s1 = (2 * gg - 1) * delt / 2
s2 = (2 * gg + 3) * delt / 2
if aa - s1 > s2 - aa:
aa = s2
else:
aa = s1
else:
aa = (2 * gg + 1) * delt / 2
return np.complex(np.cos(aa * np.pi / 180) * ll, np.sin(aa * np.pi / 180) * ll)
return nn
def _gp_by_ang(nn: complex, delt=4):
return '1' if (int(np.angle(nn, True)) // delt) % 2 else '0'
def _bin2byt(s: str):
d = b''
while len(s) >= 8:
ct = int(s[:8], 2).to_bytes(1, 'little')
d += ct
s = s[8:]
return d
def estimate(audiofile: str, ths=2048, perseg=441, overlap=0):
    """Estimate the embedding capacity of a stereo WAV file.

    Counts the STFT coefficients (both channels) whose magnitude exceeds
    ``ths`` -- each can carry one bit -- and reports the capacity in bytes.

    Returns
    -------
    tuple
        ``(capacity_bytes, sample_rate, n_samples)``.
    """
    srt, sig = wavfile.read(audiofile)
    bb = sig.shape[0] // (perseg - overlap) - 1
    _, _, zxxl = stft(sig[:bb * (perseg - overlap), 0], srt,
                      'rect', perseg, overlap)
    _, _, zxxr = stft(sig[:bb * (perseg - overlap), 1], srt,
                      'rect', perseg, overlap)
    i = 0
    u = 0
    while i < bb:
        cl = zxxl[:, i]
        cr = zxxr[:, i]
        # One carrier bit per above-threshold bin, per channel.
        u += sum(np.abs(cl) > ths) + sum(np.abs(cr) > ths)
        i += 1
    return u // 8, srt, sig.shape[0]
def convert():
    """Unfinished stub -- presumably meant to transcode audio via ffmpeg.

    NOTE(review): ``os.popen('ffmpeg')`` only opens a pipe object (always
    truthy), so this function is effectively a no-op; confirm intent.
    """
    if not os.popen('ffmpeg'):
        pass
def mis(out, raw):
    """Compare an extracted payload against the original message.

    Parameters
    ----------
    out : bytes
        Extracted (possibly truncated) payload.
    raw : bytes
        Original payload; ``out`` is compared against its prefix.

    Returns
    -------
    tuple
        ``(loss, ber)`` -- fraction of trailing bytes lost, and bit error
        rate over the bytes that were recovered.
    """
    st = len(raw) - len(out)
    rt = raw[:len(out)]
    # XOR recovered vs. original bytes; set bits mark bit errors.
    mi = EMess(strxor(out, rt))
    cc = 0
    i = 0
    # Count exactly len*8 bits via next() on the EMess bit iterator.
    while i < len(mi) * 8:
        cc += int(next(mi))
        i += 1
    return st / len(raw), cc / len(out) / 8
if __name__ == '__main__':
    # End-to-end smoke test: encrypt -> embed -> extract -> decrypt -> score.
    infile = 'test/wavs/01.wav'
    outfile = 'out.wav'
    privk = 'test/test.key'
    pubk = 'test/test.pub.key'
    # print(estimate(infile), 'bytes available.')
    # c = Cryp()
    # d = Cryp(debug=True)
    c = Cryp(MODE_AES, password=b'<PASSWORD>')
    d = Cryp(MODE_AES, password=b'<PASSWORD>')
    # c = Cryp(MODE_AES, password=b'<PASSWORD>',
    #          verify=True, key=privk)
    # d = Cryp(MODE_AES, password=b'<PASSWORD>',
    #          verify=True, key=pubk)
    # mm = random.randbytes(2048)
    mm = b'hello' * 200
    ee = c.encrypt(mm)
    # print(ee, len(ee))
    embeed(EMess(ee), infile, outfile, ths=1600)
    # os.system('ffmpeg -i out.wav -b:a 320k o1.mp3 -y')
    # os.system('ffmpeg -i o1.mp3 o2.wav -y')
    dd = extract('out.wav', ths=1600)
    # print(dd[:len(ee)], len(dd))
    dd, fg = d.decrypt(dd)
    los, ber = mis(dd, mm)
    # print(dd)
    print(dd == mm, 'lost={},ber={}'.format(los, ber))
| [
"numpy.abs",
"scipy.signal.stft",
"Crypto.Util.Padding.pad",
"numpy.angle",
"numpy.complex",
"numpy.array",
"Crypto.Signature.pkcs1_15.new",
"reedsolo.RSCodec",
"scipy.io.wavfile.read",
"scipy.io.wavfile.write",
"os.popen",
"numpy.cos",
"numpy.sin",
"Crypto.Util.strxor.strxor",
"numpy.ar... | [((382, 415), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (405, 415), False, 'import warnings\n'), ((5030, 5053), 'scipy.io.wavfile.read', 'wavfile.read', (['audiofile'], {}), '(audiofile)\n', (5042, 5053), False, 'from scipy.io import wavfile\n'), ((5119, 5187), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 0]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 0], srt, 'rect', perseg, overlap)\n", (5123, 5187), False, 'from scipy.signal import stft, istft\n'), ((5227, 5295), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 1]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 1], srt, 'rect', perseg, overlap)\n", (5231, 5295), False, 'from scipy.signal import stft, istft\n'), ((6038, 6058), 'scipy.io.wavfile.read', 'wavfile.read', (['infile'], {}), '(infile)\n', (6050, 6058), False, 'from scipy.io import wavfile\n'), ((6069, 6087), 'numpy.nan_to_num', 'np.nan_to_num', (['sig'], {}), '(sig)\n', (6082, 6087), True, 'import numpy as np\n'), ((6153, 6221), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 0]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 0], srt, 'rect', perseg, overlap)\n", (6157, 6221), False, 'from scipy.signal import stft, istft\n'), ((6261, 6329), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 1]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 1], srt, 'rect', perseg, overlap)\n", (6265, 6329), False, 'from scipy.signal import stft, istft\n'), ((7603, 7626), 'numpy.array', 'np.array', (['fsg', 'np.int16'], {}), '(fsg, np.int16)\n', (7611, 7626), True, 'import numpy as np\n'), ((7631, 7663), 'scipy.io.wavfile.write', 'wavfile.write', (['outfile', 'srt', 'fsg'], {}), '(outfile, srt, fsg)\n', (7644, 7663), False, 'from scipy.io import wavfile\n'), ((7753, 7763), 'numpy.abs', 'np.abs', (['nn'], {}), 
'(nn)\n', (7759, 7763), True, 'import numpy as np\n'), ((7896, 7912), 'numpy.complex', 'np.complex', (['i', 'j'], {}), '(i, j)\n', (7906, 7912), True, 'import numpy as np\n'), ((8076, 8086), 'numpy.abs', 'np.abs', (['nn'], {}), '(nn)\n', (8082, 8086), True, 'import numpy as np\n'), ((8096, 8114), 'numpy.angle', 'np.angle', (['nn', '(True)'], {}), '(nn, True)\n', (8104, 8114), True, 'import numpy as np\n'), ((8808, 8831), 'scipy.io.wavfile.read', 'wavfile.read', (['audiofile'], {}), '(audiofile)\n', (8820, 8831), False, 'from scipy.io import wavfile\n'), ((8897, 8965), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 0]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 0], srt, 'rect', perseg, overlap)\n", (8901, 8965), False, 'from scipy.signal import stft, istft\n'), ((9005, 9073), 'scipy.signal.stft', 'stft', (['sig[:bb * (perseg - overlap), 1]', 'srt', '"""rect"""', 'perseg', 'overlap'], {}), "(sig[:bb * (perseg - overlap), 1], srt, 'rect', perseg, overlap)\n", (9009, 9073), False, 'from scipy.signal import stft, istft\n'), ((2471, 2486), 'reedsolo.RSCodec', 'RSCodec', (['_check'], {}), '(_check)\n', (2478, 2486), False, 'from reedsolo import RSCodec\n'), ((4159, 4185), 'Crypto.Util.Padding.pad', 'pad', (['data', 'self.block_size'], {}), '(data, self.block_size)\n', (4162, 4185), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((4264, 4273), 'reedsolo.RSCodec', 'RSCodec', ([], {}), '()\n', (4271, 4273), False, 'from reedsolo import RSCodec\n'), ((4290, 4305), 'reedsolo.RSCodec', 'RSCodec', (['_check'], {}), '(_check)\n', (4297, 4305), False, 'from reedsolo import RSCodec\n'), ((4459, 4468), 'reedsolo.RSCodec', 'RSCodec', ([], {}), '()\n', (4466, 4468), False, 'from reedsolo import RSCodec\n'), ((9321, 9339), 'os.popen', 'os.popen', (['"""ffmpeg"""'], {}), "('ffmpeg')\n", (9329, 9339), False, 'import os\n'), ((9443, 9458), 'Crypto.Util.strxor.strxor', 'strxor', (['out', 'rt'], {}), '(out, rt)\n', (9449, 9458), 
False, 'from Crypto.Util.strxor import strxor\n'), ((2863, 2884), 'Crypto.Util.Padding.unpad', 'unpad', (['kd', '_rsc_block'], {}), '(kd, _rsc_block)\n', (2868, 2884), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((3090, 3116), 'Crypto.Util.Padding.unpad', 'unpad', (['dd', 'self.block_size'], {}), '(dd, self.block_size)\n', (3095, 3116), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((3850, 3882), 'Crypto.Util.Padding.pad', 'pad', (['(data + sig)', 'self.block_size'], {}), '(data + sig, self.block_size)\n', (3853, 3882), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((3914, 3940), 'Crypto.Util.Padding.pad', 'pad', (['data', 'self.block_size'], {}), '(data, self.block_size)\n', (3917, 3940), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((7376, 7388), 'numpy.array', 'np.array', (['sl'], {}), '(sl)\n', (7384, 7388), True, 'import numpy as np\n'), ((7441, 7453), 'numpy.array', 'np.array', (['sr'], {}), '(sr)\n', (7449, 7453), True, 'import numpy as np\n'), ((8393, 8417), 'numpy.cos', 'np.cos', (['(aa * np.pi / 180)'], {}), '(aa * np.pi / 180)\n', (8399, 8417), True, 'import numpy as np\n'), ((8424, 8448), 'numpy.sin', 'np.sin', (['(aa * np.pi / 180)'], {}), '(aa * np.pi / 180)\n', (8430, 8448), True, 'import numpy as np\n'), ((4334, 4369), 'Crypto.Util.Padding.pad', 'pad', (['(header + ll)', '(_header_size - 10)'], {}), '(header + ll, _header_size - 10)\n', (4337, 4369), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((4386, 4407), 'Crypto.Util.Padding.pad', 'pad', (['data', '_rsc_block'], {}), '(data, _rsc_block)\n', (4389, 4407), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((5463, 5473), 'numpy.abs', 'np.abs', (['cl'], {}), '(cl)\n', (5469, 5473), True, 'import numpy as np\n'), ((5508, 5518), 'numpy.abs', 'np.abs', (['cr'], {}), '(cr)\n', (5514, 5518), True, 'import numpy as np\n'), ((6561, 6572), 'numpy.abs', 'np.abs', (['cpl'], {}), '(cpl)\n', (6567, 6572), True, 'import numpy as np\n'), ((6607, 6618), 
'numpy.abs', 'np.abs', (['cpr'], {}), '(cpr)\n', (6613, 6618), True, 'import numpy as np\n'), ((7511, 7532), 'numpy.around', 'np.around', (['[rsl, rsr]'], {}), '([rsl, rsr])\n', (7520, 7532), True, 'import numpy as np\n'), ((7823, 7835), 'numpy.angle', 'np.angle', (['nn'], {}), '(nn)\n', (7831, 7835), True, 'import numpy as np\n'), ((7864, 7876), 'numpy.angle', 'np.angle', (['nn'], {}), '(nn)\n', (7872, 7876), True, 'import numpy as np\n'), ((1614, 1631), 'Crypto.Util.Padding.pad', 'pad', (['password', '(16)'], {}), '(password, 16)\n', (1617, 1631), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((3801, 3823), 'Crypto.Signature.pkcs1_15.new', 'pkcs1_15.new', (['self.key'], {}), '(self.key)\n', (3813, 3823), False, 'from Crypto.Signature import pkcs1_15\n'), ((7973, 7983), 'numpy.abs', 'np.abs', (['tt'], {}), '(tt)\n', (7979, 7983), True, 'import numpy as np\n'), ((8531, 8549), 'numpy.angle', 'np.angle', (['nn', '(True)'], {}), '(nn, True)\n', (8539, 8549), True, 'import numpy as np\n'), ((9199, 9209), 'numpy.abs', 'np.abs', (['cl'], {}), '(cl)\n', (9205, 9209), True, 'import numpy as np\n'), ((9223, 9233), 'numpy.abs', 'np.abs', (['cr'], {}), '(cr)\n', (9229, 9233), True, 'import numpy as np\n'), ((3362, 3384), 'Crypto.Signature.pkcs1_15.new', 'pkcs1_15.new', (['self.key'], {}), '(self.key)\n', (3374, 3384), False, 'from Crypto.Signature import pkcs1_15\n')] |
# Copyright (c) 2021 <NAME>
from pylib_sakata import init as init
# uncomment the follows when the file is executed in a Python console.
# init.close_all()
# init.clear_all()
import os
import shutil
import numpy as np
from control import matlab
from pylib_sakata import ctrl
from pylib_sakata import plot
print('Start simulation!')
# Common parameters
figurefolderName = 'figure_exercise_07'
if os.path.exists(figurefolderName):
shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
dataNum = 10000
freqrange = [10, 10000]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)
print('Common parameters were set.')
# Plant model
L = 0.1
R = 10.0
M = 2.0
C = 10.0
K = 0
Kt = 1.0
Pi = ctrl.tf([1], [L, R])
Ps = ctrl.tf([1], [M, C, K])
Pi_frd = ctrl.sys2frd(Pi, freq)
Ps_frd = ctrl.sys2frd(Ps, freq)
print('Plant model was set.')
# Design current PI controller
freqC = 500.0
zetaC = 1.0
Ci = ctrl.pi(freqC, zetaC, L, R)
Ci_frd = ctrl.sys2frd(Ci, freq)
print('Current PI controller was designed.')
# Design position PID controller
freq1 = 100.0
zeta1 = 1.0
freq2 = 100.0
zeta2 = 1.0
Cs = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K)
Cs_frd = ctrl.sys2frd(Cs, freq)
print('Position PID controller was designed.')
print('Frequency response analysis is running...')
Si = ctrl.feedback(Pi, Ci, sys='S')
Ti = ctrl.feedback(Pi, Ci, sys='T')
Ss = ctrl.feedback(Ps, Cs*Ti*Kt, sys='S')
Ts = ctrl.feedback(Ps, Cs*Ti*Kt, sys='T')
Gi_frd = Pi_frd * Ci_frd
Si_frd = 1/(1 + Gi_frd)
Ti_frd = 1 - Si_frd
Gs_frd = Ps_frd * Cs_frd * Ti_frd
Ss_frd = 1/(1 + Gs_frd)
Ts_frd = 1 - Ss_frd
print('Time response analysis is running...')
ti = np.linspace(0.0, 5.0e-3, dataNum)
ri = np.ones(len(ti))
yi, touti, xout = matlab.lsim(Ti, ri, ti)
ei, touti, xout = matlab.lsim(Si, ri, ti)
ui, touti, xout = matlab.lsim(Ci, ei, ti)
tp = np.linspace(0.0, 0.05, dataNum)
rp = np.ones(len(tp))
yp, toutp, xout = matlab.lsim(Ts, rp, tp)
ep, toutp, xout = matlab.lsim(Ss, rp, tp)
up, toutp, xout = matlab.lsim(Cs, ep, tp)
print('Plotting figures...')
# Time response of current
fig = plot.makefig()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
plot.plot_xy(ax1, ti, yi, '-', 'b', 1.5, 1.0, [0, max(ti)], ylabel='Current [A]', legend=['y'], title='Time response of current')
plot.plot_xy(ax2, ti, ui, '-', 'b', 1.5, 1.0, [0, max(ti)], xlabel='Time [s]', ylabel='Voltage [V]', legend=['u'])
plot.savefig(figurefolderName+'/time_resp_i.png')
# Time response of position
fig = plot.makefig()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
plot.plot_xy(ax1, tp, yp, '-', 'b', 1.5, 1.0, [0, max(tp)], ylabel='Position [m]', legend=['y'], title='Time response of position')
plot.plot_xy(ax2, tp, up, '-', 'b', 1.5, 1.0, [0, max(tp)], xlabel='Time [s]', ylabel='Force [N]', legend=['u'])
plot.savefig(figurefolderName+'/time_resp_p.png')
# Sensitivity function of current
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Si_frd, '-', 'b', 1.5, 1.0, freqrange, title='Bode diagram of current loop')
plot.plot_tffrd(ax_mag, ax_phase, Ti_frd, '-', 'r', 1.5, 1.0, freqrange, magrange=[-50, 10], legend=['S', 'T'])
plot.savefig(figurefolderName+'/freq_ST_i.png')
# Sensitivity function of position
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Ss_frd, '-', 'b', 1.5, 1.0, freqrange, title='Bode diagram of position loop')
plot.plot_tffrd(ax_mag, ax_phase, Ts_frd, '-', 'r', 1.5, 1.0, freqrange, magrange=[-50, 10], legend=['S', 'T'])
plot.savefig(figurefolderName+'/freq_ST_p.png')
# Nyquist of current
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gi_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram of current loop')
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist_i.png')
# Nyquist of position
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gs_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram of position loop')
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist_p.png')
print('Finished.')
| [
"pylib_sakata.plot.plot_nyquist_assistline",
"os.path.exists",
"numpy.log10",
"pylib_sakata.ctrl.pid",
"os.makedirs",
"pylib_sakata.ctrl.pi",
"pylib_sakata.plot.plot_nyquist",
"pylib_sakata.plot.savefig",
"pylib_sakata.ctrl.sys2frd",
"shutil.rmtree",
"numpy.linspace",
"pylib_sakata.ctrl.feedba... | [((400, 432), 'os.path.exists', 'os.path.exists', (['figurefolderName'], {}), '(figurefolderName)\n', (414, 432), False, 'import os\n'), ((470, 499), 'os.makedirs', 'os.makedirs', (['figurefolderName'], {}), '(figurefolderName)\n', (481, 499), False, 'import os\n'), ((731, 751), 'pylib_sakata.ctrl.tf', 'ctrl.tf', (['[1]', '[L, R]'], {}), '([1], [L, R])\n', (738, 751), False, 'from pylib_sakata import ctrl\n'), ((757, 780), 'pylib_sakata.ctrl.tf', 'ctrl.tf', (['[1]', '[M, C, K]'], {}), '([1], [M, C, K])\n', (764, 780), False, 'from pylib_sakata import ctrl\n'), ((790, 812), 'pylib_sakata.ctrl.sys2frd', 'ctrl.sys2frd', (['Pi', 'freq'], {}), '(Pi, freq)\n', (802, 812), False, 'from pylib_sakata import ctrl\n'), ((822, 844), 'pylib_sakata.ctrl.sys2frd', 'ctrl.sys2frd', (['Ps', 'freq'], {}), '(Ps, freq)\n', (834, 844), False, 'from pylib_sakata import ctrl\n'), ((938, 965), 'pylib_sakata.ctrl.pi', 'ctrl.pi', (['freqC', 'zetaC', 'L', 'R'], {}), '(freqC, zetaC, L, R)\n', (945, 965), False, 'from pylib_sakata import ctrl\n'), ((975, 997), 'pylib_sakata.ctrl.sys2frd', 'ctrl.sys2frd', (['Ci', 'freq'], {}), '(Ci, freq)\n', (987, 997), False, 'from pylib_sakata import ctrl\n'), ((1134, 1179), 'pylib_sakata.ctrl.pid', 'ctrl.pid', (['freq1', 'zeta1', 'freq2', 'zeta2', 'M', 'C', 'K'], {}), '(freq1, zeta1, freq2, zeta2, M, C, K)\n', (1142, 1179), False, 'from pylib_sakata import ctrl\n'), ((1189, 1211), 'pylib_sakata.ctrl.sys2frd', 'ctrl.sys2frd', (['Cs', 'freq'], {}), '(Cs, freq)\n', (1201, 1211), False, 'from pylib_sakata import ctrl\n'), ((1316, 1346), 'pylib_sakata.ctrl.feedback', 'ctrl.feedback', (['Pi', 'Ci'], {'sys': '"""S"""'}), "(Pi, Ci, sys='S')\n", (1329, 1346), False, 'from pylib_sakata import ctrl\n'), ((1352, 1382), 'pylib_sakata.ctrl.feedback', 'ctrl.feedback', (['Pi', 'Ci'], {'sys': '"""T"""'}), "(Pi, Ci, sys='T')\n", (1365, 1382), False, 'from pylib_sakata import ctrl\n'), ((1389, 1429), 'pylib_sakata.ctrl.feedback', 'ctrl.feedback', 
(['Ps', '(Cs * Ti * Kt)'], {'sys': '"""S"""'}), "(Ps, Cs * Ti * Kt, sys='S')\n", (1402, 1429), False, 'from pylib_sakata import ctrl\n'), ((1431, 1471), 'pylib_sakata.ctrl.feedback', 'ctrl.feedback', (['Ps', '(Cs * Ti * Kt)'], {'sys': '"""T"""'}), "(Ps, Cs * Ti * Kt, sys='T')\n", (1444, 1471), False, 'from pylib_sakata import ctrl\n'), ((1669, 1701), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.005)', 'dataNum'], {}), '(0.0, 0.005, dataNum)\n', (1680, 1701), True, 'import numpy as np\n'), ((1743, 1766), 'control.matlab.lsim', 'matlab.lsim', (['Ti', 'ri', 'ti'], {}), '(Ti, ri, ti)\n', (1754, 1766), False, 'from control import matlab\n'), ((1785, 1808), 'control.matlab.lsim', 'matlab.lsim', (['Si', 'ri', 'ti'], {}), '(Si, ri, ti)\n', (1796, 1808), False, 'from control import matlab\n'), ((1827, 1850), 'control.matlab.lsim', 'matlab.lsim', (['Ci', 'ei', 'ti'], {}), '(Ci, ei, ti)\n', (1838, 1850), False, 'from control import matlab\n'), ((1857, 1888), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.05)', 'dataNum'], {}), '(0.0, 0.05, dataNum)\n', (1868, 1888), True, 'import numpy as np\n'), ((1929, 1952), 'control.matlab.lsim', 'matlab.lsim', (['Ts', 'rp', 'tp'], {}), '(Ts, rp, tp)\n', (1940, 1952), False, 'from control import matlab\n'), ((1971, 1994), 'control.matlab.lsim', 'matlab.lsim', (['Ss', 'rp', 'tp'], {}), '(Ss, rp, tp)\n', (1982, 1994), False, 'from control import matlab\n'), ((2013, 2036), 'control.matlab.lsim', 'matlab.lsim', (['Cs', 'ep', 'tp'], {}), '(Cs, ep, tp)\n', (2024, 2036), False, 'from control import matlab\n'), ((2100, 2114), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (2112, 2114), False, 'from pylib_sakata import plot\n'), ((2414, 2465), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/time_resp_i.png')"], {}), "(figurefolderName + '/time_resp_i.png')\n", (2426, 2465), False, 'from pylib_sakata import plot\n'), ((2499, 2513), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (2511, 2513), 
False, 'from pylib_sakata import plot\n'), ((2813, 2864), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/time_resp_p.png')"], {}), "(figurefolderName + '/time_resp_p.png')\n", (2825, 2864), False, 'from pylib_sakata import plot\n'), ((2904, 2918), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (2916, 2918), False, 'from pylib_sakata import plot\n'), ((2981, 3095), 'pylib_sakata.plot.plot_tffrd', 'plot.plot_tffrd', (['ax_mag', 'ax_phase', 'Si_frd', '"""-"""', '"""b"""', '(1.5)', '(1.0)', 'freqrange'], {'title': '"""Bode diagram of current loop"""'}), "(ax_mag, ax_phase, Si_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Bode diagram of current loop')\n", (2996, 3095), False, 'from pylib_sakata import plot\n'), ((3092, 3207), 'pylib_sakata.plot.plot_tffrd', 'plot.plot_tffrd', (['ax_mag', 'ax_phase', 'Ti_frd', '"""-"""', '"""r"""', '(1.5)', '(1.0)', 'freqrange'], {'magrange': '[-50, 10]', 'legend': "['S', 'T']"}), "(ax_mag, ax_phase, Ti_frd, '-', 'r', 1.5, 1.0, freqrange,\n magrange=[-50, 10], legend=['S', 'T'])\n", (3107, 3207), False, 'from pylib_sakata import plot\n'), ((3204, 3253), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/freq_ST_i.png')"], {}), "(figurefolderName + '/freq_ST_i.png')\n", (3216, 3253), False, 'from pylib_sakata import plot\n'), ((3294, 3308), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (3306, 3308), False, 'from pylib_sakata import plot\n'), ((3371, 3486), 'pylib_sakata.plot.plot_tffrd', 'plot.plot_tffrd', (['ax_mag', 'ax_phase', 'Ss_frd', '"""-"""', '"""b"""', '(1.5)', '(1.0)', 'freqrange'], {'title': '"""Bode diagram of position loop"""'}), "(ax_mag, ax_phase, Ss_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Bode diagram of position loop')\n", (3386, 3486), False, 'from pylib_sakata import plot\n'), ((3483, 3598), 'pylib_sakata.plot.plot_tffrd', 'plot.plot_tffrd', (['ax_mag', 'ax_phase', 'Ts_frd', '"""-"""', '"""r"""', '(1.5)', '(1.0)', 'freqrange'], {'magrange': 
'[-50, 10]', 'legend': "['S', 'T']"}), "(ax_mag, ax_phase, Ts_frd, '-', 'r', 1.5, 1.0, freqrange,\n magrange=[-50, 10], legend=['S', 'T'])\n", (3498, 3598), False, 'from pylib_sakata import plot\n'), ((3595, 3644), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/freq_ST_p.png')"], {}), "(figurefolderName + '/freq_ST_p.png')\n", (3607, 3644), False, 'from pylib_sakata import plot\n'), ((3671, 3685), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (3683, 3685), False, 'from pylib_sakata import plot\n'), ((3712, 3807), 'pylib_sakata.plot.plot_nyquist', 'plot.plot_nyquist', (['ax', 'Gi_frd', '"""-"""', '"""b"""', '(1.5)', '(1.0)'], {'title': '"""Nyquist Diagram of current loop"""'}), "(ax, Gi_frd, '-', 'b', 1.5, 1.0, title=\n 'Nyquist Diagram of current loop')\n", (3729, 3807), False, 'from pylib_sakata import plot\n'), ((3803, 3835), 'pylib_sakata.plot.plot_nyquist_assistline', 'plot.plot_nyquist_assistline', (['ax'], {}), '(ax)\n', (3831, 3835), False, 'from pylib_sakata import plot\n'), ((3836, 3885), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/nyquist_i.png')"], {}), "(figurefolderName + '/nyquist_i.png')\n", (3848, 3885), False, 'from pylib_sakata import plot\n'), ((3913, 3927), 'pylib_sakata.plot.makefig', 'plot.makefig', ([], {}), '()\n', (3925, 3927), False, 'from pylib_sakata import plot\n'), ((3954, 4050), 'pylib_sakata.plot.plot_nyquist', 'plot.plot_nyquist', (['ax', 'Gs_frd', '"""-"""', '"""b"""', '(1.5)', '(1.0)'], {'title': '"""Nyquist Diagram of position loop"""'}), "(ax, Gs_frd, '-', 'b', 1.5, 1.0, title=\n 'Nyquist Diagram of position loop')\n", (3971, 4050), False, 'from pylib_sakata import plot\n'), ((4046, 4078), 'pylib_sakata.plot.plot_nyquist_assistline', 'plot.plot_nyquist_assistline', (['ax'], {}), '(ax)\n', (4074, 4078), False, 'from pylib_sakata import plot\n'), ((4079, 4128), 'pylib_sakata.plot.savefig', 'plot.savefig', (["(figurefolderName + '/nyquist_p.png')"], {}), 
"(figurefolderName + '/nyquist_p.png')\n", (4091, 4128), False, 'from pylib_sakata import plot\n'), ((438, 469), 'shutil.rmtree', 'shutil.rmtree', (['figurefolderName'], {}), '(figurefolderName)\n', (451, 469), False, 'import shutil\n'), ((559, 581), 'numpy.log10', 'np.log10', (['freqrange[0]'], {}), '(freqrange[0])\n', (567, 581), True, 'import numpy as np\n'), ((583, 605), 'numpy.log10', 'np.log10', (['freqrange[1]'], {}), '(freqrange[1])\n', (591, 605), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
# Compute the Highs and Lows in a Window
def get_high_lows_lookback(high, low, lookback_days):
"""
Get the highs and lows in a lookback window.
Parameters
----------
high : DataFrame
High price for each ticker and date
low : DataFrame
Low price for each ticker and date
lookback_days : int
The number of days to look back
Returns
-------
lookback_high : DataFrame
Lookback high price for each ticker and date
lookback_low : DataFrame
Lookback low price for each ticker and date
"""
# getting max price for high prices excluding present day
lookback_high = high.rolling(window=lookback_days).max().shift()
# getting min price for low prices excluding present day
lookback_low = low.rolling(window=lookback_days).min().shift()
return lookback_high, lookback_low
# Compute Long and Short Signals
def get_long_short(close, lookback_high, lookback_low):
"""
Generate the signals long, short, and do nothing.
Parameters
----------
close : DataFrame
Close price for each ticker and date
lookback_high : DataFrame
Lookback high price for each ticker and date
lookback_low : DataFrame
Lookback low price for each ticker and date
Returns
-------
long_short : DataFrame
The long, short, and do nothing signals for each ticker and date
"""
# creating signal dataframe with similar datetime as index
signal_df = pd.DataFrame(columns=close.columns, index=close.index)
# getting the row and column length of the df
row_len = close.shape[0]
col_len = close.shape[1]
# looping through matching datasets for signaling
for row in range(0, row_len):
for col in range(0, col_len):
if lookback_low.iloc[row][col] > close.iloc[row][col]:
signal_df.iloc[row][col] = -1
elif lookback_high.iloc[row][col] < close.iloc[row][col]:
signal_df.iloc[row][col] = 1
else:
signal_df.iloc[row][col] = 0
# Converting to int64 datatype
for index, row in signal_df.iterrows():
signal_df.loc[index] = signal_df.loc[index].astype('int64')
return signal_df
# Remove unnecessary signals
def clear_signals(signals, window_size):
"""
Clear out signals in a Series of just long or short signals.
Remove the number of signals down to 1 within the window size time period.
Parameters
----------
signals : Pandas Series
The long, short, or do nothing signals
window_size : int
The number of days to have a single signal
Returns
-------
signals : Pandas Series
Signals with the signals removed from the window size
"""
# Start with buffer of window size
# This handles the edge case of calculating past_signal in the beginning
clean_signals = [0]*window_size
for signal_i, current_signal in enumerate(signals):
# Check if there was a signal in the past window_size of days
has_past_signal = bool(sum(clean_signals[signal_i:signal_i+window_size]))
# Use the current signal if there's no past signal, else 0/False
clean_signals.append(not has_past_signal and current_signal)
# Remove buffer
clean_signals = clean_signals[window_size:]
# Return the signals as a Series of Ints
return pd.Series(np.array(clean_signals).astype(np.int), signals.index)
# Filter required signals
def filter_signals(signal, lookahead_days):
"""
Filter out signals in a DataFrame.
Parameters
----------
signal : DataFrame
The long, short, and do nothing signals for each ticker and date
lookahead_days : int
The number of days to look ahead
Returns
-------
filtered_signal : DataFrame
The filtered long, short, and do nothing signals for each ticker and date
"""
# getting signal values for columns and rows
col_values = signal.columns.values
index_values = signal.index.values
# getting the short and long dfs
short_df = pd.DataFrame([], columns=col_values , index=index_values)
long_df = pd.DataFrame([], columns=col_values , index=index_values)
# iterating through the df, comparing the values of each signal checking and indicating if there is signal(1, -1) or not(0)
for (idx_l,col_l),(idx_s, col_s),(idx_sig, col_sig) in zip(long_df.iterrows(), short_df.iterrows(), signal.iterrows()):
for value in col_values:
if col_sig[value] == -1:
col_s[value] =-1
else :
col_s[value] = 0
if col_sig[value] == 1:
col_l[value] =1
else :
col_l[value] = 0
# filtering the df for the number of lookahead days and apply the clear_signals function to each column
# returning a function from another function with lambda (functional programming)
filtered_long = long_df.apply(lambda x: clear_signals(x,lookahead_days), axis=0)
filtered_short = short_df.apply(lambda x: clear_signals(x,lookahead_days), axis=0)
# adding the 2 dfs to obtain 1 df to be returned
return filtered_long.add(filtered_short)
# Get Lookahead Close Prices
def get_lookahead_prices(close, lookahead_days):
"""
Get the lookahead prices for `lookahead_days` number of days.
Parameters
----------
close : DataFrame
Close price for each ticker and date
lookahead_days : int
The number of days to look ahead
Returns
-------
lookahead_prices : DataFrame
The lookahead prices for each ticker and date
"""
return close.shift(-lookahead_days)
| [
"pandas.DataFrame",
"numpy.array"
] | [((1674, 1728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'close.columns', 'index': 'close.index'}), '(columns=close.columns, index=close.index)\n', (1686, 1728), True, 'import pandas as pd\n'), ((4434, 4490), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'col_values', 'index': 'index_values'}), '([], columns=col_values, index=index_values)\n', (4446, 4490), True, 'import pandas as pd\n'), ((4507, 4563), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'col_values', 'index': 'index_values'}), '([], columns=col_values, index=index_values)\n', (4519, 4563), True, 'import pandas as pd\n'), ((3699, 3722), 'numpy.array', 'np.array', (['clean_signals'], {}), '(clean_signals)\n', (3707, 3722), True, 'import numpy as np\n')] |
"""
The lidar system, data and fit (1 of 2 datasets)
================================================
Generate a chart of the data fitted by Gaussian curve
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
def model(t, coeffs):
return coeffs[0] + coeffs[1] * np.exp(- ((t-coeffs[2])/coeffs[3])**2)
def residuals(coeffs, y, t):
return y - model(t, coeffs)
waveform_1 = np.load('waveform_1.npy')
t = np.arange(len(waveform_1))
x0 = np.array([3, 30, 15, 1], dtype=float)
x, flag = leastsq(residuals, x0, args=(waveform_1, t))
print(x)
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(t, waveform_1, t, model(t, x))
plt.xlabel('Time [ns]')
plt.ylabel('Amplitude [bins]')
plt.legend(['Waveform', 'Model'])
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.array",
"scipy.optimize.leastsq",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((424, 449), 'numpy.load', 'np.load', (['"""waveform_1.npy"""'], {}), "('waveform_1.npy')\n", (431, 449), True, 'import numpy as np\n'), ((487, 524), 'numpy.array', 'np.array', (['[3, 30, 15, 1]'], {'dtype': 'float'}), '([3, 30, 15, 1], dtype=float)\n', (495, 524), True, 'import numpy as np\n'), ((535, 579), 'scipy.optimize.leastsq', 'leastsq', (['residuals', 'x0'], {'args': '(waveform_1, t)'}), '(residuals, x0, args=(waveform_1, t))\n', (542, 579), False, 'from scipy.optimize import leastsq\n'), ((601, 629), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (613, 629), True, 'import matplotlib.pyplot as plt\n'), ((670, 693), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ns]"""'], {}), "('Time [ns]')\n", (680, 693), True, 'import matplotlib.pyplot as plt\n'), ((694, 724), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [bins]"""'], {}), "('Amplitude [bins]')\n", (704, 724), True, 'import matplotlib.pyplot as plt\n'), ((725, 758), 'matplotlib.pyplot.legend', 'plt.legend', (["['Waveform', 'Model']"], {}), "(['Waveform', 'Model'])\n", (735, 758), True, 'import matplotlib.pyplot as plt\n'), ((759, 769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (767, 769), True, 'import matplotlib.pyplot as plt\n'), ((307, 350), 'numpy.exp', 'np.exp', (['(-((t - coeffs[2]) / coeffs[3]) ** 2)'], {}), '(-((t - coeffs[2]) / coeffs[3]) ** 2)\n', (313, 350), True, 'import numpy as np\n')] |
from __future__ import annotations
import itertools
import logging
import math
import re
from collections import Counter
from itertools import combinations, starmap
from typing import Dict, Iterable, List, TextIO, Tuple
import networkx as nx
import numpy as np
import numpy.typing as npt
from networkx.drawing.nx_pydot import write_dot
from ..cli import run_with_file_argument
from ..io_utils import read_line
logger = logging.getLogger(__name__)
HEADER_PATTERN = re.compile(r"^\-\-\-\sscanner\s\d+\s\-\-\-$")
def read_beacons(input: TextIO) -> Iterable[npt.NDArray]:
while True:
header = read_line(input)
if not header:
break
beacons: List[Tuple[int, int, int]] = []
assert HEADER_PATTERN.match(header) is not None
while line := read_line(input):
x, y, z = map(int, line.split(","))
beacons.append((x, y, z))
yield np.array(beacons)
def distance(a: npt.NDArray[int], b: npt.NDArray[int]) -> float:
dist: float = np.linalg.norm(a - b)
return dist
def get_edges(beacons: npt.NDArray[int]) -> Dict[float, Tuple[int, int]]:
enumerated_beacons = enumerate(beacons)
result: Dict[float, Tuple[int, int]] = {}
for (a_idx, a_beacon), (b_idx, b_beacon) in combinations(enumerated_beacons, 2):
dist = distance(a_beacon, b_beacon)
assert dist not in result
result[dist] = a_idx, b_idx
return result
def resolve_scanner(
source_beacons: npt.NDArray[int], target_beacons: npt.NDArray[int]
) -> Tuple[npt.NDArray[int], npt.NDArray[int]]:
# find common edges
source_edges = get_edges(source_beacons)
target_edges = get_edges(target_beacons)
common_edges = set(source_edges) & set(target_edges)
# Now pick 2 nodes at random, then one more and find their equivalents
first_edge, second_edge, *_ = common_edges
source_node_a, source_node_b = source_edges[first_edge]
other_source_nodes = source_edges[
second_edge
] # at least one is guaranteed to be neither a nor b
source_node_c, *_ = set(other_source_nodes) - {source_node_a, source_node_b}
source_a_to_b = distance(
source_beacons[source_node_a], source_beacons[source_node_b]
)
source_a_to_c = distance(
source_beacons[source_node_a], source_beacons[source_node_c]
)
source_b_to_c = distance(
source_beacons[source_node_b], source_beacons[source_node_c]
)
target_nodes_a_or_b = target_edges[first_edge]
target_nodes_c_or_d = target_edges[second_edge]
assert (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_a_or_b[1]],
)
== source_a_to_b
)
# Figure out which nodes are which (map A, B, C from source to target)
if (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_c_or_d[0]],
)
== source_a_to_c
):
target_node_a, target_node_b = target_nodes_a_or_b
target_node_c, target_node_d = target_nodes_c_or_d
elif (
distance(
target_beacons[target_nodes_a_or_b[1]],
target_beacons[target_nodes_c_or_d[0]],
)
== source_a_to_c
):
target_node_b, target_node_a = target_nodes_a_or_b
target_node_c, target_node_d = target_nodes_c_or_d
elif (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_c_or_d[1]],
)
== source_a_to_c
):
target_node_a, target_node_b = target_nodes_a_or_b
target_node_d, target_node_c = target_nodes_c_or_d
else:
assert (
distance(
target_beacons[target_nodes_a_or_b[1]],
target_beacons[target_nodes_c_or_d[1]],
)
== source_a_to_c
)
target_node_b, target_node_a = target_nodes_a_or_b
target_node_d, target_node_c = target_nodes_c_or_d
# make sure that our triangle is correct
assert (
distance(target_beacons[target_node_a], target_beacons[target_node_b])
== source_a_to_b
)
assert (
distance(target_beacons[target_node_a], target_beacons[target_node_c])
== source_a_to_c
)
assert (
distance(target_beacons[target_node_b], target_beacons[target_node_c])
== source_b_to_c
)
# now figure out the coords transformation
source_a_coords = source_beacons[source_node_a]
target_a_coords = target_beacons[target_node_a]
source_b_coords = source_beacons[source_node_b]
target_b_coords = target_beacons[target_node_b]
# analyze how the coords change for a know pair of mirrored
source_vector = source_a_coords - source_b_coords
target_vector = target_a_coords - target_b_coords
# to execute the naive approach we need the translation to be unique on all axes
abs_source_vector = np.abs(source_vector)
assert len(np.unique(abs_source_vector)) == 3
abs_target_vector = np.abs(target_vector)
assert len(np.unique(abs_target_vector)) == 3
# the absolute differences should match
assert set(abs_source_vector) == set(abs_target_vector)
# now we just need to figure out which axis is which and then the scanners position
rotation_matrix = np.zeros((3, 3), dtype=int)
for source_axis, abs_source_value in enumerate(abs_source_vector):
(target_axis,) = np.where(abs_target_vector == abs_source_value)
is_negated = np.sign(source_vector[source_axis]) != np.sign(
target_vector[target_axis]
)
logger.info(
"Source axis %d (value %d) is target axis %d (value %d) %s",
source_axis,
source_vector[source_axis],
target_axis,
target_vector[target_axis],
"negated" if is_negated else "direct",
)
rotation_matrix[target_axis, source_axis] = -1 if is_negated else 1
logger.info("Rotation matrix is %s", rotation_matrix)
# make sure the our rotation matrix works
assert np.array_equal(target_vector @ rotation_matrix, source_vector)
# now figure out the scanners translation offsets
translation_matrix = source_a_coords - (target_a_coords @ rotation_matrix)
logger.info("Translation matrix is %s", translation_matrix)
# make sure that the whole rotation and translation works
assert np.array_equal(
target_a_coords @ rotation_matrix + translation_matrix, source_a_coords
)
assert np.array_equal(
target_b_coords @ rotation_matrix + translation_matrix, source_b_coords
)
# Now we can map all points from the target scanner into source scanner's coords
result: npt.NDArray[int] = target_beacons @ rotation_matrix + translation_matrix
return result, translation_matrix
def check_for_repeating_distances(scanners: List[npt.NDArray[int]]) -> None:
# First we verify that there are no repeating distances within
# each scanner's beacons - thanks to this we will be able to identify
# graph edges in an unique way.
has_repeating_distances = False
for i, beacons in enumerate(scanners):
distances = starmap(distance, combinations(beacons, 2))
counter = Counter(distances)
repeating_distances = (
count for dist, count in counter.most_common() if count > 2
)
for count in repeating_distances:
logger.info("Scanner %d repeated distance %d", i, count)
has_repeating_distances = True
logger.info("Scanner %d done", i)
assert not has_repeating_distances
def build_neighbourhood_graph(scanners: List[npt.NDArray[int]]) -> nx.Graph:
# Then we build a graph of adjacency between scanners.
# We require a fully conncted clique of size 12 to be common
# between graphs.
# We will use this adjacency graph to resolve the scanners in order.
neighbourhood_graph = nx.Graph()
for idx, _ in enumerate(scanners):
neighbourhood_graph.add_node(idx)
min_edges = math.comb(12, 2)
for (a_idx, a_beacons), (b_idx, b_beacons) in combinations(enumerate(scanners), 2):
a_edges = get_edges(a_beacons)
b_edges = get_edges(b_beacons)
common_edges = set(a_edges) & set(b_edges)
if len(common_edges) >= min_edges:
logger.info(
"Scanner %d and %d have %d common edges",
a_idx,
b_idx,
len(common_edges),
)
neighbourhood_graph.add_edge(a_idx, b_idx)
nx.nx_pydot.to_pydot(neighbourhood_graph).write_png("neighbourhood_graph.png")
return neighbourhood_graph
def traverse_and_resolve_scanners(
    scanners: List[npt.NDArray[int]], neighbourhood_graph: nx.Graph
) -> npt.NDArray[int]:
    """Rotate/translate every scanner's beacons into scanner 0's frame.

    Mutates ``scanners`` in place (each entry is replaced by its resolved
    coordinates) and returns an (n, 3) int array of scanner positions,
    expressed in scanner 0's coordinate system.
    """
    scanner_positions = np.zeros((len(scanners), 3), dtype=int)
    # We need to start resolving our graphs:
    # scanner 0 is taken as the canonical frame, and we DFS through the
    # neighbourhood graph, unifying each scanner with an already-resolved
    # neighbour as we go.
    unresolved_nodes = set(range(1, len(scanners)))
    def traverse_neighbourhood(source_scanner_idx: int) -> None:
        # Recursive DFS; depth is bounded by the number of scanners.
        for neighbour_scanner_idx in neighbourhood_graph.neighbors(source_scanner_idx):
            if neighbour_scanner_idx not in unresolved_nodes:
                continue  # already visited
            # resolve and update this scanner relative to the (already
            # resolved) source scanner
            resolved_scanner, scanner_position = resolve_scanner(
                scanners[source_scanner_idx], scanners[neighbour_scanner_idx]
            )
            # sanity check: at least the required 12 beacons must coincide
            assert (
                len(
                    set(map(tuple, resolved_scanner))
                    & set(map(tuple, scanners[source_scanner_idx]))
                )
                >= 12
            )
            scanners[neighbour_scanner_idx] = resolved_scanner
            scanner_positions[neighbour_scanner_idx] = scanner_position
            unresolved_nodes.remove(neighbour_scanner_idx)
            logger.info("Resolved scanner %d", neighbour_scanner_idx)
            traverse_neighbourhood(neighbour_scanner_idx)
    # Resolve all scanners, starting from the canonical one.
    traverse_neighbourhood(0)
    return scanner_positions
def main(input: TextIO) -> str:
    """Read beacon scans, unify them into one frame, and count unique beacons."""
    scanners = list(read_beacons(input))
    check_for_repeating_distances(scanners)
    graph = build_neighbourhood_graph(scanners)
    traverse_and_resolve_scanners(scanners, graph)
    # Every beacon now lives in scanner 0's coordinate frame, so the answer
    # is simply the number of distinct points across all scanners.
    unique_beacons = {tuple(beacon) for beacon in itertools.chain.from_iterable(scanners)}
    count = len(unique_beacons)
    logger.info("Unique beacons %d", count)
    return f"{count}"
# Script entry point: run main() with the input file passed on the command line.
if __name__ == "__main__":
    run_with_file_argument(main)
| [
"logging.getLogger",
"numpy.abs",
"numpy.unique",
"re.compile",
"numpy.where",
"networkx.Graph",
"itertools.combinations",
"collections.Counter",
"numpy.zeros",
"numpy.array",
"numpy.array_equal",
"numpy.sign",
"itertools.chain.from_iterable",
"numpy.linalg.norm",
"networkx.nx_pydot.to_p... | [((423, 450), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (440, 450), False, 'import logging\n'), ((469, 523), 're.compile', 're.compile', (['"""^\\\\-\\\\-\\\\-\\\\sscanner\\\\s\\\\d+\\\\s\\\\-\\\\-\\\\-$"""'], {}), "('^\\\\-\\\\-\\\\-\\\\sscanner\\\\s\\\\d+\\\\s\\\\-\\\\-\\\\-$')\n", (479, 523), False, 'import re\n'), ((1014, 1035), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (1028, 1035), True, 'import numpy as np\n'), ((1266, 1301), 'itertools.combinations', 'combinations', (['enumerated_beacons', '(2)'], {}), '(enumerated_beacons, 2)\n', (1278, 1301), False, 'from itertools import combinations, starmap\n'), ((4963, 4984), 'numpy.abs', 'np.abs', (['source_vector'], {}), '(source_vector)\n', (4969, 4984), True, 'import numpy as np\n'), ((5059, 5080), 'numpy.abs', 'np.abs', (['target_vector'], {}), '(target_vector)\n', (5065, 5080), True, 'import numpy as np\n'), ((5347, 5374), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'int'}), '((3, 3), dtype=int)\n', (5355, 5374), True, 'import numpy as np\n'), ((6114, 6176), 'numpy.array_equal', 'np.array_equal', (['(target_vector @ rotation_matrix)', 'source_vector'], {}), '(target_vector @ rotation_matrix, source_vector)\n', (6128, 6176), True, 'import numpy as np\n'), ((6449, 6540), 'numpy.array_equal', 'np.array_equal', (['(target_a_coords @ rotation_matrix + translation_matrix)', 'source_a_coords'], {}), '(target_a_coords @ rotation_matrix + translation_matrix,\n source_a_coords)\n', (6463, 6540), True, 'import numpy as np\n'), ((6562, 6653), 'numpy.array_equal', 'np.array_equal', (['(target_b_coords @ rotation_matrix + translation_matrix)', 'source_b_coords'], {}), '(target_b_coords @ rotation_matrix + translation_matrix,\n source_b_coords)\n', (6576, 6653), True, 'import numpy as np\n'), ((7983, 7993), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7991, 7993), True, 'import networkx as nx\n'), ((8092, 8108), 'math.comb', 
'math.comb', (['(12)', '(2)'], {}), '(12, 2)\n', (8101, 8108), False, 'import math\n'), ((5471, 5518), 'numpy.where', 'np.where', (['(abs_target_vector == abs_source_value)'], {}), '(abs_target_vector == abs_source_value)\n', (5479, 5518), True, 'import numpy as np\n'), ((7291, 7309), 'collections.Counter', 'Counter', (['distances'], {}), '(distances)\n', (7298, 7309), False, 'from collections import Counter\n'), ((911, 928), 'numpy.array', 'np.array', (['beacons'], {}), '(beacons)\n', (919, 928), True, 'import numpy as np\n'), ((5000, 5028), 'numpy.unique', 'np.unique', (['abs_source_vector'], {}), '(abs_source_vector)\n', (5009, 5028), True, 'import numpy as np\n'), ((5096, 5124), 'numpy.unique', 'np.unique', (['abs_target_vector'], {}), '(abs_target_vector)\n', (5105, 5124), True, 'import numpy as np\n'), ((5540, 5575), 'numpy.sign', 'np.sign', (['source_vector[source_axis]'], {}), '(source_vector[source_axis])\n', (5547, 5575), True, 'import numpy as np\n'), ((5579, 5614), 'numpy.sign', 'np.sign', (['target_vector[target_axis]'], {}), '(target_vector[target_axis])\n', (5586, 5614), True, 'import numpy as np\n'), ((7247, 7271), 'itertools.combinations', 'combinations', (['beacons', '(2)'], {}), '(beacons, 2)\n', (7259, 7271), False, 'from itertools import combinations, starmap\n'), ((8607, 8648), 'networkx.nx_pydot.to_pydot', 'nx.nx_pydot.to_pydot', (['neighbourhood_graph'], {}), '(neighbourhood_graph)\n', (8627, 8648), True, 'import networkx as nx\n'), ((10715, 10754), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['scanners'], {}), '(scanners)\n', (10744, 10754), False, 'import itertools\n')] |
import csv
# Recorded simulator run: driving_log.csv holds one row per frame with the
# three camera image paths and the steering angle; frames live under IMG/.
path = "/home/ubuntu/data_set_5/"
csv_binary = "driving_log.csv"
lines = []
# read every row of the driving log into memory
with open(path + csv_binary) as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
from sklearn.model_selection import train_test_split
# hold out 20% of the log rows for validation
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
import cv2
import numpy as np
from sklearn.utils import shuffle
def generator(samples, batch_size=32):
    """Endlessly yield shuffled (images, steering_angles) batches from log rows.

    Rows with |steering| < 0.15 are dropped to de-bias straight driving.
    For each kept row the center, left and right camera frames are loaded,
    with a +/-0.2 steering correction for the side cameras, and every frame
    is additionally mirrored (with negated angle) to augment the data.
    """
    total = len(samples)
    steering_correction = 0.2
    while True:  # Keras fit_generator expects an infinite generator
        shuffle(samples)
        for start in range(0, total, batch_size):
            frames, angles = [], []
            for row in samples[start:start + batch_size]:
                steering = float(row[3])
                if abs(steering) < 0.15:
                    continue  # skip near-straight samples
                # center / left / right cameras with their angle adjustments,
                # loaded in the same order as before
                for column, adjustment in ((0, 0.0),
                                           (1, steering_correction),
                                           (2, -steering_correction)):
                    file_name = row[column].split('/')[-1]
                    frames.append(cv2.imread(path + 'IMG/' + file_name))
                    angles.append(steering + adjustment)
            # mirror every frame horizontally to double the batch
            augmented_frames, augmented_angles = [], []
            for frame, angle in zip(frames, angles):
                augmented_frames.append(frame)
                augmented_angles.append(angle)
                augmented_frames.append(cv2.flip(frame, 1))
                augmented_angles.append(angle * -1.0)
            yield shuffle(np.array(augmented_frames), np.array(augmented_angles))
# train using a generator so the full image set never has to fit in memory
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
# NOTE(review): ch/row/col are never used below — the Lambda layer hard-codes
# (160, 320, 3) instead; confirm whether this was meant to drive input_shape.
ch, row, col = 3, 80, 320 # trimmed format
from keras.models import Sequential
from keras.layers import Lambda, Cropping2D
from keras.layers.core import Dense, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
# NVIDIA end-to-end self-driving network: normalize, crop sky/hood,
# 5 conv layers, then fully connected layers down to a single steering output.
model = Sequential()
# scale pixels to [-0.5, 0.5]
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))
# crop 50 px of sky from the top and 20 px of hood from the bottom
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(3,160,320)))
model.add(Convolution2D(24,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Dropout(0.5))
# single regression output: the steering angle
model.add(Dense(1))
# mean-squared-error regression loss with the Adam optimizer
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator, samples_per_epoch=len(train_samples),
    validation_data=validation_generator, nb_val_samples=len(validation_samples),
    nb_epoch=5)
print(model.summary())
model.save('model.h5')
| [
"keras.layers.core.Flatten",
"cv2.imread",
"cv2.flip",
"keras.layers.convolutional.Convolution2D",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle",
"keras.layers.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Cropping2D",
"keras.layers.core.Dropout",
"csv.... | [((326, 364), 'sklearn.model_selection.train_test_split', 'train_test_split', (['lines'], {'test_size': '(0.2)'}), '(lines, test_size=0.2)\n', (342, 364), False, 'from sklearn.model_selection import train_test_split\n'), ((2650, 2662), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2660, 2662), False, 'from keras.models import Sequential\n'), ((165, 184), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (175, 184), False, 'import csv\n'), ((2673, 2733), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (2679, 2733), False, 'from keras.layers import Lambda, Cropping2D\n'), ((2743, 2809), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((50, 20), (0, 0))', 'input_shape': '(3, 160, 320)'}), '(cropping=((50, 20), (0, 0)), input_shape=(3, 160, 320))\n', (2753, 2809), False, 'from keras.layers import Lambda, Cropping2D\n'), ((2817, 2877), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(24, 5, 5, subsample=(2, 2), activation='relu')\n", (2830, 2877), False, 'from keras.layers.convolutional import Convolution2D\n'), ((2886, 2946), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(36, 5, 5, subsample=(2, 2), activation='relu')\n", (2899, 2946), False, 'from keras.layers.convolutional import Convolution2D\n'), ((2955, 3015), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(48, 5, 5, subsample=(2, 2), activation='relu')\n", (2968, 3015), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3024, 3066), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, 3, 
activation='relu')\n", (3037, 3066), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3076, 3118), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, 3, activation='relu')\n", (3089, 3118), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3128, 3137), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (3135, 3137), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3149, 3159), 'keras.layers.core.Dense', 'Dense', (['(100)'], {}), '(100)\n', (3154, 3159), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3171, 3183), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3178, 3183), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3195, 3204), 'keras.layers.core.Dense', 'Dense', (['(50)'], {}), '(50)\n', (3200, 3204), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3216, 3228), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3223, 3228), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3240, 3249), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (3245, 3249), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3261, 3273), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3268, 3273), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((3285, 3293), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3290, 3293), False, 'from keras.layers.core import Dense, Flatten, Dropout\n'), ((562, 578), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (569, 578), False, 'from sklearn.utils import shuffle\n'), ((2115, 2141), 'numpy.array', 'np.array', (['augmented_images'], {}), '(augmented_images)\n', (2123, 2141), True, 'import numpy as np\n'), ((2164, 2196), 'numpy.array', 'np.array', (['augmented_measurements'], {}), 
'(augmented_measurements)\n', (2172, 2196), True, 'import numpy as np\n'), ((954, 985), 'cv2.imread', 'cv2.imread', (['center_image_source'], {}), '(center_image_source)\n', (964, 985), False, 'import cv2\n'), ((1235, 1264), 'cv2.imread', 'cv2.imread', (['left_image_source'], {}), '(left_image_source)\n', (1245, 1264), False, 'import cv2\n'), ((1530, 1560), 'cv2.imread', 'cv2.imread', (['right_image_source'], {}), '(right_image_source)\n', (1540, 1560), False, 'import cv2\n'), ((2215, 2240), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2222, 2240), False, 'from sklearn.utils import shuffle\n'), ((2014, 2032), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2022, 2032), False, 'import cv2\n')] |
"""
Algorithm :
Message in converted in binary values.
Binary digit 0 corresponds to an odd RGB value and binary digit 1 corresponds to an even RGB value of the image.
If binary value of the message is 1 then the sum of R, G and B values will be an even integer otherwise it will be odd.
"""
from PIL import Image
from numpy import array
import numpy as np
# taking msg as input and converting it to binary
msg = input("Enter the message to encrypt in the image: ")
bmsg = []
for i in msg:
bmsg.append(format(int(ord(i)), '08b'))
bmsg = "".join(bmsg)
print("Encrypting.........\nPlease Wait! It may take few minutes.......")
img = Image.open('test.jpg') # path of the image to encrypt (with extension)
rows, columns, channels = np.shape(img)
arr = array(img)
def is_even(x):
    """Return True if ``x`` is even, otherwise False.

    The previous version returned 1 for even numbers and fell through to an
    implicit None otherwise; a real boolean has identical truthiness for all
    existing callers while being explicit about the contract.
    """
    return x % 2 == 0
# Pass 1: make every channel value in the payload region odd (bit "0" state).
# Only the first len(bmsg)*3 + 24 channel values are touched; the +24 leaves
# a few extra prepared channels past the message — presumably headroom for a
# terminator, TODO confirm against the decoder.
counter = 0
for i in range(rows):
    for j in range(columns):
        tmp = arr[i][j]
        for k in range(3):
            if counter < (len(bmsg)*3 + 24):
                arr[i][j][k] = tmp[k] + 1 if is_even(tmp[k]) else tmp[k]
                counter += 1
# Pass 2: encode each message bit into the blue channel's parity — a "1" bit
# increments the (odd) blue value, making it even. NOTE(review): a blue value
# of 255 wraps to 0 under uint8 arithmetic; 0 is still even, so parity-based
# decoding keeps working.
counter = 0
for i in range(rows):
    for j in range(columns):
        tmp = arr[i][j]
        if counter < len(bmsg):
            arr[i][j][2] = (tmp[2] + 1 if int(bmsg[counter]) else tmp[2])
            counter += 1
# Save losslessly as PNG so the encoded parities survive (JPEG would destroy them).
img = Image.fromarray(arr, 'RGB')
img.save('encrypted.png')
print("Done!\nThe image with encrypted message is stored in the same directory.")
| [
"numpy.array",
"numpy.shape",
"PIL.Image.open",
"PIL.Image.fromarray"
] | [((638, 660), 'PIL.Image.open', 'Image.open', (['"""test.jpg"""'], {}), "('test.jpg')\n", (648, 660), False, 'from PIL import Image\n'), ((735, 748), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (743, 748), True, 'import numpy as np\n'), ((755, 765), 'numpy.array', 'array', (['img'], {}), '(img)\n', (760, 765), False, 'from numpy import array\n'), ((1454, 1481), 'PIL.Image.fromarray', 'Image.fromarray', (['arr', '"""RGB"""'], {}), "(arr, 'RGB')\n", (1469, 1481), False, 'from PIL import Image\n')] |
import os
import sys
import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from torchvision.transforms import functional as tfms
# import wandb
from apex import amp
import numpy as np
from numpy import array
from ignite.engine import Events, Engine, _prepare_batch
from ignite.metrics import RunningAverage
from ignite.handlers import TerminateOnNan, ModelCheckpoint, DiskSaver, global_step_from_engine
from ignite.contrib.handlers import ProgressBar
from photobooth.engines.sr_supervised import create_sr_evaluator, create_sr_trainer
from photobooth.data.datasets import DIV2K
from photobooth.models import edsr
from photobooth.models.srgan import SRDescriminator
from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90
# Per-channel RGB mean used to zero-center inputs — presumably the DIV2K
# training-set mean (same constants used by EDSR); TODO confirm.
MEAN = torch.tensor([0.4488, 0.4371, 0.4040])
# Root directory of the DIV2K dataset on this machine.
DS_ROOT = '/srv/datasets/DIV2K'
def train_tfms(crop_size, mean):
    """Build a paired random-augmentation transform for SR training crops.

    Args:
        crop_size: side length (in lowres pixels) of the random square crop;
            the highres crop side is scaled by the integer upscale factor
            inferred from the pair.
        mean: per-channel RGB mean (torch tensor) subtracted after /255 scaling.

    Returns:
        A callable mapping (lowres, highres) HWC arrays to a dict of
        normalized CHW float32 torch tensors {'lowres': ..., 'highres': ...}.
    """
    def _transform(lowres, highres):
        h, w, _ = lowres.shape
        scaling_factor = highres.shape[0] // lowres.shape[0]
        # Pick a random crop position aligned to whole crop_size units so the
        # lowres and highres crops stay exactly registered.
        x = np.random.randint(h // crop_size)
        y = np.random.randint(w // crop_size)
        lr = crop_bounding_box(
            lowres,
            x * crop_size, y * crop_size,
            crop_size, crop_size)
        hr = crop_bounding_box(
            highres,
            x * crop_size * scaling_factor, y * crop_size * scaling_factor,
            crop_size * scaling_factor, crop_size * scaling_factor)
        # Random 90-degree rotation and flips, applied identically to both.
        if np.random.rand() > 0.5:
            lr = rot_90(lr)
            hr = rot_90(hr)
        if np.random.rand() > 0.5:
            lr = flip_vertical(lr)
            hr = flip_vertical(hr)
        if np.random.rand() > 0.5:
            lr = flip_horizontal(lr)
            hr = flip_horizontal(hr)
        # Normalize. Bug fix: use the `mean` parameter instead of silently
        # reading the module-level MEAN global (the only call site passes
        # MEAN, so behavior is unchanged today). Convert once, not twice.
        mean_np = mean.numpy()
        lr = lr / 255. - mean_np
        hr = hr / 255. - mean_np
        # HWC numpy -> CHW float32 torch tensors.
        lr = torch.from_numpy(lr.astype('f4')).permute(2, 0, 1)
        hr = torch.from_numpy(hr.astype('f4')).permute(2, 0, 1)
        return {'lowres': lr, 'highres': hr}
    return _transform
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, required=True)
parser.add_argument('--learning_rate', type=float, required=True)
parser.add_argument('--weight_decay', type=float, required=False, default=1e-4)
parser.add_argument('--epochs', type=int, required=True)
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--crop_size', type=int, default=48)
parser.add_argument('--state_dict', type=str, required=False)
parser.add_argument('--bootstrap', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--mixed_precision', action='store_true')
parser.add_argument('--local_rank', type=int)
args = parser.parse_args()
if args.distributed:
dist.init_process_group('nccl', init_method='env://')
world_size = dist.get_world_size()
world_rank = dist.get_rank()
local_rank = args.local_rank
else:
local_rank = 0
torch.cuda.set_device(local_rank)
device = torch.device('cuda')
train_ds = DIV2K(DS_ROOT, split='train', config='bicubic/x4',
transforms=train_tfms(args.crop_size, MEAN))
if args.distributed:
sampler_args = dict(num_replicas=world_size, rank=local_rank)
train_loader = DataLoader(
train_ds, batch_size=args.batch_size,
shuffle=False, num_workers=8,
sampler=(DistributedSampler(train_ds, **sampler_args) if args.distributed else None)
)
model = edsr.edsr_baseline_x4(3, 3)
checkpoint = torch.load('weights/edsr_baseline_x4_pnsr=27.36.pth', map_location='cpu')
model.load_state_dict(checkpoint)
model = model.to(device)
for p in model.parameters():
p.requires_grad_(False)
descriminator = SRDescriminator(3, 1)
descriminator = descriminator.to(device)
loss_fn = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.AdamW(descriminator.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay)
if args.mixed_precision:
(model, descriminator), optimizer = amp.initialize([model, descriminator], optimizer)
if args.distributed:
descriminator = DistributedDataParallel(descriminator, device_ids=[local_rank])
def _update_model(engine, batch):
x, y = _prepare_batch(batch, device=device, non_blocking=True)
optimizer.zero_grad()
with torch.no_grad():
fake = model(x)
real = y
x_gan = torch.cat([fake, real], dim=0)
y_gan = torch.cat([
torch.zeros(fake.size(0), 1),
torch.ones(real.size(0), 1)
]).to(device)
y_pred = descriminator(x_gan)
loss = loss_fn(y_pred, y_gan)
if args.mixed_precision:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
return loss
trainer = Engine(_update_model)
RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
ProgressBar(persist=False).attach(trainer, ['loss'])
if local_rank == 0:
checkpointer = ModelCheckpoint(
dirname='checkpoints',
filename_prefix='model',
score_name='loss',
score_function=lambda engine: engine.state.metrics['loss'],
n_saved=5,
global_step_transform=global_step_from_engine(trainer),
)
trainer.add_event_handler(
Events.COMPLETED, checkpointer,
to_save={'descriminator': descriminator if not args.distributed else descriminator.module})
trainer.run(train_loader, max_epochs=args.epochs)
| [
"torch.utils.data.DistributedSampler",
"apex.amp.scale_loss",
"numpy.random.rand",
"photobooth.transforms.rot_90",
"ignite.engine.Engine",
"apex.amp.initialize",
"torch.distributed.get_rank",
"argparse.ArgumentParser",
"photobooth.transforms.crop_bounding_box",
"ignite.engine._prepare_batch",
"i... | [((907, 944), 'torch.tensor', 'torch.tensor', (['[0.4488, 0.4371, 0.404]'], {}), '([0.4488, 0.4371, 0.404])\n', (919, 944), False, 'import torch\n'), ((2348, 2373), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2371, 2373), False, 'import argparse\n'), ((3355, 3388), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (3376, 3388), False, 'import torch\n'), ((3402, 3422), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3414, 3422), False, 'import torch\n'), ((3879, 3906), 'photobooth.models.edsr.edsr_baseline_x4', 'edsr.edsr_baseline_x4', (['(3)', '(3)'], {}), '(3, 3)\n', (3900, 3906), False, 'from photobooth.models import edsr\n'), ((3924, 3997), 'torch.load', 'torch.load', (['"""weights/edsr_baseline_x4_pnsr=27.36.pth"""'], {'map_location': '"""cpu"""'}), "('weights/edsr_baseline_x4_pnsr=27.36.pth', map_location='cpu')\n", (3934, 3997), False, 'import torch\n'), ((4151, 4172), 'photobooth.models.srgan.SRDescriminator', 'SRDescriminator', (['(3)', '(1)'], {}), '(3, 1)\n', (4166, 4172), False, 'from photobooth.models.srgan import SRDescriminator\n'), ((4232, 4260), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (4258, 4260), False, 'import torch\n'), ((5399, 5420), 'ignite.engine.Engine', 'Engine', (['_update_model'], {}), '(_update_model)\n', (5405, 5420), False, 'from ignite.engine import Events, Engine, _prepare_batch\n'), ((1155, 1188), 'numpy.random.randint', 'np.random.randint', (['(h // crop_size)'], {}), '(h // crop_size)\n', (1172, 1188), True, 'import numpy as np\n'), ((1201, 1234), 'numpy.random.randint', 'np.random.randint', (['(w // crop_size)'], {}), '(w // crop_size)\n', (1218, 1234), True, 'import numpy as np\n'), ((1249, 1326), 'photobooth.transforms.crop_bounding_box', 'crop_bounding_box', (['lowres', '(x * crop_size)', '(y * crop_size)', 'crop_size', 'crop_size'], {}), '(lowres, x * crop_size, y * crop_size, crop_size, 
crop_size)\n', (1266, 1326), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1377, 1527), 'photobooth.transforms.crop_bounding_box', 'crop_bounding_box', (['highres', '(x * crop_size * scaling_factor)', '(y * crop_size * scaling_factor)', '(crop_size * scaling_factor)', '(crop_size * scaling_factor)'], {}), '(highres, x * crop_size * scaling_factor, y * crop_size *\n scaling_factor, crop_size * scaling_factor, crop_size * scaling_factor)\n', (1394, 1527), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((3146, 3199), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""nccl"""'], {'init_method': '"""env://"""'}), "('nccl', init_method='env://')\n", (3169, 3199), True, 'import torch.distributed as dist\n'), ((3221, 3242), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3240, 3242), True, 'import torch.distributed as dist\n'), ((3264, 3279), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3277, 3279), True, 'import torch.distributed as dist\n'), ((4521, 4570), 'apex.amp.initialize', 'amp.initialize', (['[model, descriminator]', 'optimizer'], {}), '([model, descriminator], optimizer)\n', (4535, 4570), False, 'from apex import amp\n'), ((4621, 4684), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['descriminator'], {'device_ids': '[local_rank]'}), '(descriminator, device_ids=[local_rank])\n', (4644, 4684), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((4739, 4794), 'ignite.engine._prepare_batch', '_prepare_batch', (['batch'], {'device': 'device', 'non_blocking': '(True)'}), '(batch, device=device, non_blocking=True)\n', (4753, 4794), False, 'from ignite.engine import Events, Engine, _prepare_batch\n'), ((4917, 4947), 'torch.cat', 'torch.cat', (['[fake, real]'], {'dim': '(0)'}), '([fake, real], dim=0)\n', (4926, 4947), 
False, 'import torch\n'), ((1573, 1589), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1587, 1589), True, 'import numpy as np\n'), ((1641, 1651), 'photobooth.transforms.rot_90', 'rot_90', (['lr'], {}), '(lr)\n', (1647, 1651), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1669, 1679), 'photobooth.transforms.rot_90', 'rot_90', (['hr'], {}), '(hr)\n', (1675, 1679), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1692, 1708), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1706, 1708), True, 'import numpy as np\n'), ((1761, 1778), 'photobooth.transforms.flip_vertical', 'flip_vertical', (['lr'], {}), '(lr)\n', (1774, 1778), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1796, 1813), 'photobooth.transforms.flip_vertical', 'flip_vertical', (['hr'], {}), '(hr)\n', (1809, 1813), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1825, 1841), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1839, 1841), True, 'import numpy as np\n'), ((1896, 1915), 'photobooth.transforms.flip_horizontal', 'flip_horizontal', (['lr'], {}), '(lr)\n', (1911, 1915), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((1933, 1952), 'photobooth.transforms.flip_horizontal', 'flip_horizontal', (['hr'], {}), '(hr)\n', (1948, 1952), False, 'from photobooth.transforms import flip_horizontal, flip_vertical, to_tensor, crop_bounding_box, rot_90\n'), ((4839, 4854), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4852, 4854), False, 'import torch\n'), ((5425, 5469), 'ignite.metrics.RunningAverage', 'RunningAverage', ([], {'output_transform': '(lambda x: x)'}), '(output_transform=lambda x: x)\n', (5439, 5469), False, 'from 
ignite.metrics import RunningAverage\n'), ((5498, 5524), 'ignite.contrib.handlers.ProgressBar', 'ProgressBar', ([], {'persist': '(False)'}), '(persist=False)\n', (5509, 5524), False, 'from ignite.contrib.handlers import ProgressBar\n'), ((3784, 3828), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['train_ds'], {}), '(train_ds, **sampler_args)\n', (3802, 3828), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((5209, 5240), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (5223, 5240), False, 'from apex import amp\n'), ((5848, 5880), 'ignite.handlers.global_step_from_engine', 'global_step_from_engine', (['trainer'], {}), '(trainer)\n', (5871, 5880), False, 'from ignite.handlers import TerminateOnNan, ModelCheckpoint, DiskSaver, global_step_from_engine\n')] |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import numpy.testing as npt
from reinforceflow.core import SumTree, MinTree
def test_sumtree_sum():
    """After overflowing capacity, the tree's sum reflects only the newest items."""
    size = 100000
    first_pass = list(range(size))
    second_pass = list(range(size, 2 * size))
    tree = SumTree(size)
    for value in first_pass:
        tree.append(value)
    for value in second_pass:
        tree.append(value)
    assert tree.sum() == sum(second_pass)
def test_sumtree_find_idx():
    """Every prefix-sum query must resolve to a valid leaf index."""
    num_items = 100000
    tree = SumTree(num_items)
    for value in range(num_items):
        tree.append(value)
    for query in range(num_items):
        leaf = tree.find_sum_idx(query)
        assert 0 <= leaf < num_items, 'Index = %s' % leaf
def test_sumtree_distribution():
    """Querying every integer prefix must reproduce the priority distribution."""
    priorities = np.array([20000.0, 30000.0, 500.0, 49500.0, 0.0])
    tree = SumTree(len(priorities))
    total = int(np.sum(priorities))
    expected = priorities / total
    observed = np.zeros_like(priorities)
    for priority in priorities:
        tree.append(priority)
    # Each prefix value in [0, total) should land on a leaf in proportion
    # to that leaf's priority.
    for prefix in range(total):
        observed[tree.find_sum_idx(prefix)] += 1
    observed = observed / total
    npt.assert_almost_equal(expected, observed, decimal=4)
def test_mintree_min():
    """After overflowing capacity, the tree's min reflects only the newest items."""
    size = 100000
    first_pass = list(range(size))
    second_pass = list(range(size, 2 * size))
    tree = MinTree(size)
    for value in first_pass:
        tree.append(value)
    for value in second_pass:
        tree.append(value)
    assert tree.min() == min(second_pass)
| [
"reinforceflow.core.MinTree",
"numpy.array",
"numpy.testing.assert_almost_equal",
"numpy.sum",
"numpy.zeros_like",
"reinforceflow.core.SumTree"
] | [((355, 372), 'reinforceflow.core.SumTree', 'SumTree', (['capacity'], {}), '(capacity)\n', (362, 372), False, 'from reinforceflow.core import SumTree, MinTree\n'), ((575, 588), 'reinforceflow.core.SumTree', 'SumTree', (['size'], {}), '(size)\n', (582, 588), False, 'from reinforceflow.core import SumTree, MinTree\n'), ((798, 847), 'numpy.array', 'np.array', (['[20000.0, 30000.0, 500.0, 49500.0, 0.0]'], {}), '([20000.0, 30000.0, 500.0, 49500.0, 0.0])\n', (806, 847), True, 'import numpy as np\n'), ((963, 984), 'numpy.zeros_like', 'np.zeros_like', (['priors'], {}), '(priors)\n', (976, 984), True, 'import numpy as np\n'), ((1170, 1238), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['expected_priors', 'received_priors'], {'decimal': '(4)'}), '(expected_priors, received_priors, decimal=4)\n', (1193, 1238), True, 'import numpy.testing as npt\n'), ((1389, 1406), 'reinforceflow.core.MinTree', 'MinTree', (['capacity'], {}), '(capacity)\n', (1396, 1406), False, 'from reinforceflow.core import SumTree, MinTree\n'), ((892, 906), 'numpy.sum', 'np.sum', (['priors'], {}), '(priors)\n', (898, 906), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
# Ez polarization threshold: segments where Ez exceeds this are drawn bold.
pol_thres=0.7
xlsx_filename='Data_1_5.83.xlsx'
table=pd.read_excel(xlsx_filename, index_col=None, header=None)
# The workbook stores 9 datasets side by side, 9 columns each:
# Ex, Ey, Ez, h, X, T, n2 (numeric, skipping the header row) and a label.
Ex, Ey, Ez, h, X, T, n2, labs =[],[],[],[],[],[],[],[]
for i in range(9):
    Ex.append(table[0+i*9][1::].values.astype(float))
    Ey.append(table[1+i*9][1::].values.astype(float))
    Ez.append(table[2+i*9][1::].values.astype(float))
    h.append(table[3+i*9][1::].values.astype(float))
    X.append(table[4+i*9][1::].values.astype(float))
    T.append(table[5+i*9][1::].values.astype(float))
    n2.append(table[6+i*9][1::].values.astype(float))
    labs.append(table[7+i*9][1])
fig=plt.figure(figsize=(9,6))
ax=plt.axes()
colors=['r','g','b','orange','m']
labels=[r'$\alpha=-28^\circ$',r'$\alpha=-14^\circ$',r'$\alpha=0^\circ$',r'$\alpha=14^\circ$',r'$\alpha=28^\circ$']
ind=0
# Plot five selected datasets: a thin trajectory line plus a thick overlay
# on the points where Ez exceeds the polarization threshold.
for i in [0,3,5,6,1]:
    plt.plot(X[i],h[i],color=colors[ind],label='')
    plt.plot(X[i][np.where(Ez[i]>pol_thres)[0]],h[i][np.where(Ez[i]>pol_thres)[0]],color=colors[ind],label=labels[ind],lw=4)
    ind+=1
plt.legend(loc=3)
plt.xlabel('X, km')
plt.ylabel('h, km')
ax.set_xticks([-300,-200,-100,0,100,200,300])
ax.set_ylim(80,230)
# Downward arrows marking three positions (A, B, C) at the bottom of the plot.
ann1 = ax.annotate('', xy=(11, 80), xycoords='data',
            xytext=(11, 90), textcoords='data',
            arrowprops=dict(arrowstyle="->",
                            ec="k",lw=2))
ann2 = ax.annotate('', xy=(82, 80), xycoords='data',
            xytext=(82, 90), textcoords='data',
            arrowprops=dict(arrowstyle="->",
                            ec="k",lw=2))
ann3 = ax.annotate('', xy=(113, 80), xycoords='data',
            xytext=(113, 90), textcoords='data',
            arrowprops=dict(arrowstyle="->",
                            ec="k",lw=2))
ann4 = ax.annotate('A', Color='k', xy=(7, 90), xycoords='data',
            xytext=(7-3, 92), textcoords='data')
ann5 = ax.annotate('B', Color='k', xy=(78, 90), xycoords='data',
            xytext=(78-3, 92), textcoords='data')
# NOTE(review): ann4 is reassigned here (the 'A' annotation handle is lost);
# harmless for rendering, but the name was probably meant to be ann6.
ann4 = ax.annotate('C', Color='k', xy=(109, 90), xycoords='data',
            xytext=(109-3, 92), textcoords='data')
# Arrow of length r at 75.822 degrees from (x0, y0) — presumably the local
# magnetic-field direction; TODO confirm.
r=40
x0=50; y0=220
dx=-r*np.cos(75.822*np.pi/180); dy=-r*np.sin(75.822*np.pi/180);
# ~ print(dx,dy)
ann_mag = ax.annotate('', xy=(x0+dx, y0+dy), xycoords='data',
xytext=(x0, y0), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_B = ax.annotate('B', Color='k', xy=(30, 200), xycoords='data',
xytext=(27,200), textcoords='data',fontsize=16,fontweight='bold')
ax.plot([-300,300],[223,223], "k--",lw=2)
ann_ns = ax.annotate('', xy=(150, 120), xycoords='data',
xytext=(300, 120), textcoords='data',
arrowprops=dict(arrowstyle="->",
ec="k",lw=2))
ann_N = ax.annotate('N', Color='k', xy=(125, 120), xycoords='data',
xytext=(132,118), textcoords='data',fontsize=16)
ann_S = ax.annotate('S', Color='k', xy=(304, 120), xycoords='data',
xytext=(305,118), textcoords='data',fontsize=16)
plt.savefig('figure1.pdf',dpi=600)
plt.savefig('figure1.png')
# ~ plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"numpy.cos",
"pandas.read_excel",
"numpy.sin",
"matplotlib.pyplot.legend"
] | [((72, 110), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (91, 110), True, 'import matplotlib.pyplot as plt\n'), ((165, 222), 'pandas.read_excel', 'pd.read_excel', (['xlsx_filename'], {'index_col': 'None', 'header': 'None'}), '(xlsx_filename, index_col=None, header=None)\n', (178, 222), True, 'import pandas as pd\n'), ((712, 738), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (722, 738), True, 'import matplotlib.pyplot as plt\n'), ((741, 751), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (749, 751), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1133), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)'}), '(loc=3)\n', (1126, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X, km"""'], {}), "('X, km')\n", (1144, 1153), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1173), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h, km"""'], {}), "('h, km')\n", (1164, 1173), True, 'import matplotlib.pyplot as plt\n'), ((3147, 3182), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure1.pdf"""'], {'dpi': '(600)'}), "('figure1.pdf', dpi=600)\n", (3158, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3208), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure1.png"""'], {}), "('figure1.png')\n", (3193, 3208), True, 'import matplotlib.pyplot as plt\n'), ((933, 982), 'matplotlib.pyplot.plot', 'plt.plot', (['X[i]', 'h[i]'], {'color': 'colors[ind]', 'label': '""""""'}), "(X[i], h[i], color=colors[ind], label='')\n", (941, 982), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2233), 'numpy.cos', 'np.cos', (['(75.822 * np.pi / 180)'], {}), '(75.822 * np.pi / 180)\n', (2211, 2233), True, 'import numpy as np\n'), ((2237, 2265), 'numpy.sin', 'np.sin', (['(75.822 * np.pi / 180)'], {}), '(75.822 * np.pi / 180)\n', (2243, 2265), True, 'import numpy as 
np\n'), ((998, 1025), 'numpy.where', 'np.where', (['(Ez[i] > pol_thres)'], {}), '(Ez[i] > pol_thres)\n', (1006, 1025), True, 'import numpy as np\n'), ((1033, 1060), 'numpy.where', 'np.where', (['(Ez[i] > pol_thres)'], {}), '(Ez[i] > pol_thres)\n', (1041, 1060), True, 'import numpy as np\n')] |
'''Functions to calculate substrate thermal noise
'''
from __future__ import division, print_function
import numpy as np
from numpy import exp, inf, pi, sqrt
import scipy.special
import scipy.integrate
from .. import const
from ..const import BESSEL_ZEROS as zeta
from ..const import J0M as j0m
def substrate_thermorefractive(f, materials, wBeam, exact=False):
    """Substrate thermal displacement noise spectrum from thermorefractive fluctuations

    :f: frequency array in Hz
    :materials: gwinc optic materials structure
    :wBeam: beam radius (at 1 / e^2 power)
    :exact: whether to use adiabatic approximation or exact calculation (False)

    :returns: displacement noise power spectrum at :f:, in meters

    """
    H = materials.MassThickness
    kBT = const.kB * materials.Substrate.Temp
    Temp = materials.Substrate.Temp
    rho = materials.Substrate.MassDensity
    beta = materials.Substrate.dndT       # thermorefractive coefficient dn/dT
    C = materials.Substrate.MassCM
    kappa = materials.Substrate.MassKappa
    r0 = wBeam/np.sqrt(2)                 # power e-folding radius
    omega = 2*pi*f

    if exact:
        # Exact spectrum per P1400084 Heinert et al. Eq. 15: integrate the
        # thermal diffusion kernel over wavenumber k for each frequency.
        # (Previously psd was computed twice with identical expressions;
        # the redundant psdTR lambda has been removed.)
        D = kappa / (rho*C)  # thermal diffusivity

        def integrand(k, om):
            return D * k**3 * exp(-k**2 * wBeam**2/4) / (D**2 * k**4 + om**2)

        inte = np.array([scipy.integrate.quad(integrand, 0, inf, args=(om,))[0]
                         for om in omega])
        psd = 2/pi * H * beta**2 * kBT * Temp / (rho*C) * inte
    else:
        # Adiabatic (low-frequency) approximation.
        psd = 4*H*beta**2*kappa*kBT*Temp/(pi*r0**4*omega**2*(rho*C)**2)

    return psd
def substrate_brownian(f, materials, wBeam):
    """Substrate thermal displacement noise spectrum due to substrate mechanical loss

    :f: frequency array in Hz
    :materials: gwinc optic materials structure
    :wBeam: beam radius (at 1 / e^2 power)

    :returns: displacement noise power spectrum at :f:, in meters

    """
    substrate = materials.Substrate
    young = substrate.MirrorY
    poisson = substrate.MirrorSigma
    kBT = const.kB * substrate.Temp

    # Finite-test-mass amplitude coefficient (the correction factor itself
    # is not needed here).
    _, aftm = substrate_brownian_FiniteCorr(materials, wBeam)

    # Bulk loss: frequency-dependent loss angle phi(f) = c2 * f^n.
    phibulk = substrate.c2 * f**substrate.MechanicalLossExponent
    noise_bulk = 8 * kBT * aftm * phibulk / (2 * pi * f)

    # Surface loss contribution, parameterized by alphas.
    # csurf = alphas/(Y*pi*wBeam^2)
    noise_surf = substrate.Alphas*(1-2*poisson)/((1-poisson)*young*pi*wBeam**2)
    noise_surf *= 8 * kBT / (2 * pi * f)

    return noise_surf + noise_bulk
def substrate_brownian_FiniteCorr(materials, wBeam):
    """Substrate brownian noise finite-size test mass correction

    :materials: gwinc optic materials structure
    :wBeam: beam radius (at 1 / e^2 power)

    :returns: correction factors tuple:
    cftm = finite mirror correction factor
    aftm = amplitude coefficient for thermal noise:
           thermal noise contribution to displacement noise is
           S_x(f) = (8 * kB * T / (2*pi*f)) * Phi(f) * aftm

    Equation references to Bondu, et al. Physics Letters A 246 (1998)
    227-236 (hereafter BHV) and Liu and Thorne gr-qc/0002055 (hereafter LT)

    """
    a = materials.MassRadius
    h = materials.MassThickness
    Y = materials.Substrate.MirrorY
    sigma = materials.Substrate.MirrorSigma
    # LT uses e-folding of power
    r0 = wBeam / sqrt(2)
    # km is an array: one radial wavenumber per entry of zeta
    # (zeta = BESSEL_ZEROS and j0m = J0M, both imported from ..const)
    km = zeta/a
    Qm = exp(-2*km*h) # LT eq. 35a
    Um = (1-Qm)*(1+Qm)+4*h*km*Qm
    Um = Um/((1-Qm)**2-4*(km*h)**2*Qm) # LT 53 (BHV eq. btwn 29 & 30)
    x = exp(-(zeta*r0/a)**2/4)
    s = sum(x/(zeta**2*j0m)) # LT 57
    # sum the per-mode contributions to the strain energy U0
    x2 = x*x
    U0 = sum(Um*x2/(zeta*j0m**2))
    U0 = U0*(1-sigma)*(1+sigma)/(pi*a*Y) # LT 56 (BHV eq. 3)
    # uniform-pressure correction term DeltaU
    p0 = 1/(pi*a**2) # LT 28
    DeltaU = (pi*h**2*p0)**2
    DeltaU = DeltaU + 12*pi*h**2*p0*sigma*s
    DeltaU = DeltaU + 72*(1-sigma)*s**2
    DeltaU = DeltaU*a**2/(6*pi*h**3*Y) # LT 54
    # LT 58 (eq. following BHV 31)
    aftm = DeltaU + U0
    # amplitude coef for infinite TM, LT 59
    # factored out: (8 * kB * T * Phi) / (2 * pi * f)
    aitm = (1 - sigma**2) / (2 * sqrt(2 * pi) * Y * r0)
    # finite mirror correction
    cftm = aftm / aitm
    return cftm, aftm
def substrate_thermoelastic(f, materials, wBeam):
    """Substrate thermal displacement noise spectrum from thermoelastic fluctuations

    :f: frequency array in Hz
    :materials: gwinc optic materials structure
    :wBeam: beam radius (at 1 / e^2 power)

    :returns: displacement noise power spectrum at :f:, in meters

    """
    sub = materials.Substrate
    poisson = sub.MirrorSigma
    density = sub.MassDensity
    conductivity = sub.MassKappa   # thermal conductivity
    expansion = sub.MassAlpha      # thermal expansion
    heat_cap = sub.MassCM          # heat capacity @ constant mass
    temp = sub.Temp                # temperature
    kBT = const.kB * sub.Temp

    # Infinite test mass result, LT 18 less a factor 1/omega^2.
    # note kBT carries a factor of Temp
    psd = 8*(1+poisson)**2*conductivity*expansion**2*temp*kBT
    psd /= (sqrt(2*pi)*(heat_cap*density)**2)
    psd /= (wBeam/sqrt(2))**3  # wBeam/sqrt(2) is the power e-folding radius

    # Corrections for finite test masses:
    psd *= substrate_thermoelastic_FiniteCorr(materials, wBeam)

    return psd/(2*pi*f)**2
def substrate_thermoelastic_FiniteCorr(materials, wBeam):
    """Substrate thermoelastic noise finite-size test mass correction

    :materials: gwinc optic materials structure
    :wBeam: beam radius (at 1 / e^2 power)

    :returns: correction factor

    (Liu & Thorne gr-qc/0002055 equation 46)

    Equation references to Bondu, et al. Physics Letters A 246 (1998)
    227-236 (hereafter BHV) or Liu and Thorne gr-qc/0002055 (hereafter LT)

    """
    a = materials.MassRadius
    h = materials.MassThickness
    sigma = materials.Substrate.MirrorSigma
    # LT uses power e-folding
    r0 = wBeam/sqrt(2)
    # km is an array: one radial wavenumber per entry of zeta
    # (zeta = BESSEL_ZEROS and j0m = J0M, both imported from ..const)
    km = zeta/a
    Qm = exp(-2*km*h) # LT 35a
    pm = exp(-(km*r0)**2/4)/(pi*(a*j0m)**2) # LT 37
    c0 = 6*(a/h)**2*sum(j0m*pm/zeta**2) # LT 32
    c1 = -2*c0/h # LT 32
    p0 = 1/(pi*a**2) # LT 28
    c1 += p0/(2*h) # LT 40
    # accumulate the per-mode sum of LT 46
    coeff = (1-Qm)*((1-Qm)*(1+Qm)+8*h*km*Qm)
    coeff += 4*(h*km)**2*Qm*(1+Qm)
    coeff *= km*(pm*j0m)**2*(1-Qm)
    coeff /= ((1-Qm)**2-4*(h*km)**2*Qm)**2
    coeff = sum(coeff) + h*c1**2/(1+sigma)**2
    coeff *= (sqrt(2*pi)*r0)**3*a**2 # LT 46
    return coeff
| [
"numpy.exp",
"numpy.sqrt"
] | [((3510, 3526), 'numpy.exp', 'exp', (['(-2 * km * h)'], {}), '(-2 * km * h)\n', (3513, 3526), False, 'from numpy import exp, inf, pi, sqrt\n'), ((3649, 3679), 'numpy.exp', 'exp', (['(-(zeta * r0 / a) ** 2 / 4)'], {}), '(-(zeta * r0 / a) ** 2 / 4)\n', (3652, 3679), False, 'from numpy import exp, inf, pi, sqrt\n'), ((5951, 5967), 'numpy.exp', 'exp', (['(-2 * km * h)'], {}), '(-2 * km * h)\n', (5954, 5967), False, 'from numpy import exp, inf, pi, sqrt\n'), ((1015, 1025), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1022, 1025), True, 'import numpy as np\n'), ((3476, 3483), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (3480, 3483), False, 'from numpy import exp, inf, pi, sqrt\n'), ((5097, 5109), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (5101, 5109), False, 'from numpy import exp, inf, pi, sqrt\n'), ((5917, 5924), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5921, 5924), False, 'from numpy import exp, inf, pi, sqrt\n'), ((5983, 6007), 'numpy.exp', 'exp', (['(-(km * r0) ** 2 / 4)'], {}), '(-(km * r0) ** 2 / 4)\n', (5986, 6007), False, 'from numpy import exp, inf, pi, sqrt\n'), ((5137, 5144), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5141, 5144), False, 'from numpy import exp, inf, pi, sqrt\n'), ((6375, 6387), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (6379, 6387), False, 'from numpy import exp, inf, pi, sqrt\n'), ((1123, 1152), 'numpy.exp', 'exp', (['(-k ** 2 * wBeam ** 2 / 4)'], {}), '(-k ** 2 * wBeam ** 2 / 4)\n', (1126, 1152), False, 'from numpy import exp, inf, pi, sqrt\n'), ((4199, 4211), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (4203, 4211), False, 'from numpy import exp, inf, pi, sqrt\n')] |
# Sample code from the TorchVision 0.3 Object Detection Finetuning Tutorial
# http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
import os
import numpy as np
import torch
import random
import json
import argparse
from PIL import Image
import cv2
from alfworld.agents.detector.engine import train_one_epoch, evaluate
import alfworld.agents.detector.utils as utils
import torchvision
import alfworld.agents.detector.transforms as T
from alfworld.agents.detector.mrcnn import get_model_instance_segmentation, load_pretrained_model
import sys
import alfworld.gen.constants as constants
# Minimum bounding-box area (in pixels) for an instance mask to be kept by
# AlfredDataset.__getitem__; smaller instances are skipped.
MIN_PIXELS = 100
# Detector class vocabularies re-exported from alfworld.gen.constants.
OBJECTS_DETECTOR = constants.OBJECTS_DETECTOR
STATIC_RECEPTACLES = constants.STATIC_RECEPTACLES
ALL_DETECTOR = constants.ALL_DETECTOR
def get_object_classes(object_type):
    """Return the detector class vocabulary for the requested category.

    "objects" -> OBJECTS_DETECTOR, "receptacles" -> STATIC_RECEPTACLES,
    anything else -> ALL_DETECTOR.
    """
    if object_type == "objects":
        return OBJECTS_DETECTOR
    if object_type == "receptacles":
        return STATIC_RECEPTACLES
    return ALL_DETECTOR
class AlfredDataset(object):
    """Instance-segmentation dataset over ALFRED screenshots.

    Expects ``root`` to contain ``images/``, ``masks/`` and ``meta/``
    subdirectories (nested under per-scene-type directories when
    ``args.balance_scenes`` is set).  Each distinct color in a mask image
    marks one object instance; the per-frame meta JSON maps color strings
    to object ids.
    """

    def __init__(self, root, transforms, args):
        self.root = root
        self.transforms = transforms
        self.args = args
        self.object_classes = get_object_classes(args.object_types)
        # load all image files, sorting them to
        # ensure that they are aligned
        self.get_data_files(root, balance_scenes=args.balance_scenes)

    def get_data_files(self, root, balance_scenes=False):
        """Populate self.imgs / self.masks / self.metas with aligned paths.

        With ``balance_scenes`` each scene type is randomly subsampled to
        ``min_scene_size * factor`` frames, using the per-scene factors
        from ``args``.
        """
        if balance_scenes:
            scenes = ('kitchen', 'living', 'bedroom', 'bathroom')
            factors = {
                'kitchen': self.args.kitchen_factor,
                'living': self.args.living_factor,
                'bedroom': self.args.bedroom_factor,
                'bathroom': self.args.bathroom_factor,
            }
            image_dirs = {s: os.path.join(root, s, 'images') for s in scenes}
            files = {s: sorted(os.listdir(image_dirs[s])) for s in scenes}
            min_size = min(len(files[s]) for s in scenes)
            self.imgs = []
            for s in scenes:
                sampled = random.sample(files[s], int(min_size * factors[s]))
                self.imgs.extend(os.path.join(image_dirs[s], f) for f in sampled)
            self.masks = [f.replace("images", "masks") for f in self.imgs]
            self.metas = [f.replace("images", "meta").replace(".png", ".json") for f in self.imgs]
        else:
            def _listing(sub):
                # Sorted so images/masks/meta stay index-aligned.
                d = os.path.join(root, sub)
                return [os.path.join(d, f) for f in sorted(os.listdir(d))]
            self.imgs = _listing("images")
            self.masks = _listing("masks")
            self.metas = _listing("meta")

    def __getitem__(self, idx):
        """Return ``(image, target)`` for Mask R-CNN training.

        Returns ``(None, None)`` when the frame has no usable instance —
        callers must filter these out before batching.
        """
        img_path = self.imgs[idx]
        mask_path = self.masks[idx]
        meta_path = self.metas[idx]

        with open(meta_path, 'r') as f:
            color_to_object = json.load(f)

        img = Image.open(img_path).convert("RGB")
        # The mask stays raw (not converted to RGB by PIL): each color
        # corresponds to a different instance, 0 being background.
        mask = np.array(Image.open(mask_path))

        # BUGFIX: was mask.reshape(im_height*im_height, 3), which is only
        # correct for square frames; reshape(-1, 3) handles any shape.
        seg_colors = np.unique(mask.reshape(-1, 3), axis=0)

        masks, boxes, labels = [], [], []
        for color in seg_colors:
            # color[::-1] matches the channel ordering used for the meta
            # keys — presumably BGR vs RGB; verify against the generator.
            color_str = str(tuple(color[::-1]))
            if color_str not in color_to_object:
                continue
            object_id = color_to_object[color_str]
            object_class = object_id.split("|", 1)[0] if "|" in object_id else ""
            if "Basin" in object_id:
                object_class += "Basin"
            if object_class not in self.object_classes:
                continue

            smask = np.all(mask == color, axis=2)
            pos = np.where(smask)
            xmin, xmax = np.min(pos[1]), np.max(pos[1])
            ymin, ymax = np.min(pos[0]), np.max(pos[0])

            # skip instances whose bounding box is too small to train on
            if (xmax - xmin) * (ymax - ymin) < MIN_PIXELS:
                continue

            masks.append(smask)
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(self.object_classes.index(object_class))

            if self.args.debug:
                # Blocking OpenCV visualization of each accepted instance.
                disp_img = np.array(img)
                cv2.rectangle(disp_img, (xmin, ymin), (xmax, ymax), color=(0, 255, 0), thickness=2)
                cv2.putText(disp_img, object_class, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=2)
                sg = np.uint8(smask[:, :, np.newaxis]) * 255
                print(xmax - xmin, ymax - ymin, len(pos[0]))
                cv2.imshow("img", np.array(disp_img))
                cv2.imshow("sg", sg)
                cv2.waitKey(0)

        if not boxes:
            return None, None

        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        labels = torch.as_tensor(labels, dtype=torch.int64)
        masks = torch.as_tensor(masks, dtype=torch.uint8)
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros(len(masks), dtype=torch.int64)

        target = {
            "boxes": boxes,
            "labels": labels,
            "masks": masks,
            "image_id": image_id,
            "area": area,
            "iscrowd": iscrowd,
        }

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.imgs)
def get_transform(train):
    """Build the image/target transform pipeline; flips only when training."""
    ops = [T.ToTensor()]
    if train:
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)
def main(args):
    """Train a Mask R-CNN detector on the ALFRED dataset described by args.

    Trains for 10 epochs with SGD + step LR decay and saves a checkpoint
    after every epoch to args.save_path.
    """
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # number of detector classes, plus one for the background class
    num_classes = len(get_object_classes(args.object_types))+1
    # use our dataset and defined transformations
    dataset = AlfredDataset(args.data_path, get_transform(train=True), args)
    dataset_test = AlfredDataset(args.data_path, get_transform(train=False), args)
    # split the dataset in train and test set: the last 4000 samples are
    # held out for evaluation (assumes the dataset has > 4000 samples)
    # indices = torch.randperm(len(dataset)).tolist()
    indices = list(range(len(dataset)))
    dataset = torch.utils.data.Subset(dataset, indices[:-4000])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-4000:])
    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, shuffle=True, num_workers=4,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)
    # get the model using our helper function (resume from a checkpoint
    # when --load_model is given)
    if args.load_model:
        model = load_pretrained_model(args.load_model)
    else:
        model = get_model_instance_segmentation(num_classes)
    # move model to the right device
    model.to(device)
    # construct an optimizer over the trainable parameters only
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr,
                                momentum=0.9, weight_decay=0.0005)
    # and a learning rate scheduler (decay lr by 10x every 3 epochs)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    # let's train it for 10 epochs
    num_epochs = 10
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # # evaluate on the test dataset
        # evaluate(model, data_loader_test, device=device)
        # save a checkpoint after every epoch
        model_path = os.path.join(args.save_path, "%s_%03d.pth" % (args.save_name, epoch))
        torch.save(model.state_dict(), model_path)
        print("Saving %s" % model_path)
    print("Done training!")
if __name__ == "__main__":
    # Command-line interface for training the Mask R-CNN object detector.
    parser = argparse.ArgumentParser()
    # dataset root and checkpoint output directory
    parser.add_argument("--data_path", type=str, default="data/")
    parser.add_argument("--save_path", type=str, default="data/")
    # which class vocabulary to train on (see get_object_classes)
    parser.add_argument("--object_types", choices=["objects", "receptacles", "all"], default="all")
    parser.add_argument("--save_name", type=str, default="mrcnn_alfred_objects")
    # optional checkpoint to resume from (empty string trains from scratch)
    parser.add_argument("--load_model", type=str, default="")
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--lr", type=float, default=0.005)
    # scene balancing: subsample each scene type by its factor
    parser.add_argument("--balance_scenes", action='store_true')
    parser.add_argument("--kitchen_factor", type=float, default=1.0)
    parser.add_argument("--living_factor", type=float, default=1.0)
    parser.add_argument("--bedroom_factor", type=float, default=1.0)
    parser.add_argument("--bathroom_factor", type=float, default=1.0)
    # visualize each accepted instance with OpenCV while loading (blocking)
    parser.add_argument("--debug", action='store_true')
    args = parser.parse_args()
    main(args)
| [
"cv2.rectangle",
"numpy.uint8",
"torch.as_tensor",
"alfworld.agents.detector.transforms.ToTensor",
"cv2.imshow",
"alfworld.agents.detector.transforms.RandomHorizontalFlip",
"numpy.array",
"alfworld.agents.detector.transforms.Compose",
"torch.cuda.is_available",
"os.listdir",
"argparse.ArgumentPa... | [((6642, 6663), 'alfworld.agents.detector.transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (6651, 6663), True, 'import alfworld.agents.detector.transforms as T\n'), ((7326, 7375), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset', 'indices[:-4000]'], {}), '(dataset, indices[:-4000])\n', (7349, 7375), False, 'import torch\n'), ((7395, 7449), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset_test', 'indices[-4000:]'], {}), '(dataset_test, indices[-4000:])\n', (7418, 7449), False, 'import torch\n'), ((7519, 7646), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset, batch_size=args.batch_size, shuffle=\n True, num_workers=4, collate_fn=utils.collate_fn)\n', (7546, 7646), False, 'import torch\n'), ((7683, 7815), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset_test, batch_size=args.batch_size,\n shuffle=False, num_workers=4, collate_fn=utils.collate_fn)\n', (7710, 7815), False, 'import torch\n'), ((8195, 8265), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': 'args.lr', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=args.lr, momentum=0.9, weight_decay=0.0005)\n', (8210, 8265), False, 'import torch\n'), ((8353, 8419), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(3)', 'gamma': '(0.1)'}), '(optimizer, step_size=3, gamma=0.1)\n', (8384, 8419), False, 'import torch\n'), ((9196, 9221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9219, 9221), False, 'import argparse\n'), ((3690, 3711), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (3700, 3711), False, 'from PIL 
import Image\n'), ((3728, 3742), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (3736, 3742), True, 'import numpy as np\n'), ((5802, 5845), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {'dtype': 'torch.float32'}), '(boxes, dtype=torch.float32)\n', (5817, 5845), False, 'import torch\n'), ((5863, 5905), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (5878, 5905), False, 'import torch\n'), ((5922, 5963), 'torch.as_tensor', 'torch.as_tensor', (['masks'], {'dtype': 'torch.uint8'}), '(masks, dtype=torch.uint8)\n', (5937, 5963), False, 'import torch\n'), ((5984, 6003), 'torch.tensor', 'torch.tensor', (['[idx]'], {}), '([idx])\n', (5996, 6003), False, 'import torch\n'), ((6548, 6560), 'alfworld.agents.detector.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (6558, 6560), True, 'import alfworld.agents.detector.transforms as T\n'), ((6783, 6808), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6806, 6808), False, 'import torch\n'), ((6759, 6779), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6771, 6779), False, 'import torch\n'), ((6814, 6833), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6826, 6833), False, 'import torch\n'), ((7916, 7954), 'alfworld.agents.detector.mrcnn.load_pretrained_model', 'load_pretrained_model', (['args.load_model'], {}), '(args.load_model)\n', (7937, 7954), False, 'from alfworld.agents.detector.mrcnn import get_model_instance_segmentation, load_pretrained_model\n'), ((7981, 8025), 'alfworld.agents.detector.mrcnn.get_model_instance_segmentation', 'get_model_instance_segmentation', (['num_classes'], {}), '(num_classes)\n', (8012, 8025), False, 'from alfworld.agents.detector.mrcnn import get_model_instance_segmentation, load_pretrained_model\n'), ((8683, 8759), 'alfworld.agents.detector.engine.train_one_epoch', 'train_one_epoch', (['model', 'optimizer', 'data_loader', 'device', 'epoch'], 
{'print_freq': '(10)'}), '(model, optimizer, data_loader, device, epoch, print_freq=10)\n', (8698, 8759), False, 'from alfworld.agents.detector.engine import train_one_epoch, evaluate\n'), ((8965, 9034), 'os.path.join', 'os.path.join', (['args.save_path', "('%s_%03d.pth' % (args.save_name, epoch))"], {}), "(args.save_path, '%s_%03d.pth' % (args.save_name, epoch))\n", (8977, 9034), False, 'import os\n'), ((1475, 1514), 'os.path.join', 'os.path.join', (['root', '"""kitchen"""', '"""images"""'], {}), "(root, 'kitchen', 'images')\n", (1487, 1514), False, 'import os\n'), ((1541, 1579), 'os.path.join', 'os.path.join', (['root', '"""living"""', '"""images"""'], {}), "(root, 'living', 'images')\n", (1553, 1579), False, 'import os\n'), ((1607, 1646), 'os.path.join', 'os.path.join', (['root', '"""bedroom"""', '"""images"""'], {}), "(root, 'bedroom', 'images')\n", (1619, 1646), False, 'import os\n'), ((1675, 1715), 'os.path.join', 'os.path.join', (['root', '"""bathroom"""', '"""images"""'], {}), "(root, 'bathroom', 'images')\n", (1687, 1715), False, 'import os\n'), ((3454, 3466), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3463, 3466), False, 'import json\n'), ((6602, 6629), 'alfworld.agents.detector.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (6624, 6629), True, 'import alfworld.agents.detector.transforms as T\n'), ((2068, 2097), 'os.path.join', 'os.path.join', (['kitchen_path', 'f'], {}), '(kitchen_path, f)\n', (2080, 2097), False, 'import os\n'), ((2193, 2221), 'os.path.join', 'os.path.join', (['living_path', 'f'], {}), '(living_path, f)\n', (2205, 2221), False, 'import os\n'), ((2316, 2345), 'os.path.join', 'os.path.join', (['bedroom_path', 'f'], {}), '(bedroom_path, f)\n', (2328, 2345), False, 'import os\n'), ((2443, 2473), 'os.path.join', 'os.path.join', (['bathroom_path', 'f'], {}), '(bathroom_path, f)\n', (2455, 2473), False, 'import os\n'), ((2825, 2856), 'os.path.join', 'os.path.join', (['root', '"""images"""', 'f'], 
{}), "(root, 'images', f)\n", (2837, 2856), False, 'import os\n'), ((2948, 2978), 'os.path.join', 'os.path.join', (['root', '"""masks"""', 'f'], {}), "(root, 'masks', f)\n", (2960, 2978), False, 'import os\n'), ((3069, 3098), 'os.path.join', 'os.path.join', (['root', '"""meta"""', 'f'], {}), "(root, 'meta', f)\n", (3081, 3098), False, 'import os\n'), ((3482, 3502), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3492, 3502), False, 'from PIL import Image\n'), ((1751, 1775), 'os.listdir', 'os.listdir', (['kitchen_path'], {}), '(kitchen_path)\n', (1761, 1775), False, 'import os\n'), ((1811, 1834), 'os.listdir', 'os.listdir', (['living_path'], {}), '(living_path)\n', (1821, 1834), False, 'import os\n'), ((1871, 1895), 'os.listdir', 'os.listdir', (['bedroom_path'], {}), '(bedroom_path)\n', (1881, 1895), False, 'import os\n'), ((1933, 1958), 'os.listdir', 'os.listdir', (['bathroom_path'], {}), '(bathroom_path)\n', (1943, 1958), False, 'import os\n'), ((4359, 4388), 'numpy.all', 'np.all', (['(mask == color)'], {'axis': '(2)'}), '(mask == color, axis=2)\n', (4365, 4388), True, 'import numpy as np\n'), ((4415, 4430), 'numpy.where', 'np.where', (['smask'], {}), '(smask)\n', (4423, 4430), True, 'import numpy as np\n'), ((4504, 4518), 'numpy.min', 'np.min', (['pos[1]'], {}), '(pos[1])\n', (4510, 4518), True, 'import numpy as np\n'), ((4546, 4560), 'numpy.max', 'np.max', (['pos[1]'], {}), '(pos[1])\n', (4552, 4560), True, 'import numpy as np\n'), ((4588, 4602), 'numpy.min', 'np.min', (['pos[0]'], {}), '(pos[0])\n', (4594, 4602), True, 'import numpy as np\n'), ((4630, 4644), 'numpy.max', 'np.max', (['pos[0]'], {}), '(pos[0])\n', (4636, 4644), True, 'import numpy as np\n'), ((5136, 5149), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5144, 5149), True, 'import numpy as np\n'), ((5174, 5261), 'cv2.rectangle', 'cv2.rectangle', (['disp_img', '(xmin, ymin)', '(xmax, ymax)'], {'color': '(0, 255, 0)', 'thickness': '(2)'}), '(disp_img, (xmin, ymin), (xmax, 
ymax), color=(0, 255, 0),\n thickness=2)\n', (5187, 5261), False, 'import cv2\n'), ((5282, 5390), 'cv2.putText', 'cv2.putText', (['disp_img', 'object_class', '(xmin, ymin)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)'], {'thickness': '(2)'}), '(disp_img, object_class, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX,\n 1, (0, 255, 0), thickness=2)\n', (5293, 5390), False, 'import cv2\n'), ((5605, 5625), 'cv2.imshow', 'cv2.imshow', (['"""sg"""', 'sg'], {}), "('sg', sg)\n", (5615, 5625), False, 'import cv2\n'), ((5650, 5664), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5661, 5664), False, 'import cv2\n'), ((5416, 5449), 'numpy.uint8', 'np.uint8', (['smask[:, :, np.newaxis]'], {}), '(smask[:, :, np.newaxis])\n', (5424, 5449), True, 'import numpy as np\n'), ((5561, 5579), 'numpy.array', 'np.array', (['disp_img'], {}), '(disp_img)\n', (5569, 5579), True, 'import numpy as np\n'), ((2889, 2917), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (2901, 2917), False, 'import os\n'), ((3011, 3038), 'os.path.join', 'os.path.join', (['root', '"""masks"""'], {}), "(root, 'masks')\n", (3023, 3038), False, 'import os\n'), ((3131, 3157), 'os.path.join', 'os.path.join', (['root', '"""meta"""'], {}), "(root, 'meta')\n", (3143, 3157), False, 'import os\n')] |
from __future__ import division
from past.utils import old_div
#===============================================================================
# SCG Scaled conjugate gradient optimization.
#
# Copyright (c) <NAME> (1996-2001)
# updates by <NAME> 2013
#
# Permission is granted for anyone to copy, use, or modify these
# programs and accompanying documents for purposes of research or
# education, provided this copyright notice is retained, and note is
# made of any changes that have been made.
#
# These programs and documents are distributed without any warranty,
# express or implied. As the programs were written for research
# purposes only, they have not been tested to the degree that would be
# advisable in any important application. All use of these programs is
# entirely at the user's own risk."
#===============================================================================
from math import sqrt
import numpy as np
import logging
def run(f, x, args=(), niters = 100, gradcheck = False, display = 0, flog = False, pointlog = False, scalelog = False, tolX = 1.0e-8, tolO = 1.0e-8, eval = None):
    '''Scaled conjugate gradient optimization.

    Minimizes ``f`` starting from ``x``; ``f(x, *args)`` must return a
    ``(function_value, gradient)`` pair.

    Parameters actually used here:
        f        -- objective; called as ``f(x, *args) -> (value, gradient)``.
        x        -- initial parameter vector.
        args     -- extra positional arguments forwarded to ``f``.
        niters   -- maximum number of iterations.
        display  -- if truthy, log progress each cycle.
        tolX     -- termination tolerance on the parameter step size.
        tolO     -- termination tolerance on the objective decrease.
        eval     -- optional callable; ``eval(x, *args) -> (value, time)`` is
                    recorded after every accepted step.  NOTE(review): this
                    parameter shadows the builtin ``eval``; renaming would
                    break existing callers, so it is kept.

    ``gradcheck``, ``flog``, ``pointlog`` and ``scalelog`` are accepted for
    interface compatibility but are currently no-ops (see the ``pass``
    bodies below).

    Returns:
        ``(x, listF)`` -- final point and the list of accepted function
        values; or ``(x, listF, evalList, time)`` when ``eval`` is given.
    '''
    if display:
        logging.getLogger(__name__).info('***** starting optimization (SCG) *****')
    nparams = len(x);
    # Check gradients
    if gradcheck:
        pass  # gradient checking not implemented
    eps = 1.0e-4
    sigma0 = 1.0e-4
    result = f(x, *args)
    fold = result[0] # Initial function value.
    fnow = fold
    funcCount = 1 # Increment function evaluation counter.
    gradnew = result[1] # Initial gradient.
    gradold = gradnew
    gradCount = 1 # Increment gradient evaluation counter.
    d = -gradnew # Initial search direction.
    success = 1 # Force calculation of directional derivs.
    nsuccess = 0 # nsuccess counts number of successes.
    beta = 1.0 # Initial scale parameter.
    betamin = 1.0e-15 # Lower bound on scale.
    betamax = 1.0e50 # Upper bound on scale.
    j = 1 # j counts number of iterations.
    if flog:
        pass
        #flog(j, :) = fold;
    if pointlog:
        pass
        #pointlog(j, :) = x;
    # Main optimization loop.
    listF = [fold]
    if eval is not None:
        evalue, timevalue = eval(x, *args)
        evalList = [evalue]
        time = [timevalue]
    while (j <= niters):
        # Calculate first and second directional derivatives.
        if (success == 1):
            mu = np.dot(d, gradnew)
            if (mu >= 0):
                # Not a descent direction -- restart along the negative gradient.
                d = - gradnew
                mu = np.dot(d, gradnew)
            kappa = np.dot(d, d)
            if (kappa < eps):
                # Search direction has (numerically) zero length: converged.
                logging.getLogger(__name__).info("FNEW: " + str(fnow))
                #options(8) = fnow
                if eval is not None:
                    return x, listF, evalList, time
                else:
                    return x, listF
            # Finite-difference estimate of the curvature along d.
            sigma = old_div(sigma0,sqrt(kappa))
            xplus = x + sigma*d
            gplus = f(xplus, *args)[1]
            gradCount += 1
            theta = old_div((np.dot(d, (gplus - gradnew))),sigma);
        # Increase effective curvature and evaluate step size alpha.
        delta = theta + beta*kappa
        if (delta <= 0):
            delta = beta*kappa
            beta = beta - old_div(theta,kappa)
        alpha = old_div(- mu,delta)
        # Calculate the comparison ratio.
        xnew = x + alpha*d
        fnew = f(xnew, *args)[0]
        funcCount += 1;
        Delta = 2*(fnew - fold)/(alpha*mu)
        if (Delta >= 0):
            # Step reduced the objective: accept it.
            success = 1;
            nsuccess += 1;
            x = xnew;
            fnow = fnew;
            listF.append(fnow)
            if eval is not None:
                evalue, timevalue = eval(x, *args)
                evalList.append(evalue)
                time.append(timevalue)
        else:
            # Step rejected: keep the old point.
            success = 0;
            fnow = fold;
        if flog:
            # Store relevant variables
            #flog(j) = fnow; # Current function value
            pass
        if pointlog:
            #pointlog(j,:) = x; # Current position
            pass
        if scalelog:
            #scalelog(j) = beta; # Current scale parameter
            pass
        if display > 0:
            logging.getLogger(__name__).info('***** Cycle %4d Error %11.6f Scale %e', j, fnow, beta)
        if (success == 1):
            # Test for termination
            # print type (alpha), type(d), type(tolX), type(fnew), type(fold)
            if ((max(abs(alpha*d)) < tolX) & (abs(fnew-fold) < tolO)):
                # options(8) = fnew;
                # print "FNEW: " , fnew
                if eval is not None:
                    return x, listF, evalList, time
                else:
                    return x, listF
            else:
                # Update variables for new position
                fold = fnew
                gradold = gradnew
                gradnew = f(x, *args)[1]
                gradCount += 1
                # If the gradient is zero then we are done.
                if (np.dot(gradnew, gradnew) == 0):
                    # print "FNEW: " , fnew
                    # options(8) = fnew;
                    if eval is not None:
                        return x, listF, evalList, time
                    else:
                        return x, listF
        # Adjust beta according to comparison ratio.
        if (Delta < 0.25):
            beta = min(4.0*beta, betamax);
        if (Delta > 0.75):
            beta = max(0.5*beta, betamin);
        # Update search direction using Polak-Ribiere formula, or re-start
        # in direction of negative gradient after nparams steps.
        if (nsuccess == nparams):
            d = -gradnew;
            nsuccess = 0;
        else:
            if (success == 1):
                gamma = old_div(np.dot((gradold - gradnew), gradnew),(mu))
                d = gamma*d - gradnew;
        j += 1
    # If we get here, then we haven't terminated in the given number of
    # iterations.
    # options(8) = fold;
    if (display):
        logging.getLogger(__name__).info("maximum number of iterations reached")
    if eval is not None:
        return x, listF, evalList, time
    else:
        return x, listF
| [
"logging.getLogger",
"numpy.dot",
"math.sqrt",
"past.utils.old_div"
] | [((3475, 3494), 'past.utils.old_div', 'old_div', (['(-mu)', 'delta'], {}), '(-mu, delta)\n', (3482, 3494), False, 'from past.utils import old_div\n'), ((2600, 2618), 'numpy.dot', 'np.dot', (['d', 'gradnew'], {}), '(d, gradnew)\n', (2606, 2618), True, 'import numpy as np\n'), ((2735, 2747), 'numpy.dot', 'np.dot', (['d', 'd'], {}), '(d, d)\n', (2741, 2747), True, 'import numpy as np\n'), ((1217, 1244), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1234, 1244), False, 'import logging\n'), ((2696, 2714), 'numpy.dot', 'np.dot', (['d', 'gradnew'], {}), '(d, gradnew)\n', (2702, 2714), True, 'import numpy as np\n'), ((3067, 3078), 'math.sqrt', 'sqrt', (['kappa'], {}), '(kappa)\n', (3071, 3078), False, 'from math import sqrt\n'), ((3207, 3233), 'numpy.dot', 'np.dot', (['d', '(gplus - gradnew)'], {}), '(d, gplus - gradnew)\n', (3213, 3233), True, 'import numpy as np\n'), ((3437, 3458), 'past.utils.old_div', 'old_div', (['theta', 'kappa'], {}), '(theta, kappa)\n', (3444, 3458), False, 'from past.utils import old_div\n'), ((6277, 6304), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6294, 6304), False, 'import logging\n'), ((4441, 4468), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4458, 4468), False, 'import logging\n'), ((5244, 5268), 'numpy.dot', 'np.dot', (['gradnew', 'gradnew'], {}), '(gradnew, gradnew)\n', (5250, 5268), True, 'import numpy as np\n'), ((6032, 6066), 'numpy.dot', 'np.dot', (['(gradold - gradnew)', 'gradnew'], {}), '(gradold - gradnew, gradnew)\n', (6038, 6066), True, 'import numpy as np\n'), ((2794, 2821), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2811, 2821), False, 'import logging\n')] |
import arabic_reshaper
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
import sys
import warnings
from bidi.algorithm import get_display
from matplotlib import rc
from matplotlib.backends import backend_gtk3
from iran_stock import get_iran_stock_network
from settings import OUTPUT_DIR
warnings.filterwarnings('ignore', module=backend_gtk3.__name__)
# File Settings
XI_PATH = os.path.join(OUTPUT_DIR, 'xi.npy')
LIBRARY_PATH = os.path.join(OUTPUT_DIR, 'library.npy')
# Algorithm Settings
RSI_PERIOD = 7
CV_DAYS = 14
TEST_DAYS = 21
SINDY_ITERATIONS = 10
CANDIDATE_LAMBDAS_RSI = [10 ** i for i in range(-9, -1)] # empirical
def _exponential_moving_average(x, n):
alpha = 1 / n
s = np.zeros(x.shape)
s[0] = x[0] # this is automatically deep copy
for i in range(1, s.shape[0]):
s[i] = x[i] * alpha + s[i - 1] * (1 - alpha)
return s
def _get_iran_stock_rsi():
    """Compute the RSI series of the iran-stock network.

    Returns ``(rsi, node_labels)`` where rsi has one row fewer than the
    raw price matrix (first differences).
    """
    network = get_iran_stock_network()
    prices = network.x
    diffs = prices[1:] - prices[:prices.shape[0] - 1]
    gains = diffs.clip(min=0)
    losses = (-diffs).clip(min=0)
    relative_strength = np.nan_to_num(
        _exponential_moving_average(gains, RSI_PERIOD)
        / _exponential_moving_average(losses, RSI_PERIOD))
    rsi = 100 - (100 / (1 + relative_strength))
    return rsi, network.node_labels
def _normalize_x(x):
normalized_columns = []
normalization_parameters = []
for column_index in range(x.shape[1]):
column = x[:, column_index]
std = max(10 ** -9, np.std(column)) # to avoid division by zero
mean = np.mean(column)
normalized_column = (column - mean) / std
normalized_columns.append(normalized_column)
normalization_parameters.append((mean, std))
normalized_x = np.column_stack(normalized_columns)
return normalized_x, normalization_parameters
def _revert_x(normalized_x, normalization_parameters):
reverted_columns = []
for column_index in range(normalized_x.shape[1]):
column = normalized_x[:, column_index]
mean, std = normalization_parameters[column_index]
reverted_column = column * std + mean
reverted_columns.append(reverted_column)
reverted_x = np.column_stack(reverted_columns)
return reverted_x
def _get_x_dot(x):
x_dot = (x[1:] - x[:len(x) - 1])
return x_dot
def _get_theta(x): # empirical
time_frames = x.shape[0] - 1
column_list = [np.ones(time_frames)]
library = [1]
x_vectors = []
for i in range(x.shape[1]):
library.append((i,))
x_vectors.append(x[:time_frames, i])
column_list += x_vectors
for subset in itertools.combinations(range(x.shape[1]), 2):
library.append(subset)
library.append(tuple(reversed(subset)))
x_i = x[:time_frames, subset[0]]
x_j = x[:time_frames, subset[1]]
column_list.append(x_i / (1 + np.abs(x_j)))
column_list.append(x_j / (1 + np.abs(x_i)))
theta = np.column_stack(column_list)
return library, theta
def _single_node_sindy(x_dot_i, theta, candidate_lambda):
    """Sequentially-thresholded least squares for one node's dynamics.

    Coefficients below ``candidate_lambda`` in magnitude are zeroed, and the
    surviving terms are re-fit, for SINDY_ITERATIONS rounds.
    """
    coeffs = np.linalg.lstsq(theta, x_dot_i, rcond=None)[0]
    for _ in range(SINDY_ITERATIONS):
        magnitudes = np.absolute(coeffs)
        dropped = np.flatnonzero(magnitudes < candidate_lambda)
        kept = np.flatnonzero(magnitudes >= candidate_lambda)
        coeffs[dropped] = 0
        coeffs[kept] = np.linalg.lstsq(theta[:, kept], x_dot_i, rcond=None)[0]
    return coeffs
def _optimum_sindy(x_dot, theta, candidate_lambdas):
    """Per-node SINDy with the threshold lambda chosen on a held-out window.

    The last CV_DAYS rows serve as a validation set; for each node, the
    candidate lambda minimizing (validation MSE * log-complexity) wins.
    Returns the stacked coefficient matrix xi (nodes x library terms).

    NOTE(review): if every candidate yields zero nonzero coefficients,
    best_xi_i stays None and the ``xi[i] = best_xi_i`` assignment would
    fail -- presumably never happens with the configured lambdas; confirm.
    """
    cv_index = x_dot.shape[0] - CV_DAYS
    x_dot_train = x_dot[:cv_index]
    x_dot_cv = x_dot[cv_index:]
    theta_train = theta[:cv_index]
    theta_cv = theta[cv_index:]
    xi = np.zeros((x_dot_train.shape[1], theta_train.shape[1]))
    for i in range(x_dot_train.shape[1]):
        # progress bar
        sys.stdout.write('\rNode [%d/%d]' % (i + 1, x_dot_train.shape[1]))
        sys.stdout.flush()
        least_cost = sys.maxsize
        best_xi_i = None
        x_dot_i = x_dot_train[:, i]
        x_dot_cv_i = x_dot_cv[:, i]
        for candidate_lambda in candidate_lambdas:
            xi_i = _single_node_sindy(x_dot_i, theta_train, candidate_lambda)
            # Complexity grows with the number of surviving library terms.
            complexity = math.log(1 + np.count_nonzero(xi_i))
            x_dot_hat_i = np.matmul(theta_cv, xi_i.T)
            mse_cv = np.square(x_dot_cv_i - x_dot_hat_i).mean()
            if complexity: # zero would mean no statements
                cost = mse_cv * complexity
                if cost < least_cost:
                    least_cost = cost
                    best_xi_i = xi_i
        xi[i] = best_xi_i
    print() # newline
    return xi
def _get_xi_and_library(normalized_rsi):
    """Return the cached (xi, library) pair, or fit SINDy and cache it.

    Results are persisted to XI_PATH / LIBRARY_PATH so repeated runs skip
    the (slow) model selection.
    """
    if os.path.exists(XI_PATH) and os.path.exists(LIBRARY_PATH):
        library = np.load(LIBRARY_PATH, allow_pickle=True)
        xi_sindy = np.load(XI_PATH, allow_pickle=True)
    else:
        entire_x_dot = _get_x_dot(normalized_rsi)
        library, entire_theta = _get_theta(normalized_rsi)
        np.save(LIBRARY_PATH, library)
        # Hold out the last TEST_DAYS rows for prediction evaluation.
        test_index = entire_x_dot.shape[0] - TEST_DAYS
        x_dot_train = entire_x_dot[:test_index]
        theta_train = entire_theta[:test_index]
        xi_sindy = _optimum_sindy(x_dot_train, theta_train, CANDIDATE_LAMBDAS_RSI)
        np.save(XI_PATH, xi_sindy)
    return xi_sindy, library
def _predict_rsi(normalized_rsi, normalization_parameters, xi):
    """Roll the fitted model forward over the last TEST_DAYS, then de-normalize.

    Each step builds theta from the previous (already predicted) state, so
    errors compound over the forecast horizon.
    """
    test_index = normalized_rsi.shape[0] - TEST_DAYS
    normalized_rsi_hat = np.copy(normalized_rsi)
    for time_frame in range(test_index, normalized_rsi.shape[0]):
        # Two-row slice -> one-step theta for the Euler-style update below.
        library_hat, theta_hat = _get_theta(normalized_rsi_hat[time_frame - 1:time_frame + 1])
        x_dot_hat = np.matmul(theta_hat, xi.T)
        normalized_rsi_hat[time_frame] = normalized_rsi_hat[time_frame - 1] + x_dot_hat
    rsi_hat = _revert_x(normalized_rsi_hat, normalization_parameters)
    return rsi_hat
def _calculate_g(rsi_part):
g = np.zeros((rsi_part.shape[1], rsi_part.shape[1]))
for i in range(rsi_part.shape[1]):
avg_xi_2 = np.mean(np.square(rsi_part[:, i]))
for j in range(rsi_part.shape[1]):
if i == j:
g[i, j] = 1
else:
g[i, j] = np.mean(rsi_part[:, i] * rsi_part[:, j]) / avg_xi_2
return g
def _calculate_impact(g):
impact = np.zeros(g.shape[0])
for i in range(g.shape[0]):
impact[i] = np.mean(g.T[i, :])
return impact
def _calculate_stability(g):
stability = np.zeros(g.shape[0])
for i in range(g.shape[0]):
stability[i] = 1 / np.mean(g[i, :])
return stability
def _draw_distribution(data, x_label, y_label, title, file_name):
    """Plot a normalized histogram of ``data`` with RTL labels and save it.

    The figure is written to OUTPUT_DIR/file_name. Labels/title are passed
    through arabic_reshaper (joins the Arabic/Persian letter forms) and
    bidi's get_display (fixes right-to-left ordering) before rendering.
    """
    rc('font', weight=600)
    plt.subplots(figsize=(10, 10))
    ax = sns.distplot(
        data,
        bins=np.arange(np.min(data), np.max(data), (np.max(data) - np.min(data)) / 10),
        norm_hist=True
    )
    ax.set_title(get_display(arabic_reshaper.reshape(title)), fontsize=28, fontweight=500)
    ax.set_xlabel(get_display(arabic_reshaper.reshape(x_label)), fontsize=20, fontweight=500)
    ax.set_ylabel(get_display(arabic_reshaper.reshape(y_label)), fontsize=20, fontweight=500)
    for axis in ['top', 'bottom', 'left', 'right']:
        ax.spines[axis].set_linewidth(3)
    ax.tick_params(width=3, length=10, labelsize=16)
    plt.savefig(os.path.join(OUTPUT_DIR, file_name))
    plt.close('all')
def _better_label(complete_node_label):
return complete_node_label.replace('_', '-').split('-')[-1]
def _save_table(data1, data2, data3, labels, table_name):
    """Write a LaTeX table body (one row per node) to OUTPUT_DIR/table_<name>.txt.

    Each row lists the shortened label followed by the three values;
    \\lr{...} presumably keeps the numbers left-to-right inside an RTL
    document -- confirm against the consuming LaTeX source.
    """
    with open(os.path.join(OUTPUT_DIR, 'table_%s.txt' % table_name), 'w') as table_file:
        for i in range(len(data1)):
            table_file.write('%s & \\lr{%.2f} & \\lr{%.2f} & \\lr{%.2f} \\\\\n' % (
                _better_label(labels[i]),
                data1[i],
                data2[i],
                data3[i]
            ))
def run():
    """Full analysis pipeline: fit SINDy on RSI data, predict, plot, tabulate.

    Three g-matrices are compared: the predicted window before the test
    horizon, the predicted test horizon, and the real test horizon; for
    each, impact/stability distributions are plotted (Persian labels) and
    LaTeX tables are written.
    """
    rsi, node_labels = _get_iran_stock_rsi()
    normalized_rsi, normalization_parameters = _normalize_x(rsi)
    xi, library = _get_xi_and_library(normalized_rsi)
    predicted_rsi = _predict_rsi(normalized_rsi, normalization_parameters, xi)
    # Window before the forecast horizon (predicted values).
    g1 = _calculate_g(predicted_rsi[-2 * TEST_DAYS:-TEST_DAYS])
    impact1 = _calculate_impact(g1)
    stability1 = _calculate_stability(g1)
    # Forecast horizon (predicted values).
    g2 = _calculate_g(predicted_rsi[-TEST_DAYS:])
    impact2 = _calculate_impact(g2)
    stability2 = _calculate_stability(g2)
    # Forecast horizon (real values) for comparison.
    g3 = _calculate_g(rsi[-TEST_DAYS:])
    impact3 = _calculate_impact(g3)
    stability3 = _calculate_stability(g3)
    _draw_distribution(impact1, 'تأثیر', 'چگالی', 'مقدار تأثیر قبل از پیشبینی', 'impact_before_prediction.png')
    _draw_distribution(impact2, 'تأثیر', 'چگالی', 'مقدار تأثیر پس از پیشبینی', 'impact_after_prediction.png')
    _draw_distribution(impact3, 'تأثیر', 'چگالی', 'مقدار تأثیر پس از پیشبینی', 'impact_after_prediction_real.png')
    _draw_distribution(stability1, 'ثبات', 'چگالی', 'مقدار ثبات قبل از پیشبینی', 'stability_before_prediction.png')
    _draw_distribution(stability2, 'ثبات', 'چگالی', 'مقدار ثبات پس از پیشبینی', 'stability_after_prediction.png')
    _draw_distribution(stability3, 'ثبات', 'چگالی', 'مقدار ثبات پس از پیشبینی', 'stability_after_prediction_real.png')
    _save_table(impact1, impact2, impact3, node_labels, 'impact')
    _save_table(stability1, stability2, stability3, node_labels, 'stability')
# Script entry point.
if __name__ == '__main__':
    run()
| [
"numpy.column_stack",
"numpy.count_nonzero",
"matplotlib.rc",
"numpy.save",
"numpy.mean",
"os.path.exists",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.matmul",
"numpy.linalg.lstsq",
"numpy.min",
"sys.stdout.flush",
"arabic_reshaper.reshape",
"numpy.abs",
"numpy.ones",
"numpy.square... | [((354, 417), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'module': 'backend_gtk3.__name__'}), "('ignore', module=backend_gtk3.__name__)\n", (377, 417), False, 'import warnings\n'), ((446, 480), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""xi.npy"""'], {}), "(OUTPUT_DIR, 'xi.npy')\n", (458, 480), False, 'import os\n'), ((496, 535), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""library.npy"""'], {}), "(OUTPUT_DIR, 'library.npy')\n", (508, 535), False, 'import os\n'), ((761, 778), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (769, 778), True, 'import numpy as np\n'), ((985, 1009), 'iran_stock.get_iran_stock_network', 'get_iran_stock_network', ([], {}), '()\n', (1007, 1009), False, 'from iran_stock import get_iran_stock_network\n'), ((1788, 1823), 'numpy.column_stack', 'np.column_stack', (['normalized_columns'], {}), '(normalized_columns)\n', (1803, 1823), True, 'import numpy as np\n'), ((2229, 2262), 'numpy.column_stack', 'np.column_stack', (['reverted_columns'], {}), '(reverted_columns)\n', (2244, 2262), True, 'import numpy as np\n'), ((2983, 3011), 'numpy.column_stack', 'np.column_stack', (['column_list'], {}), '(column_list)\n', (2998, 3011), True, 'import numpy as np\n'), ((3725, 3779), 'numpy.zeros', 'np.zeros', (['(x_dot_train.shape[1], theta_train.shape[1])'], {}), '((x_dot_train.shape[1], theta_train.shape[1]))\n', (3733, 3779), True, 'import numpy as np\n'), ((5488, 5511), 'numpy.copy', 'np.copy', (['normalized_rsi'], {}), '(normalized_rsi)\n', (5495, 5511), True, 'import numpy as np\n'), ((5935, 5983), 'numpy.zeros', 'np.zeros', (['(rsi_part.shape[1], rsi_part.shape[1])'], {}), '((rsi_part.shape[1], rsi_part.shape[1]))\n', (5943, 5983), True, 'import numpy as np\n'), ((6321, 6341), 'numpy.zeros', 'np.zeros', (['g.shape[0]'], {}), '(g.shape[0])\n', (6329, 6341), True, 'import numpy as np\n'), ((6478, 6498), 'numpy.zeros', 'np.zeros', (['g.shape[0]'], {}), '(g.shape[0])\n', (6486, 
6498), True, 'import numpy as np\n'), ((6668, 6690), 'matplotlib.rc', 'rc', (['"""font"""'], {'weight': '(600)'}), "('font', weight=600)\n", (6670, 6690), False, 'from matplotlib import rc\n'), ((6695, 6725), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (6707, 6725), True, 'import matplotlib.pyplot as plt\n'), ((7362, 7378), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7371, 7378), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1612), 'numpy.mean', 'np.mean', (['column'], {}), '(column)\n', (1604, 1612), True, 'import numpy as np\n'), ((2447, 2467), 'numpy.ones', 'np.ones', (['time_frames'], {}), '(time_frames)\n', (2454, 2467), True, 'import numpy as np\n'), ((3109, 3152), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['theta', 'x_dot_i'], {'rcond': 'None'}), '(theta, x_dot_i, rcond=None)\n', (3124, 3152), True, 'import numpy as np\n'), ((3853, 3919), 'sys.stdout.write', 'sys.stdout.write', (["('\\rNode [%d/%d]' % (i + 1, x_dot_train.shape[1]))"], {}), "('\\rNode [%d/%d]' % (i + 1, x_dot_train.shape[1]))\n", (3869, 3919), False, 'import sys\n'), ((3928, 3946), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3944, 3946), False, 'import sys\n'), ((4716, 4739), 'os.path.exists', 'os.path.exists', (['XI_PATH'], {}), '(XI_PATH)\n', (4730, 4739), False, 'import os\n'), ((4744, 4772), 'os.path.exists', 'os.path.exists', (['LIBRARY_PATH'], {}), '(LIBRARY_PATH)\n', (4758, 4772), False, 'import os\n'), ((4792, 4832), 'numpy.load', 'np.load', (['LIBRARY_PATH'], {'allow_pickle': '(True)'}), '(LIBRARY_PATH, allow_pickle=True)\n', (4799, 4832), True, 'import numpy as np\n'), ((4852, 4887), 'numpy.load', 'np.load', (['XI_PATH'], {'allow_pickle': '(True)'}), '(XI_PATH, allow_pickle=True)\n', (4859, 4887), True, 'import numpy as np\n'), ((5015, 5045), 'numpy.save', 'np.save', (['LIBRARY_PATH', 'library'], {}), '(LIBRARY_PATH, library)\n', (5022, 5045), True, 'import numpy as np\n'), 
((5288, 5314), 'numpy.save', 'np.save', (['XI_PATH', 'xi_sindy'], {}), '(XI_PATH, xi_sindy)\n', (5295, 5314), True, 'import numpy as np\n'), ((5693, 5719), 'numpy.matmul', 'np.matmul', (['theta_hat', 'xi.T'], {}), '(theta_hat, xi.T)\n', (5702, 5719), True, 'import numpy as np\n'), ((6394, 6412), 'numpy.mean', 'np.mean', (['g.T[i, :]'], {}), '(g.T[i, :])\n', (6401, 6412), True, 'import numpy as np\n'), ((7321, 7356), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', 'file_name'], {}), '(OUTPUT_DIR, file_name)\n', (7333, 7356), False, 'import os\n'), ((1537, 1551), 'numpy.std', 'np.std', (['column'], {}), '(column)\n', (1543, 1551), True, 'import numpy as np\n'), ((3407, 3466), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['theta[:, big_indices]', 'x_dot_i'], {'rcond': 'None'}), '(theta[:, big_indices], x_dot_i, rcond=None)\n', (3422, 3466), True, 'import numpy as np\n'), ((4295, 4322), 'numpy.matmul', 'np.matmul', (['theta_cv', 'xi_i.T'], {}), '(theta_cv, xi_i.T)\n', (4304, 4322), True, 'import numpy as np\n'), ((6050, 6075), 'numpy.square', 'np.square', (['rsi_part[:, i]'], {}), '(rsi_part[:, i])\n', (6059, 6075), True, 'import numpy as np\n'), ((6558, 6574), 'numpy.mean', 'np.mean', (['g[i, :]'], {}), '(g[i, :])\n', (6565, 6574), True, 'import numpy as np\n'), ((6909, 6939), 'arabic_reshaper.reshape', 'arabic_reshaper.reshape', (['title'], {}), '(title)\n', (6932, 6939), False, 'import arabic_reshaper\n'), ((7001, 7033), 'arabic_reshaper.reshape', 'arabic_reshaper.reshape', (['x_label'], {}), '(x_label)\n', (7024, 7033), False, 'import arabic_reshaper\n'), ((7095, 7127), 'arabic_reshaper.reshape', 'arabic_reshaper.reshape', (['y_label'], {}), '(y_label)\n', (7118, 7127), False, 'import arabic_reshaper\n'), ((7559, 7612), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "('table_%s.txt' % table_name)"], {}), "(OUTPUT_DIR, 'table_%s.txt' % table_name)\n", (7571, 7612), False, 'import os\n'), ((3233, 3250), 'numpy.absolute', 'np.absolute', (['xi_i'], {}), '(xi_i)\n', 
(3244, 3250), True, 'import numpy as np\n'), ((3308, 3325), 'numpy.absolute', 'np.absolute', (['xi_i'], {}), '(xi_i)\n', (3319, 3325), True, 'import numpy as np\n'), ((6786, 6798), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (6792, 6798), True, 'import numpy as np\n'), ((6800, 6812), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (6806, 6812), True, 'import numpy as np\n'), ((2904, 2915), 'numpy.abs', 'np.abs', (['x_j'], {}), '(x_j)\n', (2910, 2915), True, 'import numpy as np\n'), ((2956, 2967), 'numpy.abs', 'np.abs', (['x_i'], {}), '(x_i)\n', (2962, 2967), True, 'import numpy as np\n'), ((4245, 4267), 'numpy.count_nonzero', 'np.count_nonzero', (['xi_i'], {}), '(xi_i)\n', (4261, 4267), True, 'import numpy as np\n'), ((4344, 4379), 'numpy.square', 'np.square', (['(x_dot_cv_i - x_dot_hat_i)'], {}), '(x_dot_cv_i - x_dot_hat_i)\n', (4353, 4379), True, 'import numpy as np\n'), ((6215, 6255), 'numpy.mean', 'np.mean', (['(rsi_part[:, i] * rsi_part[:, j])'], {}), '(rsi_part[:, i] * rsi_part[:, j])\n', (6222, 6255), True, 'import numpy as np\n'), ((6815, 6827), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (6821, 6827), True, 'import numpy as np\n'), ((6830, 6842), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (6836, 6842), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# test_mapQtoR.py
# This module provides the tests for the mapQtoR function.
# Copyright 2014 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""This module provides the test class for the mapQtoR() function.
"""
from __future__ import division
import unittest
import numpy as np
import deltasigma as ds
class TestMapQtoR(unittest.TestCase):
    """Test class for mapQtoR()"""
    def setUp(self):
        # A is a complex 6x7 matrix with equal real and imaginary parts:
        # A = M + 1j*M for M = 1..42 reshaped (7, 6) then transposed.
        A = np.arange(1, 6*7 + 1, dtype=np.int16).reshape((7, 6)).T
        self.A = A + 1j*A
        # Expected result: each complex entry a+jb becomes the 2x2 real
        # block [[a, -b], [b, a]] (here a == b), doubling both dimensions.
        self.Ares = \
        np.array([[1, -1, 7, -7, 13, -13, 19, -19, 25, -25, 31, -31, 37, -37],
                  [1, 1, 7, 7, 13, 13, 19, 19, 25, 25, 31, 31, 37, 37],
                  [2, -2, 8, -8, 14, -14, 20, -20, 26, -26, 32, -32, 38, -38],
                  [2, 2, 8, 8, 14, 14, 20, 20, 26, 26, 32, 32, 38, 38],
                  [3, -3, 9, -9, 15, -15, 21, -21, 27, -27, 33, -33, 39, -39],
                  [3, 3, 9, 9, 15, 15, 21, 21, 27, 27, 33, 33, 39, 39],
                  [4, -4, 10, -10, 16, -16, 22, -22, 28, -28, 34, -34, 40, -40],
                  [4, 4, 10, 10, 16, 16, 22, 22, 28, 28, 34, 34, 40, 40],
                  [5, -5, 11, -11, 17, -17, 23, -23, 29, -29, 35, -35, 41, -41],
                  [5, 5, 11, 11, 17, 17, 23, 23, 29, 29, 35, 35, 41, 41],
                  [6, -6, 12, -12, 18, -18, 24, -24, 30, -30, 36, -36, 42, -42],
                  [6, 6, 12, 12, 18, 18, 24, 24, 30, 30, 36, 36, 42, 42]],
                 dtype=np.int16)
    def test_mapQtoR(self):
        """Test function for mapQtoR()"""
        At = ds.mapQtoR(self.A)
        self.assertTrue(np.allclose(At, self.Ares))
| [
"numpy.array",
"numpy.allclose",
"deltasigma.mapQtoR",
"numpy.arange"
] | [((992, 1767), 'numpy.array', 'np.array', (['[[1, -1, 7, -7, 13, -13, 19, -19, 25, -25, 31, -31, 37, -37], [1, 1, 7, 7, \n 13, 13, 19, 19, 25, 25, 31, 31, 37, 37], [2, -2, 8, -8, 14, -14, 20, -\n 20, 26, -26, 32, -32, 38, -38], [2, 2, 8, 8, 14, 14, 20, 20, 26, 26, 32,\n 32, 38, 38], [3, -3, 9, -9, 15, -15, 21, -21, 27, -27, 33, -33, 39, -39\n ], [3, 3, 9, 9, 15, 15, 21, 21, 27, 27, 33, 33, 39, 39], [4, -4, 10, -\n 10, 16, -16, 22, -22, 28, -28, 34, -34, 40, -40], [4, 4, 10, 10, 16, 16,\n 22, 22, 28, 28, 34, 34, 40, 40], [5, -5, 11, -11, 17, -17, 23, -23, 29,\n -29, 35, -35, 41, -41], [5, 5, 11, 11, 17, 17, 23, 23, 29, 29, 35, 35, \n 41, 41], [6, -6, 12, -12, 18, -18, 24, -24, 30, -30, 36, -36, 42, -42],\n [6, 6, 12, 12, 18, 18, 24, 24, 30, 30, 36, 36, 42, 42]]'], {'dtype': 'np.int16'}), '([[1, -1, 7, -7, 13, -13, 19, -19, 25, -25, 31, -31, 37, -37], [1, \n 1, 7, 7, 13, 13, 19, 19, 25, 25, 31, 31, 37, 37], [2, -2, 8, -8, 14, -\n 14, 20, -20, 26, -26, 32, -32, 38, -38], [2, 2, 8, 8, 14, 14, 20, 20, \n 26, 26, 32, 32, 38, 38], [3, -3, 9, -9, 15, -15, 21, -21, 27, -27, 33, \n -33, 39, -39], [3, 3, 9, 9, 15, 15, 21, 21, 27, 27, 33, 33, 39, 39], [4,\n -4, 10, -10, 16, -16, 22, -22, 28, -28, 34, -34, 40, -40], [4, 4, 10, \n 10, 16, 16, 22, 22, 28, 28, 34, 34, 40, 40], [5, -5, 11, -11, 17, -17, \n 23, -23, 29, -29, 35, -35, 41, -41], [5, 5, 11, 11, 17, 17, 23, 23, 29,\n 29, 35, 35, 41, 41], [6, -6, 12, -12, 18, -18, 24, -24, 30, -30, 36, -\n 36, 42, -42], [6, 6, 12, 12, 18, 18, 24, 24, 30, 30, 36, 36, 42, 42]],\n dtype=np.int16)\n', (1000, 1767), True, 'import numpy as np\n'), ((2020, 2038), 'deltasigma.mapQtoR', 'ds.mapQtoR', (['self.A'], {}), '(self.A)\n', (2030, 2038), True, 'import deltasigma as ds\n'), ((2063, 2089), 'numpy.allclose', 'np.allclose', (['At', 'self.Ares'], {}), '(At, self.Ares)\n', (2074, 2089), True, 'import numpy as np\n'), ((880, 919), 'numpy.arange', 'np.arange', (['(1)', '(6 * 7 + 1)'], {'dtype': 'np.int16'}), '(1, 6 * 7 + 1, 
dtype=np.int16)\n', (889, 919), True, 'import numpy as np\n')] |
import torch
import numpy as np
from dltranz.seq_encoder.utils import NormEncoder
def test_norm_encoder():
    """NormEncoder must L2-normalize every row of its input tensor."""
    inputs = torch.tensor(
        [[1.0, 0.0], [0.0, 2.0], [3.0, 4.0]],
        dtype=torch.float64,
    )
    encoder = NormEncoder()
    actual = encoder(inputs).numpy()
    expected = np.array([[1.0, 0.0], [0.0, 1.0], [0.6, 0.8]])
    np.testing.assert_array_almost_equal(expected, actual)
| [
"torch.tensor",
"numpy.array",
"numpy.testing.assert_array_almost_equal",
"dltranz.seq_encoder.utils.NormEncoder"
] | [((118, 189), 'torch.tensor', 'torch.tensor', (['[[1.0, 0.0], [0.0, 2.0], [3.0, 4.0]]'], {'dtype': 'torch.float64'}), '([[1.0, 0.0], [0.0, 2.0], [3.0, 4.0]], dtype=torch.float64)\n', (130, 189), False, 'import torch\n'), ((230, 243), 'dltranz.seq_encoder.utils.NormEncoder', 'NormEncoder', ([], {}), '()\n', (241, 243), False, 'from dltranz.seq_encoder.utils import NormEncoder\n'), ((277, 323), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0], [0.6, 0.8]]'], {}), '([[1.0, 0.0], [0.0, 1.0], [0.6, 0.8]])\n', (285, 323), True, 'import numpy as np\n'), ((359, 405), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['exp', 'out'], {}), '(exp, out)\n', (395, 405), True, 'import numpy as np\n')] |
import numpy as np
from adaptive_baselines.samplers.svgd import BandwidthHeuristic, OptimizationSVGDSampler, VanillaSVGDSampler
from scipy.stats import multivariate_normal
from sprl.distributions.kl_joint import KLGaussian, KLJoint, KLPolicy
class SVGDKLGaussian(KLGaussian):
    """KLGaussian whose samples are refined with Stein variational gradient
    descent (SVGD) instead of being redrawn from scratch.

    A particle buffer (``_samples``) is kept between calls; on each
    ``sample`` call the particles are moved toward the current Gaussian
    with an SVGD sampler, optionally only for the entries selected by a
    boolean mask.
    """
    def __init__(self, lower_bounds, upper_bounds, mu, sigma):
        super().__init__(lower_bounds, upper_bounds, mu, sigma)
        self._samples = None           # particle buffer; None until first sample()
        self._counter = 0              # number of SVGD refinement calls so far
        self._keep_old_samples = False # set via set_buffer_values()
        self._sampler = VanillaSVGDSampler(BandwidthHeuristic.HEMETHOD,
                                           stepsize=1e-1)
    def sample(self, num_samples=1, mask=None):
        """Return ``num_samples`` particles, refining the stored buffer via SVGD.

        First call (empty buffer) falls back to plain Gaussian sampling.
        Later calls grow the buffer with fresh draws when needed and then
        run the SVGD update; with ``mask`` only the masked particles move.
        Raises RuntimeError if the buffer already holds more particles
        than requested (shrinking is not supported).
        """
        if self._samples is None:
            print("Complete resample")
            return np.array(super().sample(num_samples))
        else:
            dis = multivariate_normal(self.mu, self.sigma)
            if num_samples > self._samples.shape[0]:
                # Top up the buffer with fresh draws from the current Gaussian.
                self._samples = np.concatenate(
                    (self._samples,
                     dis.rvs(num_samples - self._samples.shape[0])))
            elif self._keep_old_samples:
                # Keep the injected buffer and append new draws; the mask
                # restricts the SVGD update to the new particles only.
                self._samples = np.concatenate(
                    (self._samples, dis.rvs(num_samples)))
                print(f"Keeping old samples, but adding {num_samples} new ones.")
                mask = np.full((self._samples.shape[0]), False)
                mask[-num_samples:] = True
            elif num_samples < self._samples.shape[0]:
                raise RuntimeError(
                    "SVGDKLGaussian: num_samples must (currently) be greater than the number of samples already stored"
                )
            if mask is not None:
                print(f"SVGDKLGaussian: Using mask")
                # self._samples = OptimizationSVGDSampler(8).sample_with_mask(
                #     dis,
                #     self._samples,
                #     bounds=(self.lower_bounds, self.upper_bounds),
                #     mask=mask)[mask]
                self._samples = self._sampler.sample_with_mask(dis,
                                                               self._samples,
                                                               n_iter=100,
                                                               bounds=(self.lower_bounds,
                                                                       self.upper_bounds),
                                                               mask=mask)[1][mask]
            else:
                print(f"SVGDKLGaussian: NOT using mask")
                self._samples = OptimizationSVGDSampler(8).sample_with_bounds(
                    dis,
                    self._samples,
                    bounds=(self.lower_bounds, self.upper_bounds))
            self._counter += 1
            return self._samples
    @property
    def mu(self):
        # Mean of the target Gaussian.
        return self._mu
    @mu.setter
    def mu(self, value):
        self._mu = value
        # NOTE(review): _distribution_shift is set here but never read in
        # this class (SVGDKLPolicy uses it) -- confirm whether intended.
        self._distribution_shift = True
    @property
    def sigma(self):
        # Covariance of the target Gaussian.
        return self._sigma
    @sigma.setter
    def sigma(self, value):
        self._sigma = value
        self._distribution_shift = True
    def set_buffer_values(self, values: np.ndarray):
        """Inject an external particle buffer and keep it on future calls."""
        self._samples = values
        self._keep_old_samples = True
    def clear_sample_buffer(self):
        """Drop the particle buffer; the next sample() does a full resample."""
        self._samples = None
        self._keep_old_samples = False
class SVGDKLPolicy(KLPolicy):
    """KLPolicy variant that refines cached actions with SVGD after the
    policy parameters (theta/sigma) change, instead of re-sampling.
    """
    def __init__(self, lower_bounds, upper_bounds, mu_init, sigma_init,
                 feature_func):
        super().__init__(lower_bounds, upper_bounds, mu_init, sigma_init,
                         feature_func)
        self._samples = None            # cached action sample(s)
        # NOTE(review): set True by the theta/sigma setters but never reset
        # to False afterwards -- confirm this is intended.
        self._distribution_shift = False
    def sample_action(self, state):
        """Sample an action for ``state``.

        On the first call (or before any parameter change) delegates to the
        parent; afterwards the cached sample is moved by SVGD toward the
        Gaussian defined by the greedy action and state-dependent variance.
        Bound violations are only reported, not corrected.
        """
        # TODO: Squeeze the output
        if self._samples is None or not self._distribution_shift:
            self._samples = super().sample_action(state)
        else:
            mu = self.compute_greedy_action(state)
            sigma = self.compute_variance(state)
            dis = multivariate_normal(mu, sigma)
            self._samples = OptimizationSVGDSampler(3.).sample(
                dis, self._samples)
        # Collapse a single-row 2-D sample to 1-D.
        if len(self._samples.shape) == 2 and self._samples.shape[0] == 1:
            self._samples = np.squeeze(self._samples)
        if np.any(self.lower_bounds > self._samples) or np.any(
                self._samples > self.upper_bounds):
            print(
                f"Bounds were violated: {self._samples}\n Bounds: {self.lower_bounds} {self.upper_bounds}"
            )
        return self._samples
    @property
    def theta(self):
        # Policy parameter vector.
        return self._theta
    @theta.setter
    def theta(self, value):
        self._theta = value
        self._distribution_shift = True
    @property
    def sigma(self):
        # Policy covariance.
        return self._sigma
    @sigma.setter
    def sigma(self, value):
        self._sigma = value
        self._distribution_shift = True
class SVGDJoint(KLJoint):
    """KLJoint whose context distribution can be an SVGD-refined Gaussian.

    ``svgd_type`` selects the context distribution: 'prune_old' uses
    SVGDPruningKLGaussian, 'simple' uses SVGDKLGaussian, anything else
    falls back to the plain KLGaussian. The policy side is always a plain
    KLPolicy (not the SVGD variant).

    NOTE(review): KLJoint.__init__ is not called; all parent state is set
    directly here -- confirm KLJoint has no other required initialization.
    """
    def __init__(self,
                 lower_bounds_x,
                 upper_bounds_x,
                 mu_x,
                 sigma_x,
                 lower_bounds_y,
                 upper_bounds_y,
                 mu_y,
                 sigma_y,
                 feature_func,
                 epsilon,
                 max_eta=100,
                 svgd_type=None):
        print(f"Using sampler type: {svgd_type}")
        if svgd_type == 'prune_old':
            self.distribution = SVGDPruningKLGaussian(lower_bounds_x,
                                                      upper_bounds_x, mu_x,
                                                      sigma_x)
        elif svgd_type == 'simple':
            self.distribution = SVGDKLGaussian(lower_bounds_x, upper_bounds_x,
                                               mu_x, sigma_x)
        else:
            self.distribution = KLGaussian(lower_bounds_x, upper_bounds_x,
                                           mu_x, sigma_x)
        self.policy = KLPolicy(lower_bounds_y, upper_bounds_y, mu_y, sigma_y,
                               feature_func)
        self.epsilon = epsilon
        self.max_eta = max_eta
class SVGDPruningKLGaussian(SVGDKLGaussian):
    """SVGDKLGaussian that discards its oldest particles before each call.

    Dropping ``prune_amount`` buffered particles forces the parent's
    sample() to draw that many fresh ones from the current Gaussian.
    """
    def __init__(self, lower_bounds, upper_bounds, mu, sigma, prune_amount=10):
        super().__init__(lower_bounds, upper_bounds, mu, sigma)
        self._prune_amount = prune_amount  # particles dropped per sample() call
    def sample(self, num_samples=1):
        # Prune the head of the buffer (oldest entries) before delegating.
        if self._samples is not None:
            self._samples = self._samples[self._prune_amount:]
        return super().sample(num_samples=num_samples)
| [
"scipy.stats.multivariate_normal",
"sprl.distributions.kl_joint.KLGaussian",
"numpy.any",
"numpy.squeeze",
"adaptive_baselines.samplers.svgd.VanillaSVGDSampler",
"numpy.full",
"adaptive_baselines.samplers.svgd.OptimizationSVGDSampler",
"sprl.distributions.kl_joint.KLPolicy"
] | [((524, 585), 'adaptive_baselines.samplers.svgd.VanillaSVGDSampler', 'VanillaSVGDSampler', (['BandwidthHeuristic.HEMETHOD'], {'stepsize': '(0.1)'}), '(BandwidthHeuristic.HEMETHOD, stepsize=0.1)\n', (542, 585), False, 'from adaptive_baselines.samplers.svgd import BandwidthHeuristic, OptimizationSVGDSampler, VanillaSVGDSampler\n'), ((5935, 6004), 'sprl.distributions.kl_joint.KLPolicy', 'KLPolicy', (['lower_bounds_y', 'upper_bounds_y', 'mu_y', 'sigma_y', 'feature_func'], {}), '(lower_bounds_y, upper_bounds_y, mu_y, sigma_y, feature_func)\n', (5943, 6004), False, 'from sprl.distributions.kl_joint import KLGaussian, KLJoint, KLPolicy\n'), ((851, 891), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['self.mu', 'self.sigma'], {}), '(self.mu, self.sigma)\n', (870, 891), False, 'from scipy.stats import multivariate_normal\n'), ((3982, 4012), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (4001, 4012), False, 'from scipy.stats import multivariate_normal\n'), ((4260, 4301), 'numpy.any', 'np.any', (['(self.lower_bounds > self._samples)'], {}), '(self.lower_bounds > self._samples)\n', (4266, 4301), True, 'import numpy as np\n'), ((4305, 4346), 'numpy.any', 'np.any', (['(self._samples > self.upper_bounds)'], {}), '(self._samples > self.upper_bounds)\n', (4311, 4346), True, 'import numpy as np\n'), ((4223, 4248), 'numpy.squeeze', 'np.squeeze', (['self._samples'], {}), '(self._samples)\n', (4233, 4248), True, 'import numpy as np\n'), ((5812, 5869), 'sprl.distributions.kl_joint.KLGaussian', 'KLGaussian', (['lower_bounds_x', 'upper_bounds_x', 'mu_x', 'sigma_x'], {}), '(lower_bounds_x, upper_bounds_x, mu_x, sigma_x)\n', (5822, 5869), False, 'from sprl.distributions.kl_joint import KLGaussian, KLJoint, KLPolicy\n'), ((1351, 1389), 'numpy.full', 'np.full', (['self._samples.shape[0]', '(False)'], {}), '(self._samples.shape[0], False)\n', (1358, 1389), True, 'import numpy as np\n'), ((4041, 4069), 
'adaptive_baselines.samplers.svgd.OptimizationSVGDSampler', 'OptimizationSVGDSampler', (['(3.0)'], {}), '(3.0)\n', (4064, 4069), False, 'from adaptive_baselines.samplers.svgd import BandwidthHeuristic, OptimizationSVGDSampler, VanillaSVGDSampler\n'), ((2538, 2564), 'adaptive_baselines.samplers.svgd.OptimizationSVGDSampler', 'OptimizationSVGDSampler', (['(8)'], {}), '(8)\n', (2561, 2564), False, 'from adaptive_baselines.samplers.svgd import BandwidthHeuristic, OptimizationSVGDSampler, VanillaSVGDSampler\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from queue import Queue
from pathlib2 import Path
from threading import Thread
from functools import partial
import cv2
import numpy as np
from data.augmentations import AugmentationBase, Resize
class Producer(Thread):
    """Daemon worker thread that pulls work from ``in_queue`` (if given),
    applies ``get_data_func`` and pushes each resulting item to ``out_queue``.

    NOTE(review): ``should_stop`` is only checked between iterations; a thread
    blocked on ``Queue.get``/``Queue.put`` will not observe it until unblocked.
    """
    DEBUG = False

    def __init__(self, get_data_func, pull_size=1, in_queue=None, out_queue=None, tag=None):
        """
        get_data_func must support iterable
        empty list (or None) should be returned in case there's no output

        :param get_data_func: callable; called with the pulled input(s) when
            ``in_queue`` is set, otherwise with no arguments.
        :param pull_size: number of items pulled from ``in_queue`` per call
            (a list is passed to ``get_data_func`` when > 1).
        :param tag: optional int appended to the thread name for logging.
        """
        self.pull_size = pull_size
        name = 'Producer' + ('' if not tag else ' %d' % tag)
        super(Producer, self).__init__(name=name)
        self.daemon = True
        self.should_stop = False
        self.thread_func = get_data_func
        self.out_queue = out_queue
        self.in_queue = in_queue

    def run(self):
        while not self.should_stop:
            if self.in_queue is not None:
                if self.pull_size > 1:
                    inputs = [self.in_queue.get(block=True, timeout=None) for _ in range(self.pull_size)]
                else:
                    inputs = self.in_queue.get(block=True, timeout=None)
                outputs = self.thread_func(inputs)
            else:
                outputs = self.thread_func()
            # BUG FIX: the None check must happen BEFORE _iterable_check --
            # previously None was wrapped into (None,), making the check dead
            # code and pushing None items into out_queue.
            if outputs is None:
                continue
            outputs = self._iterable_check(outputs)
            for output in outputs:
                self.out_queue.put(output, block=True, timeout=None)

    def _iterable_check(self, outputs):
        # Wrap a single item so the caller can always iterate the result.
        if not isinstance(outputs, (tuple, list, np.ndarray)):
            outputs = (outputs,)
        return outputs
class InBatchKeys:
    """String constants naming how per-sample fields are combined into a batch
    by :meth:`PipelineBase.batcher_function`."""
    list = 'list'      # keep the samples as a plain Python list
    vstack = 'vstack'  # stack along a new leading batch axis (np.vstack)
    hstack = 'hstack'  # concatenate rows after prepending a batch-id column
class PipelineError(Exception):
    """Raised on incorrect pipeline usage (e.g. pulling data before run())."""
class DataBatch(object):
    """Plain container for one batch of model inputs/targets.

    All fields default to None; producers fill in only the fields relevant
    to the active pipeline configuration.
    """
    images = None
    # Heatmap
    truth_maps = None
    trim_maps = None
    gt_boxes = None
    # RPN
    rpn_labels = None
    rpn_bbox_targets = None
    rpn_bbox_inside_weights = None
    rpn_bbox_outside_weights = None
    # Center Boxes
    wh_targets = None
    wh_labels = None
    # Meta data
    meta_images = None
    # Embeddings
    phocs = None
class PipelineBase(object):
    """
    Multithreaded MetaImage loader.
    Has 4 queues:
    (1) MetaImage loader - threads collect metaimages from data generators
    (2) Image processing queue - loads images and boxes from MetaImage and resizes them
    (3) Input Format - transform images to the format needed for model input
    (4) Batcher - groups input-format samples into batches
    """
    PERMUTATION_BATCH = 100       # metadata items fetched per generator pull
    DEBUG = False
    IMAGE_PROC_QUEUE_SIZE = 10    # queue capacity multiplier (x batch_size)

    def __init__(self, iterator, batch_size, fmap_x=None, fmap_y=None, target_y=None, target_x=None, logger=None,
                 **kwargs):
        """
        :param iterator: generator yielding MetaImage objects (consumed via __next__)
        :param batch_size: samples per batch pulled by the batcher producer
        :param fmap_x/fmap_y: feature-map width/height (used by get_relative_points callers)
        :param target_y/target_x: target image height/width in pixels; when both
            are set, augmentations/resize are applied
        :param logger: callable used for status messages (defaults to print)
        """
        self.fmap_height = fmap_y
        self.fmap_width = fmap_x
        self.target_h = target_y
        self.target_w = target_x
        self.batch_size = batch_size
        self._iterator = iterator
        # Bounded queues provide back-pressure between pipeline stages.
        self.meta_queue = Queue(maxsize=(self.PERMUTATION_BATCH + 1))
        self.image_proc_queue = Queue(maxsize=(self.IMAGE_PROC_QUEUE_SIZE * self.batch_size))
        self.input_format_queue = Queue(maxsize=(self.IMAGE_PROC_QUEUE_SIZE * self.batch_size))
        self.batch_queue = Queue(maxsize=self.batch_size)
        self.meta_producers = []
        self.image_proc_producers = []
        self.input_format_producers = []
        self.batch_producers = []
        self._augmentations = []
        self._extenders = []
        self._ready = False
        self._finished_data_generation = False
        self.logger = logger if logger is not None else print

    def run(self, num_producers):
        """Build and start all producer threads; must be called before pull_data()."""
        self.build_producers(num_producers=num_producers)
        self.start_producers()
        self._ready = True

    def build_producers(self, num_producers):
        # load metadata (single producer feeding the meta queue)
        p_meta = Producer(get_data_func=self.get_metadata, out_queue=self.meta_queue)
        self.meta_producers.append(p_meta)
        # NOTE(review): the inner loop reuses the variable name `i`, shadowing
        # the outer index; per outer iteration this creates 2 image-proc
        # producers and 1 input-format producer.
        for i in range(num_producers):
            # load image and properly reshape
            for i in range(2):
                p_image_proc = Producer(get_data_func=self.process_metadata_and_load_image, in_queue=self.meta_queue,
                                        out_queue=self.image_proc_queue)
                self.image_proc_producers.append(p_image_proc)
            # prepare data for neural net
            p_input_format = Producer(get_data_func=self.prepare_data_for_neural_net, in_queue=self.image_proc_queue,
                                      out_queue=self.input_format_queue)
            self.input_format_producers.append(p_input_format)
        # single batcher pulling batch_size items at a time
        batcher = Producer(get_data_func=self.batcher_function, in_queue=self.input_format_queue,
                           out_queue=self.batch_queue, pull_size=self.batch_size)
        self.batch_producers.append(batcher)
        pass

    def start_producers(self):
        """Start every built producer thread, logging each stage."""
        def start_producer(producer_list, tag=None):
            if len(producer_list) > 0:
                for p in producer_list:
                    if tag is not None:
                        self.logger('Starting %s Producers' % tag)
                    else:
                        self.logger('Starting Meta Producers')
                    p.start()
        start_producer(self.meta_producers, tag='Meta')
        start_producer(self.image_proc_producers, tag='Image Processing')
        start_producer(self.input_format_producers, tag='Input Formatting')
        start_producer(self.batch_producers, tag='Batch')
        return

    def stop_producers(self):
        """Request all producers to stop (threads blocked on a queue may not
        observe the flag until their current get/put unblocks)."""
        def stop_producer(producer_list):
            if len(producer_list) > 0:
                for p in producer_list:
                    p.should_stop = True
        self.logger('Stopping Producers')
        stop_producer(self.meta_producers)
        stop_producer(self.image_proc_producers)
        stop_producer(self.input_format_producers)
        stop_producer(self.batch_producers)
        return

    def get_metadata(self):
        """load image metadata

        Pulls up to PERMUTATION_BATCH items from the iterator, skipping None
        entries; sets _finished_data_generation when the iterator is exhausted.
        """
        generator = self._iterator
        metas = []
        for i in range(self.PERMUTATION_BATCH):
            try:
                meta_data = generator.__next__()
                if meta_data is None:
                    continue
                metas.append(meta_data)
            except StopIteration:
                self._finished_data_generation = True
                break
        return metas

    def add_augmentation(self, aug, **kwargs):
        """Instantiate augmentation class `aug` with the pipeline's target size.

        NOTE(review): an unsupported class is logged but still instantiated
        and appended -- presumably a missing early return; confirm intent.
        """
        if not issubclass(aug, AugmentationBase):
            self.logger('Unsupported augmenataion %s... Ignoring...' % aug.__name__)
        aug_inst = aug(target_width=self.target_w, target_height=self.target_h, **kwargs)
        self._augmentations.append(aug_inst)

    def process_metadata_and_load_image(self, meta_image):
        """
        this function receives metaimage class\
        class has path string, bboxes 2D np.array [x1,y1,x2,y2], getImage method and showImage method
        """
        if self.DEBUG:
            self.logger('Loading some images')
        image = meta_image.getImage()
        bboxes = meta_image.bboxes
        # print(image)
        # pdb.set_trace()
        if self.target_h is not None and self.target_w is not None:
            # if no augmentations is added, add the resize
            if not self._augmentations:
                self.add_augmentation(Resize)
            # We support augmentations only if target size is specified
            for aug in self._augmentations:
                image, bboxes, meta_image = aug.apply(image, bboxes, meta_image)
        output = (image, bboxes, meta_image)
        # dtype=object keeps the heterogeneous triple intact through the queue
        return np.array([output], dtype=object)

    def add_extender(self, names, extender_func, in_batch='list', **kwargs):
        """Register an extender producing extra named fields per sample.

        :param names: field name(s) passed through to the extender
        :param extender_func: callable(image, gt_boxes, meta_image, names=...)
        :param in_batch: one of InBatchKeys -- how the field is batched
        """
        allowed = [InBatchKeys.list, InBatchKeys.vstack, InBatchKeys.hstack]
        assert in_batch in allowed, '%s unsupported. %s' % (in_batch, str(allowed))
        extender = partial(extender_func, names=names, **kwargs)
        self._extenders.append((extender, in_batch))

    def prepare_data_for_neural_net(self, image_and_meta):
        """Turn an (image, gt_boxes, meta_image) triple into a dict of
        name -> (data, in_batch_strategy), including extender outputs."""
        image = image_and_meta[0]
        gt_boxes = image_and_meta[1]
        meta_image = image_and_meta[2]
        output_dict = {'image': (image, InBatchKeys.vstack),
                       'gt_boxes': (gt_boxes, InBatchKeys.hstack),
                       'meta_image': (meta_image, InBatchKeys.list)
                       }
        for extender, in_batch in self._extenders:
            name, data = extender(image, gt_boxes, meta_image)
            if name is None or data is None:
                continue
            if not isinstance(name, (list, tuple)) and not isinstance(data, (list, tuple)):
                name = (name,)
                data = (data,)
            assert len(name) == len(data), 'Names must match data got %d names and %d data' % (len(name), len(data))
            for i in range(len(name)):
                output_dict.update({name[i]: (data[i], in_batch)})
        return output_dict

    def batcher_function(self, batch_slice):
        """Merge a list of per-sample dicts into one batch dict, applying each
        field's InBatchKeys strategy (list / vstack / hstack-with-batch-id)."""
        if not isinstance(batch_slice, (list, np.ndarray)):
            batch_slice = [batch_slice]
        batch_dict = {}
        in_batch_treatment = {}
        # batch_slice is an output_dict from prepare_data_for_neural_net
        for j, s in enumerate(batch_slice):
            for k, v in s.items():
                data, in_batch = v
                in_batch_treatment[k] = in_batch
                dlist = batch_dict.get(k, [])
                if in_batch == InBatchKeys.vstack:
                    data = data[np.newaxis, :]
                if in_batch == InBatchKeys.hstack:
                    # Assumed we treat something like bboxes or phocs for hstack -> (n, d) numpy arrays
                    # Assume we add a batch_id dim, (n, m) -> (n, m+1) where 0 dim is batch id
                    data = np.hstack((np.ones((data.shape[0], 1)) * j, data))
                dlist.append(data)
                batch_dict[k] = dlist
        for k, v in in_batch_treatment.items():
            raw_data = batch_dict[k]
            if in_batch_treatment[k] == InBatchKeys.list:
                # It's already a list. do nothing
                data = raw_data
            if in_batch_treatment[k] == InBatchKeys.vstack:
                data = np.vstack(raw_data)
            if in_batch_treatment[k] == InBatchKeys.hstack:
                # Not a mistake. We stacking it after added batch id dim in first loop above
                data = np.vstack(raw_data)
            batch_dict[k] = data
        return batch_dict

    def pull_data(self):
        """Block until a finished batch dict is available and return it.

        :raises PipelineError: if called before run()
        """
        if not self._ready:
            raise PipelineError('use run() before pulling data from pipe')
        data = self.batch_queue.get(block=True, timeout=None)
        return data

    @staticmethod
    def image_resize(image, boxes, target_y, target_x, debug=False):
        """Aspect-preserving resize to fit (target_x, target_y), then pad
        right/bottom with border replication; boxes are scaled by the same
        factor. NOTE(review): duplicates the module-level image_resize()."""
        if float(image.shape[0]) / float(image.shape[1]) < target_y / target_x:
            f = float(target_x) / image.shape[1]
            dsize = (target_x, int(image.shape[0] * f))
        else:
            f = float(target_y) / image.shape[0]
            dsize = (int(image.shape[1] * f), target_y)
        image = cv2.resize(image, dsize=dsize)
        scaled_boxes = boxes * np.atleast_2d(np.array([f, f, f, f]))
        resized_image = cv2.copyMakeBorder(image,
                                           top=0,
                                           left=0,
                                           right=target_x - image.shape[1],
                                           bottom=target_y - image.shape[0],
                                           borderType=cv2.BORDER_REPLICATE)
        if debug:
            pass
        return resized_image, scaled_boxes

    def get_relative_points(self, fmap_w, fmap_h, as_batch=False):
        """
        Feature map relative points (centers of each feature pixel)

        Returns an (fmap_w*fmap_h, 2) float32-derived array of pixel-center
        coordinates normalized to [0, 1]; with as_batch a leading axis is added.
        """
        sh_x, sh_y = np.meshgrid(np.arange(fmap_w), np.arange(fmap_h))
        pts = np.vstack((sh_x.ravel(), sh_y.ravel())).transpose()
        cntr_pts = pts + np.array([0.5] * 2, np.float32)[np.newaxis, :]
        relative_pts = cntr_pts / np.array([fmap_w, fmap_h], np.float32)[np.newaxis, :]
        if as_batch:
            relative_pts = relative_pts[np.newaxis, :, :]
        return relative_pts
class FolderLoader(object):
    """ Loads images (resizes if needed) and their names from folder"""
    # BUG FIX: Path.suffix always includes the leading dot, so the old
    # dotless entries ('jpg', 'png', 'tif') could never match -- only '.jpg'
    # files were ever yielded.
    _supported_image_formats = ['.jpg', '.png', '.tif']

    def __init__(self, folder, target_size=None):
        """
        :param folder: directory scanned (non-recursively) for image files
        :param target_size: optional (x, y); every image is letterboxed to it
        """
        if target_size is not None:
            assert all([isinstance(target_size, (list, tuple)),
                        len(target_size) == 2]), \
                "target size must be list or tuple of the format (x,y)"
        self._target_size = target_size
        self._p = Path(folder)

    def _resize(self, image):
        """Resize the loaded image to a target size"""
        return image_resize(image=image, target_x=self._target_size[0], target_y=self._target_size[1])

    def _load(self, adress):
        """Load the image at ``adress``; return None when it cannot be read."""
        # BUG FIX: cv2.imread does not raise on unreadable files -- it returns
        # None. The old bare `except:` masked real errors while letting None
        # flow into _resize() and crash there.
        img = cv2.imread(adress)
        if img is None:
            return None
        return self._resize(image=img) if self._target_size is not None else img

    def generator(self):
        """Yield (image, file-stem) for every supported image in the folder."""
        for f in self._p.glob('*.*'):
            if f.is_file() and f.suffix in self._supported_image_formats:
                img = self._load(adress=str(f))
                name = f.stem
                if img is not None:
                    yield img, name
def image_resize(image, boxes=None, target_y=None, target_x=None, debug=False):
    """Letterbox-resize ``image`` to (target_x, target_y), keeping aspect ratio.

    The image is scaled to fit inside the target rectangle, then padded on the
    right/bottom with border replication up to the exact target size.

    :param image: HxWxC numpy image (OpenCV convention).
    :param boxes: optional (n, 4) array of [x1, y1, x2, y2]; scaled by the
        same factor as the image.
    :param target_y: target height in pixels; if it or target_x is None the
        input is returned unchanged as (image, boxes).
    :param target_x: target width in pixels.
    :param debug: unused; kept for backward compatibility.
    :return: resized image, or (resized image, scaled boxes) when ``boxes``
        is given.
    """
    if target_y is None or target_x is None:
        return image, boxes
    # Compare aspect ratios to decide which side limits the scale factor.
    if float(image.shape[0]) / float(image.shape[1]) < target_y / target_x:
        f = float(target_x) / image.shape[1]
        dsize = (target_x, int(image.shape[0] * f))
    else:
        f = float(target_y) / image.shape[0]
        dsize = (int(image.shape[1] * f), target_y)
    image = cv2.resize(image, dsize=dsize)
    # Pad to the exact target size by replicating border pixels.
    resized_image = cv2.copyMakeBorder(image,
                                       top=0,
                                       left=0,
                                       right=target_x - image.shape[1],
                                       bottom=target_y - image.shape[0],
                                       borderType=cv2.BORDER_REPLICATE)
    if boxes is not None:
        scaled_boxes = boxes * np.atleast_2d(np.array([f, f, f, f]))
        return resized_image, scaled_boxes
    return resized_image
if __name__ == '__main__':
    # Smoke test: wire the IAM handwriting dataset through the pipeline with a
    # PHOC-embedding extender and pull a single batch.
    from data.iamdb import IamDataset
    from data.data_extenders import phoc_embedding
    DATA_DIR = 'datasets/iamdb'
    data = IamDataset(DATA_DIR)
    data.run()
    it = data.get_iterator(infinite=True)
    pipe = PipelineBase(it, batch_size=1, fmap_x=112, fmap_y=150, trim=0.2, target_x=900, target_y=1200)
    # 'phocs' rows get a batch-id column prepended (hstack strategy).
    pipe.add_extender(('phocs', 'tf_gt_boxes'), phoc_embedding, in_batch='hstack')
    pipe.run(1)
    x = pipe.pull_data()
| [
"data.iamdb.IamDataset",
"numpy.ones",
"cv2.copyMakeBorder",
"queue.Queue",
"numpy.array",
"pathlib2.Path",
"functools.partial",
"numpy.vstack",
"cv2.resize",
"cv2.imread",
"numpy.arange"
] | [((14266, 14296), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': 'dsize'}), '(image, dsize=dsize)\n', (14276, 14296), False, 'import cv2\n'), ((14318, 14462), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image'], {'top': '(0)', 'left': '(0)', 'right': '(target_x - image.shape[1])', 'bottom': '(target_y - image.shape[0])', 'borderType': 'cv2.BORDER_REPLICATE'}), '(image, top=0, left=0, right=target_x - image.shape[1],\n bottom=target_y - image.shape[0], borderType=cv2.BORDER_REPLICATE)\n', (14336, 14462), False, 'import cv2\n'), ((15008, 15028), 'data.iamdb.IamDataset', 'IamDataset', (['DATA_DIR'], {}), '(DATA_DIR)\n', (15018, 15028), False, 'from data.iamdb import IamDataset\n'), ((3147, 3188), 'queue.Queue', 'Queue', ([], {'maxsize': '(self.PERMUTATION_BATCH + 1)'}), '(maxsize=self.PERMUTATION_BATCH + 1)\n', (3152, 3188), False, 'from queue import Queue\n'), ((3223, 3282), 'queue.Queue', 'Queue', ([], {'maxsize': '(self.IMAGE_PROC_QUEUE_SIZE * self.batch_size)'}), '(maxsize=self.IMAGE_PROC_QUEUE_SIZE * self.batch_size)\n', (3228, 3282), False, 'from queue import Queue\n'), ((3319, 3378), 'queue.Queue', 'Queue', ([], {'maxsize': '(self.IMAGE_PROC_QUEUE_SIZE * self.batch_size)'}), '(maxsize=self.IMAGE_PROC_QUEUE_SIZE * self.batch_size)\n', (3324, 3378), False, 'from queue import Queue\n'), ((3408, 3438), 'queue.Queue', 'Queue', ([], {'maxsize': 'self.batch_size'}), '(maxsize=self.batch_size)\n', (3413, 3438), False, 'from queue import Queue\n'), ((7858, 7890), 'numpy.array', 'np.array', (['[output]'], {'dtype': 'object'}), '([output], dtype=object)\n', (7866, 7890), True, 'import numpy as np\n'), ((8149, 8194), 'functools.partial', 'partial', (['extender_func'], {'names': 'names'}), '(extender_func, names=names, **kwargs)\n', (8156, 8194), False, 'from functools import partial\n'), ((11423, 11453), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': 'dsize'}), '(image, dsize=dsize)\n', (11433, 11453), False, 'import cv2\n'), ((11549, 11693), 
'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image'], {'top': '(0)', 'left': '(0)', 'right': '(target_x - image.shape[1])', 'bottom': '(target_y - image.shape[0])', 'borderType': 'cv2.BORDER_REPLICATE'}), '(image, top=0, left=0, right=target_x - image.shape[1],\n bottom=target_y - image.shape[0], borderType=cv2.BORDER_REPLICATE)\n', (11567, 11693), False, 'import cv2\n'), ((13049, 13061), 'pathlib2.Path', 'Path', (['folder'], {}), '(folder)\n', (13053, 13061), False, 'from pathlib2 import Path\n'), ((12177, 12194), 'numpy.arange', 'np.arange', (['fmap_w'], {}), '(fmap_w)\n', (12186, 12194), True, 'import numpy as np\n'), ((12196, 12213), 'numpy.arange', 'np.arange', (['fmap_h'], {}), '(fmap_h)\n', (12205, 12213), True, 'import numpy as np\n'), ((13337, 13355), 'cv2.imread', 'cv2.imread', (['adress'], {}), '(adress)\n', (13347, 13355), False, 'import cv2\n'), ((10524, 10543), 'numpy.vstack', 'np.vstack', (['raw_data'], {}), '(raw_data)\n', (10533, 10543), True, 'import numpy as np\n'), ((10720, 10739), 'numpy.vstack', 'np.vstack', (['raw_data'], {}), '(raw_data)\n', (10729, 10739), True, 'import numpy as np\n'), ((11500, 11522), 'numpy.array', 'np.array', (['[f, f, f, f]'], {}), '([f, f, f, f])\n', (11508, 11522), True, 'import numpy as np\n'), ((12306, 12337), 'numpy.array', 'np.array', (['([0.5] * 2)', 'np.float32'], {}), '([0.5] * 2, np.float32)\n', (12314, 12337), True, 'import numpy as np\n'), ((12387, 12425), 'numpy.array', 'np.array', (['[fmap_w, fmap_h]', 'np.float32'], {}), '([fmap_w, fmap_h], np.float32)\n', (12395, 12425), True, 'import numpy as np\n'), ((14753, 14775), 'numpy.array', 'np.array', (['[f, f, f, f]'], {}), '([f, f, f, f])\n', (14761, 14775), True, 'import numpy as np\n'), ((10101, 10128), 'numpy.ones', 'np.ones', (['(data.shape[0], 1)'], {}), '((data.shape[0], 1))\n', (10108, 10128), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# coding=utf8
# Date:2021/05/04
# Author:Aiden
import sys
import cv2
import math
import time
import rospy
import threading
import numpy as np
from threading import Timer
from std_msgs.msg import *
from std_srvs.srv import *
from sensor_msgs.msg import Image
from sensor.msg import Led
from warehouse.srv import *
from warehouse.msg import Grasp
from hiwonder_servo_msgs.msg import MultiRawIdPosDur
from kinematics import ik_transform
from armpi_fpv import PID
from armpi_fpv import Misc
from armpi_fpv import apriltag
from armpi_fpv import bus_servo_control
# Warehouse put-away (stocking) routine
# Unless stated otherwise, all lengths and distances are in meters (m)
# Detection/approach tuning constants (distances in m unless noted otherwise).
d_tag_map = 0
tag_z_min = 0.01       # grasp height range when picking by AprilTag
tag_z_max = 0.015
d_color_map = 30
color_z_min = 0.01     # grasp height range when picking by color
color_z_max = 0.015
d_color_y = 20
color_y_adjust = 400
center_x = 340         # image x-coordinate treated as the gripper center (px)
__isRunning = False    # global run flag toggled by the app start/stop services
lock = threading.RLock()
ik = ik_transform.ArmIK()  # inverse-kinematics solver for the arm
# Binary masks used by getROI() to blank out image regions outside the work area.
mask1 = cv2.imread('/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask1.jpg', 0)
mask2 = cv2.imread('/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask2.jpg', 0)
rows, cols = mask1.shape
# BGR drawing colors (OpenCV convention).
range_rgb = {
    'red': (0, 0, 255),
    'blue': (255, 0, 0),
    'green': (0, 255, 0),
    'black': (0, 0, 0),
    'white': (255, 255, 255),
}
# Find the contour with the largest area and return it with that area.
# Argument is the list of contours to compare.
def getAreaMaxContour(contours):
    """Return (largest_contour, largest_area).

    The contour is reported as None unless its area exceeds 100, which
    filters out small noise detections; the area itself is always the
    maximum found.
    """
    best_contour = None
    best_area = 0
    for contour in contours:
        area = math.fabs(cv2.contourArea(contour))
        if area > best_area:
            best_area = area
            # Only contours above the noise threshold are considered valid.
            if area > 100:
                best_contour = contour
    return best_contour, best_area
# Move the arm to its initial (home) pose.
def initMove(delay=True):
    """Drive all six servos to the home position; optionally wait 2 s for
    the motion to complete."""
    with lock:
        bus_servo_control.set_servos(joints_pub, 1500, ((1, 75), (2, 500), (3, 80), (4, 825), (5, 625), (6, 500)))
    if delay:
        rospy.sleep(2)
# Turn off the RGB LEDs.
def turn_off_rgb():
    """Publish black (0,0,0) to both LED indices 0 and 1."""
    led = Led()
    led.index = 0
    led.rgb.r = 0
    led.rgb.g = 0
    led.rgb.b = 0
    rgb_pub.publish(led)
    # Reuse the same message for the second LED.
    led.index = 1
    rgb_pub.publish(led)
# --- Mutable tracking state shared across the vision/motion threads. ---
x_dis = 500          # pan servo (no. 6) position for color tracking
Y_DIS = 0
y_dis = Y_DIS        # vertical tracking offset for color tracking
last_x_dis = x_dis
# BUG FIX: this line previously read `last_x_dis = y_dis`, clobbering
# last_x_dis and leaving last_y_dis undefined until the first reset() call
# (reset() declares `global last_x_dis, last_y_dis` and assigns both).
last_y_dis = y_dis
x_pid = PID.PID(P=0.01, I=0.001, D=0)  # PID initialisation (color tracking)
y_pid = PID.PID(P=0.00001, I=0, D=0)
tag_x_dis = 500
tag_y_dis = 0
tag_x_pid = PID.PID(P=0.01, I=0.001, D=0)  # PID initialisation (tag tracking)
tag_y_pid = PID.PID(P=0.02, I=0, D=0)
stop_state = 0
move_state = 1       # state machine step used by move(): 1=pick, 2=re-pick, 3=place
adjust = False
approach = False
rotation_angle = 0
start_move = False
adjust_error = False
last_X, last_Y = 0, 0
box_rotation_angle = 0
last_box_rotation_angle = 0
# [name, center_x, center_y, angle] per AprilTag; -1 means "not seen".
# NOTE(review): initialised with a trailing 5th element that detection
# results do not carry -- confirm whether it is ever read.
tag1 = ['tag1', -1, -1, -1, 0]
tag2 = ['tag2', -1, -1, -1, 0]
tag3 = ['tag3', -1, -1, -1, 0]
current_tag = ['tag1', 'tag2', 'tag3']
detect_color = ('red', 'green', 'blue')
count = 0
count2 = 0
count3 = 0
count_d = 0
count_timeout = 0
count_tag_timeout = 0
count_adjust_timeout = 0
# Reset all shared tracking/state variables to their defaults.
def reset():
    """Restore every tracking variable, PID controller and LED to the initial
    state. Thread-safe via the module lock."""
    global X, Y
    global adjust
    global approach
    global move_state
    global start_move
    global current_tag
    global detect_color
    global x_dis, y_dis
    global adjust_error
    global last_X, last_Y
    global tag1, tag2, tag3
    global box_rotation_angle
    global tag_x_dis, tag_y_dis
    global last_x_dis, last_y_dis
    global rotation_angle, last_box_rotation_angle
    global count, count2, count3, count_timeout, count_adjust_timeout, count_d, count_tag_timeout
    with lock:
        X = 0
        Y = 0
        x_dis = 500
        y_dis = Y_DIS
        tag_x_dis = 500
        tag_y_dis = 0
        x_pid.clear()
        y_pid.clear()
        tag_x_pid.clear()
        tag_y_pid.clear()
        last_x_dis = x_dis
        last_y_dis = y_dis
        adjust = False
        approach = False
        start_move = False
        adjust_error = False
        move_state = 1
        turn_off_rgb()
        rotation_angle = 0
        box_rotation_angle = 0
        last_box_rotation_angle = 0
        count = 0
        count2 = 0
        count3 = 0
        count_d = 0
        count_timeout = 0
        count_tag_timeout = 0
        count_adjust_timeout = 0
        tag1 = ['tag1', -1, -1, -1, 0]
        tag2 = ['tag2', -1, -1, -1, 0]
        tag3 = ['tag3', -1, -1, -1, 0]
        current_tag = ['tag1', 'tag2', 'tag3']
        detect_color = ('red', 'green', 'blue')
color_range = None  # LAB color thresholds, filled in by init()
# Called by the app on initialisation.
def init():
    """Load color thresholds from the ROS parameter server, clear the target
    table and move the arm home."""
    global stop_state
    global color_range
    global __target_data
    rospy.loginfo("in Init")
    # Fetch LAB color thresholds.
    color_range = rospy.get_param('/lab_config_manager/color_range_list', {})  # get lab range from ros param server
    stop_state = 0
    __target_data = ((), ())
    initMove()
    reset()
y_d = 0
roll_angle = 0
gripper_rotation = 0
# Half of the cube's diagonal length (0.03 m edge).
square_diagonal = 0.03*math.sin(math.pi/4)
# Servo units per degree (1000 pulse units over the 240-degree servo range).
F = 1000/240.0
# Grasp sequence.
def pick(grasps, have_adjust=False):
    """Execute the grasp motion described by ``grasps`` (a Grasp message).

    First pass (have_adjust=False) moves to the approach and target poses and,
    in color mode, requests a fine adjustment (returns True with adjust set).
    Second pass (have_adjust=True) aligns the gripper, closes it, lifts and
    retreats, returning the achieved pitch (target2[2]) on success.
    Returns False when any target pose is unreachable or running is stopped.
    NOTE(review): when state == 'color' and adjust is already True the first
    pass falls through and implicitly returns None -- confirm callers treat
    that as falsy on purpose.
    """
    global roll_angle, last_x_dis
    global adjust, x_dis, y_dis, tag_x_dis, tag_y_dis, adjust_error, gripper_rotation
    position = grasps.grasp_pos.position
    rotation = grasps.grasp_pos.rotation
    approach = grasps.grasp_approach
    retreat = grasps.grasp_retreat
    # Check whether the target poses are reachable; if not, return False.
    target1 = ik.setPitchRanges((position.x + approach.x, position.y + approach.y, position.z + approach.z), rotation.r, -180, 0)
    target2 = ik.setPitchRanges((position.x, position.y, position.z), rotation.r, -180, 0)
    target3 = ik.setPitchRanges((position.x, position.y, position.z + grasps.up), rotation.r, -180, 0)
    target4 = ik.setPitchRanges((position.x + retreat.x, position.y + retreat.y, position.z + retreat.z), rotation.r, -180, 0)
    if not __isRunning:
        return False
    if target1 and target2 and target3 and target4:
        if not have_adjust:
            servo_data = target1[1]
            bus_servo_control.set_servos(joints_pub, 1800, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
            rospy.sleep(2)
            if not __isRunning:
                return False
            # Step 3: move to the target point.
            servo_data = target2[1]
            bus_servo_control.set_servos(joints_pub, 1500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
            rospy.sleep(2)
            if not __isRunning:
                servo_data = target4[1]
                bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                rospy.sleep(1)
                return False
            roll_angle = target2[2]
            gripper_rotation = box_rotation_angle
            x_dis = tag_x_dis = last_x_dis = target2[1]['servo6']
            y_dis = tag_y_dis =0
            if state == 'color':
                # Step 4: fine-tune the position.
                if not adjust:
                    adjust = True
                    return True
            else:
                return True
        else:
            # Step 5: align the gripper with the box rotation.
            bus_servo_control.set_servos(joints_pub, 500, ((2, 500 + int(F*gripper_rotation)), ))
            rospy.sleep(0.8)
            if not __isRunning:
                servo_data = target4[1]
                bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                rospy.sleep(1)
                return False
            # Step 6: close the gripper (slightly open first, then clamp).
            bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.grasp_posture - 80), ))
            rospy.sleep(0.8)
            bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.grasp_posture), ))
            rospy.sleep(0.8)
            if not __isRunning:
                bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
                rospy.sleep(0.5)
                servo_data = target4[1]
                bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                rospy.sleep(1)
                return False
            # Step 7: lift the object.
            if grasps.up != 0:
                servo_data = target3[1]
                bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                rospy.sleep(0.6)
                if not __isRunning:
                    bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
                    rospy.sleep(0.5)
                    servo_data = target4[1]
                    bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                    rospy.sleep(1)
                    return False
            # Step 8: move to the retreat point.
            servo_data = target4[1]
            if servo_data != target3[1]:
                bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))
                rospy.sleep(0.5)
                if not __isRunning:
                    bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
                    rospy.sleep(0.5)
                    return False
            # Step 9: move to the stable (rest) pose.
            servo_data = target1[1]
            bus_servo_control.set_servos(joints_pub, 1000, ((2, 500), (3, 80), (4, 825), (5, 625)))
            rospy.sleep(1)
            if not __isRunning:
                bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
                rospy.sleep(0.5)
                return False
            return target2[2]
    else:
        rospy.loginfo('pick failed')
        return False
def place(places):
    """Execute the place motion described by ``places`` (a Grasp message):
    turn toward the shelf slot, approach, release and retreat.

    Returns True on success, False when a pose is unreachable or running is
    stopped mid-sequence.
    """
    position = places.grasp_pos.position
    rotation = places.grasp_pos.rotation
    approach = places.grasp_approach
    retreat = places.grasp_retreat
    # Check whether the target poses are reachable; if not, return False.
    target1 = ik.setPitchRanges((position.x + approach.x, position.y + approach.y, position.z + approach.z), rotation.r, -180, 0)
    target2 = ik.setPitchRanges((position.x, position.y, position.z), rotation.r, -180, 0)
    target3 = ik.setPitchRanges((position.x, position.y, position.z + places.up), rotation.r, -180, 0)
    target4 = ik.setPitchRanges((position.x + retreat.x, position.y + retreat.y, position.z + retreat.z), rotation.r, -180, 0)
    if not __isRunning:
        return False
    if target1 and target2 and target3 and target4:
        # Step 1: rotate the pan servo toward the target direction.
        servo_data = target1[1]
        bus_servo_control.set_servos(joints_pub, 800, ((1, places.pre_grasp_posture), (2, int(F*rotation.y)), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
        rospy.sleep(0.8)
        if not __isRunning:
            bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
            rospy.sleep(0.5)
            return False
        # Step 2: move to the approach point.
        bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
        rospy.sleep(0.5)
        if not __isRunning:
            bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
            rospy.sleep(0.5)
            return False
        # Step 3: move to the target point.
        servo_data = target2[1]
        bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
        rospy.sleep(1)
        if not __isRunning:
            bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
            rospy.sleep(0.5)
            servo_data = target4[1]
            bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
            rospy.sleep(1)
            return False
        # Step 4: lift.
        if places.up != 0:
            servo_data = target3[1]
            bus_servo_control.set_servos(joints_pub, 400, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
            rospy.sleep(0.5)
            if not __isRunning:
                bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
                rospy.sleep(0.5)
                servo_data = target4[1]
                bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
                rospy.sleep(1)
                return False
        # Step 5: release the object (open gripper slightly, then fully).
        bus_servo_control.set_servos(joints_pub, 100, ((1, places.pre_grasp_posture - 20), ))
        rospy.sleep(0.2)
        bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
        rospy.sleep(1)
        if not __isRunning:
            servo_data = target4[1]
            bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
            rospy.sleep(1)
            return False
        # Step 6: move to the retreat point.
        servo_data = target4[1]
        if servo_data != target3[1]:
            bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
            rospy.sleep(0.5)
            if not __isRunning:
                return False
        # Step 7: move to the stable (rest) pose.
        servo_data = target1[1]
        bus_servo_control.set_servos(joints_pub, 500, ((2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
        rospy.sleep(0.5)
        if not __isRunning:
            return False
        return True
    else:
        rospy.loginfo('place failed')
        return False
###########################################
# Shelf slot positions x, y, z (m) for each level (R=right, L=left columns).
shelf_position = {'R1':[0.275, 0, 0.02],
                  'R2':[0.275, 0, 0.12],
                  'R3':[0.275, 0, 0.21],
                  'L1':[-0.275, 0, 0.02],
                  'L2':[-0.275, 0, 0.12],
                  'L3':[-0.275, 0, 0.21]}
###########################################
# Pitch angle used when placing onto each shelf level.
roll_dict = {'R1': -130,
             'R2': -120,
             'R3': -90,
             'L1': -130,
             'L2': -120,
             'L3': -90}
grasps = Grasp()  # shared Grasp message filled in by move()
def move():
    """Background motion loop (runs in a daemon thread).

    State machine driven by ``move_state``:
      1 -> fill the shared Grasp and do the first pick() pass,
      2 -> second pick() pass with fine adjustment,
      3 -> look up the shelf slot for the detected color/tag and place().
    NOTE(review): in state 3, ``result`` is only defined if a previous
    iteration ran state 1 or 2 -- confirm the state sequence guarantees that.
    NOTE(review): ``current_tag`` is compared against strings here but
    reset() initialises it to a list -- verify who assigns the string form.
    """
    global y_d
    global grasps
    global approach
    global x_adjust
    global move_state
    while True:
        if __isRunning:
            if approach:
                position = None
                approach = True
                if not adjust and move_state == 1:
                    # Grasp position.
                    grasps.grasp_pos.position.x = X
                    grasps.grasp_pos.position.y = Y
                    if state == 'color':
                        grasps.grasp_pos.position.z = Misc.map(Y - 0.15, 0, 0.15, color_z_min, color_z_max)
                    else:
                        grasps.grasp_pos.position.z = Misc.map(Y - 0.12, 0, 0.15, tag_z_min, tag_z_max)
                    # Pitch angle while grasping.
                    grasps.grasp_pos.rotation.r = -175
                    # Lift distance after grasping.
                    grasps.up = 0
                    # Approach direction and distance.
                    grasps.grasp_approach.y = -0.01
                    grasps.grasp_approach.z = 0.02
                    # Retreat direction and distance.
                    grasps.grasp_retreat.z = 0.04
                    # Gripper opening before/after the grasp.
                    grasps.grasp_posture = 450
                    grasps.pre_grasp_posture = 75
                    buzzer_pub.publish(0.1)
                    result = pick(grasps)
                    if result:
                        move_state = 2
                    else:
                        initMove(delay=False)
                        reset()
                elif not adjust and move_state == 2:
                    result = pick(grasps, have_adjust=True)
                    if not result:
                        initMove(delay=False)
                        reset()
                    move_state = 3
                elif not adjust and move_state == 3:
                    if result:
                        # Resolve the shelf slot and pitch from the app's target table.
                        if state == 'color':
                            if pick_color == 'red':
                                position = shelf_position[__target_data['red']]
                                roll = roll_dict[__target_data['red']]
                            elif pick_color == 'green':
                                position = shelf_position[__target_data['green']]
                                roll = roll_dict[__target_data['green']]
                            elif pick_color== 'blue':
                                position = shelf_position[__target_data['blue']]
                                roll = roll_dict[__target_data['blue']]
                        elif state == 'tag':
                            if current_tag == 'tag1':
                                position = shelf_position[__target_data['tag1']]
                                roll = roll_dict[__target_data['tag1']]
                            elif current_tag == 'tag2':
                                position = shelf_position[__target_data['tag2']]
                                roll = roll_dict[__target_data['tag2']]
                            elif current_tag == 'tag3':
                                position = shelf_position[__target_data['tag3']]
                                roll = roll_dict[__target_data['tag3']]
                        if position is not None:
                            # Approach from the side facing the arm.
                            if position[0] > 0:
                                approach_x = -0.07
                            else:
                                approach_x = 0.07
                            places = Grasp()
                            places.grasp_pos.position.x = position[0]
                            places.grasp_pos.position.y = position[1]
                            places.grasp_pos.position.z = position[2]
                            places.grasp_pos.rotation.r = roll
                            places.grasp_pos.rotation.y = 120
                            places.up = 0
                            places.grasp_approach.x = approach_x
                            places.grasp_approach.z = 0.04
                            places.grasp_retreat.x = approach_x
                            places.grasp_retreat.z = 0.02
                            places.grasp_posture = 75
                            places.pre_grasp_posture = 450
                            place(places)
                            initMove(delay=False)
                            reset()
                else:
                    rospy.sleep(0.001)
            else:
                rospy.sleep(0.01)
        else:
            rospy.sleep(0.01)
th = threading.Thread(target=move)
th.setDaemon(True)  # NOTE(review): deprecated alias; prefer th.daemon = True
th.start()
# Detect AprilTags.
detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
def apriltagDetect(img):
    """Detect AprilTags 1-3 in ``img``, draw them, and update the module-level
    tag1/tag2/tag3 lists as [name, center_x, center_y, angle].

    NOTE(review): the reset value has a 5th element (0) that detection
    results do not carry -- confirm nothing indexes position 4.
    """
    global tag1, tag2, tag3
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detections = detector.detect(gray, return_image=False)
    tag1 = ['tag1', -1, -1, -1, 0]
    tag2 = ['tag2', -1, -1, -1, 0]
    tag3 = ['tag3', -1, -1, -1, 0]
    if len(detections) != 0:
        for i, detection in enumerate(detections):
            corners = np.rint(detection.corners)  # the four corner points
            # NOTE(review): np.int is removed in NumPy >= 1.24; needs np.int32 there.
            cv2.drawContours(img, [np.array(corners, np.int)], -1, (0, 255, 255), 2)
            tag_family = str(detection.tag_family, encoding='utf-8')  # tag family
            tag_id = int(detection.tag_id)  # tag id
            object_center_x, object_center_y = int(detection.center[0]), int(detection.center[1])  # center point
            object_angle = int(math.degrees(math.atan2(corners[0][1] - corners[1][1], corners[0][0] - corners[1][0])))  # rotation angle from the first edge
            cv2.putText(img, str(tag_id), (object_center_x - 10, object_center_y + 10), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 255], 2)
            if tag_id == 1:
                tag1 = ['tag1', object_center_x, object_center_y, object_angle]
            elif tag_id == 2:
                tag2 = ['tag2', object_center_x, object_center_y, object_angle]
            elif tag_id == 3:
                tag3 = ['tag3', object_center_x, object_center_y, object_angle]
# Build a region-of-interest mask to suppress interference outside the work area.
def getROI(rotation_angle):
    """Rotate mask2 by ``rotation_angle``, intersect with mask1, rotate back,
    and return the cropped 240x320 ROI mask."""
    rotate1 = cv2.getRotationMatrix2D((rows*0.5, cols*0.5), int(rotation_angle), 1)
    rotate_rotate1 = cv2.warpAffine(mask2, rotate1, (cols, rows))
    mask_and = cv2.bitwise_and(rotate_rotate1, mask1)
    rotate2 = cv2.getRotationMatrix2D((rows*0.5, cols*0.5), int(-rotation_angle), 1)
    rotate_rotate2 = cv2.warpAffine(mask_and, rotate2, (cols, rows))
    frame_resize = cv2.resize(rotate_rotate2, (710, 710), interpolation=cv2.INTER_NEAREST)
    # Crop to the processing resolution used by color_sort (320x240 frame).
    roi = frame_resize[40:280, 184:504]
    return roi
size = (320, 240)   # processing resolution for the detection pipeline (w, h)
last_x = 0
last_y = 0
state = None        # detection mode selector ('color' or 'tag'), per usage in pick()/move()
x_adjust = 0
pick_color = ''     # color currently being picked
# Color-based grasping strategy.
def color_sort(img, target):
global X, Y
global count
global state
global adjust
global approach
global x_adjust
global pick_color
global current_tag
global adjust_error
global x_dis, y_dis
global detect_color
global count_timeout
global rotation_angle
global box_rotation_angle
global last_x_dis, last_y_dis
global last_box_rotation_angle
global last_x, last_y, count_d, start_move
img_copy = img.copy()
img_h, img_w = img.shape[:2]
frame_resize = cv2.resize(img_copy, size, interpolation=cv2.INTER_NEAREST)
frame_gray = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2GRAY)
frame_lab = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2LAB) # 将图像转换到LAB空间
max_area = 0
color_area_max = None
areaMaxContour_max = 0
roi = getROI(rotation_angle)
for i in color_range:
if i in target:
if i in detect_color:
target_color_range = color_range[i]
frame_mask1 = cv2.inRange(frame_lab, tuple(target_color_range['min']), tuple(target_color_range['max'])) # 对原图像和掩模进行位运算
#mask = cv2.bitwise_and(roi, frame_gray)
frame_mask2 = cv2.bitwise_and(roi, frame_mask1)
eroded = cv2.erode(frame_mask2, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))) #腐蚀
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))) #膨胀
#cv2.imshow('mask', dilated)
#cv2.waitKey(1)
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2] # 找出轮廓
areaMaxContour, area_max = getAreaMaxContour(contours) # 找出最大轮廓
if areaMaxContour is not None:
if area_max > max_area and area_max > 100:#找最大面积
max_area = area_max
color_area_max = i
areaMaxContour_max = areaMaxContour
if max_area > 100: # 有找到最大面积
rect = cv2.minAreaRect(areaMaxContour_max)
box_rotation_angle = rect[2]
if box_rotation_angle > 45:
box_rotation_angle = box_rotation_angle - 90
box = np.int0(cv2.boxPoints(rect))
for j in range(4): # 映射到原图大小
box[j, 0] = int(Misc.map(box[j, 0], 0, size[0], 0, img_w))
box[j, 1] = int(Misc.map(box[j, 1], 0, size[1], 0, img_h))
cv2.drawContours(img, [box], -1, range_rgb[color_area_max], 2)
centerX = int(Misc.map(((areaMaxContour_max[areaMaxContour_max[:,:,0].argmin()][0])[0] + (areaMaxContour_max[areaMaxContour_max[:,:,0].argmax()][0])[0])/2, 0, size[0], 0, img_w))
centerY = int(Misc.map((areaMaxContour_max[areaMaxContour_max[:,:,1].argmin()][0])[1], 0, size[1], 0, img_h))
#cv2.circle(img, (int(centerX), int(centerY)), 5, range_rgb[color_area_max], -1)
if abs(centerX - last_x) <= 5 and abs(centerY - last_y) <= 5 and not start_move:
count_d += 1
if count_d > 5:
count_d = 0
start_move = True
led = Led()
led.index = 0
led.rgb.r = range_rgb[color_area_max][2]
led.rgb.g = range_rgb[color_area_max][1]
led.rgb.b = range_rgb[color_area_max][0]
rgb_pub.publish(led)
led.index = 1
rgb_pub.publish(led)
rospy.sleep(0.1)
# 位置映射
if 298 + d_color_map < centerY <= 424 + d_color_map:
Y = Misc.map(centerY, 298 + d_color_map, 424 + d_color_map, 0.12, 0.12 - 0.04)
elif 198 + d_color_map < centerY <= 298 + d_color_map:
Y = Misc.map(centerY, 198 + d_color_map, 298 + d_color_map, 0.12 + 0.04, 0.12)
elif 114 + d_color_map < centerY <= 198 + d_color_map:
Y = Misc.map(centerY, 114 + d_color_map, 198 + d_color_map, 0.12 + 0.08, 0.12 + 0.04)
elif 50 + d_color_map < centerY <= 114 + d_color_map:
Y = Misc.map(centerY, 50 + d_color_map, 114 + d_color_map, 0.12 + 0.12, 0.12 + 0.08)
elif 0 + d_color_map < centerY <= 50 + d_color_map:
Y = Misc.map(centerY, 0 + d_color_map, 50 + d_color_map, 0.12 + 0.16, 0.12 + 0.12)
else:
Y = 1
else:
count_d = 0
last_x = centerX
last_y = centerY
if (not approach or adjust) and start_move: # pid调节
detect_color = (color_area_max, )
x_pid.SetPoint = center_x #设定
x_pid.update(centerX) #当前
dx = x_pid.output
x_dis += dx #输出
x_dis = 0 if x_dis < 0 else x_dis
x_dis = 1000 if x_dis > 1000 else x_dis
if adjust:
y_pid.SetPoint = color_y_adjust
start_move = True
centerY += abs(Misc.map(70*math.sin(math.pi/4)/2, 0, size[0], 0, img_w)*math.sin(math.radians(abs(gripper_rotation) + 45))) + 65*math.sin(math.radians(abs(roll_angle)))
if Y < 0.12 + 0.04:
centerY += d_color_y
if 0 < centerY - color_y_adjust <= 5:
centerY = color_y_adjust
y_pid.update(centerY)
dy = y_pid.output
y_dis += dy
y_dis = 0.1 if y_dis > 0.1 else y_dis
y_dis = -0.1 if y_dis < -0.1 else y_dis
else:
dy = 0
if abs(dx) < 0.1 and abs(dy) < 0.0001 and (abs(last_box_rotation_angle - rect[2]) <= 10 or abs(last_box_rotation_angle - rect[2] >= 80)):
count += 1
if (adjust and count > 10) or (not adjust and count >= 10):
count = 0
if adjust:
adjust = False
else:
rotation_angle = 240 * (x_dis - 500)/1000.0
X = round(-Y * math.tan(math.radians(rotation_angle)), 4)
state = 'color'
pick_color = detect_color[0]
adjust_error = False
approach = True
else:
count = 0
if adjust and (abs(last_x_dis - x_dis) >= 2 or abs(last_y_dis - y_dis) > 0.002):
position = grasps.grasp_pos.position
rotation = grasps.grasp_pos.rotation
target = ik.setPitchRanges((position.x, position.y + y_dis, position.z), rotation.r, -180, 0)
if target:
servo_data = target[1]
bus_servo_control.set_servos(joints_pub, 100, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, int(x_dis))))
rospy.sleep(0.1)
last_x_dis = x_dis
last_y_dis = y_dis
else:
bus_servo_control.set_servos(joints_pub, 20, ((6, int(x_dis)), ))
else:
bus_servo_control.set_servos(joints_pub, 20, ((6, int(x_dis)), ))
last_box_rotation_angle = rect[2]
else:
count_timeout += 1
if count_timeout > 20:
adjust_error = True
count_timeout = 0
current_tag = ['tag1', 'tag2', 'tag3']
detect_color = __target_data
return img
d_map = 0.015  # Y step (metres) between adjacent tag distance bands in tag_sort
# Pixel-row band edges (far -> near) used by tag_sort to map image rows to arm Y.
tag_map = [425, 384, 346, 310, 272, 239, 208, 177, 153, 129, 106, 86, 68, 51]
# AprilTag grasping strategy: wait for the tag to be stationary, PID-align
# servo 6 to it, then map its image position to an arm-space (X, Y) target.
def tag_sort(img, target):
    """Process one frame for tag-based picking.

    :param img: BGR frame
    :param target: tag entry ['tagN', center_x, center_y, angle] from apriltagDetect
    :return: the frame (returned unmodified)
    """
    global X, Y
    global state
    global count2
    global count3
    global adjust
    global approach
    global start_move
    global current_tag
    global adjust_error
    global last_X, last_Y
    global box_rotation_angle
    global tag_x_dis, tag_y_dis
    img_copy = img.copy()
    img_h, img_w = img.shape[:2]
    centerX = target[1]
    centerY = target[2]
    # Fold the tag angle into (-45, 45], preserving the original sign.
    box_rotation_angle = abs(target[3])
    if box_rotation_angle > 90:
        box_rotation_angle -= 90
    if box_rotation_angle > 45:
        box_rotation_angle = box_rotation_angle - 90
    if target[3] < 0:
        box_rotation_angle = -box_rotation_angle
    distance = math.sqrt(pow(centerX - last_X, 2) + pow(centerY - last_Y, 2))  # compare with the last position to decide whether the tag moved
    # The tag must stay within 5 px for >20 frames before a pick starts.
    if distance < 5 and not start_move:
        count2 += 1
        if count2 > 20:
            count2 = 0
            start_move = True
    else:
        count2 = 0
    if (not approach or adjust) and start_move:
        tag_x_pid.SetPoint = center_x  # setpoint
        tag_x_pid.update(centerX)  # current value
        dx = tag_x_pid.output
        tag_x_dis += dx  # accumulated output, clamped to servo range [0, 1000]
        tag_x_dis = 0 if tag_x_dis < 0 else tag_x_dis
        tag_x_dis = 1000 if tag_x_dis > 1000 else tag_x_dis
        # Converged: the tag centre is steady for >30 consecutive frames.
        if abs(centerX - last_X) <= 1 and X != -1:
            count3 += 1
            rospy.sleep(0.01)
            if count3 > 30:
                count3 = 0
                if adjust:
                    adjust = False
                else:
                    current_tag = target[0]
                    # Position mapping: image row band -> arm-space Y (metres).
                    if tag_map[1] + d_tag_map < centerY <= tag_map[0] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[1] + d_tag_map, tag_map[0] + d_tag_map, 0.12 + d_map, 0.12) - 0.005
                    elif tag_map[2] + d_tag_map < centerY <= tag_map[1] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[2] + d_tag_map, tag_map[1] + d_tag_map, 0.12 + 2*d_map, 0.12 + d_map)
                    elif tag_map[3] + d_tag_map < centerY <= tag_map[2] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[3] + d_tag_map, tag_map[2] + d_tag_map, 0.12 + 3*d_map, 0.12 + 2*d_map)
                    elif tag_map[4] + d_tag_map < centerY <= tag_map[3] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[4] + d_tag_map, tag_map[3] + d_tag_map, 0.12 + 4*d_map, 0.12 + 3*d_map)
                    elif tag_map[5] + d_tag_map < centerY <= tag_map[4] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[5] + d_tag_map, tag_map[4] + d_tag_map, 0.12 + 5*d_map, 0.12 + 4*d_map)
                    elif tag_map[6] + d_tag_map < centerY <= tag_map[5] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[6] + d_tag_map, tag_map[5] + d_tag_map, 0.12 + 6*d_map, 0.12 + 5*d_map)
                    elif tag_map[7] + d_tag_map < centerY <= tag_map[6] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[7] + d_tag_map, tag_map[6] + d_tag_map, 0.12 + 7*d_map, 0.12 + 6*d_map)
                    elif tag_map[8] + d_tag_map < centerY <= tag_map[7] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[8] + d_tag_map, tag_map[7] + d_tag_map, 0.12 + 8*d_map, 0.12 + 7*d_map)
                    elif tag_map[9] + d_tag_map < centerY <= tag_map[8] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[9] + d_tag_map, tag_map[8] + d_tag_map, 0.12 + 9*d_map, 0.12 + 8*d_map)
                    elif tag_map[10] + d_tag_map < centerY <= tag_map[9] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[10] + d_tag_map, tag_map[9] + d_tag_map, 0.12 + 10*d_map, 0.12 + 9*d_map)
                    elif tag_map[11] + d_tag_map < centerY <= tag_map[10] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[11] + d_tag_map, tag_map[10] + d_tag_map, 0.12 + 11*d_map, 0.12 + 10*d_map)
                    elif tag_map[12] + d_tag_map < centerY <= tag_map[11] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[12] + d_tag_map, tag_map[11] + d_tag_map, 0.12 + 12*d_map, 0.12 + 11*d_map)
                    elif tag_map[13] + d_tag_map < centerY <= tag_map[12] + d_tag_map:
                        Y = Misc.map(centerY, tag_map[13] + d_tag_map, tag_map[12] + d_tag_map, 0.12 + 13*d_map, 0.12 + 12*d_map)
                    else:
                        Y = 1  # out-of-range sentinel
                    # Lock the target: derive arm-space X from the base rotation.
                    X = round(-Y * math.tan(math.radians(rotation_angle)), 4)
                    state = 'tag'
                    approach = True
                    adjust_error = False
                    adjust = False
        else:
            count3 = 0
        bus_servo_control.set_servos(joints_pub, 20, ((6, int(tag_x_dis)), ))
    last_X, last_Y = centerX, centerY
    return img
def _handle_tag(img, tag_name, tag_data):
    """Track one AprilTag target; drop it from current_tag after repeated misses.

    Factored out of run(): the original repeated this block verbatim for
    tag1/tag2/tag3.

    :param img: current BGR frame
    :param tag_name: 'tag1' / 'tag2' / 'tag3'
    :param tag_data: [name, center_x, center_y, angle]; center_x == -1 means not visible
    :return: the (possibly annotated) frame
    """
    global adjust_error
    global count_tag_timeout
    global count_adjust_timeout
    if tag_data[1] != -1:  # tag visible: track it
        count_adjust_timeout = 0
        img = tag_sort(img, tag_data)
    elif adjust:
        # Lost sight of the tag while fine-adjusting: give up after 50 misses.
        count_adjust_timeout += 1
        if count_adjust_timeout > 50:
            count_adjust_timeout = 0
            adjust_error = True
    else:
        # Tag never seen: after a few misses stop considering it.
        count_tag_timeout += 1
        if count_tag_timeout > 3:
            count_tag_timeout = 0
            # current_tag may be a plain string after tag_sort locks a target;
            # only remove from it while it is still the candidate list.
            if current_tag != tag_name:
                current_tag.remove(tag_name)
    return img
def run(img):
    """Per-frame dispatcher: detect targets and run the matching pick strategy.

    Tags take priority over colours; picking timeouts are handled per tag.
    A centre crosshair is drawn before the frame is returned.
    """
    global current_tag
    if 'tag1' in __target_data or 'tag2' in __target_data or 'tag3' in __target_data:
        apriltagDetect(img)  # AprilTag detection
    # Selection strategy: tags first, then colours.
    if 'tag1' in __target_data and 'tag1' in current_tag:
        img = _handle_tag(img, 'tag1', tag1)
    elif 'tag2' in __target_data and 'tag2' in current_tag:
        img = _handle_tag(img, 'tag2', tag2)
    elif 'tag3' in __target_data and 'tag3' in current_tag:
        img = _handle_tag(img, 'tag3', tag3)
    elif ('red' in __target_data) or ('green' in __target_data) or ('blue' in __target_data):
        img = color_sort(img, __target_data)
    else:
        current_tag = ['tag1', 'tag2', 'tag3']
    # Draw a crosshair at the image centre.
    img_h, img_w = img.shape[:2]
    cv2.line(img, (int(img_w/2 - 10), int(img_h/2)), (int(img_w/2 + 10), int(img_h/2)), (0, 255, 255), 2)
    cv2.line(img, (int(img_w/2), int(img_h/2 - 10)), (int(img_w/2), int(img_h/2 + 10)), (0, 255, 255), 2)
    return img
def image_callback(ros_image):
    """Camera topic callback: convert the ROS image, run processing, publish the result.

    :param ros_image: sensor_msgs/Image with 8-bit RGB pixel data
    """
    global lock
    global stop_state
    # Wrap the raw message buffer as an image array (no copy).
    image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8,
                       buffer=ros_image.data)
    cv2_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # convert to OpenCV BGR order
    frame = cv2_img.copy()
    frame_result = frame
    with lock:
        if __isRunning:
            frame_result = run(frame)
        else:
            if stop_state:
                stop_state = 0
                initMove(delay=False)
    # tobytes() replaces ndarray.tostring(), which was deprecated and removed
    # in NumPy 2.0; the produced bytes are identical.
    rgb_image = cv2.cvtColor(frame_result, cv2.COLOR_BGR2RGB).tobytes()  # back to ROS RGB byte order
    ros_image.data = rgb_image
    image_pub.publish(ros_image)
org_image_sub_ed = False
def enter_func(msg):
    """'/in/enter' service callback: initialise the app and start receiving frames.

    :return: [True, 'enter'] service response
    """
    global lock
    global image_sub
    global __isRunning
    global org_image_sub_ed
    rospy.loginfo("enter in")
    with lock:
        init()
        # Subscribe to the camera exactly once, however often the app re-enters.
        if not org_image_sub_ed:
            image_sub = rospy.Subscriber('/usb_cam/image_raw', Image, image_callback)
            org_image_sub_ed = True
    return [True, 'enter']
heartbeat_timer = None
def exit_func(msg):
    """'/in/exit' service callback: stop processing and release camera/watchdog resources.

    Teardown is best-effort: the subscriber or timer may already be gone.

    :return: [True, 'exit'] service response
    """
    global lock
    global image_sub
    global __isRunning
    global org_image_sub_ed
    rospy.loginfo("exit in")
    with lock:
        __isRunning = False
        try:
            if org_image_sub_ed:
                org_image_sub_ed = False
                if heartbeat_timer is not None:
                    heartbeat_timer.cancel()
                image_sub.unregister()
        except Exception:
            # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still
            # propagate; teardown failures are deliberately ignored.
            pass
    return [True, 'exit']
def start_running():
    """Enable the per-frame processing loop."""
    global lock, __isRunning
    rospy.loginfo("start running in")
    with lock:
        __isRunning = True
def stop_running():
    """Disable processing; flag a reset if the arm was interrupted mid-pick."""
    global lock, stop_state, __isRunning
    rospy.loginfo("stop running in")
    with lock:
        __isRunning = False
        # An interrupted approach or fine-adjust leaves the arm mid-motion:
        # mark it so image_callback re-homes on the next frame.
        if adjust or (start_move and not approach):
            stop_state = 1
        reset()
def set_running(msg):
    """'/in/set_running' SetBool service: True starts processing, False stops it.

    :return: [True, 'set_running'] service response
    """
    action = start_running if msg.data else stop_running
    action()
    return [True, 'set_running']
def set_target(msg):
    """'/in/set_target' service: record which goods go to which positions.

    :param msg: request with parallel 'goods' and 'position' sequences
    :return: [True, 'set_target'] service response
    """
    global lock, __target_data
    rospy.loginfo('%s', msg)
    with lock:
        __target_data = {goods: pos for goods, pos in zip(msg.goods, msg.position)}
    return [True, 'set_target']
# Heartbeat watchdog
def heartbeat_srv_cb(msg):
    """'/in/heartbeat' service: (re)arm a 5 s watchdog that calls '/in/exit'.

    :param msg: SetBool request; True arms the watchdog, False only cancels it
    :return: SetBoolResponse echoing msg.data
    """
    global heartbeat_timer
    # Cancel any previously armed watchdog before deciding what to do next.
    if isinstance(heartbeat_timer, Timer):
        heartbeat_timer.cancel()
    if msg.data:
        heartbeat_timer = Timer(5, rospy.ServiceProxy('/in/exit', Trigger))
        heartbeat_timer.start()
    response = SetBoolResponse()
    response.success = msg.data
    return response
if __name__ == '__main__':
    # Initialise the ROS node
    rospy.init_node('in', log_level=rospy.DEBUG)
    # Servo command publisher
    joints_pub = rospy.Publisher('/servo_controllers/port_id_1/multi_id_pos_dur', MultiRawIdPosDur, queue_size=1)
    # Annotated result image publisher
    image_pub = rospy.Publisher('/in/image_result', Image, queue_size=1) # register result image publisher
    # App communication services
    enter_srv = rospy.Service('/in/enter', Trigger, enter_func)
    exit_srv = rospy.Service('/in/exit', Trigger, exit_func)
    running_srv = rospy.Service('/in/set_running', SetBool, set_running)
    set_target_srv = rospy.Service('/in/set_target', SetInTarget, set_target)
    heartbeat_srv = rospy.Service('/in/heartbeat', SetBool, heartbeat_srv_cb)
    # Buzzer
    buzzer_pub = rospy.Publisher('/sensor/buzzer', Float32, queue_size=1)
    # RGB LEDs
    rgb_pub = rospy.Publisher('/sensor/rgb_led', Led, queue_size=1)
    # Calibration offsets for the colour/tag position maps.
    # NOTE(review): if the 'config' parameter is missing, d_tag_map and friends
    # are never defined and the first processed frame will raise NameError —
    # confirm a config is always provided.
    config = rospy.get_param('config', {})
    if config != {}:
        d_tag_map = config['d_tag_map']
        tag_z_min = config['tag_z_min']
        tag_z_max = config['tag_z_max']
        d_color_map = config['d_color_map']
        color_z_min = config['color_z_min']
        color_z_max = config['color_z_max']
        d_color_y = config['d_color_y']
        color_y_adjust = config['color_y_adjust']
        center_x = config['center_x']
    debug = False
    if debug:
        # Standalone bring-up: enter, set all targets, and start without the app.
        rospy.sleep(0.2)
        enter_func(1)
        msg = SetInTarget()
        msg.goods = ['red', 'green', 'blue', 'tag1', 'tag2', 'tag3']
        msg.position = ['R1', 'R2', 'R3', 'L1', 'L2', 'L3']
        set_target(msg)
        start_running()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")
| [
"rospy.init_node",
"numpy.array",
"armpi_fpv.apriltag._get_demo_searchpath",
"rospy.Service",
"threading.RLock",
"rospy.ServiceProxy",
"cv2.contourArea",
"cv2.minAreaRect",
"armpi_fpv.PID.PID",
"numpy.rint",
"rospy.spin",
"armpi_fpv.Misc.map",
"rospy.sleep",
"rospy.Subscriber",
"warehous... | [((808, 825), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (823, 825), False, 'import threading\n'), ((831, 851), 'kinematics.ik_transform.ArmIK', 'ik_transform.ArmIK', ([], {}), '()\n', (849, 851), False, 'from kinematics import ik_transform\n'), ((861, 937), 'cv2.imread', 'cv2.imread', (['"""/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask1.jpg"""', '(0)'], {}), "('/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask1.jpg', 0)\n", (871, 937), False, 'import cv2\n'), ((946, 1022), 'cv2.imread', 'cv2.imread', (['"""/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask2.jpg"""', '(0)'], {}), "('/home/ubuntu/armpi_fpv/src/object_sorting/scripts/mask2.jpg', 0)\n", (956, 1022), False, 'import cv2\n'), ((2178, 2207), 'armpi_fpv.PID.PID', 'PID.PID', ([], {'P': '(0.01)', 'I': '(0.001)', 'D': '(0)'}), '(P=0.01, I=0.001, D=0)\n', (2185, 2207), False, 'from armpi_fpv import PID\n'), ((2223, 2249), 'armpi_fpv.PID.PID', 'PID.PID', ([], {'P': '(1e-05)', 'I': '(0)', 'D': '(0)'}), '(P=1e-05, I=0, D=0)\n', (2230, 2249), False, 'from armpi_fpv import PID\n'), ((2295, 2324), 'armpi_fpv.PID.PID', 'PID.PID', ([], {'P': '(0.01)', 'I': '(0.001)', 'D': '(0)'}), '(P=0.01, I=0.001, D=0)\n', (2302, 2324), False, 'from armpi_fpv import PID\n'), ((2344, 2369), 'armpi_fpv.PID.PID', 'PID.PID', ([], {'P': '(0.02)', 'I': '(0)', 'D': '(0)'}), '(P=0.02, I=0, D=0)\n', (2351, 2369), False, 'from armpi_fpv import PID\n'), ((14945, 14952), 'warehouse.msg.Grasp', 'Grasp', ([], {}), '()\n', (14950, 14952), False, 'from warehouse.msg import Grasp\n'), ((19612, 19641), 'threading.Thread', 'threading.Thread', ([], {'target': 'move'}), '(target=move)\n', (19628, 19641), False, 'import threading\n'), ((1948, 1953), 'sensor.msg.Led', 'Led', ([], {}), '()\n', (1951, 1953), False, 'from sensor.msg import Led\n'), ((4425, 4449), 'rospy.loginfo', 'rospy.loginfo', (['"""in Init"""'], {}), "('in Init')\n", (4438, 4449), False, 'import rospy\n'), ((4482, 4541), 'rospy.get_param', 
'rospy.get_param', (['"""/lab_config_manager/color_range_list"""', '{}'], {}), "('/lab_config_manager/color_range_list', {})\n", (4497, 4541), False, 'import rospy\n'), ((4735, 4756), 'math.sin', 'math.sin', (['(math.pi / 4)'], {}), '(math.pi / 4)\n', (4743, 4756), False, 'import math\n'), ((19828, 19865), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (19840, 19865), False, 'import cv2\n'), ((21308, 21352), 'cv2.warpAffine', 'cv2.warpAffine', (['mask2', 'rotate1', '(cols, rows)'], {}), '(mask2, rotate1, (cols, rows))\n', (21322, 21352), False, 'import cv2\n'), ((21368, 21406), 'cv2.bitwise_and', 'cv2.bitwise_and', (['rotate_rotate1', 'mask1'], {}), '(rotate_rotate1, mask1)\n', (21383, 21406), False, 'import cv2\n'), ((21513, 21560), 'cv2.warpAffine', 'cv2.warpAffine', (['mask_and', 'rotate2', '(cols, rows)'], {}), '(mask_and, rotate2, (cols, rows))\n', (21527, 21560), False, 'import cv2\n'), ((21580, 21651), 'cv2.resize', 'cv2.resize', (['rotate_rotate2', '(710, 710)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(rotate_rotate2, (710, 710), interpolation=cv2.INTER_NEAREST)\n', (21590, 21651), False, 'import cv2\n'), ((22350, 22409), 'cv2.resize', 'cv2.resize', (['img_copy', 'size'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img_copy, size, interpolation=cv2.INTER_NEAREST)\n', (22360, 22409), False, 'import cv2\n'), ((22427, 22473), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_resize', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_resize, cv2.COLOR_BGR2GRAY)\n', (22439, 22473), False, 'import cv2\n'), ((22490, 22535), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_resize', 'cv2.COLOR_BGR2LAB'], {}), '(frame_resize, cv2.COLOR_BGR2LAB)\n', (22502, 22535), False, 'import cv2\n'), ((37097, 37196), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(ros_image.height, ros_image.width, 3)', 'dtype': 'np.uint8', 'buffer': 'ros_image.data'}), '(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8,\n buffer=ros_image.data)\n', (37107, 
37196), True, 'import numpy as np\n'), ((37247, 37285), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (37259, 37285), False, 'import cv2\n'), ((37839, 37864), 'rospy.loginfo', 'rospy.loginfo', (['"""enter in"""'], {}), "('enter in')\n", (37852, 37864), False, 'import rospy\n'), ((38234, 38258), 'rospy.loginfo', 'rospy.loginfo', (['"""exit in"""'], {}), "('exit in')\n", (38247, 38258), False, 'import rospy\n'), ((38659, 38692), 'rospy.loginfo', 'rospy.loginfo', (['"""start running in"""'], {}), "('start running in')\n", (38672, 38692), False, 'import rospy\n'), ((38826, 38858), 'rospy.loginfo', 'rospy.loginfo', (['"""stop running in"""'], {}), "('stop running in')\n", (38839, 38858), False, 'import rospy\n'), ((39211, 39235), 'rospy.loginfo', 'rospy.loginfo', (['"""%s"""', 'msg'], {}), "('%s', msg)\n", (39224, 39235), False, 'import rospy\n'), ((39724, 39768), 'rospy.init_node', 'rospy.init_node', (['"""in"""'], {'log_level': 'rospy.DEBUG'}), "('in', log_level=rospy.DEBUG)\n", (39739, 39768), False, 'import rospy\n'), ((39802, 39902), 'rospy.Publisher', 'rospy.Publisher', (['"""/servo_controllers/port_id_1/multi_id_pos_dur"""', 'MultiRawIdPosDur'], {'queue_size': '(1)'}), "('/servo_controllers/port_id_1/multi_id_pos_dur',\n MultiRawIdPosDur, queue_size=1)\n", (39817, 39902), False, 'import rospy\n'), ((39927, 39983), 'rospy.Publisher', 'rospy.Publisher', (['"""/in/image_result"""', 'Image'], {'queue_size': '(1)'}), "('/in/image_result', Image, queue_size=1)\n", (39942, 39983), False, 'import rospy\n'), ((40054, 40101), 'rospy.Service', 'rospy.Service', (['"""/in/enter"""', 'Trigger', 'enter_func'], {}), "('/in/enter', Trigger, enter_func)\n", (40067, 40101), False, 'import rospy\n'), ((40117, 40162), 'rospy.Service', 'rospy.Service', (['"""/in/exit"""', 'Trigger', 'exit_func'], {}), "('/in/exit', Trigger, exit_func)\n", (40130, 40162), False, 'import rospy\n'), ((40181, 40235), 'rospy.Service', 'rospy.Service', 
(['"""/in/set_running"""', 'SetBool', 'set_running'], {}), "('/in/set_running', SetBool, set_running)\n", (40194, 40235), False, 'import rospy\n'), ((40257, 40313), 'rospy.Service', 'rospy.Service', (['"""/in/set_target"""', 'SetInTarget', 'set_target'], {}), "('/in/set_target', SetInTarget, set_target)\n", (40270, 40313), False, 'import rospy\n'), ((40334, 40391), 'rospy.Service', 'rospy.Service', (['"""/in/heartbeat"""', 'SetBool', 'heartbeat_srv_cb'], {}), "('/in/heartbeat', SetBool, heartbeat_srv_cb)\n", (40347, 40391), False, 'import rospy\n'), ((40424, 40480), 'rospy.Publisher', 'rospy.Publisher', (['"""/sensor/buzzer"""', 'Float32'], {'queue_size': '(1)'}), "('/sensor/buzzer', Float32, queue_size=1)\n", (40439, 40480), False, 'import rospy\n'), ((40507, 40560), 'rospy.Publisher', 'rospy.Publisher', (['"""/sensor/rgb_led"""', 'Led'], {'queue_size': '(1)'}), "('/sensor/rgb_led', Led, queue_size=1)\n", (40522, 40560), False, 'import rospy\n'), ((40575, 40604), 'rospy.get_param', 'rospy.get_param', (['"""config"""', '{}'], {}), "('config', {})\n", (40590, 40604), False, 'import rospy\n'), ((1764, 1874), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1500)', '((1, 75), (2, 500), (3, 80), (4, 825), (5, 625), (6, 500))'], {}), '(joints_pub, 1500, ((1, 75), (2, 500), (3, 80),\n (4, 825), (5, 625), (6, 500)))\n', (1792, 1874), False, 'from armpi_fpv import bus_servo_control\n'), ((1893, 1907), 'rospy.sleep', 'rospy.sleep', (['(2)'], {}), '(2)\n', (1904, 1907), False, 'import rospy\n'), ((9962, 9990), 'rospy.loginfo', 'rospy.loginfo', (['"""pick failed"""'], {}), "('pick failed')\n", (9975, 9990), False, 'import rospy\n'), ((11011, 11027), 'rospy.sleep', 'rospy.sleep', (['(0.8)'], {}), '(0.8)\n', (11022, 11027), False, 'import rospy\n'), ((11263, 11428), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 
'servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 500, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data[\n 'servo6'])))\n", (11291, 11428), False, 'from armpi_fpv import bus_servo_control\n'), ((11433, 11449), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (11444, 11449), False, 'import rospy\n'), ((11718, 11883), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 500, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data[\n 'servo6'])))\n", (11746, 11883), False, 'from armpi_fpv import bus_servo_control\n'), ((11883, 11897), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (11894, 11897), False, 'import rospy\n'), ((13134, 13222), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(100)', '((1, places.pre_grasp_posture - 20),)'], {}), '(joints_pub, 100, ((1, places.pre_grasp_posture -\n 20),))\n', (13162, 13222), False, 'from armpi_fpv import bus_servo_control\n'), ((13237, 13253), 'rospy.sleep', 'rospy.sleep', (['(0.2)'], {}), '(0.2)\n', (13248, 13253), False, 'import rospy\n'), ((13270, 13345), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, places.grasp_posture),)'], {}), '(joints_pub, 500, ((1, places.grasp_posture),))\n', (13298, 13345), False, 'from armpi_fpv import bus_servo_control\n'), ((13364, 13378), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (13375, 13378), False, 'import rospy\n'), ((14124, 14241), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6']))"], {}), "(joints_pub, 500, ((2, 500), (3, 80), (4, 825),\n (5, 625), (6, 
servo_data['servo6'])))\n", (14152, 14241), False, 'from armpi_fpv import bus_servo_control\n'), ((14246, 14262), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (14257, 14262), False, 'import rospy\n'), ((14363, 14392), 'rospy.loginfo', 'rospy.loginfo', (['"""place failed"""'], {}), "('place failed')\n", (14376, 14392), False, 'import rospy\n'), ((19726, 19757), 'armpi_fpv.apriltag._get_demo_searchpath', 'apriltag._get_demo_searchpath', ([], {}), '()\n', (19755, 19757), False, 'from armpi_fpv import apriltag\n'), ((23860, 23895), 'cv2.minAreaRect', 'cv2.minAreaRect', (['areaMaxContour_max'], {}), '(areaMaxContour_max)\n', (23875, 23895), False, 'import cv2\n'), ((24286, 24348), 'cv2.drawContours', 'cv2.drawContours', (['img', '[box]', '(-1)', 'range_rgb[color_area_max]', '(2)'], {}), '(img, [box], -1, range_rgb[color_area_max], 2)\n', (24302, 24348), False, 'import cv2\n'), ((41059, 41075), 'rospy.sleep', 'rospy.sleep', (['(0.2)'], {}), '(0.2)\n', (41070, 41075), False, 'import rospy\n'), ((41335, 41347), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (41345, 41347), False, 'import rospy\n'), ((1411, 1429), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (1426, 1429), False, 'import cv2\n'), ((5762, 5895), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1800)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']))"], {}), "(joints_pub, 1800, ((3, servo_data['servo3']),\n (4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (5790, 5895), False, 'from armpi_fpv import bus_servo_control\n'), ((5905, 5919), 'rospy.sleep', 'rospy.sleep', (['(2)'], {}), '(2)\n', (5916, 5919), False, 'import rospy\n'), ((6066, 6199), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']))"], {}), "(joints_pub, 1500, ((3, servo_data['servo3']),\n (4, 
servo_data['servo4']), (5, servo_data['servo5'])))\n", (6094, 6199), False, 'from armpi_fpv import bus_servo_control\n'), ((6208, 6222), 'rospy.sleep', 'rospy.sleep', (['(2)'], {}), '(2)\n', (6219, 6222), False, 'import rospy\n'), ((7092, 7108), 'rospy.sleep', 'rospy.sleep', (['(0.8)'], {}), '(0.8)\n', (7103, 7108), False, 'import rospy\n'), ((7463, 7548), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.grasp_posture - 80),)'], {}), '(joints_pub, 500, ((1, grasps.grasp_posture - 80),)\n )\n', (7491, 7548), False, 'from armpi_fpv import bus_servo_control\n'), ((7572, 7588), 'rospy.sleep', 'rospy.sleep', (['(0.8)'], {}), '(0.8)\n', (7583, 7588), False, 'import rospy\n'), ((7601, 7676), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.grasp_posture),)'], {}), '(joints_pub, 500, ((1, grasps.grasp_posture),))\n', (7629, 7676), False, 'from armpi_fpv import bus_servo_control\n'), ((7705, 7721), 'rospy.sleep', 'rospy.sleep', (['(0.8)'], {}), '(0.8)\n', (7716, 7721), False, 'import rospy\n'), ((9568, 9659), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', '((2, 500), (3, 80), (4, 825), (5, 625))'], {}), '(joints_pub, 1000, ((2, 500), (3, 80), (4, 825),\n (5, 625)))\n', (9596, 9659), False, 'from armpi_fpv import bus_servo_control\n'), ((9668, 9682), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (9679, 9682), False, 'import rospy\n'), ((11068, 11143), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, places.grasp_posture),)'], {}), '(joints_pub, 500, ((1, places.grasp_posture),))\n', (11096, 11143), False, 'from armpi_fpv import bus_servo_control\n'), ((11172, 11188), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (11183, 11188), False, 'import rospy\n'), ((11490, 11565), 'armpi_fpv.bus_servo_control.set_servos', 
'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, places.grasp_posture),)'], {}), '(joints_pub, 500, ((1, places.grasp_posture),))\n', (11518, 11565), False, 'from armpi_fpv import bus_servo_control\n'), ((11594, 11610), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (11605, 11610), False, 'import rospy\n'), ((11938, 12013), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, places.grasp_posture),)'], {}), '(joints_pub, 500, ((1, places.grasp_posture),))\n', (11966, 12013), False, 'from armpi_fpv import bus_servo_control\n'), ((12042, 12058), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (12053, 12058), False, 'import rospy\n'), ((12120, 12295), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6,\n servo_data['servo6'])))\n", (12148, 12295), False, 'from armpi_fpv import bus_servo_control\n'), ((12306, 12320), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (12317, 12320), False, 'import rospy\n'), ((12461, 12626), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(400)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 400, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data[\n 'servo6'])))\n", (12489, 12626), False, 'from armpi_fpv import bus_servo_control\n'), ((12629, 12645), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (12640, 12645), False, 'import rospy\n'), ((12686, 12761), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, 
places.grasp_posture),)'], {}), '(joints_pub, 500, ((1, places.grasp_posture),))\n', (12714, 12761), False, 'from armpi_fpv import bus_servo_control\n'), ((12790, 12806), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (12801, 12806), False, 'import rospy\n'), ((12868, 13043), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6,\n servo_data['servo6'])))\n", (12896, 13043), False, 'from armpi_fpv import bus_servo_control\n'), ((13054, 13068), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (13065, 13068), False, 'import rospy\n'), ((13455, 13630), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6,\n servo_data['servo6'])))\n", (13483, 13630), False, 'from armpi_fpv import bus_servo_control\n'), ((13641, 13655), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (13652, 13655), False, 'import rospy\n'), ((13805, 13970), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']), (6, servo_data['servo6']))"], {}), "(joints_pub, 500, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data[\n 'servo6'])))\n", (13833, 13970), False, 'from armpi_fpv import bus_servo_control\n'), ((13973, 13989), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (13984, 13989), False, 'import rospy\n'), ((19576, 
19593), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (19587, 19593), False, 'import rospy\n'), ((20151, 20177), 'numpy.rint', 'np.rint', (['detection.corners'], {}), '(detection.corners)\n', (20158, 20177), True, 'import numpy as np\n'), ((24066, 24085), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (24079, 24085), False, 'import cv2\n'), ((30955, 30972), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (30966, 30972), False, 'import rospy\n'), ((37564, 37609), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_result', 'cv2.COLOR_BGR2RGB'], {}), '(frame_result, cv2.COLOR_BGR2RGB)\n', (37576, 37609), False, 'import cv2\n'), ((37988, 38049), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/usb_cam/image_raw"""', 'Image', 'image_callback'], {}), "('/usb_cam/image_raw', Image, image_callback)\n", (38004, 38049), False, 'import rospy\n'), ((39536, 39575), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/in/exit"""', 'Trigger'], {}), "('/in/exit', Trigger)\n", (39554, 39575), False, 'import rospy\n'), ((41386, 41416), 'rospy.loginfo', 'rospy.loginfo', (['"""Shutting down"""'], {}), "('Shutting down')\n", (41399, 41416), False, 'import rospy\n'), ((6311, 6455), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (6339, 6455), False, 'from armpi_fpv import bus_servo_control\n'), ((6474, 6488), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (6485, 6488), False, 'import rospy\n'), ((7197, 7341), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, 
servo_data['servo4']), (5, servo_data['servo5'])))\n", (7225, 7341), False, 'from armpi_fpv import bus_servo_control\n'), ((7360, 7374), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (7371, 7374), False, 'import rospy\n'), ((7770, 7849), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.pre_grasp_posture),)'], {}), '(joints_pub, 500, ((1, grasps.pre_grasp_posture),))\n', (7798, 7849), False, 'from armpi_fpv import bus_servo_control\n'), ((7882, 7898), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (7893, 7898), False, 'import rospy\n'), ((7967, 8111), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (7995, 8111), False, 'from armpi_fpv import bus_servo_control\n'), ((8130, 8144), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (8141, 8144), False, 'import rospy\n'), ((8310, 8443), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']))"], {}), "(joints_pub, 500, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (8338, 8443), False, 'from armpi_fpv import bus_servo_control\n'), ((8455, 8471), 'rospy.sleep', 'rospy.sleep', (['(0.6)'], {}), '(0.6)\n', (8466, 8471), False, 'import rospy\n'), ((8520, 8599), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.pre_grasp_posture),)'], {}), '(joints_pub, 500, ((1, grasps.pre_grasp_posture),))\n', (8548, 8599), False, 'from armpi_fpv import bus_servo_control\n'), ((8632, 8648), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (8643, 
8648), False, 'import rospy\n'), ((8717, 8861), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(1000)', "((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5,\n servo_data['servo5']))"], {}), "(joints_pub, 1000, ((1, 200), (3, servo_data[\n 'servo3']), (4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (8745, 8861), False, 'from armpi_fpv import bus_servo_control\n'), ((8880, 8894), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (8891, 8894), False, 'import rospy\n'), ((9071, 9204), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', "((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data[\n 'servo5']))"], {}), "(joints_pub, 500, ((3, servo_data['servo3']), (\n 4, servo_data['servo4']), (5, servo_data['servo5'])))\n", (9099, 9204), False, 'from armpi_fpv import bus_servo_control\n'), ((9224, 9240), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (9235, 9240), False, 'import rospy\n'), ((9731, 9810), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.pre_grasp_posture),)'], {}), '(joints_pub, 500, ((1, grasps.pre_grasp_posture),))\n', (9759, 9810), False, 'from armpi_fpv import bus_servo_control\n'), ((9843, 9859), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (9854, 9859), False, 'import rospy\n'), ((19532, 19549), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (19543, 19549), False, 'import rospy\n'), ((23045, 23078), 'cv2.bitwise_and', 'cv2.bitwise_and', (['roi', 'frame_mask1'], {}), '(roi, frame_mask1)\n', (23060, 23078), False, 'import cv2\n'), ((24155, 24196), 'armpi_fpv.Misc.map', 'Misc.map', (['box[j, 0]', '(0)', 'size[0]', '(0)', 'img_w'], {}), '(box[j, 0], 0, size[0], 0, img_w)\n', (24163, 24196), False, 'from armpi_fpv import Misc\n'), ((24226, 24267), 'armpi_fpv.Misc.map', 'Misc.map', (['box[j, 1]', '(0)', 'size[1]', '(0)', 
'img_h'], {}), '(box[j, 1], 0, size[1], 0, img_h)\n', (24234, 24267), False, 'from armpi_fpv import Misc\n'), ((25004, 25009), 'sensor.msg.Led', 'Led', ([], {}), '()\n', (25007, 25009), False, 'from sensor.msg import Led\n'), ((25331, 25347), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (25342, 25347), False, 'import rospy\n'), ((9297, 9376), 'armpi_fpv.bus_servo_control.set_servos', 'bus_servo_control.set_servos', (['joints_pub', '(500)', '((1, grasps.pre_grasp_posture),)'], {}), '(joints_pub, 500, ((1, grasps.pre_grasp_posture),))\n', (9325, 9376), False, 'from armpi_fpv import bus_servo_control\n'), ((9413, 9429), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (9424, 9429), False, 'import rospy\n'), ((20223, 20248), 'numpy.array', 'np.array', (['corners', 'np.int'], {}), '(corners, np.int)\n', (20231, 20248), True, 'import numpy as np\n'), ((20588, 20660), 'math.atan2', 'math.atan2', (['(corners[0][1] - corners[1][1])', '(corners[0][0] - corners[1][0])'], {}), '(corners[0][1] - corners[1][1], corners[0][0] - corners[1][0])\n', (20598, 20660), False, 'import math\n'), ((23127, 23176), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (23152, 23176), False, 'import cv2\n'), ((23228, 23277), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (23253, 23277), False, 'import cv2\n'), ((23387, 23454), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (23403, 23454), False, 'import cv2\n'), ((25481, 25555), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(298 + d_color_map)', '(424 + d_color_map)', '(0.12)', '(0.12 - 0.04)'], {}), '(centerY, 298 + d_color_map, 424 + d_color_map, 0.12, 0.12 - 0.04)\n', (25489, 25555), False, 'from armpi_fpv import Misc\n'), ((28862, 28878), 'rospy.sleep', 
'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (28873, 28878), False, 'import rospy\n'), ((15493, 15546), 'armpi_fpv.Misc.map', 'Misc.map', (['(Y - 0.15)', '(0)', '(0.15)', 'color_z_min', 'color_z_max'], {}), '(Y - 0.15, 0, 0.15, color_z_min, color_z_max)\n', (15501, 15546), False, 'from armpi_fpv import Misc\n'), ((15627, 15676), 'armpi_fpv.Misc.map', 'Misc.map', (['(Y - 0.12)', '(0)', '(0.15)', 'tag_z_min', 'tag_z_max'], {}), '(Y - 0.12, 0, 0.15, tag_z_min, tag_z_max)\n', (15635, 15676), False, 'from armpi_fpv import Misc\n'), ((25651, 25725), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(198 + d_color_map)', '(298 + d_color_map)', '(0.12 + 0.04)', '(0.12)'], {}), '(centerY, 198 + d_color_map, 298 + d_color_map, 0.12 + 0.04, 0.12)\n', (25659, 25725), False, 'from armpi_fpv import Misc\n'), ((19479, 19497), 'rospy.sleep', 'rospy.sleep', (['(0.001)'], {}), '(0.001)\n', (19490, 19497), False, 'import rospy\n'), ((25821, 25906), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(114 + d_color_map)', '(198 + d_color_map)', '(0.12 + 0.08)', '(0.12 + 0.04)'], {}), '(centerY, 114 + d_color_map, 198 + d_color_map, 0.12 + 0.08, 0.12 +\n 0.04)\n', (25829, 25906), False, 'from armpi_fpv import Misc\n'), ((31294, 31383), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[1] + d_tag_map)', '(tag_map[0] + d_tag_map)', '(0.12 + d_map)', '(0.12)'], {}), '(centerY, tag_map[1] + d_tag_map, tag_map[0] + d_tag_map, 0.12 +\n d_map, 0.12)\n', (31302, 31383), False, 'from armpi_fpv import Misc\n'), ((31501, 31602), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[2] + d_tag_map)', '(tag_map[1] + d_tag_map)', '(0.12 + 2 * d_map)', '(0.12 + d_map)'], {}), '(centerY, tag_map[2] + d_tag_map, tag_map[1] + d_tag_map, 0.12 + 2 *\n d_map, 0.12 + d_map)\n', (31509, 31602), False, 'from armpi_fpv import Misc\n'), ((25997, 26082), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(50 + d_color_map)', '(114 + d_color_map)', '(0.12 + 0.12)', '(0.12 + 0.08)'], {}), '(centerY, 50 + 
d_color_map, 114 + d_color_map, 0.12 + 0.12, 0.12 + 0.08\n )\n', (26005, 26082), False, 'from armpi_fpv import Misc\n'), ((31710, 31815), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[3] + d_tag_map)', '(tag_map[2] + d_tag_map)', '(0.12 + 3 * d_map)', '(0.12 + 2 * d_map)'], {}), '(centerY, tag_map[3] + d_tag_map, tag_map[2] + d_tag_map, 0.12 + 3 *\n d_map, 0.12 + 2 * d_map)\n', (31718, 31815), False, 'from armpi_fpv import Misc\n'), ((34060, 34088), 'math.radians', 'math.radians', (['rotation_angle'], {}), '(rotation_angle)\n', (34072, 34088), False, 'import math\n'), ((18532, 18539), 'warehouse.msg.Grasp', 'Grasp', ([], {}), '()\n', (18537, 18539), False, 'from warehouse.msg import Grasp\n'), ((26170, 26248), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(0 + d_color_map)', '(50 + d_color_map)', '(0.12 + 0.16)', '(0.12 + 0.12)'], {}), '(centerY, 0 + d_color_map, 50 + d_color_map, 0.12 + 0.16, 0.12 + 0.12)\n', (26178, 26248), False, 'from armpi_fpv import Misc\n'), ((28023, 28051), 'math.radians', 'math.radians', (['rotation_angle'], {}), '(rotation_angle)\n', (28035, 28051), False, 'import math\n'), ((31921, 32026), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[4] + d_tag_map)', '(tag_map[3] + d_tag_map)', '(0.12 + 4 * d_map)', '(0.12 + 3 * d_map)'], {}), '(centerY, tag_map[4] + d_tag_map, tag_map[3] + d_tag_map, 0.12 + 4 *\n d_map, 0.12 + 3 * d_map)\n', (31929, 32026), False, 'from armpi_fpv import Misc\n'), ((26957, 26978), 'math.sin', 'math.sin', (['(math.pi / 4)'], {}), '(math.pi / 4)\n', (26965, 26978), False, 'import math\n'), ((32132, 32237), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[5] + d_tag_map)', '(tag_map[4] + d_tag_map)', '(0.12 + 5 * d_map)', '(0.12 + 4 * d_map)'], {}), '(centerY, tag_map[5] + d_tag_map, tag_map[4] + d_tag_map, 0.12 + 5 *\n d_map, 0.12 + 4 * d_map)\n', (32140, 32237), False, 'from armpi_fpv import Misc\n'), ((32343, 32448), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[6] + 
d_tag_map)', '(tag_map[5] + d_tag_map)', '(0.12 + 6 * d_map)', '(0.12 + 5 * d_map)'], {}), '(centerY, tag_map[6] + d_tag_map, tag_map[5] + d_tag_map, 0.12 + 6 *\n d_map, 0.12 + 5 * d_map)\n', (32351, 32448), False, 'from armpi_fpv import Misc\n'), ((32554, 32659), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[7] + d_tag_map)', '(tag_map[6] + d_tag_map)', '(0.12 + 7 * d_map)', '(0.12 + 6 * d_map)'], {}), '(centerY, tag_map[7] + d_tag_map, tag_map[6] + d_tag_map, 0.12 + 7 *\n d_map, 0.12 + 6 * d_map)\n', (32562, 32659), False, 'from armpi_fpv import Misc\n'), ((32765, 32870), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[8] + d_tag_map)', '(tag_map[7] + d_tag_map)', '(0.12 + 8 * d_map)', '(0.12 + 7 * d_map)'], {}), '(centerY, tag_map[8] + d_tag_map, tag_map[7] + d_tag_map, 0.12 + 8 *\n d_map, 0.12 + 7 * d_map)\n', (32773, 32870), False, 'from armpi_fpv import Misc\n'), ((32976, 33081), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[9] + d_tag_map)', '(tag_map[8] + d_tag_map)', '(0.12 + 9 * d_map)', '(0.12 + 8 * d_map)'], {}), '(centerY, tag_map[9] + d_tag_map, tag_map[8] + d_tag_map, 0.12 + 9 *\n d_map, 0.12 + 8 * d_map)\n', (32984, 33081), False, 'from armpi_fpv import Misc\n'), ((33188, 33296), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[10] + d_tag_map)', '(tag_map[9] + d_tag_map)', '(0.12 + 10 * d_map)', '(0.12 + 9 * d_map)'], {}), '(centerY, tag_map[10] + d_tag_map, tag_map[9] + d_tag_map, 0.12 + \n 10 * d_map, 0.12 + 9 * d_map)\n', (33196, 33296), False, 'from armpi_fpv import Misc\n'), ((33403, 33513), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[11] + d_tag_map)', '(tag_map[10] + d_tag_map)', '(0.12 + 11 * d_map)', '(0.12 + 10 * d_map)'], {}), '(centerY, tag_map[11] + d_tag_map, tag_map[10] + d_tag_map, 0.12 + \n 11 * d_map, 0.12 + 10 * d_map)\n', (33411, 33513), False, 'from armpi_fpv import Misc\n'), ((33620, 33730), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[12] + d_tag_map)', 
'(tag_map[11] + d_tag_map)', '(0.12 + 12 * d_map)', '(0.12 + 11 * d_map)'], {}), '(centerY, tag_map[12] + d_tag_map, tag_map[11] + d_tag_map, 0.12 + \n 12 * d_map, 0.12 + 11 * d_map)\n', (33628, 33730), False, 'from armpi_fpv import Misc\n'), ((33837, 33947), 'armpi_fpv.Misc.map', 'Misc.map', (['centerY', '(tag_map[13] + d_tag_map)', '(tag_map[12] + d_tag_map)', '(0.12 + 13 * d_map)', '(0.12 + 12 * d_map)'], {}), '(centerY, tag_map[13] + d_tag_map, tag_map[12] + d_tag_map, 0.12 + \n 13 * d_map, 0.12 + 12 * d_map)\n', (33845, 33947), False, 'from armpi_fpv import Misc\n')] |
import numpy as np
import time
import ray
FREE_DELAY_S = 10.0  # minimum seconds between batched ray.internal.free() flushes
MAX_FREE_QUEUE_SIZE = 100  # flush the queue once it holds more than this many object ids
_last_free_time = 0.0  # timestamp of the last flush (module-level state)
_to_free = []  # object ids queued for the next batched free
def ray_get_and_free(object_ids):
    """Fetch object ids via ray.get and queue them for batched deletion.

    Use this instead of a plain ray.get whenever possible to keep memory
    usage down.  The only exception is when an object_id is shared among
    multiple readers.

    Args:
        object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.

    Returns:
        The result of ray.get(object_ids).
    """
    global _last_free_time
    global _to_free

    fetched = ray.get(object_ids)

    ids = object_ids if type(object_ids) is list else [object_ids]
    _to_free.extend(ids)

    # Flush in batches so the overhead of ray.internal.free is amortized.
    now = time.time()
    queue_full = len(_to_free) > MAX_FREE_QUEUE_SIZE
    delay_elapsed = now - _last_free_time > FREE_DELAY_S
    if queue_full or delay_elapsed:
        ray.internal.free(_to_free)
        _to_free = []
        _last_free_time = now

    return fetched
def aligned_array(size, dtype, align=64):
    """Allocate a 1-D array of ``size`` items whose data is ``align``-byte aligned.

    A 64-byte aligned buffer can be copied into GPU memory efficiently by
    TensorFlow.
    """
    nbytes = size * dtype.itemsize
    # Over-allocate so some offset into the buffer lands on an aligned address.
    raw = np.empty(nbytes + (align - 1), dtype=np.uint8)
    misalignment = raw.ctypes.data % align
    shift = (align - misalignment) % align
    if nbytes == 0:
        # Slice through a one-byte view first so numpy does not optimise the
        # empty slice reference away.
        out = raw[shift:shift + 1][0:0].view(dtype)
    else:
        out = raw[shift:shift + nbytes].view(dtype)
    assert len(out) == size, len(out)
    assert out.ctypes.data % align == 0, out.ctypes.data
    return out
def concat_aligned(items):
    """Concatenate arrays, making the output 64-byte aligned when possible.

    Only float32/float64/uint8 arrays are force-aligned; everything else is
    concatenated normally.  Prefer this over np.concatenate() when the result
    is likely to be fed into TensorFlow.
    """
    if len(items) == 0:
        return []
    if len(items) == 1:
        # Assume the single input is already aligned; forcing alignment here
        # would only incur a needless copy.
        return items[0]

    first = items[0]
    if isinstance(first, np.ndarray) and first.dtype in (np.float32, np.float64, np.uint8):
        total_size = sum(arr.size for arr in items)
        flat = aligned_array(total_size, first.dtype)
        rows = sum(arr.shape[0] for arr in items)
        output = flat.reshape((rows,) + first.shape[1:])
        assert output.ctypes.data % 64 == 0, output.ctypes.data
        # Concatenate directly into the pre-aligned buffer.
        np.concatenate(items, out=output)
        return output
    return np.concatenate(items)
| [
"ray.get",
"ray.internal.free",
"numpy.empty",
"numpy.concatenate",
"time.time"
] | [((610, 629), 'ray.get', 'ray.get', (['object_ids'], {}), '(object_ids)\n', (617, 629), False, 'import ray\n'), ((790, 801), 'time.time', 'time.time', ([], {}), '()\n', (799, 801), False, 'import time\n'), ((1248, 1289), 'numpy.empty', 'np.empty', (['(n + (align - 1))'], {'dtype': 'np.uint8'}), '(n + (align - 1), dtype=np.uint8)\n', (1256, 1289), True, 'import numpy as np\n'), ((908, 935), 'ray.internal.free', 'ray.internal.free', (['_to_free'], {}), '(_to_free)\n', (925, 935), False, 'import ray\n'), ((2699, 2732), 'numpy.concatenate', 'np.concatenate', (['items'], {'out': 'output'}), '(items, out=output)\n', (2713, 2732), True, 'import numpy as np\n'), ((2780, 2801), 'numpy.concatenate', 'np.concatenate', (['items'], {}), '(items)\n', (2794, 2801), True, 'import numpy as np\n')] |
#%% Importing dependencies
import numpy as np
from PIL import Image
import glob
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

#%% Camera calibration
# Number of inner chessboard corners per column (ny) and per row (nx).
ny = 6
nx = 9

imgpoints = []  # 2D corner positions detected in each image (pixel coordinates)
objpoints = []  # matching 3D chessboard coordinates (the same grid for every view)
images = []     # grayscale calibration images

# Object points of one chessboard view: (0,0,0), (1,0,0), ..., (nx-1,ny-1,0).
# BUG FIX: the original left objp entirely zero, which makes the calibration
# degenerate -- cv2.calibrateCamera needs the real grid coordinates.
objp = np.zeros((ny*nx, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

for filename in glob.glob('./camera_cal/*.jpg'):
    # Load each calibration image and convert it to grayscale.
    im = cv2.imread(filename)
    im_gry = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    images.append(im_gry)
    # Detect the inner chessboard corners; skip frames where detection fails.
    ret, corners = cv2.findChessboardCorners(im_gry, (nx, ny), None)
    if ret:
        objpoints.append(objp)
        imgpoints.append(corners)

if not images:
    # Fail with a clear message instead of a NameError on `im` below.
    raise RuntimeError('No calibration images found in ./camera_cal/')

# Calibrate from all successfully detected boards.
shape = (im.shape[1], im.shape[0])  # (width, height), as expected by OpenCV
ret, cameraMatrix, distortionCoeffs, rvecs, tvecs = \
    cv2.calibrateCamera(objpoints, imgpoints, shape, None, None)

# Undistort every calibration image using the recovered camera model.
im_undist = [cv2.undistort(i, cameraMatrix, distortionCoeffs, None, cameraMatrix)
             for i in images]

# Visual sanity check: original (top) vs undistorted (bottom) of image #18.
plt.subplot(2, 1, 1)
plt.imshow(images[18], cmap="gray")
plt.subplot(2, 1, 2)
plt.imshow(im_undist[18], cmap="gray")
plt.show()

print('======================================================================')
print('Done')
print('======================================================================')
| [
"matplotlib.pyplot.imshow",
"cv2.imread",
"cv2.undistort",
"numpy.zeros",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"matplotlib.pyplot.subplot",
"glob.glob",
"matplotlib.pyplot.show"
] | [((358, 392), 'numpy.zeros', 'np.zeros', (['(ny * nx, 3)', 'np.float32'], {}), '((ny * nx, 3), np.float32)\n', (366, 392), True, 'import numpy as np\n'), ((406, 437), 'glob.glob', 'glob.glob', (['"""./camera_cal/*.jpg"""'], {}), "('./camera_cal/*.jpg')\n", (415, 437), False, 'import glob\n'), ((1000, 1060), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'shape', 'None', 'None'], {}), '(objpoints, imgpoints, shape, None, None)\n', (1019, 1060), False, 'import cv2\n'), ((1329, 1349), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1340, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1383), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[18]'], {'cmap': '"""gray"""'}), "(images[18], cmap='gray')\n", (1358, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1405), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1396, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1404, 1442), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_undist[18]'], {'cmap': '"""gray"""'}), "(im_undist[18], cmap='gray')\n", (1414, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1452, 1454), True, 'import matplotlib.pyplot as plt\n'), ((493, 513), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (503, 513), False, 'import cv2\n'), ((554, 590), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (566, 590), False, 'import cv2\n'), ((737, 786), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['im_gry', '(nx, ny)', 'None'], {}), '(im_gry, (nx, ny), None)\n', (762, 786), False, 'import cv2\n'), ((1163, 1231), 'cv2.undistort', 'cv2.undistort', (['i', 'cameraMatrix', 'distortionCoeffs', 'None', 'cameraMatrix'], {}), '(i, cameraMatrix, distortionCoeffs, None, cameraMatrix)\n', (1176, 1231), False, 'import cv2\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""utility functions for mindspore.scipy st tests"""
import numpy as onp
from mindspore import Tensor
import mindspore.numpy as mnp
def to_tensor(obj, dtype=None):
    """Convert *obj* to a mindspore Tensor.

    When no dtype is given, 64-bit float/int results are narrowed to their
    32-bit counterparts; otherwise the requested dtype is used unchanged.
    """
    if dtype is not None:
        return Tensor(obj, dtype)
    tensor = Tensor(obj)
    if tensor.dtype == mnp.float64:
        tensor = tensor.astype(mnp.float32)
    if tensor.dtype == mnp.int64:
        tensor = tensor.astype(mnp.int32)
    return tensor
def match_array(actual, expected, error=0):
    """Assert that *actual* matches *expected*.

    Plain ints (and, for *expected*, tuples) are promoted to numpy arrays
    first.  When ``error > 0`` the comparison is approximate to that many
    decimal places; otherwise it must be exact.
    """
    if isinstance(actual, int):
        actual = onp.asarray(actual)
    expected = onp.asarray(expected) if isinstance(expected, (int, tuple)) else expected
    if error > 0:
        onp.testing.assert_almost_equal(actual, expected, decimal=error)
    else:
        onp.testing.assert_equal(actual, expected)
| [
"numpy.testing.assert_almost_equal",
"numpy.asarray",
"mindspore.Tensor",
"numpy.testing.assert_equal"
] | [((869, 880), 'mindspore.Tensor', 'Tensor', (['obj'], {}), '(obj)\n', (875, 880), False, 'from mindspore import Tensor\n'), ((1059, 1077), 'mindspore.Tensor', 'Tensor', (['obj', 'dtype'], {}), '(obj, dtype)\n', (1065, 1077), False, 'from mindspore import Tensor\n'), ((1188, 1207), 'numpy.asarray', 'onp.asarray', (['actual'], {}), '(actual)\n', (1199, 1207), True, 'import numpy as onp\n'), ((1271, 1292), 'numpy.asarray', 'onp.asarray', (['expected'], {}), '(expected)\n', (1282, 1292), True, 'import numpy as onp\n'), ((1320, 1384), 'numpy.testing.assert_almost_equal', 'onp.testing.assert_almost_equal', (['actual', 'expected'], {'decimal': 'error'}), '(actual, expected, decimal=error)\n', (1351, 1384), True, 'import numpy as onp\n'), ((1403, 1445), 'numpy.testing.assert_equal', 'onp.testing.assert_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (1427, 1445), True, 'import numpy as onp\n')] |
#!/usr/bin/env python
import sys, os
import pandas as pd
import numpy as np
from scipy import stats as sts
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.multitest import fdrcorrection
def paule_mandel_tau(eff, var_eff, tau2_start=0, atol=1e-5, maxiter=50):
    """Estimate the between-study variance tau^2 by Paule-Mandel iteration.

    Args:
        eff: per-study effect sizes (1-D ndarray).
        var_eff: per-study sampling variances (1-D ndarray).
        tau2_start: starting value for tau^2.
        atol: absolute tolerance on the estimating equation.
        maxiter: maximum number of iterations.

    Returns:
        (tau2, converged) -- the estimate and whether iteration converged.
    """
    k = eff.shape[0]
    tau2 = tau2_start
    converged = False
    for _ in range(maxiter):
        weights = 1 / (var_eff + tau2)
        pooled = weights.dot(eff) / weights.sum(0)
        sq_resid = (eff - pooled)**2
        q_w = weights.dot(sq_resid)
        # Estimating equation: Q_w - (k - 1) = 0 at the PM solution.
        ee = q_w - (k - 1)
        if ee < 0:
            # Negative value means no between-study heterogeneity.
            tau2 = 0
            converged = 0
            break
        if np.allclose(ee, 0, atol=atol):
            converged = True
            break
        # Newton-style update of tau^2.
        tau2 += ee / (weights**2).dot(sq_resid)
    return tau2, converged
class singleStudyEffect(object):
    """Container for one study's effect size and p-value.

    In regression mode (REG=True) ``Len`` is the total sample size; in
    case/control mode it is a ``(n_controls, n_cases)`` pair whose sum
    becomes the sample size.
    """

    def __init__(self, Rho_and_P, Name, Len, REG=True):
        effect, pvalue = Rho_and_P
        self.effect = effect
        self.Pvalue = pvalue
        # A NaN effect fails the self-equality test and is flagged rejected.
        self.accepted = self.effect == self.effect
        self.Name = Name
        if REG:
            self.Len = Len
            self.ncases = None
            self.ncontrols = None
        else:
            self.ncontrols, self.ncases = Len[0], Len[1]
            self.Len = Len[0] + Len[1]
## class for meta-analysis (with cohen d)
class RE_meta_binary(object):
    """Random-effects meta-analysis of a binary (case/control) phenotype.

    Effect sizes are Cohen's d values, either supplied precomputed
    (``EFF="precomputed"`` with their variances in ``variances_from_outside``)
    or accompanied by per-study case/control counts (``EFF="D"``) from which
    sampling variances are derived.

    ``HET`` selects the heterogeneity model: "PM" uses the Paule-Mandel tau^2,
    any value starting with "FIX" is a fixed-effect model, anything else falls
    back to the DerSimonian-Laird tau^2.

    The pooled result is exposed both as attributes (``RE``, ``Pval``,
    ``conf_int``, ...) and as a one-row pandas DataFrame in ``result``.
    """

    def __init__(self, effects, Pvalues, studies, n_cases, n_controls, \
            responseName, EFF="D", variances_from_outside=False, CI=False, HET="PM"):
        self.responseName = responseName
        self.studies = studies
        self.n_cases = n_cases
        self.n_controls = n_controls
        self.singleStudyPvalues = Pvalues
        self.effects = np.array(effects, dtype=np.float64)
        self.n = float(len(studies))
        self.HET = HET
        # Only Cohen's d ("D") or externally supplied ("precomputed") effects
        # are supported.
        if (EFF.lower() != "d") and (EFF.lower() != "precomputed"):
            raise NotImplementedError("Sorry: this script works just with EFF=D or precomputed")
        if EFF.lower() != "precomputed":
            self.n_studies = [(a+b) for a,b in zip(n_cases,n_controls)]
        else:
            self.n_studies = "NULL"
        if EFF.lower() == "precomputed":
            # Per-study sampling variances come straight from the caller.
            self.vi = np.array(variances_from_outside, dtype=np.float64)
            if not CI:
                self.devs = np.sqrt( self.vi )
                # 95% confidence interval of each study's d (normal approximation).
                self.CI_of_d = [ (d-( 1.96*dv ), d+( 1.96*dv )) for d,dv in zip(self.effects, self.devs)]
            else:
                self.CI_of_d = CI
        else:
            # NOTE(review): this overwrites the n_studies list computed above
            # for the EFF="D" branch -- looks unintentional; confirm whether
            # n_studies should keep the per-study totals here.
            self.n_studies = "NULL"
            # Variance of Cohen's d derived from case/control counts.
            self.vi = np.array([(((nc+nt-1)/float(nc+nt-3)) * ((4./float(nc+nt))*(1+((eff**2.)/8.)))) for nt,nc,eff in \
                    zip(n_cases,n_controls,effects)], dtype=np.float64)
            if not CI:
                self.devs = np.sqrt(self.vi)
                self.CI_of_d = []
                for d,n1,n2 in zip(self.effects, n_controls, n_cases):
                    # Standard error of d, then a 95% normal-approximation CI.
                    SEd = np.sqrt(((n1+n2-1)/float(n1+n2-3)) * ((4./float(n1+n2))*(1+((d**2.)/8.))))
                    d_lw = d-(1.96*SEd)
                    d_up = d+(1.96*SEd)
                    self.CI_of_d += [[d_lw, d_up]]
            else:
                self.CI_of_d = CI
        # Fixed-effect (inverse-variance) weights.
        self.w = np.array([(1./float(v)) for v in self.vi], dtype=np.float64)
        # NOTE(review): np.sum over a generator relies on a builtin-sum
        # fallback that is deprecated in newer numpy versions; works but fragile.
        mu_bar = np.sum(a*b for a,b in zip(self.w, self.effects))/np.sum(self.w)
        # Cochran's Q statistic for heterogeneity.
        self.Q = np.sum(a*b for a,b in zip(self.w, [(x - mu_bar)**2 for x in self.effects]))
        H = np.sqrt(self.Q/(self.n - 1))  # NOTE(review): computed but never used
        self.I2 = np.max([0., (self.Q-(len(self.vi)-1))/float(self.Q)])
        # Between-study variance: Paule-Mandel and DerSimonian-Laird estimates.
        self.t2_PM, self.t2PM_conv = paule_mandel_tau(self.effects, self.vi)
        self.t2_DL = ((self.Q - self.n + 1) / self.scaling( self.w )) if (self.Q > (self.n-1)) else 0.
        # Random-effects weights under the requested heterogeneity model.
        if self.HET == "PM":
            self.W = [(1./float(v+self.t2_PM)) for v in self.vi]
        elif self.HET.startswith("FIX"):
            self.W = [(1./float(v)) for v in self.vi]
        else:
            self.W = [(1./float(v+self.t2_DL)) for v in self.vi]
        # Pooled effect, its uncertainty and significance.
        self.RE = self.CombinedEffect()
        self.stdErr = self.StdErrCombinedEffect(self.CombinedEffectVar())
        self.Zscore = self.CombinedEffectZScore(self.RE, self.stdErr)
        self.REvar = self.CombinedEffectVar()
        self.Pval = self.Pvalue(self.Zscore)
        self.conf_int = self.CombinedEffectConfInt(self.RE, self.stdErr)
        self.result = self.nice_shape()
    def tot_var(self, Effects, Weights):
        """Return Cochran's Q for the given effects/weights (helper, unused in __init__)."""
        Q = np.sum(Weights * [x**2 for x in Effects]) - ((np.sum(Weights*Effects)**2)/np.sum(Weights))
        return Q
    def scaling(self, W):
        """Scaling factor C used by the DerSimonian-Laird tau^2 estimator."""
        C = np.sum(W) - (np.sum([w**2 for w in W])/float(np.sum(W)))
        return C
    def tau_squared_DL(self, Q, df, C):
        """DerSimonian-Laird tau^2, truncated at zero."""
        return (Q-df)/float(C) if (Q>df) else 0.
    def CombinedEffect(self):
        """Weighted (random-effects) pooled effect size."""
        return np.sum(self.W*self.effects)/float(np.sum(self.W))
    def CombinedEffectVar(self):
        """Variance of the pooled effect (inverse of the summed weights)."""
        return 1/float(np.sum(self.W))
    def StdErrCombinedEffect(self, CVar):
        """Standard error of the pooled effect."""
        return np.sqrt(CVar)
    def CombinedEffectConfInt(self, CE, SE):
        """95% normal-approximation confidence interval (low, high)."""
        low = CE - 1.96*SE
        upp = CE + 1.96*SE
        return low, upp
    def CombinedEffectZScore(self, CE, SE):
        """Z statistic of the pooled effect."""
        return CE / float(SE)
    def Pvalue(self, Z):
        """Two-sided normal p-value for Z."""
        return 2.*(1 - sts.norm.cdf(np.abs(Z)))
    def nice_shape(self):
        """Assemble per-study and pooled results into a one-row DataFrame."""
        NS = {}
        for eff,P,study in zip(self.effects, self.singleStudyPvalues, self.studies):
            NS[str(study) + "_Effect"] = eff
            NS[str(study) + "_Pvalue"] = P
        NS["RE_Effect"] = self.RE
        NS["RE_Pvalue"] = self.Pval
        NS["RE_stdErr"] = self.stdErr
        NS["RE_conf_int"] = ";".join(list(map(str,self.conf_int)))
        NS["RE_Var"] = self.REvar
        NS["Zscore"] = self.Zscore
        NS["Tau2_DL"] = self.t2_DL
        NS["Tau2_PM"] = self.t2_PM
        NS["I2"] = self.I2
        NS = pd.DataFrame(NS, index=[self.responseName])
        return NS
## Class for meta-regression (with Fisher-Z included)
class RE_meta(object):
    """Random-effects meta-analysis of correlation coefficients.

    Input correlations are Fisher z-transformed (np.arctanh) before pooling;
    per-study variances follow the standard 1/(n-3) formula for Fisher z.
    ``het`` selects the heterogeneity model: "PM" = Paule-Mandel tau^2,
    "FIX..." = fixed effect, anything else = DerSimonian-Laird.
    Results are back-transformed to the correlation scale (np.tanh) in the
    one-row DataFrame stored in ``result``.
    """

    def __init__(self, effects, Pvalues, studies, n_studies, responseName, het="PM", REG=True):
        self.HET = het
        self.responseName = responseName
        self.studies = studies
        self.singleStudyPvalues = Pvalues
        # Fisher z-transform of the input correlations.
        self.effects = np.arctanh(np.array(effects, dtype=np.float64)) #if REG else effects
        self.n_studies = n_studies
        self.n = float(len(studies))
        # Sampling variance of Fisher z: 1/(n-3) per study.
        self.vi = np.array([(1./float(n-3)) for n in self.n_studies], dtype=np.float64)
        self.devs = np.sqrt( self.vi )
        # 95% CI of each study's z value (normal approximation).
        self.CI_of_z = [ (z-( 1.96*dv ), z+( 1.96*dv )) for z,dv in zip(self.effects, self.devs)]
        # Fixed-effect (inverse-variance) weights.
        self.w = np.array([(1./float(v)) for v in self.vi], dtype=np.float64)
        # NOTE(review): np.sum over a generator relies on a builtin-sum
        # fallback that is deprecated in newer numpy versions; works but fragile.
        mu_bar = np.sum(a*b for a,b in zip(self.w, self.effects))/np.sum(self.w)
        # Cochran's Q statistic for heterogeneity.
        self.Q = np.sum(a*b for a,b in zip(self.w, [(x - mu_bar)**2 for x in self.effects]))
        H = np.sqrt(self.Q/(self.n - 1))  # NOTE(review): computed but never used
        self.I2 = np.max([0., (self.Q-(len(self.vi)-1))/float(self.Q)])
        # Between-study variance: Paule-Mandel and DerSimonian-Laird estimates.
        self.t2_PM, self.t2PM_conv = paule_mandel_tau(self.effects, self.vi)
        self.t2_DL = ((self.Q - self.n + 1) / self.scaling( self.w )) if (self.Q > (self.n-1)) else 0.
        # Random-effects weights under the requested heterogeneity model.
        if self.HET == "PM":
            self.W = [(1./float(v+self.t2_PM)) for v in self.vi]
        elif self.HET.startswith("FIX"):
            self.W = [(1./float(v)) for v in self.vi]
        else:
            self.W = [(1./float(v+self.t2_DL)) for v in self.vi]
        # Pooled effect (on the z scale), its uncertainty and significance.
        self.RE = self.CombinedEffect()
        self.stdErr = self.StdErrCombinedEffect(self.CombinedEffectVar())
        self.Zscore = self.CombinedEffectZScore(self.RE, self.stdErr)
        self.REvar = self.CombinedEffectVar()
        self.Pval = self.Pvalue(self.Zscore)
        self.conf_int = self.CombinedEffectConfInt(self.RE, self.stdErr)
        self.result = self.nice_shape(True)
    def tot_var(self, Effects, Weights):
        """Return Cochran's Q for the given effects/weights (helper, unused in __init__)."""
        Q = np.sum(Weights * [x**2 for x in Effects]) - ((np.sum(Weights*Effects)**2)/np.sum(Weights))
        return Q
    def scaling(self, W):
        """Scaling factor C used by the DerSimonian-Laird tau^2 estimator."""
        C = np.sum(W) - (np.sum([w**2 for w in W])/float(np.sum(W)))
        return C
    def tau_squared_DL(self, Q, df, C):
        """DerSimonian-Laird tau^2, truncated at zero."""
        return (Q-df)/float(C) if (Q>df) else 0.
    def CombinedEffect(self):
        """Weighted (random-effects) pooled effect on the z scale."""
        return np.sum(self.W*self.effects)/float(np.sum(self.W))
    def CombinedEffectVar(self):
        """Variance of the pooled effect (inverse of the summed weights)."""
        return 1/float(np.sum(self.W))
    def StdErrCombinedEffect(self, CVar):
        """Standard error of the pooled effect."""
        return np.sqrt(CVar)
    def CombinedEffectConfInt(self, CE, SE):
        """95% normal-approximation confidence interval (low, high)."""
        low = CE - 1.96*SE
        upp = CE + 1.96*SE
        return low, upp
    def CombinedEffectZScore(self, CE, SE):
        """Z statistic of the pooled effect."""
        return CE / float(SE)
    def Pvalue(self, Z):
        """Two-sided normal p-value for Z."""
        return 2.*(1 - sts.norm.cdf(np.abs(Z)))
    def nice_shape(self, REG):
        """Assemble per-study and pooled results into a one-row DataFrame.

        When REG is true, z-scale quantities are mapped back to the
        correlation scale with np.tanh.
        """
        NS = {}
        for rho,ci,P,study in zip(self.effects, self.CI_of_z, self.singleStudyPvalues, self.studies):
            eff = rho if (not REG) else np.tanh(rho)
            NS[study + "_Correlation"] = eff
            NS[study + "_Pvalue"] = P
            NS[study + "_conf_int"] = ";".join(list(map(str, [np.tanh(c) for c in ci])))
        NS["RE_Correlation"] = np.tanh(self.RE)
        NS["RE_Pvalue"] = self.Pval
        # NOTE(review): applying tanh to a standard error / variance / Z score
        # is not a standard back-transform; confirm this is intentional.
        NS["RE_stdErr"] = np.tanh(self.stdErr)
        NS["RE_conf_int"] = ";".join(list(map(str, [np.tanh(c) for c in self.conf_int])))
        NS["RE_Var"] = self.REvar if (not REG) else np.tanh(self.REvar)
        NS["Zscore"] = self.Zscore if (not REG) else np.tanh(self.Zscore)
        NS["Tau2_DL"] = self.t2_DL
        NS["Tau2_PM"] = self.t2_PM
        NS["I2"] = self.I2
        NS = pd.DataFrame(NS, index=[self.responseName])
        return NS
| [
"numpy.abs",
"numpy.allclose",
"numpy.sqrt",
"numpy.tanh",
"numpy.array",
"numpy.sum",
"pandas.DataFrame"
] | [((678, 707), 'numpy.allclose', 'np.allclose', (['ee', '(0)'], {'atol': 'atol'}), '(ee, 0, atol=atol)\n', (689, 707), True, 'import numpy as np\n'), ((1728, 1763), 'numpy.array', 'np.array', (['effects'], {'dtype': 'np.float64'}), '(effects, dtype=np.float64)\n', (1736, 1763), True, 'import numpy as np\n'), ((3467, 3497), 'numpy.sqrt', 'np.sqrt', (['(self.Q / (self.n - 1))'], {}), '(self.Q / (self.n - 1))\n', (3474, 3497), True, 'import numpy as np\n'), ((4998, 5011), 'numpy.sqrt', 'np.sqrt', (['CVar'], {}), '(CVar)\n', (5005, 5011), True, 'import numpy as np\n'), ((5855, 5898), 'pandas.DataFrame', 'pd.DataFrame', (['NS'], {'index': '[self.responseName]'}), '(NS, index=[self.responseName])\n', (5867, 5898), True, 'import pandas as pd\n'), ((6504, 6520), 'numpy.sqrt', 'np.sqrt', (['self.vi'], {}), '(self.vi)\n', (6511, 6520), True, 'import numpy as np\n'), ((6894, 6924), 'numpy.sqrt', 'np.sqrt', (['(self.Q / (self.n - 1))'], {}), '(self.Q / (self.n - 1))\n', (6901, 6924), True, 'import numpy as np\n'), ((8430, 8443), 'numpy.sqrt', 'np.sqrt', (['CVar'], {}), '(CVar)\n', (8437, 8443), True, 'import numpy as np\n'), ((9124, 9140), 'numpy.tanh', 'np.tanh', (['self.RE'], {}), '(self.RE)\n', (9131, 9140), True, 'import numpy as np\n'), ((9203, 9223), 'numpy.tanh', 'np.tanh', (['self.stdErr'], {}), '(self.stdErr)\n', (9210, 9223), True, 'import numpy as np\n'), ((9570, 9613), 'pandas.DataFrame', 'pd.DataFrame', (['NS'], {'index': '[self.responseName]'}), '(NS, index=[self.responseName])\n', (9582, 9613), True, 'import pandas as pd\n'), ((2218, 2268), 'numpy.array', 'np.array', (['variances_from_outside'], {'dtype': 'np.float64'}), '(variances_from_outside, dtype=np.float64)\n', (2226, 2268), True, 'import numpy as np\n'), ((3347, 3361), 'numpy.sum', 'np.sum', (['self.w'], {}), '(self.w)\n', (3353, 3361), True, 'import numpy as np\n'), ((4460, 4505), 'numpy.sum', 'np.sum', (['(Weights * [(x ** 2) for x in Effects])'], {}), '(Weights * [(x ** 2) for x in Effects])\n', 
(4466, 4505), True, 'import numpy as np\n'), ((4607, 4616), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (4613, 4616), True, 'import numpy as np\n'), ((4817, 4846), 'numpy.sum', 'np.sum', (['(self.W * self.effects)'], {}), '(self.W * self.effects)\n', (4823, 4846), True, 'import numpy as np\n'), ((6264, 6299), 'numpy.array', 'np.array', (['effects'], {'dtype': 'np.float64'}), '(effects, dtype=np.float64)\n', (6272, 6299), True, 'import numpy as np\n'), ((6774, 6788), 'numpy.sum', 'np.sum', (['self.w'], {}), '(self.w)\n', (6780, 6788), True, 'import numpy as np\n'), ((7892, 7937), 'numpy.sum', 'np.sum', (['(Weights * [(x ** 2) for x in Effects])'], {}), '(Weights * [(x ** 2) for x in Effects])\n', (7898, 7937), True, 'import numpy as np\n'), ((8039, 8048), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (8045, 8048), True, 'import numpy as np\n'), ((8249, 8278), 'numpy.sum', 'np.sum', (['(self.W * self.effects)'], {}), '(self.W * self.effects)\n', (8255, 8278), True, 'import numpy as np\n'), ((9366, 9385), 'numpy.tanh', 'np.tanh', (['self.REvar'], {}), '(self.REvar)\n', (9373, 9385), True, 'import numpy as np\n'), ((9439, 9459), 'numpy.tanh', 'np.tanh', (['self.Zscore'], {}), '(self.Zscore)\n', (9446, 9459), True, 'import numpy as np\n'), ((2320, 2336), 'numpy.sqrt', 'np.sqrt', (['self.vi'], {}), '(self.vi)\n', (2327, 2336), True, 'import numpy as np\n'), ((2788, 2804), 'numpy.sqrt', 'np.sqrt', (['self.vi'], {}), '(self.vi)\n', (2795, 2804), True, 'import numpy as np\n'), ((4534, 4549), 'numpy.sum', 'np.sum', (['Weights'], {}), '(Weights)\n', (4540, 4549), True, 'import numpy as np\n'), ((4620, 4649), 'numpy.sum', 'np.sum', (['[(w ** 2) for w in W]'], {}), '([(w ** 2) for w in W])\n', (4626, 4649), True, 'import numpy as np\n'), ((4851, 4865), 'numpy.sum', 'np.sum', (['self.W'], {}), '(self.W)\n', (4857, 4865), True, 'import numpy as np\n'), ((4924, 4938), 'numpy.sum', 'np.sum', (['self.W'], {}), '(self.W)\n', (4930, 4938), True, 'import numpy as np\n'), ((7966, 7981), 
'numpy.sum', 'np.sum', (['Weights'], {}), '(Weights)\n', (7972, 7981), True, 'import numpy as np\n'), ((8052, 8081), 'numpy.sum', 'np.sum', (['[(w ** 2) for w in W]'], {}), '([(w ** 2) for w in W])\n', (8058, 8081), True, 'import numpy as np\n'), ((8283, 8297), 'numpy.sum', 'np.sum', (['self.W'], {}), '(self.W)\n', (8289, 8297), True, 'import numpy as np\n'), ((8356, 8370), 'numpy.sum', 'np.sum', (['self.W'], {}), '(self.W)\n', (8362, 8370), True, 'import numpy as np\n'), ((8907, 8919), 'numpy.tanh', 'np.tanh', (['rho'], {}), '(rho)\n', (8914, 8919), True, 'import numpy as np\n'), ((4506, 4531), 'numpy.sum', 'np.sum', (['(Weights * Effects)'], {}), '(Weights * Effects)\n', (4512, 4531), True, 'import numpy as np\n'), ((4652, 4661), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (4658, 4661), True, 'import numpy as np\n'), ((5273, 5282), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5279, 5282), True, 'import numpy as np\n'), ((7938, 7963), 'numpy.sum', 'np.sum', (['(Weights * Effects)'], {}), '(Weights * Effects)\n', (7944, 7963), True, 'import numpy as np\n'), ((8084, 8093), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (8090, 8093), True, 'import numpy as np\n'), ((8705, 8714), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (8711, 8714), True, 'import numpy as np\n'), ((9276, 9286), 'numpy.tanh', 'np.tanh', (['c'], {}), '(c)\n', (9283, 9286), True, 'import numpy as np\n'), ((9065, 9075), 'numpy.tanh', 'np.tanh', (['c'], {}), '(c)\n', (9072, 9075), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pickle
## plot conf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 7})
width = 8.5/2.54
height = width*(3/4)
###
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
plot_path = './'
male_rarity, female_rarity = pickle.load(open(script_dir+'/plot_pickles/raritys.p', 'rb'))
## Load true fits
real_fit_male = pd.read_csv(script_dir+'/plot_pickles/real_male_fit.csv')
real_fit_female = pd.read_csv(script_dir+'/plot_pickles/real_female_fit.csv')
coef_names_male = real_fit_male['Unnamed: 0']
real_coef_male = real_fit_male['Estimate']
real_coef_male_dict = {key : real_coef_male.iloc[i]\
for i, key in enumerate(real_fit_male['Unnamed: 0'])}
coef_names_female = real_fit_female['Unnamed: 0']
real_coef_female = real_fit_female['Estimate']
## Load DPVI fits
epsilons = [0.74, 1.99, 3.92]
epsilons = np.array(epsilons)
n_runs = 100
# Load DP results
syn_dpvi_coef_female_dict = pickle.load(open(script_dir+'/plot_pickles/female_coef_dict.p', 'rb'))
syn_dpvi_coef_male_dict = pickle.load(open(script_dir+'/plot_pickles/male_coef_dict.p', 'rb'))
syn_pb_coef_female_dict = pickle.load(open(script_dir+'/plot_pickles/pb_female_coef_dict.p', 'rb'))
syn_pb_coef_male_dict = pickle.load(open(script_dir+'/plot_pickles/pb_male_coef_dict.p', 'rb'))
## Pick significant coefficients
p_value = 0.025
significant_coef_names_male = coef_names_male[(real_fit_male['Pr(>|z|)']<p_value).values]
significant_coef_names_female = coef_names_female[(real_fit_female['Pr(>|z|)']<p_value).values]
for key, value in significant_coef_names_male.items():
if 'shp' in value:
significant_coef_names_male.pop(key)
for key, value in significant_coef_names_female.items():
if 'shp' in value:
significant_coef_names_female.pop(key)
real_significant_male = real_coef_male[(real_fit_male['Pr(>|z|)']<p_value).values]
real_significant_male = pd.DataFrame(real_significant_male[significant_coef_names_male.index].values[\
np.newaxis],
columns=significant_coef_names_male.values)
real_significant_female = real_coef_female[(real_fit_female['Pr(>|z|)']<p_value).values]
real_significant_female = pd.DataFrame(real_significant_female[significant_coef_names_female.index].values[\
np.newaxis],
columns=significant_coef_names_female.values)
for key in real_significant_female.columns:
if 'shp' in key:
real_significant_female.pop(key)
# Cleaner coef_names
male_names = []
for value in significant_coef_names_male.values:
if 'lex' in value:
male_names.append('lex.dur : '+value[value.find('))')+2:])
elif 'DM' in value:
male_names.append(value.replace('DM.type', 'DM.type : '))
elif 'shp' in value:
male_names.append(value.replace('factor(shp)', 'shp'))
elif '.i.cancer' in value:
male_names.append(value.replace('.i.cancer', '.i.cancer : '))
elif 'C10' in value:
male_names.append(value.replace('TRUE', ''))
elif 'per' in value:
male_names.append('per.cat')
female_names = []
for value in significant_coef_names_female.values:
if 'lex' in value:
female_names.append('lex.dur : '+value[value.find('))')+2:])
elif 'DM' in value:
female_names.append(value.replace('DM.type', 'DM.type : '))
elif 'shp' in value:
female_names.append(value.replace('factor(shp)', 'shp'))
elif '.i.cancer' in value:
female_names.append(value.replace('.i.cancer', '.i.cancer : '))
elif 'C10' in value:
female_names.append(value.replace('TRUE', ''))
elif 'per' in value:
female_names.append('per.cat')
# Significant coefs for DPVI
# males
syn_dpvi_significant_male_dict = {eps : syn_dpvi_coef_male_dict[eps][significant_coef_names_male] \
for eps in epsilons}
syn_dpvi_significant_male_mean = {eps : syn_dpvi_significant_male_dict[eps].mean(0) \
for eps in epsilons}
syn_dpvi_significant_male_sem = {eps : syn_dpvi_significant_male_dict[eps].std(0)/np.sqrt(n_runs) \
for eps in epsilons}
# females
syn_dpvi_significant_female_dict = {eps : syn_dpvi_coef_female_dict[eps][significant_coef_names_female] \
for eps in epsilons}
syn_dpvi_significant_female_mean = {eps : syn_dpvi_significant_female_dict[eps].mean(0) \
for eps in epsilons}
syn_dpvi_significant_female_sem = {eps : syn_dpvi_significant_female_dict[eps].std(0)/np.sqrt(n_runs) \
for eps in epsilons}
from collections import OrderedDict as od
female_significant_rarity = od({key:female_rarity[key] for key in list(significant_coef_names_female)})
female_significant_rarity_list = list(female_significant_rarity.values())
real_significant_female = real_significant_female[list(significant_coef_names_female)]
male_significant_rarity = od({key:male_rarity[key] for key in list(significant_coef_names_male)})
male_significant_rarity_list = list(male_significant_rarity.values())
real_significant_male = real_significant_male[list(significant_coef_names_male)]
## Join male and female
inverse = True
#aggregation = "median"
aggregation = "mean"
fig, axis = plt.subplots(figsize=(width, height))
rarity_list = female_significant_rarity_list+male_significant_rarity_list
rarity = np.sort(rarity_list)
groups = np.split(np.array(sorted(rarity_list)), 4)
for eps in epsilons:
diff_female = np.abs(syn_dpvi_significant_female_dict[eps].values - \
real_significant_female.values.flatten())
diff_male = np.abs(syn_dpvi_significant_male_dict[eps].values - \
real_significant_male.values.flatten())
diff = np.concatenate([diff_female, diff_male], axis=1)
diff = diff[:, np.argsort(rarity_list)]
group_accs_mean = np.mean(np.mean(np.split(diff, len(groups), axis=1), axis=-1), axis=1)
group_accs_sem = np.std(np.mean(np.split(diff, len(groups), axis=1), axis=-1), axis=1)/np.sqrt(n_runs)
axis.errorbar(np.min(groups, 1), group_accs_mean, yerr=group_accs_sem, label='$\epsilon={}$'.format(round(eps, 0)))
# plot 1/n effect
ax2 = axis.twinx()
if inverse==False:
if aggregation == "median":
asymptotic_error = 1./np.median(groups, axis=1)
if aggregation == "mean":
asymptotic_error = 1./np.mean(groups, axis=1)
else:
if aggregation == "median":
asymptotic_error = np.median(1./np.array(groups), axis=1)
if aggregation == "mean":
asymptotic_error = np.mean(1./np.array(groups), axis=1)
ax2.plot(np.min(groups, 1), asymptotic_error, linestyle="dashed", color="black", label="1/n")
ax2.set_ylabel("Inverse group size")
##
axis.set_xlabel('Number of cases')
axis.set_ylabel('Error')
axis.set_xticks(np.min(np.split(rarity, len(groups)), axis=1))
axis.set_xticklabels(["[{},{})".format(groups[i][0], groups[i+1][0]) if i<3 else\
"[{},{})".format(groups[i][0], groups[i][-1]) for i in range(len(groups))], fontsize=6)
h1, l1 = axis.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
axis.legend(h1+h2, l1+l2)
#axis.legend()
axis.set_title("ARD")
if inverse==False:
plt.savefig(plot_path + 'rarity_opt_{}.pdf'.format(aggregation), format='pdf', bbox_inches='tight')
else:
plt.savefig(plot_path + 'rarity_opt_inverse_{}.pdf'.format(aggregation), format='pdf', bbox_inches='tight')
#plt.show()
plt.savefig('rarity_opt_{}.png'.format(aggregation), dpi=300, bbox_inches='tight')
plt.close()
| [
"numpy.mean",
"numpy.median",
"numpy.sqrt",
"pandas.read_csv",
"numpy.sort",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"numpy.argsort",
"numpy.concatenate",
"numpy.min",
"pandas.DataFrame",
"os.path.abspath",
"matplotlib.pyplot.subplots"
] | [((99, 136), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 7}"], {}), "({'font.size': 7})\n", (118, 136), True, 'import matplotlib.pyplot as plt\n'), ((390, 449), 'pandas.read_csv', 'pd.read_csv', (["(script_dir + '/plot_pickles/real_male_fit.csv')"], {}), "(script_dir + '/plot_pickles/real_male_fit.csv')\n", (401, 449), True, 'import pandas as pd\n'), ((466, 527), 'pandas.read_csv', 'pd.read_csv', (["(script_dir + '/plot_pickles/real_female_fit.csv')"], {}), "(script_dir + '/plot_pickles/real_female_fit.csv')\n", (477, 527), True, 'import pandas as pd\n'), ((889, 907), 'numpy.array', 'np.array', (['epsilons'], {}), '(epsilons)\n', (897, 907), True, 'import numpy as np\n'), ((1909, 2047), 'pandas.DataFrame', 'pd.DataFrame', (['real_significant_male[significant_coef_names_male.index].values[np.newaxis]'], {'columns': 'significant_coef_names_male.values'}), '(real_significant_male[significant_coef_names_male.index].\n values[np.newaxis], columns=significant_coef_names_male.values)\n', (1921, 2047), True, 'import pandas as pd\n'), ((2176, 2320), 'pandas.DataFrame', 'pd.DataFrame', (['real_significant_female[significant_coef_names_female.index].values[np.newaxis]'], {'columns': 'significant_coef_names_female.values'}), '(real_significant_female[significant_coef_names_female.index].\n values[np.newaxis], columns=significant_coef_names_female.values)\n', (2188, 2320), True, 'import pandas as pd\n'), ((4941, 4978), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (4953, 4978), True, 'import matplotlib.pyplot as plt\n'), ((5062, 5082), 'numpy.sort', 'np.sort', (['rarity_list'], {}), '(rarity_list)\n', (5069, 5082), True, 'import numpy as np\n'), ((7092, 7103), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7101, 7103), True, 'import matplotlib.pyplot as plt\n'), ((219, 244), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (234, 244), 
False, 'import os\n'), ((5393, 5441), 'numpy.concatenate', 'np.concatenate', (['[diff_female, diff_male]'], {'axis': '(1)'}), '([diff_female, diff_male], axis=1)\n', (5407, 5441), True, 'import numpy as np\n'), ((6194, 6211), 'numpy.min', 'np.min', (['groups', '(1)'], {}), '(groups, 1)\n', (6200, 6211), True, 'import numpy as np\n'), ((3865, 3880), 'numpy.sqrt', 'np.sqrt', (['n_runs'], {}), '(n_runs)\n', (3872, 3880), True, 'import numpy as np\n'), ((4245, 4260), 'numpy.sqrt', 'np.sqrt', (['n_runs'], {}), '(n_runs)\n', (4252, 4260), True, 'import numpy as np\n'), ((5661, 5676), 'numpy.sqrt', 'np.sqrt', (['n_runs'], {}), '(n_runs)\n', (5668, 5676), True, 'import numpy as np\n'), ((5692, 5709), 'numpy.min', 'np.min', (['groups', '(1)'], {}), '(groups, 1)\n', (5698, 5709), True, 'import numpy as np\n'), ((5458, 5481), 'numpy.argsort', 'np.argsort', (['rarity_list'], {}), '(rarity_list)\n', (5468, 5481), True, 'import numpy as np\n'), ((5904, 5929), 'numpy.median', 'np.median', (['groups'], {'axis': '(1)'}), '(groups, axis=1)\n', (5913, 5929), True, 'import numpy as np\n'), ((5981, 6004), 'numpy.mean', 'np.mean', (['groups'], {'axis': '(1)'}), '(groups, axis=1)\n', (5988, 6004), True, 'import numpy as np\n'), ((6074, 6090), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (6082, 6090), True, 'import numpy as np\n'), ((6159, 6175), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (6167, 6175), True, 'import numpy as np\n')] |
import argparse
import time
import numpy as np
import networkx as nx
import json
from sklearn.utils import check_random_state
import zmq
from . import agglo, agglo2, features, classify, evaluate as ev
# constants
# labels for machine learning libs
MERGE_LABEL = 0
SEPAR_LABEL = 1
class Solver:
"""ZMQ-based interface between proofreading clients and gala RAGs.
This docstring is intentionally incomplete until the interface settles.
Parameters
----------
labels : array-like of int, shape (..., P, R, C)
The fragment map.
image : array-like of float, shape (..., P, R, C[, Ch]), optional
The image, from which to compute intensity features.
feature_manager : gala.features.Manager object
Object exposing the feature manager interface, to compute the
feature caches and features of the RAG.
address : string, optional
URL of client.
relearn_threshold : int, optional
Minimum batch size to trigger a new learning round.
config_file : string, optional
A JSON file specifying the URLs of the Solver, Client, and ID service.
See `Solver._configure_from_file` for the file specification.
Attributes
----------
This section intentionally left blank.
"""
def __init__(self, labels, image=np.array([]),
feature_manager=features.default.snemi3d(),
address=None, relearn_threshold=20,
config_file=None):
self.labels = labels
self.image = image
self.feature_manager = feature_manager
self._build_rag()
config_address, id_address = self._configure_from_file(config_file)
self.id_service = self._connect_to_id_service(id_address)
self._connect_to_client(address or config_address)
self.history = []
self.separate = []
self.features = []
self.targets = []
self.relearn_threshold = relearn_threshold
self.relearn_trigger = relearn_threshold
self.recently_solved = True
def _build_rag(self):
"""Build the region-adjacency graph from the label image."""
self.rag = agglo.Rag(self.labels, self.image,
feature_manager=self.feature_manager,
normalize_probabilities=True)
self.original_rag = self.rag.copy()
def _configure_from_file(self, filename):
"""Get all configuration parameters from a JSON file.
The file specification is currently in flux, but looks like:
```
{'id_service_url': 'tcp://localhost:5555',
'client_url': 'tcp://*:9001',
'solver_url': 'tcp://localhost:9001'}
```
Parameters
----------
filename : str
The input filename.
Returns
-------
address : str
The URL to bind a ZMQ socket to.
id_address : str
The URL to bind an ID service to
"""
if filename is None:
return None, None
with open(filename, 'r') as fin:
config = json.load(fin)
return (config.get('client_url', None),
config.get('id_service_url', None))
def _connect_to_client(self, address):
self.comm = zmq.Context().socket(zmq.PAIR)
self.comm.bind(address)
def _connect_to_id_service(self, url):
if url is not None:
service_comm = zmq.Context().socket(zmq.REQ)
service_comm.connect(url)
def get_ids(count):
print('requesting %i ids...' % count)
service_comm.send_json({'count': count})
print('receiving %i ids...' % count)
received = service_comm.recv_json()
id_range = received['begin'], received['end']
return id_range
else:
def get_ids(count):
start = np.max(self.labels) + 2
return start, start + count
return get_ids
def send_segmentation(self):
"""Send a segmentation to ZMQ as a fragment-to-segment lookup table.
The format of the lookup table (LUT) is specified in the BigCat
wiki [1]_.
References
----------
.. [1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication
"""
if len(self.targets) < self.relearn_threshold:
print('server has insufficient data to resolve')
return
self.relearn() # correct way to do it is to implement RAG splits
self.rag.agglomerate(0.5)
self.recently_solved = True
dst_tree = [int(i) for i in self.rag.tree.get_map(0.5)]
unique = set(dst_tree)
start, end = self.id_service(len(unique))
remap = dict(zip(unique, range(start, end)))
dst = list(map(remap.__getitem__, dst_tree))
src = list(range(len(dst)))
message = {'type': 'fragment-segment-lut',
'data': {'fragments': src, 'segments': dst}}
print('server sending:', message)
try:
self.comm.send_json(message, flags=zmq.NOBLOCK)
except zmq.error.Again:
return
def listen(self, send_every=None):
"""Listen to ZMQ port for instructions and data.
The instructions conform to the proofreading protocol defined in the
BigCat wiki [1]_.
Parameters
----------
send_every : int or float, optional
Send a new segmentation every `send_every` seconds.
References
----------
.. [1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication
"""
start_time = time.time()
recv_flags = zmq.NOBLOCK
while True:
if send_every is not None:
elapsed_time = time.time() - start_time
if elapsed_time > send_every:
print('server resolving')
self.send_segmentation()
start_time = time.time()
try:
if recv_flags == zmq.NOBLOCK:
print('server receiving no blocking...')
else:
print('server receiving blocking...')
message = self.comm.recv_json(flags=recv_flags)
print('server received:', message)
recv_flags = zmq.NOBLOCK
except zmq.error.Again: # no message received
recv_flags = zmq.NULL
print('server: no message received in time')
if not self.recently_solved:
print('server resolving')
self.send_segmentation()
continue
command = message['type']
data = message['data']
if command == 'merge':
segments = data['fragments']
self.learn_merge(segments)
elif command == 'separate':
fragment = data['fragment']
separate_from = data['from']
self.learn_separation(fragment, separate_from)
elif command == 'request':
what = data['what']
if what == 'fragment-segment-lut':
self.send_segmentation()
elif command == 'stop':
return
else:
print('command %s not recognized.' % command)
continue
def learn_merge(self, segments):
"""Learn that a pair of segments should be merged.
Parameters
----------
segments : tuple of int
A pair of segment identifiers.
"""
segments = set(self.rag.tree.highest_ancestor(s) for s in segments)
# ensure the segments are ordered such that every subsequent
# pair shares an edge
ordered = nx.dfs_preorder_nodes(nx.subgraph(self.rag, segments))
s0 = next(ordered)
for s1 in ordered:
self.features.append(self.feature_manager(self.rag, s0, s1))
self.history.append((s0, s1))
s0 = self.rag.merge_nodes(s0, s1)
self.targets.append(MERGE_LABEL)
self.recently_solved = False or len(set(self.targets)) < 2
def learn_separation(self, fragment, separate_from):
"""Learn that a pair of fragments should never be in the same segment.
Parameters
----------
fragments : tuple of int
A pair of fragment identifiers.
"""
f0 = fragment
if not separate_from:
separate_from = self.original_rag.neighbors(f0)
s0 = self.rag.tree.highest_ancestor(f0)
for f1 in separate_from:
if self.rag.boundary_body in (f0, f1):
continue
s1 = self.rag.tree.highest_ancestor(f1)
if self.rag.has_edge(s0, s1):
self.features.append(self.feature_manager(self.rag, s0, s1))
self.targets.append(SEPAR_LABEL)
if self.original_rag.has_edge(f0, f1):
self.features.append(self.feature_manager(self.original_rag,
f0, f1))
self.targets.append(SEPAR_LABEL)
self.separate.append((f0, f1))
self.recently_solved = False or len(set(self.targets)) < 2
def relearn(self):
"""Learn a new merge policy using data gathered so far.
This resets the state of the RAG to contain only the merges and
separations received over the course of its history.
"""
clf = classify.DefaultRandomForest().fit(self.features, self.targets)
self.policy = agglo.classifier_probability(self.feature_manager, clf)
self.rag = self.original_rag.copy()
self.rag.merge_priority_function = self.policy
self.rag.rebuild_merge_queue()
for i, (s0, s1) in enumerate(self.separate):
self.rag.node[s0]['exclusions'].add(i)
self.rag.node[s1]['exclusions'].add(i)
def proofread(fragments, true_segmentation, host='tcp://localhost', port=5556,
num_operations=10, mode='fast paint', stop_when_finished=False,
request_seg=True, random_state=None):
"""Simulate a proofreader by sending and receiving messages to a Solver.
Parameters
----------
fragments : array of int
The initial segmentation to be proofread.
true_segmentation : array of int
The target segmentation. Should be a superset of `fragments`.
host : string
The host to serve ZMQ commands to.
port : int
Port on which to connect ZMQ.
num_operations : int, optional
How many proofreading operations to perform before returning.
mode : string, optional
The mode with which to simulate proofreading.
stop_when_finished : bool, optional
Send the solver a "stop" action when done proofreading. Useful
when running tests so we don't intend to continue proofreading.
random_state : None or int or numpy.RandomState instance, optional
Fix the random state for proofreading.
Returns
-------
lut : tuple of array-like of int
A look-up table from fragments (first array) to segments
(second array), obtained by requesting it from the Solver after
initial proofreading simulation.
"""
true = agglo2.best_segmentation(fragments, true_segmentation)
base_graph = agglo2.fast_rag(fragments)
comm = zmq.Context().socket(zmq.PAIR)
comm.connect(host + ':' + str(port))
ctable = ev.contingency_table(fragments, true).tocsc()
true_labels = np.unique(true)
random = check_random_state(random_state)
random.shuffle(true_labels)
for _, label in zip(range(num_operations), true_labels):
time.sleep(3)
components = [int(i) for i in ctable.getcol(int(label)).indices]
merge_msg = {'type': 'merge', 'data': {'fragments': components}}
print('proofreader sends:', merge_msg)
comm.send_json(merge_msg)
for fragment in components:
others = [int(neighbor) for neighbor in base_graph[fragment]
if neighbor not in components]
if not others:
continue
split_msg = {'type': 'separate',
'data': {'fragment': int(fragment), 'from': others}}
print('proofreader sends:', split_msg)
comm.send_json(split_msg)
if request_seg: # if no request, assume server sends periodic updates
req_msg = {'type': 'request', 'data': {'what': 'fragment-segment-lut'}}
print('proofreader sends:', req_msg)
comm.send_json(req_msg)
print('proofreader receiving...')
response = comm.recv_json()
print('proofreader received:', response)
src = response['data']['fragments']
dst = response['data']['segments']
if stop_when_finished:
stop_msg = {'type': 'stop', 'data': {}}
print('proofreader sends: ', stop_msg)
comm.send_json(stop_msg)
return src, dst
def main():
parser = argparse.ArgumentParser('gala-serve')
parser.add_argument('-f', '--config-file', help='JSON configuration file')
parser.add_argument('input_file', help='Input image file')
parser.add_argument('-F', '--fragment-group',
default='volumes/labels/fragments',
help='Group path in HDF file for fragments')
parser.add_argument('-p', '--membrane-probabilities',
default='volumes/membrane',
help='Group path in HDF file for membrane prob map')
args = parser.parse_args()
from . import imio
frags, probs = imio.read_cremi(args.input_file,
[args.fragment_group,
args.membrane_probabilities])
solver = Solver(frags, probs, config_file=args.config_file)
solver.listen()
| [
"sklearn.utils.check_random_state",
"numpy.unique",
"argparse.ArgumentParser",
"zmq.Context",
"time.sleep",
"networkx.subgraph",
"numpy.max",
"numpy.array",
"json.load",
"time.time"
] | [((11729, 11744), 'numpy.unique', 'np.unique', (['true'], {}), '(true)\n', (11738, 11744), True, 'import numpy as np\n'), ((11758, 11790), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (11776, 11790), False, 'from sklearn.utils import check_random_state\n'), ((13187, 13224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""gala-serve"""'], {}), "('gala-serve')\n", (13210, 13224), False, 'import argparse\n'), ((1311, 1323), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1319, 1323), True, 'import numpy as np\n'), ((5771, 5782), 'time.time', 'time.time', ([], {}), '()\n', (5780, 5782), False, 'import time\n'), ((11892, 11905), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (11902, 11905), False, 'import time\n'), ((3105, 3119), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (3114, 3119), False, 'import json\n'), ((7953, 7984), 'networkx.subgraph', 'nx.subgraph', (['self.rag', 'segments'], {}), '(self.rag, segments)\n', (7964, 7984), True, 'import networkx as nx\n'), ((11580, 11593), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (11591, 11593), False, 'import zmq\n'), ((3284, 3297), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3295, 3297), False, 'import zmq\n'), ((3446, 3459), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3457, 3459), False, 'import zmq\n'), ((3927, 3946), 'numpy.max', 'np.max', (['self.labels'], {}), '(self.labels)\n', (3933, 3946), True, 'import numpy as np\n'), ((5906, 5917), 'time.time', 'time.time', ([], {}), '()\n', (5915, 5917), False, 'import time\n'), ((6101, 6112), 'time.time', 'time.time', ([], {}), '()\n', (6110, 6112), False, 'import time\n')] |
import codecs
from os import replace
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from scipy import sparse, stats
from sklearn.model_selection import train_test_split
def create_validation_dataset(test: np.ndarray, val_size: float, random_state: int) -> Tuple[np.ndarray, np.ndarray]:
users = test[:, 0]
items = test[:, 1]
ratings = test[:, 2]
val = []
test = []
for user in set(users):
indices = users == user
pos_items = items[indices]
val_items = np.random.RandomState(random_state).choice(pos_items, int(val_size*len(pos_items)), replace=False)
test_items = np.setdiff1d(pos_items, val_items)
for val_item in val_items:
item_indices = (items == val_item) & (users == user)
val_rating = int(ratings[item_indices])
val.append([user, val_item, val_rating])
for test_item in test_items:
item_indices = (items == test_item) & (users == user)
test_rating = int(ratings[item_indices])
test.append([user, test_item, test_rating])
val = np.array(val)
test = np.array(test)
return test, val
def preprocess_dataset(data: str, threshold: int = 4, alpha: float = 0.5, beta: float = 0.5) -> Tuple:
"""Load and Preprocess datasets."""
# load dataset.
if data == 'yahoo':
col = {0: 'user', 1: 'item', 2: 'rate'}
with codecs.open(f'../data/yahoo/train.txt', 'r', 'utf-8', errors='ignore') as f:
data_train = pd.read_csv(f, delimiter='\t', header=None)
data_train.rename(columns=col, inplace=True)
with codecs.open(f'../data/yahoo/test.txt', 'r', 'utf-8', errors='ignore') as f:
data_test = pd.read_csv(f, delimiter='\t', header=None)
data_test.rename(columns=col, inplace=True)
data_train.user, data_train.item = data_train.user - 1, data_train.item - 1
data_test.user, data_test.item = data_test.user - 1, data_test.item - 1
elif data == 'coat':
cols = {'level_0': 'user', 'level_1': 'item', 2: 'rate', 0: 'rate'}
with codecs.open(f'../data/coat/train.ascii', 'r', 'utf-8', errors='ignore') as f:
data_train = pd.read_csv(f, delimiter=' ', header=None)
data_train = data_train.stack().reset_index().rename(columns=cols)
data_train = data_train[data_train.rate != 0].reset_index(drop=True)
with codecs.open(f'../data/coat/test.ascii', 'r', 'utf-8', errors='ignore') as f:
data_test = pd.read_csv(f, delimiter=' ', header=None)
data_test = data_test.stack().reset_index().rename(columns=cols)
data_test = data_test[data_test.rate != 0].reset_index(drop=True)
num_users, num_items = max(data_train.user.max()+1, data_test.user.max()+1), max(data_train.item.max()+1, data_test.item.max()+1)
# binalize rating.
if data in ['yahoo', 'coat']:
data_train.rate[data_train.rate < threshold] = 0
data_train.rate[data_train.rate >= threshold] = 1
# binalize rating.
data_test.rate[data_test.rate < threshold] = 0
data_test.rate[data_test.rate >= threshold] = 1
print(data_train)
print(data_test)
# split data to train-val-test
train, test = data_train.values, data_test.values
train, val = create_validation_dataset(train, val_size=0.3, random_state=12345)
# train data freq
item_freq = np.zeros(num_items, dtype=int)
for ss in train:
if ss[2] == 1:
item_freq[int(ss[1])] += 1
# for training, only tr's ratings frequency used
pscore = (item_freq / item_freq.max()) ** alpha
inv_pscore = (1-(item_freq / item_freq.max())) ** beta
# for testing, we use validation data freq
for ss in val:
if ss[2] == 1:
item_freq[int(ss[1])] += 1
# the information of test data is not used (becuase it is real-world environment)
item_freq = item_freq**1.5 # pop^{(1+2)/2} gamma = 2
# creating training data
train = train[train[:, 2] == 1, :2]
all_data = pd.DataFrame(
np.zeros((num_users, num_items))).stack().reset_index()
all_data = all_data.values[:, :2]
unlabeled_data = np.array(
list(set(map(tuple, all_data)) - set(map(tuple, train))), dtype=int)
train = np.r_[np.c_[train, np.ones(train.shape[0])],
np.c_[unlabeled_data, np.zeros(unlabeled_data.shape[0])]]
# save datasets
path_data = Path(f'../data/{data}')
point_path = path_data / f'point_{alpha}_{beta}'
point_path.mkdir(parents=True, exist_ok=True)
# pointwise
np.save(file=point_path / 'train.npy', arr=train.astype(np.int))
np.save(file=point_path / 'val.npy', arr=val.astype(np.int))
np.save(file=point_path / 'test.npy', arr=test.astype(np.int))
np.save(file=point_path / 'pscore.npy', arr=pscore)
np.save(file=point_path / 'inv_pscore.npy', arr=inv_pscore)
np.save(file=point_path / 'item_freq.npy', arr=item_freq) # for testing
| [
"numpy.ones",
"pandas.read_csv",
"pathlib.Path",
"numpy.array",
"numpy.zeros",
"numpy.setdiff1d",
"numpy.random.RandomState",
"codecs.open",
"numpy.save"
] | [((1168, 1181), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (1176, 1181), True, 'import numpy as np\n'), ((1193, 1207), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (1201, 1207), True, 'import numpy as np\n'), ((3506, 3536), 'numpy.zeros', 'np.zeros', (['num_items'], {'dtype': 'int'}), '(num_items, dtype=int)\n', (3514, 3536), True, 'import numpy as np\n'), ((4536, 4559), 'pathlib.Path', 'Path', (['f"""../data/{data}"""'], {}), "(f'../data/{data}')\n", (4540, 4559), False, 'from pathlib import Path\n'), ((4885, 4936), 'numpy.save', 'np.save', ([], {'file': "(point_path / 'pscore.npy')", 'arr': 'pscore'}), "(file=point_path / 'pscore.npy', arr=pscore)\n", (4892, 4936), True, 'import numpy as np\n'), ((4941, 5000), 'numpy.save', 'np.save', ([], {'file': "(point_path / 'inv_pscore.npy')", 'arr': 'inv_pscore'}), "(file=point_path / 'inv_pscore.npy', arr=inv_pscore)\n", (4948, 5000), True, 'import numpy as np\n'), ((5005, 5062), 'numpy.save', 'np.save', ([], {'file': "(point_path / 'item_freq.npy')", 'arr': 'item_freq'}), "(file=point_path / 'item_freq.npy', arr=item_freq)\n", (5012, 5062), True, 'import numpy as np\n'), ((705, 739), 'numpy.setdiff1d', 'np.setdiff1d', (['pos_items', 'val_items'], {}), '(pos_items, val_items)\n', (717, 739), True, 'import numpy as np\n'), ((1479, 1549), 'codecs.open', 'codecs.open', (['f"""../data/yahoo/train.txt"""', '"""r"""', '"""utf-8"""'], {'errors': '"""ignore"""'}), "(f'../data/yahoo/train.txt', 'r', 'utf-8', errors='ignore')\n", (1490, 1549), False, 'import codecs\n'), ((1581, 1624), 'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': '"""\t"""', 'header': 'None'}), "(f, delimiter='\\t', header=None)\n", (1592, 1624), True, 'import pandas as pd\n'), ((1695, 1764), 'codecs.open', 'codecs.open', (['f"""../data/yahoo/test.txt"""', '"""r"""', '"""utf-8"""'], {'errors': '"""ignore"""'}), "(f'../data/yahoo/test.txt', 'r', 'utf-8', errors='ignore')\n", (1706, 1764), False, 'import codecs\n'), ((1795, 1838), 
'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': '"""\t"""', 'header': 'None'}), "(f, delimiter='\\t', header=None)\n", (1806, 1838), True, 'import pandas as pd\n'), ((585, 620), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (606, 620), True, 'import numpy as np\n'), ((2175, 2246), 'codecs.open', 'codecs.open', (['f"""../data/coat/train.ascii"""', '"""r"""', '"""utf-8"""'], {'errors': '"""ignore"""'}), "(f'../data/coat/train.ascii', 'r', 'utf-8', errors='ignore')\n", (2186, 2246), False, 'import codecs\n'), ((2278, 2320), 'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': '""" """', 'header': 'None'}), "(f, delimiter=' ', header=None)\n", (2289, 2320), True, 'import pandas as pd\n'), ((2494, 2564), 'codecs.open', 'codecs.open', (['f"""../data/coat/test.ascii"""', '"""r"""', '"""utf-8"""'], {'errors': '"""ignore"""'}), "(f'../data/coat/test.ascii', 'r', 'utf-8', errors='ignore')\n", (2505, 2564), False, 'import codecs\n'), ((2595, 2637), 'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': '""" """', 'header': 'None'}), "(f, delimiter=' ', header=None)\n", (2606, 2637), True, 'import pandas as pd\n'), ((4399, 4422), 'numpy.ones', 'np.ones', (['train.shape[0]'], {}), '(train.shape[0])\n', (4406, 4422), True, 'import numpy as np\n'), ((4463, 4496), 'numpy.zeros', 'np.zeros', (['unlabeled_data.shape[0]'], {}), '(unlabeled_data.shape[0])\n', (4471, 4496), True, 'import numpy as np\n'), ((4166, 4198), 'numpy.zeros', 'np.zeros', (['(num_users, num_items)'], {}), '((num_users, num_items))\n', (4174, 4198), True, 'import numpy as np\n')] |
import numpy as np
class Conv1D():
    """A 1D convolution layer implemented with plain NumPy loops.

    Input/output layout is (batch, channel, width).  Forward caches the
    input so that ``backward`` can compute both weight and input gradients.
    """
    def __init__(self, in_channel, out_channel, kernel_size, stride,
                 weight_init_fn=None, bias_init_fn=None):
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.stride = stride
        # W has shape (out_channel, in_channel, kernel_size); default init is N(0, 1).
        if weight_init_fn is None:
            self.W = np.random.normal(0, 1.0, (out_channel, in_channel, kernel_size))
        else:
            self.W = weight_init_fn(out_channel, in_channel, kernel_size)
        # b has shape (out_channel,); default init is all zeros.
        if bias_init_fn is None:
            self.b = np.zeros(out_channel)
        else:
            self.b = bias_init_fn(out_channel)
        # Gradient buffers.  NOTE(review): backward() accumulates into dW
        # rather than overwriting it — presumably the training loop zeroes
        # it between steps; confirm before reusing this layer elsewhere.
        self.dW = np.zeros(self.W.shape)
        self.db = np.zeros(self.b.shape)
        self.x = None   # input cached by forward(), consumed by backward()
        self.gg = None  # input gradient produced by the last backward()
    def __call__(self, x):
        return self.forward(x)
    def forward(self, x):
        """Convolve x (batch, in_channel, width) -> (batch, out_channel, out_width).

        out_width is (width - kernel_size) // stride + 1, written below in the
        equivalent form (width - (kernel_size - stride)) // stride.
        """
        self.x = x
        out = np.empty((x.shape[0],self.out_channel, (x.shape[2] - (self.kernel_size - self.stride))//self.stride))
        for i in range(0, x.shape[2] - self.kernel_size + self.stride, self.stride):
            # Guard against a final window that would overrun the input width.
            if (i+self.kernel_size > x.shape[2]):
                break
            # Contract (in_channel, kernel) of the window against W for all
            # output channels at once; broadcast-add the bias.
            out[:,:,i//self.stride] = np.tensordot(x[:,:,i:i + self.kernel_size], self.W,([1,2],[1,2])) +self.b
        self.out = out
        return out
    def backward(self, delta):
        """Backprop: given dL/dout (batch, out_channel, out_width), accumulate
        dW / db and return dL/dx with the same shape as the cached input."""
        dx = np.zeros((self.x.shape[0], self.x.shape[1], self.x.shape[2]))
        for b in range(delta.shape[0]):
            for j in range(self.dW.shape[0]):
                for i in range(0, self.x.shape[2] - self.kernel_size + self.stride, self.stride):
                    if (i+self.kernel_size > self.x.shape[2]):
                        break
                    # Each output position contributes its input window to dW
                    # and spreads W (scaled by the incoming gradient) onto dx.
                    self.dW[j,:,:] += self.x[b,:,i:i+self.kernel_size] * delta[b,j,i//self.stride]
                    dx[b,:,i:i+self.kernel_size] += (self.W[j,:,:].copy() * delta[b,j,i//self.stride])
        # Bias gradient: sum the incoming gradient over batch and width.
        self.db = delta.sum((0,2))
        self.gg = dx
        return dx
class Flatten():
    """Collapses (batch, channel, width) activations into (batch, channel*width)."""

    def __call__(self, x):
        return self.forward(x)

    def forward(self, x):
        # Remember the incoming shape so backward() can undo the flattening.
        batch, channels, width = x.shape
        self.b, self.c, self.w = batch, channels, width
        return x.reshape(batch, channels * width)

    def backward(self, delta):
        # Restore the gradient to the exact shape forward() received.
        return delta.reshape(self.b, self.c, self.w)
| [
"numpy.random.normal",
"numpy.zeros",
"numpy.tensordot",
"numpy.empty"
] | [((685, 707), 'numpy.zeros', 'np.zeros', (['self.W.shape'], {}), '(self.W.shape)\n', (693, 707), True, 'import numpy as np\n'), ((726, 748), 'numpy.zeros', 'np.zeros', (['self.b.shape'], {}), '(self.b.shape)\n', (734, 748), True, 'import numpy as np\n'), ((915, 1023), 'numpy.empty', 'np.empty', (['(x.shape[0], self.out_channel, (x.shape[2] - (self.kernel_size - self.\n stride)) // self.stride)'], {}), '((x.shape[0], self.out_channel, (x.shape[2] - (self.kernel_size -\n self.stride)) // self.stride))\n', (923, 1023), True, 'import numpy as np\n'), ((1375, 1436), 'numpy.zeros', 'np.zeros', (['(self.x.shape[0], self.x.shape[1], self.x.shape[2])'], {}), '((self.x.shape[0], self.x.shape[1], self.x.shape[2]))\n', (1383, 1436), True, 'import numpy as np\n'), ((367, 431), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1.0)', '(out_channel, in_channel, kernel_size)'], {}), '(0, 1.0, (out_channel, in_channel, kernel_size))\n', (383, 431), True, 'import numpy as np\n'), ((583, 604), 'numpy.zeros', 'np.zeros', (['out_channel'], {}), '(out_channel)\n', (591, 604), True, 'import numpy as np\n'), ((1212, 1283), 'numpy.tensordot', 'np.tensordot', (['x[:, :, i:i + self.kernel_size]', 'self.W', '([1, 2], [1, 2])'], {}), '(x[:, :, i:i + self.kernel_size], self.W, ([1, 2], [1, 2]))\n', (1224, 1283), True, 'import numpy as np\n')] |
import argparse
import cv2
import json
import numpy as np
import os
import pickle
import torch
from argparse import Namespace
from scipy.special import softmax
from sklearn.externals import joblib
from pyquaternion import Quaternion
from tqdm import tqdm
from network import CameraBranch
class Camera_Branch_Inference():
    """Wraps the trained camera branch for pairwise relative-pose inference.

    Preprocesses two RGB views, runs the network, and returns softmax
    probabilities over the KMeans translation / rotation bins.
    """
    def __init__(self, cfg, device):
        self.cfg = cfg
        self.device = device
        # Input preprocessing: target size parsed from "WxH" plus mean image.
        self.img_input_shape = tuple(int(s) for s in cfg.img_resize.split('x'))
        self.img_mean = np.load(cfg.img_mean)
        # Build the network, wrap for multi-GPU, load weights, switch to eval.
        model = CameraBranch(cfg)
        model = torch.nn.DataParallel(model)
        model = model.to(device)
        model.load_state_dict(torch.load(cfg.model_weight))
        model = model.eval()
        self.model = model.to(device)
        # KMeans codebooks mapping class bins <-> translation / rotation vectors.
        self.kmeans_trans = joblib.load(cfg.kmeans_trans_path)
        self.kmeans_rots = joblib.load(cfg.kmeans_rots_path)

    def inference(self, img1_path, img2_path):
        """Return {'rot', 'tran'} softmax distributions for an image pair."""
        batch = []
        for path in (img1_path, img2_path):
            img = cv2.resize(cv2.imread(path), self.img_input_shape) - self.img_mean
            img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
            batch.append(torch.FloatTensor([img]).to(self.device))
        with torch.no_grad():
            pred = self.model(batch[0], batch[1])
        tran_logits = pred['tran'].cpu().detach().numpy()
        rot_logits = pred['rot'].cpu().detach().numpy()
        return {'rot': softmax(rot_logits, axis=1),
                'tran': softmax(tran_logits, axis=1)}

    def xyz2class(self, x, y, z):
        """Quantize a translation vector into its nearest KMeans bin."""
        return self.kmeans_trans.predict([[x, y, z]])

    def quat2class(self, w, xi, yi, zi):
        """Quantize a rotation quaternion into its nearest KMeans bin."""
        return self.kmeans_rots.predict([[w, xi, yi, zi]])

    def class2xyz(self, cls):
        """Map translation bin index/indices back to cluster-center xyz."""
        assert((cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all())
        return self.kmeans_trans.cluster_centers_[cls]

    def class2quat(self, cls):
        """Map rotation bin index/indices back to cluster-center quaternions."""
        assert((cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all())
        return self.kmeans_rots.cluster_centers_[cls]
def get_relative_pose(pose):
    """Relative pose (tx, ty, tz, qw, qx, qy, qz) from a stacked 14-dim pair.

    ``pose`` packs two absolute poses: [t1(3), q1(4), t2(3), q2(4)].  The
    result expresses pose 1 in camera 2's reference frame.
    """
    assert pose.shape[0] == 14
    rot1 = Quaternion(pose[3:7])
    rot2 = Quaternion(pose[10:14])
    trans1, trans2 = pose[:3], pose[7:10]
    rel_rotation = (rot2.inverse * rot1).elements
    rel_translation = get_relative_T_in_cam2_ref(
        rot2.inverse.rotation_matrix, trans1, trans2)
    return np.hstack((rel_translation, rel_rotation)).reshape(-1)
def get_relative_T_in_cam2_ref(R2, t1, t2):
    """Express camera-1's center in camera-2's frame: R2 @ t1 - R2 @ t2."""
    cam2_t1 = np.dot(R2, t1)
    cam2_t2 = np.dot(R2, t2)
    return cam2_t1 - cam2_t2
def suncg_parse_path(dataset_dir, img_path):
    """Rebase the last two path components (house id / image name) onto dataset_dir."""
    *_, house_id, img_id = img_path.split('/')
    return os.path.join(dataset_dir, house_id, img_id)
def inference_by_dataset(cam_model, split_file, log_dir, dataset_dir):
    """Run pairwise pose inference over a split file and pickle a summary.

    Each summary entry is keyed by ``house_cam1_cam2`` and stores the ground
    truth, raw logits, and top-1 decoded translation / rotation predictions.
    """
    with open(split_file, 'r') as f:
        lines = f.readlines()[3:]  # the first 3 lines are header rows
    os.makedirs(log_dir, exist_ok=True)
    summary = {}
    for line in tqdm(lines):
        fields = line.split(' ')
        img1_path, img2_path = fields[0], fields[8]
        house_idx = img1_path.split('/')[-2]
        cam1_idx = img1_path.split('/')[-1].split('_')[0]
        cam2_idx = img2_path.split('/')[-1].split('_')[0]
        # Ground truth: the two absolute poses flank the image paths.
        gt_relative_pose = get_relative_pose(
            np.hstack((fields[1:8], fields[9:])).astype('f4'))
        prediction = cam_model.inference(suncg_parse_path(dataset_dir, img1_path),
                                         suncg_parse_path(dataset_dir, img2_path))
        summary[house_idx + '_' + cam1_idx + '_' + cam2_idx] = {
            'tran_gt': gt_relative_pose[:3],
            'rot_gt': gt_relative_pose[3:],
            'tran_logits': prediction['tran'],
            'rot_logits': prediction['rot'],
            'tran_pred': cam_model.class2xyz(np.argmax(prediction['tran'])),
            'rot_pred': cam_model.class2quat(np.argmax(prediction['rot'])),
        }
    with open(os.path.join(log_dir, 'summary.pkl'), 'wb') as log_f:
        pickle.dump(summary, log_f)
def main():
    """CLI entry point: single-pair inference or whole-split evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--img1-path", type=str, help='path to img 1', default='./example/000009_mlt.png')
    parser.add_argument("--img2-path", type=str, help='path to img 2', default='./example/000029_mlt.png')
    parser.add_argument("--config-path", type=str, default='./config.txt', help='path to config')
    parser.add_argument("--log-dir", type=str, default="./output", help='log dir')
    parser.add_argument("--split-file", type=str, default="", help='split file path')
    parser.add_argument("--dataset-dir", type=str, default="./suncg_dataset", help="dataset directory")
    args, _ = parser.parse_known_args()
    print(args)

    # The config is a JSON file exposed as attribute-style access.
    with open(args.config_path, 'r') as f:
        cfg = Namespace(**json.load(f))
    print(cfg)

    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    cam_model = Camera_Branch_Inference(cfg, device)

    if args.split_file:
        # Batch mode: evaluate every pair listed in the split file.
        inference_by_dataset(cam_model, args.split_file, args.log_dir, args.dataset_dir)
    else:
        # Single-pair mode: print the decoded top-1 prediction.
        result = cam_model.inference(args.img1_path, args.img2_path)
        print(f"Predicted top 1 translation: {cam_model.class2xyz(np.argmax(result['tran']))}")
        print(f"Predicted top 1 rotation: {cam_model.class2quat(np.argmax(result['rot']))}")


if __name__ == '__main__':
    main()
| [
"numpy.hstack",
"sklearn.externals.joblib.load",
"network.CameraBranch",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"numpy.dot",
"numpy.argmax",
"cv2.resize",
"numpy.transpose",
"pyquaternion.Quaternion",
"cv2.imread",
"pickle.dump",
"os.makedirs",
"torch.load",
"tqdm.tqdm",
... | [((2453, 2474), 'pyquaternion.Quaternion', 'Quaternion', (['pose[3:7]'], {}), '(pose[3:7])\n', (2463, 2474), False, 'from pyquaternion import Quaternion\n'), ((2484, 2507), 'pyquaternion.Quaternion', 'Quaternion', (['pose[10:14]'], {}), '(pose[10:14])\n', (2494, 2507), False, 'from pyquaternion import Quaternion\n'), ((2703, 2755), 'numpy.hstack', 'np.hstack', (['(relative_translation, relative_rotation)'], {}), '((relative_translation, relative_rotation))\n', (2712, 2755), True, 'import numpy as np\n'), ((3044, 3087), 'os.path.join', 'os.path.join', (['dataset_dir', 'house_id', 'img_id'], {}), '(dataset_dir, house_id, img_id)\n', (3056, 3087), False, 'import os\n'), ((3273, 3308), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (3284, 3308), False, 'import os\n'), ((3329, 3365), 'os.path.join', 'os.path.join', (['log_dir', '"""summary.pkl"""'], {}), "(log_dir, 'summary.pkl')\n", (3341, 3365), False, 'import os\n'), ((3383, 3394), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (3387, 3394), False, 'from tqdm import tqdm\n'), ((4433, 4458), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4456, 4458), False, 'import argparse\n'), ((516, 537), 'numpy.load', 'np.load', (['cfg.img_mean'], {}), '(cfg.img_mean)\n', (523, 537), True, 'import numpy as np\n'), ((621, 638), 'network.CameraBranch', 'CameraBranch', (['cfg'], {}), '(cfg)\n', (633, 638), False, 'from network import CameraBranch\n'), ((660, 693), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.model'], {}), '(self.model)\n', (681, 693), False, 'import torch\n'), ((937, 971), 'sklearn.externals.joblib.load', 'joblib.load', (['cfg.kmeans_trans_path'], {}), '(cfg.kmeans_trans_path)\n', (948, 971), False, 'from sklearn.externals import joblib\n'), ((999, 1032), 'sklearn.externals.joblib.load', 'joblib.load', (['cfg.kmeans_rots_path'], {}), '(cfg.kmeans_rots_path)\n', (1010, 1032), False, 'from sklearn.externals import 
joblib\n'), ((1096, 1117), 'cv2.imread', 'cv2.imread', (['img1_path'], {}), '(img1_path)\n', (1106, 1117), False, 'import cv2\n'), ((1133, 1154), 'cv2.imread', 'cv2.imread', (['img2_path'], {}), '(img2_path)\n', (1143, 1154), False, 'import cv2\n'), ((1310, 1339), 'numpy.transpose', 'np.transpose', (['img1', '(2, 0, 1)'], {}), '(img1, (2, 0, 1))\n', (1322, 1339), True, 'import numpy as np\n'), ((1355, 1384), 'numpy.transpose', 'np.transpose', (['img2', '(2, 0, 1)'], {}), '(img2, (2, 0, 1))\n', (1367, 1384), True, 'import numpy as np\n'), ((1704, 1730), 'scipy.special.softmax', 'softmax', (['pred_tran'], {'axis': '(1)'}), '(pred_tran, axis=1)\n', (1711, 1730), False, 'from scipy.special import softmax\n'), ((1753, 1778), 'scipy.special.softmax', 'softmax', (['pred_rot'], {'axis': '(1)'}), '(pred_rot, axis=1)\n', (1760, 1778), False, 'from scipy.special import softmax\n'), ((2849, 2863), 'numpy.dot', 'np.dot', (['R2', 't2'], {}), '(R2, t2)\n', (2855, 2863), True, 'import numpy as np\n'), ((2875, 2889), 'numpy.dot', 'np.dot', (['R2', 't1'], {}), '(R2, t1)\n', (2881, 2889), True, 'import numpy as np\n'), ((4378, 4405), 'pickle.dump', 'pickle.dump', (['summary', 'log_f'], {}), '(summary, log_f)\n', (4389, 4405), False, 'import pickle\n'), ((772, 800), 'torch.load', 'torch.load', (['cfg.model_weight'], {}), '(cfg.model_weight)\n', (782, 800), False, 'import torch\n'), ((1170, 1208), 'cv2.resize', 'cv2.resize', (['img1', 'self.img_input_shape'], {}), '(img1, self.img_input_shape)\n', (1180, 1208), False, 'import cv2\n'), ((1240, 1278), 'cv2.resize', 'cv2.resize', (['img2', 'self.img_input_shape'], {}), '(img2, self.img_input_shape)\n', (1250, 1278), False, 'import cv2\n'), ((1512, 1527), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1525, 1527), False, 'import torch\n'), ((5234, 5259), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5257, 5259), False, 'import torch\n'), ((1400, 1425), 'torch.FloatTensor', 'torch.FloatTensor', (['[img1]'], 
{}), '([img1])\n', (1417, 1425), False, 'import torch\n'), ((1457, 1482), 'torch.FloatTensor', 'torch.FloatTensor', (['[img2]'], {}), '([img2])\n', (1474, 1482), False, 'import torch\n'), ((4207, 4236), 'numpy.argmax', 'np.argmax', (["prediction['tran']"], {}), "(prediction['tran'])\n", (4216, 4236), True, 'import numpy as np\n'), ((4284, 4312), 'numpy.argmax', 'np.argmax', (["prediction['rot']"], {}), "(prediction['rot'])\n", (4293, 4312), True, 'import numpy as np\n'), ((5169, 5181), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5178, 5181), False, 'import json\n'), ((3742, 3776), 'numpy.hstack', 'np.hstack', (['(annot[1:8], annot[9:])'], {}), '((annot[1:8], annot[9:]))\n', (3751, 3776), True, 'import numpy as np\n'), ((5494, 5519), 'numpy.argmax', 'np.argmax', (["result['tran']"], {}), "(result['tran'])\n", (5503, 5519), True, 'import numpy as np\n'), ((5588, 5612), 'numpy.argmax', 'np.argmax', (["result['rot']"], {}), "(result['rot'])\n", (5597, 5612), True, 'import numpy as np\n')] |
#
# Created on 2020/08/25
#
import os
import yaml
from pathlib import Path
import argparse
import torch
import numpy as np
from utils.logger import get_logger
from trainers import get_trainer
def setup_seed():
    """Fix all RNG seeds and cuDNN modes so training runs are reproducible."""
    np.random.seed(2933)
    torch.manual_seed(3928)
    torch.cuda.manual_seed_all(2342)
    # Deterministic cuDNN kernels; disable the autotuning benchmark mode,
    # which can pick different algorithms (and results) between runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def write_config(logger, prefix, config):
    """Recursively log a (possibly nested) config dict, one ``key: value`` per line.

    Args:
        logger: logger-like object exposing ``info``.
        prefix: indentation prefix; grows by 4 spaces per nesting level.
        config: dict of config values; nested dicts are logged as an indented
            section header followed by their own entries.
    """
    for k, v in config.items():
        if isinstance(v, dict):
            # Bug fix: the section header previously ignored ``prefix``, so
            # nested section names were logged without indentation.
            logger.info('{}{}: '.format(prefix, k))
            write_config(logger, prefix + ' ' * 4, v)
        else:
            logger.info('{}{}: {}'.format(prefix, k, v))
def main():
    """Entry point: parse args, load the YAML config, set up paths/logger, train."""
    setup_seed()
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', type=str, default='0')
    parser.add_argument('--configs', type=str, required=True)
    parser.add_argument('--indicator', type=str, required=True)
    args = parser.parse_args()
    # read configs
    # Fix: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects; safe_load restricts the input to
    # plain YAML data, which is all a config file should contain.
    with open(args.configs, 'r') as f:
        config = yaml.safe_load(f)
    # initialize ckpt_path
    ckpt_path = Path('ckpt', config['name']+'_'+args.indicator)
    ckpt_path.mkdir(parents=True, exist_ok=True)
    config['ckpt_path'] = str(ckpt_path)
    # initialize logger
    log_path = Path('log', config['name']+'_'+args.indicator)
    log_path.mkdir(parents=True, exist_ok=True)
    config['logger'] = get_logger(str(log_path))
    # write config
    write_config(config['logger'], '', config)
    # set gpu devices
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    # Remap to local ordinals 0..n-1 (CUDA renumbers the visible devices).
    args.gpus = [i for i in range(len(args.gpus.split(',')))]
    config['logger'].info("Set CUDA_VISIBLE_DEVICES to %s" % args.gpus)
    # initialize trainer and train
    with get_trainer(config['trainer'])(**config) as trainer:
        trainer.train()


if __name__ == '__main__':
    main()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"trainers.get_trainer",
"argparse.ArgumentParser",
"pathlib.Path",
"yaml.load",
"numpy.random.seed"
] | [((272, 295), 'torch.manual_seed', 'torch.manual_seed', (['(3928)'], {}), '(3928)\n', (289, 295), False, 'import torch\n'), ((301, 333), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(2342)'], {}), '(2342)\n', (327, 333), False, 'import torch\n'), ((430, 450), 'numpy.random.seed', 'np.random.seed', (['(2933)'], {}), '(2933)\n', (444, 450), True, 'import numpy as np\n'), ((780, 805), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (803, 805), False, 'import argparse\n'), ((1164, 1215), 'pathlib.Path', 'Path', (['"""ckpt"""', "(config['name'] + '_' + args.indicator)"], {}), "('ckpt', config['name'] + '_' + args.indicator)\n", (1168, 1215), False, 'from pathlib import Path\n'), ((1347, 1397), 'pathlib.Path', 'Path', (['"""log"""', "(config['name'] + '_' + args.indicator)"], {}), "('log', config['name'] + '_' + args.indicator)\n", (1351, 1397), False, 'from pathlib import Path\n'), ((1104, 1116), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1113, 1116), False, 'import yaml\n'), ((1824, 1854), 'trainers.get_trainer', 'get_trainer', (["config['trainer']"], {}), "(config['trainer'])\n", (1835, 1854), False, 'from trainers import get_trainer\n')] |
"""
Basic two sided matching markets.
"""
import numpy as np
from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, \
generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum
from MatchingMarkets.matching_alg import deferred_acceptance
class ManyToOneMarket(object):
    """
    Basic class for the model of a many-to-one two-sided matching market.

    Attributes
    ----------
    num_doctors : int
        The number of doctors.
    num_hospitals : int
        The number of hospitals.
    doctor_prefs : 2d-array(int)
        The list of doctors' preference lists over the hospitals and the outside option.
        The elements must be 0 <= x <= num_hospitals.
        The number `num_hospitals` is considered as an outside option.
    hospital_prefs : 2d-array(int)
        The list of hospitals' preferences over the doctors and the outside option.
        The elements must be 0 <= x <= num_doctors.
        The number `num_doctors` is considered as an outside option.
    hospital_caps : 1d-array(int, optional)
        The list of the capacities of the hospitals. The elements must be non-negative.
        If nothing is specified, then all caps are set to be 1.
    """
    def __init__(self, doctor_prefs, hospital_prefs, hospital_caps=None, no_validation=False):
        self.num_doctors = len(doctor_prefs)
        self.num_hospitals = len(hospital_prefs)
        # Sentinel indices meaning "stay unmatched" on each side.
        self.doctor_outside_option = self.num_hospitals
        self.hospital_outside_option = self.num_doctors
        self.doctor_prefs = doctor_prefs
        self.hospital_prefs = hospital_prefs
        if hospital_caps is None:
            self.hospital_caps = np.ones(self.num_hospitals, dtype=int)
        else:
            self.hospital_caps = hospital_caps
        if not no_validation:
            self._convert_prefs()
            self._convert_caps()

    def _convert_prefs(self):
        """
        Check the validity of doctor_prefs and hospital_prefs and
        convert them into 2d-ndarrays.

        Short (truncated) preference lists are allowed: unspecified tail
        entries are filled with the outside option.
        """
        converted_doctor_prefs = np.full(
            (self.num_doctors, self.num_hospitals+1),
            self.doctor_outside_option,
            dtype=int
        )
        converted_hospital_prefs = np.full(
            (self.num_hospitals, self.num_doctors+1),
            self.hospital_outside_option,
            dtype=int
        )
        # doctor prefs validation
        try:
            for d, d_pref in enumerate(self.doctor_prefs):
                for rank, h in enumerate(d_pref):
                    converted_doctor_prefs[d, rank] = h
        except IndexError as e:
            # Bug fix: this error path referenced `self.doctore_prefs` (typo),
            # which raised AttributeError instead of the intended message.
            msg = "A preference list is too long.\n" + \
                f"'doctor_prefs': {self.doctor_prefs}"
            raise InvalidPrefsError(msg, "doctor_prefs")
        except Exception as e:
            msg = "Each pref must be a matrix of integers.\n" + \
                f"'doctor_prefs': {self.doctor_prefs}"
            raise InvalidPrefsError(msg, "doctor_prefs")
        if np.min(converted_doctor_prefs) < 0 or \
            np.max(converted_doctor_prefs) > self.doctor_outside_option:
            msg = \
                "Elements of 'doctor_prefs' must be 0 <= x <= 'num_hospitals'.\n" +\
                f"'doctor_prefs': {self.doctor_prefs}"
            raise InvalidPrefsError(msg, "doctor_prefs")

        # hospital prefs validation
        try:
            for h, h_pref in enumerate(self.hospital_prefs):
                for rank, d in enumerate(h_pref):
                    converted_hospital_prefs[h, rank] = d
        except IndexError as e:
            msg = "A preference list is too long.\n" +\
                f"'hospital_prefs': {self.hospital_prefs}"
            raise InvalidPrefsError(msg, "hospital_prefs")
        except Exception as e:
            msg = "Each pref must be a matrix of integers.\n" +\
                f"'hospital_prefs': {self.hospital_prefs}"
            raise InvalidPrefsError(msg, "hospital_prefs")
        if np.min(converted_hospital_prefs) < 0 or \
            np.max(converted_hospital_prefs) > self.hospital_outside_option:
            msg = \
                "Elements of 'hospital_prefs' must be 0 <= x <= 'num_doctors'\n" +\
                f"'hospital_prefs': {self.hospital_prefs}"
            raise InvalidPrefsError(msg, "hospital_prefs")

        self.doctor_prefs = converted_doctor_prefs
        self.hospital_prefs = converted_hospital_prefs

    def _convert_caps(self):
        """
        Check the validity of the hospital_caps and convert it into
        a ndarray.
        """
        try:
            self.hospital_caps = np.array(self.hospital_caps, dtype=int)
        except Exception as e:
            msg = f"'hospital_caps' must be a list of non-negative integers.\n" +\
                f"'hospital_caps': {self.hospital_caps}"
            raise InvalidCapsError(msg, "hospital_caps")

        if len(self.hospital_caps) != self.num_hospitals:
            msg = f"The length of 'hospital_caps' must be equal to 'num_hospitals'.\n" +\
                f"'hospital_caps': {self.hospital_caps}"
            raise InvalidCapsError(msg, "hospital_caps")

        if np.any(self.hospital_caps < 0):
            msg = f"'hospital_caps' must be a list of non-negative integers.\n" +\
                f"'hospital_caps': {self.hospital_caps}"
            raise InvalidCapsError(msg, "hospital_caps")

    @staticmethod
    def _convert_prefs_to_ranks(prefs, num_objects):
        """
        Return a (people x objects+1) table where entry (p, o) is the rank of
        object o in person p's list; objects not listed before the outside
        option keep the default rank `num_objects` (i.e. unacceptable).
        """
        num_people = len(prefs)
        outside_option = num_objects
        rank_table = np.full(
            [num_people, num_objects+1], outside_option, dtype=int)
        for p, pref in enumerate(prefs):
            for rank, obj in enumerate(pref):
                rank_table[p, obj] = rank
                if obj == outside_option:
                    break
        return rank_table

    @staticmethod
    def create_setup(
        num_doctors,
        num_hospitals,
        outside_score_doctor=0.0,
        outside_score_hospital=0.0,
        random_type="normal",
        random_seed=None
        ):
        """
        Generate a random market setup (preferences and capacities).

        `outside_score_*` may be a float score for the outside option, or
        the strings "min" / "max" for the extreme cases.
        Total hospital capacity is set to 3/2 of the number of doctors.
        """
        random_generator = np.random.default_rng(seed=random_seed)
        setup = {}
        if outside_score_doctor in ["min", "max"]:
            prefs = generate_prefs_from_random_scores(
                num_doctors,
                num_hospitals,
                outside_score=None,
                random_type=random_type,
                random_generator=random_generator
            )
            if outside_score_doctor == "min":
                setup["d_prefs"] = prefs
            else:
                setup["d_prefs"] = prefs[:, 0:1]
        else:
            setup["d_prefs"] = generate_prefs_from_random_scores(
                num_doctors,
                num_hospitals,
                outside_score_doctor,
                random_type,
                random_generator
            )
        if outside_score_hospital in ["min", "max"]:
            prefs = generate_prefs_from_random_scores(
                num_hospitals,
                num_doctors,
                outside_score=None,
                random_type=random_type,
                random_generator=random_generator
            )
            if outside_score_hospital == "min":
                setup["h_prefs"] = prefs
            else:
                setup["h_prefs"] = prefs[:, 0:1]
        else:
            setup["h_prefs"] = generate_prefs_from_random_scores(
                num_hospitals,
                num_doctors,
                outside_score_hospital,
                random_type,
                random_generator
            )
        setup["hospital_caps"] = generate_caps_given_sum(
            num_hospitals, int(num_doctors*3/2), random_generator)
        return setup

    def boston(self, doctor_proposing=True):
        """
        Run the Boston (immediate acceptance) algorithm in a many-to-one
        two-sided matching market.
        By default, this method runs the doctor proposing algorithm.

        Args:
            doctor_proposing : bool, optional
                If True, it runs the doctor proposing alg. Otherwise it
                runs the hospital proposing alg.

        Returns:
            matching : 1d-ndarray
                List of the matched hospitals. The n-th element indicates
                the hospital which the n-th doctor matches.
        """
        if not doctor_proposing:
            raise NotImplementedError("Reverse boston is not implemented")

        doctors = list(range(self.num_doctors))
        hospital_rank_table = self._convert_prefs_to_ranks(
            self.hospital_prefs, self.num_doctors)
        remaining_caps = np.copy(self.hospital_caps)
        matching = np.full(
            self.num_doctors,
            self.doctor_outside_option,
            dtype=int
        )
        next_proposing_rank = 0

        while len(doctors) > 0:
            unmatched_doctors = []
            applied_doctor_ranks = {
                h: [] for h in range(self.num_hospitals)
            }
            # Each remaining doctor applies to the hospital at the current rank.
            for d in doctors:
                h = self.doctor_prefs[d][next_proposing_rank]
                # if d's preference list is exhausted
                if h == self.doctor_outside_option:
                    continue
                d_rank = hospital_rank_table[h, d]
                # if d is unacceptable for h
                if d_rank == self.hospital_outside_option:
                    unmatched_doctors.append(d)
                    continue
                applied_doctor_ranks[h].append(d_rank)

            # Hospitals immediately (permanently) accept this round's
            # applicants in preference order, up to remaining capacity.
            # Bug fixes: iterated over nonexistent `self.hospitals`, compared
            # against the whole `remaining_caps` array, and decremented the
            # array instead of the h-th entry.
            for h in range(self.num_hospitals):
                if len(applied_doctor_ranks[h]) > remaining_caps[h]:
                    applied_doctor_ranks[h].sort()
                for d_rank in applied_doctor_ranks[h]:
                    d = self.hospital_prefs[h, d_rank]
                    if remaining_caps[h] > 0:
                        matching[d] = h
                        remaining_caps[h] -= 1
                    else:
                        unmatched_doctors.append(d)

            doctors = unmatched_doctors
            next_proposing_rank += 1
        return matching

    def serial_dictatorship(self, doctor_proposing=True, application_order=None):
        """
        Run the serial dictatorship algorithm in a many-to-one two-sided matching market.
        By default, this method runs the doctor proposing algorithm.

        Args:
            doctor_proposing : bool, optional
                If True, it runs the doctor proposing alg. Otherwise it
                runs the hospital proposing alg.

            application_order : 1d-array(int), optional
                List of proposing side agents (if doctor_proposing == True, list of doctors).
                If None, then [0, 1, ..., num_doctors-1] is used as application order.

        Returns:
            matching : 1d-ndarray
                List of the matched hospitals. The n-th element indicates
                the hospital which the n-th doctor matches.
        """
        if not doctor_proposing:
            # Bug fix: the message previously said "Reverse boston".
            raise NotImplementedError("Reverse serial dictatorship is not implemented")

        if application_order is None:
            if doctor_proposing:
                application_order = list(range(self.num_doctors))
            else:
                application_order = list(range(self.num_hospitals))

        hospital_rank_table = self._convert_prefs_to_ranks(
            self.hospital_prefs, self.num_doctors)
        remaining_caps = np.copy(self.hospital_caps)
        matching = np.full(
            self.num_doctors,
            self.doctor_outside_option,
            dtype=int
        )
        # Each doctor, in order, takes the best hospital that finds them
        # acceptable and still has a free seat.
        for d in application_order:
            for rank in range(self.num_hospitals+1):
                h = self.doctor_prefs[d][rank]
                # if d's preference list is exhausted
                if h == self.doctor_outside_option:
                    break
                d_rank = hospital_rank_table[h, d]
                # if d is unacceptable for h
                if d_rank == self.hospital_outside_option:
                    pass
                # if cap of h is full
                elif remaining_caps[h] == 0:
                    pass
                else:
                    matching[d] = h
                    remaining_caps[h] -= 1
                    break
        return matching

    def _convert_matching_heap_to_list(self, matched_ranks):
        """Flatten the per-hospital heaps of matched ranks into a doctor->hospital array."""
        matching = np.full(
            self.num_doctors,
            self.doctor_outside_option,
            dtype=int
        )
        for h, heap in matched_ranks.items():
            for d_rank in heap.values():
                d = self.hospital_prefs[h, d_rank]
                matching[d] = h
        return matching

    def deferred_acceptance_raw_python(self, doctor_proposing=True):
        """Pure-Python doctor-proposing DA; reference implementation for the numba one."""
        doctors = list(range(self.num_doctors))
        next_proposing_ranks = np.zeros(self.num_doctors, dtype=int)
        hospital_rank_table = self._convert_prefs_to_ranks(
            self.hospital_prefs, self.num_doctors)
        # Each hospital tentatively holds its matched doctors' ranks in a
        # max-heap so the least preferred one is available in O(1).
        matched_doctor_ranks = {
            h: MaxHeap(self.hospital_caps[h]) for h in range(self.num_hospitals)
        }
        while len(doctors) > 0:
            d = doctors.pop()
            first_rank = next_proposing_ranks[d]
            d_pref = self.doctor_prefs[d]
            for rank in range(first_rank, self.num_hospitals+1):
                next_proposing_ranks[d] += 1
                h = d_pref[rank]
                # if this doctor's preference list is exhausted
                if h == self.doctor_outside_option:
                    break
                d_rank = hospital_rank_table[h, d]
                # if the doctor's rank is below the outside option
                if d_rank == self.hospital_outside_option:
                    continue
                # if the hospital cap is 0
                if self.hospital_caps[h] == 0:
                    pass
                # if the cap is not full
                elif matched_doctor_ranks[h].length < self.hospital_caps[h]:
                    matched_doctor_ranks[h].push(d_rank)
                    break
                # if the cap is full but a less favorable doctor is matched
                elif d_rank < matched_doctor_ranks[h].root():
                    worst_rank = matched_doctor_ranks[h].replace(d_rank)
                    worst_doctor = self.hospital_prefs[h, worst_rank]
                    doctors.append(worst_doctor)
                    break
        matching = self._convert_matching_heap_to_list(matched_doctor_ranks)
        return matching

    def deferred_acceptance(self, doctor_proposing=True, no_numba=False, new=False):
        """
        Run the deferred acceptance (Gale-Shapley) algorithm in
        a many-to-one two-sided matching market.
        By default, this method runs the doctor proposing DA
        and returns a stable matching in the market.

        Args:
            doctor_proposing : bool, optional
                If True, it runs the doctor proposing DA. Otherwise it
                runs the hospital proposing DA.

        Returns:
            matching : 1d-ndarray
                List of the matched hospitals (and the outside option).
                The n-th element indicates the hospital which
                the n-th doctor matches.
        """
        if not doctor_proposing:
            raise NotImplementedError("Reverse DA is not implemented")
        if no_numba:
            return self.deferred_acceptance_raw_python(doctor_proposing)
        return deferred_acceptance(
            self.num_doctors,
            self.num_hospitals,
            self.doctor_prefs,
            self.hospital_prefs,
            self.hospital_caps
        )

    def list_blocking_pairs(self, matching):
        """
        Return the list of blocking pairs of `matching`.

        A pair (d, h) blocks the matching when doctor d strictly prefers h to
        their current assignment and h either has a free seat (and finds d
        acceptable) or prefers d to its worst currently matched doctor.
        Pairs involving an unacceptable current partner are reported with the
        outside option index.
        """
        doctor_rank_table = self._convert_prefs_to_ranks(
            self.doctor_prefs, self.num_hospitals)
        hospital_rank_table = self._convert_prefs_to_ranks(
            self.hospital_prefs, self.num_doctors)

        # fill current matching rank table
        unmatched_flag = -1
        doctor_matching_ranks = np.empty(self.num_doctors, dtype=int)
        hospital_worst_matching_ranks = np.full(self.num_hospitals, unmatched_flag)
        for d, h in enumerate(matching):
            # Bug fix: record the rank for every doctor, including unmatched
            # ones — np.empty leaves uninitialized values, which previously
            # made the comparisons below nondeterministic for unmatched
            # doctors. doctor_rank_table has a column for the outside option.
            doctor_matching_ranks[d] = doctor_rank_table[d, h]
            if h == self.num_hospitals:
                continue
            if hospital_worst_matching_ranks[h] == unmatched_flag:
                hospital_worst_matching_ranks[h] = hospital_rank_table[h, d]
            elif hospital_worst_matching_ranks[h] < hospital_rank_table[h, d]:
                hospital_worst_matching_ranks[h] = hospital_rank_table[h, d]

        # find blocking pairs
        blocking_pairs = []
        # Individual rationality: a matched partner that is unacceptable
        # blocks together with the outside option.
        for d, h in enumerate(matching):
            if h != self.num_hospitals:
                if doctor_rank_table[d, h] == self.num_hospitals:
                    blocking_pairs.append((d, self.num_hospitals))
                if hospital_rank_table[h, d] == self.num_doctors:
                    blocking_pairs.append((self.num_doctors, h))
        for d in range(self.num_doctors):
            for h in self.doctor_prefs[d]:
                if (h == self.num_hospitals) or (h == matching[d]):
                    break
                if doctor_rank_table[d, h] < doctor_matching_ranks[d]:
                    # if hospital cap is not full
                    if hospital_worst_matching_ranks[h] == unmatched_flag:
                        if hospital_rank_table[h, d] < self.num_doctors:
                            blocking_pairs.append((d, h))
                    # if hospital is matched with worse doctor
                    elif hospital_rank_table[h, d] < hospital_worst_matching_ranks[h]:
                        blocking_pairs.append((d, h))
        return blocking_pairs

    def get_doctor_matching_ranks(self, matching, doctor_rank_table=None):
        """Return, for each doctor, the rank of their matched hospital."""
        if doctor_rank_table is None:
            doctor_rank_table = self._convert_prefs_to_ranks(
                self.doctor_prefs, self.num_hospitals)
        matching_ranks = np.zeros_like(matching)
        for d, h in enumerate(matching):
            matching_ranks[d] = doctor_rank_table[d, h]
        return matching_ranks

    def get_hospital_matching_ranks(self, matching, hospital_rank_table=None):
        """Return, for each hospital, the list of ranks of its matched doctors."""
        if hospital_rank_table is None:
            hospital_rank_table = self._convert_prefs_to_ranks(
                self.hospital_prefs, self.num_doctors)
        matching_ranks = [[] for h in range(self.num_hospitals)]
        for d, h in enumerate(matching):
            if h != self.doctor_outside_option:
                matching_ranks[h].append(hospital_rank_table[h, d])
        return matching_ranks

    @staticmethod
    def count_pref_length(prefs, outside_option):
        """Length of each preference list, truncated at the outside option."""
        pref_lengths = []
        for li in prefs:
            for c, h in enumerate(li):
                if h == outside_option:
                    pref_lengths.append(c)
                    break
            else:
                # No outside option listed: the whole list is acceptable.
                pref_lengths.append(len(li))
        return pref_lengths

    def analyze_matching(self, matching):
        """Collect summary statistics (sizes, full hospitals, top-rank counts) of a matching."""
        result = {}
        hospital_matching = {h: [] for h in range(self.num_hospitals+1)}
        for d, h in enumerate(matching):
            hospital_matching[h].append(d)

        result["doctor_pref_lengths"] = self.count_pref_length(
            self.doctor_prefs,
            self.doctor_outside_option
        )
        result["hospital_pref_lengths"] = self.count_pref_length(
            self.hospital_prefs,
            self.hospital_outside_option
        )
        result["unmatch_doctor_size"] = len(hospital_matching[self.doctor_outside_option])
        result["matching_size"] = self.num_doctors - result["unmatch_doctor_size"]
        result["unmatch_hospital_cap_size"] = \
            np.sum(self.hospital_caps) - result["matching_size"]
        result["cap_full_hospital_size"] = 0
        for h in range(self.num_hospitals):
            if len(hospital_matching[h]) == self.hospital_caps[h]:
                result["cap_full_hospital_size"] += 1
        matching_ranks = self.get_doctor_matching_ranks(matching)
        result["num_rank1_doctors"] = len(matching_ranks[matching_ranks == 0])
        result["num_rank12_doctors"] = len(matching_ranks[matching_ranks <= 1])
        return result
class OneToOneMarket(ManyToOneMarket):
    """
    Basic class for the model of a one-to-one two-sided matching market.

    Every hospital has capacity one, so this is simply a ManyToOneMarket
    constructed with the parent's default unit capacities.

    Attributes
    ----------
    num_doctors : int
        The number of doctors.
    num_hospitals : int
        The number of hospitals.
    doctor_prefs : 2d-array(int)
        Doctors' preferences over the hospitals plus the outside option;
        elements satisfy 0 <= x <= num_hospitals, where `num_hospitals`
        denotes the outside option.
    hospital_prefs : 2d-array(int)
        Hospitals' preferences over the doctors plus the outside option;
        elements satisfy 0 <= x <= num_doctors, where `num_doctors`
        denotes the outside option.
    """
    def __init__(self, doctor_prefs, hospital_prefs):
        # Omitting hospital_caps makes the parent default every cap to 1.
        super().__init__(doctor_prefs, hospital_prefs)
if __name__ == "__main__":
    # Benchmark: run deferred acceptance (numba-accelerated vs. pure Python)
    # on a randomly generated market and print the elapsed time of each.
    # (Previous revisions carried several commented-out example markets here
    # as inert triple-quoted strings; they were dead code and were removed.)
    import datetime

    num_doctors, num_hospitals = 3000, 300
    setup = ManyToOneMarket.create_setup(
        num_doctors,
        num_hospitals,
        outside_score_doctor=0.99,
        outside_score_hospital=0.0,
        random_type="normal",
        random_seed=None
    )
    m = ManyToOneMarket(setup["d_prefs"], setup["h_prefs"], setup["hospital_caps"])
    num_simulation = 10

    # Numba-accelerated path.
    start_time = datetime.datetime.now()
    for _ in range(num_simulation):
        m.analyze_matching(m.deferred_acceptance())
    print(datetime.datetime.now() - start_time)

    # Pure-Python path for comparison.
    start_time = datetime.datetime.now()
    for _ in range(num_simulation):
        m.analyze_matching(m.deferred_acceptance(no_numba=True))
    print(datetime.datetime.now() - start_time)
| [
"numpy.copy",
"numpy.random.default_rng",
"numpy.ones",
"MatchingMarkets.util.InvalidCapsError",
"MatchingMarkets.util.MaxHeap",
"MatchingMarkets.matching_alg.deferred_acceptance",
"MatchingMarkets.util.generate_prefs_from_random_scores",
"numpy.any",
"numpy.max",
"datetime.datetime.now",
"numpy... | [((23591, 23614), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23612, 23614), False, 'import datetime\n'), ((23774, 23797), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23795, 23797), False, 'import datetime\n'), ((2097, 2192), 'numpy.full', 'np.full', (['(self.num_doctors, self.num_hospitals + 1)', 'self.doctor_outside_option'], {'dtype': 'int'}), '((self.num_doctors, self.num_hospitals + 1), self.\n doctor_outside_option, dtype=int)\n', (2104, 2192), True, 'import numpy as np\n'), ((2269, 2366), 'numpy.full', 'np.full', (['(self.num_hospitals, self.num_doctors + 1)', 'self.hospital_outside_option'], {'dtype': 'int'}), '((self.num_hospitals, self.num_doctors + 1), self.\n hospital_outside_option, dtype=int)\n', (2276, 2366), True, 'import numpy as np\n'), ((5242, 5272), 'numpy.any', 'np.any', (['(self.hospital_caps < 0)'], {}), '(self.hospital_caps < 0)\n', (5248, 5272), True, 'import numpy as np\n'), ((5634, 5699), 'numpy.full', 'np.full', (['[num_people, num_objects + 1]', 'outside_option'], {'dtype': 'int'}), '([num_people, num_objects + 1], outside_option, dtype=int)\n', (5641, 5699), True, 'import numpy as np\n'), ((6189, 6228), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'random_seed'}), '(seed=random_seed)\n', (6210, 6228), True, 'import numpy as np\n'), ((8732, 8759), 'numpy.copy', 'np.copy', (['self.hospital_caps'], {}), '(self.hospital_caps)\n', (8739, 8759), True, 'import numpy as np\n'), ((8779, 8843), 'numpy.full', 'np.full', (['self.num_doctors', 'self.doctor_outside_option'], {'dtype': 'int'}), '(self.num_doctors, self.doctor_outside_option, dtype=int)\n', (8786, 8843), True, 'import numpy as np\n'), ((11602, 11629), 'numpy.copy', 'np.copy', (['self.hospital_caps'], {}), '(self.hospital_caps)\n', (11609, 11629), True, 'import numpy as np\n'), ((11649, 11713), 'numpy.full', 'np.full', (['self.num_doctors', 'self.doctor_outside_option'], {'dtype': 'int'}), 
'(self.num_doctors, self.doctor_outside_option, dtype=int)\n', (11656, 11713), True, 'import numpy as np\n'), ((12574, 12638), 'numpy.full', 'np.full', (['self.num_doctors', 'self.doctor_outside_option'], {'dtype': 'int'}), '(self.num_doctors, self.doctor_outside_option, dtype=int)\n', (12581, 12638), True, 'import numpy as np\n'), ((13033, 13070), 'numpy.zeros', 'np.zeros', (['self.num_doctors'], {'dtype': 'int'}), '(self.num_doctors, dtype=int)\n', (13041, 13070), True, 'import numpy as np\n'), ((15698, 15819), 'MatchingMarkets.matching_alg.deferred_acceptance', 'deferred_acceptance', (['self.num_doctors', 'self.num_hospitals', 'self.doctor_prefs', 'self.hospital_prefs', 'self.hospital_caps'], {}), '(self.num_doctors, self.num_hospitals, self.doctor_prefs,\n self.hospital_prefs, self.hospital_caps)\n', (15717, 15819), False, 'from MatchingMarkets.matching_alg import deferred_acceptance\n'), ((16261, 16298), 'numpy.empty', 'np.empty', (['self.num_doctors'], {'dtype': 'int'}), '(self.num_doctors, dtype=int)\n', (16269, 16298), True, 'import numpy as np\n'), ((16339, 16382), 'numpy.full', 'np.full', (['self.num_hospitals', 'unmatched_flag'], {}), '(self.num_hospitals, unmatched_flag)\n', (16346, 16382), True, 'import numpy as np\n'), ((18281, 18304), 'numpy.zeros_like', 'np.zeros_like', (['matching'], {}), '(matching)\n', (18294, 18304), True, 'import numpy as np\n'), ((1705, 1743), 'numpy.ones', 'np.ones', (['self.num_hospitals'], {'dtype': 'int'}), '(self.num_hospitals, dtype=int)\n', (1712, 1743), True, 'import numpy as np\n'), ((3349, 3387), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""doctor_prefs"""'], {}), "(msg, 'doctor_prefs')\n", (3366, 3387), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((4353, 4393), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""hospital_prefs"""'], 
{}), "(msg, 'hospital_prefs')\n", (4370, 4393), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((4690, 4729), 'numpy.array', 'np.array', (['self.hospital_caps'], {'dtype': 'int'}), '(self.hospital_caps, dtype=int)\n', (4698, 4729), True, 'import numpy as np\n'), ((5191, 5229), 'MatchingMarkets.util.InvalidCapsError', 'InvalidCapsError', (['msg', '"""hospital_caps"""'], {}), "(msg, 'hospital_caps')\n", (5207, 5229), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((5432, 5470), 'MatchingMarkets.util.InvalidCapsError', 'InvalidCapsError', (['msg', '"""hospital_caps"""'], {}), "(msg, 'hospital_caps')\n", (5448, 5470), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((6320, 6466), 'MatchingMarkets.util.generate_prefs_from_random_scores', 'generate_prefs_from_random_scores', (['num_doctors', 'num_hospitals'], {'outside_score': 'None', 'random_type': 'random_type', 'random_generator': 'random_generator'}), '(num_doctors, num_hospitals, outside_score\n =None, random_type=random_type, random_generator=random_generator)\n', (6353, 6466), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((6760, 6878), 'MatchingMarkets.util.generate_prefs_from_random_scores', 'generate_prefs_from_random_scores', (['num_doctors', 'num_hospitals', 'outside_score_doctor', 'random_type', 'random_generator'], {}), '(num_doctors, num_hospitals,\n outside_score_doctor, random_type, random_generator)\n', (6793, 6878), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, 
MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((7046, 7192), 'MatchingMarkets.util.generate_prefs_from_random_scores', 'generate_prefs_from_random_scores', (['num_hospitals', 'num_doctors'], {'outside_score': 'None', 'random_type': 'random_type', 'random_generator': 'random_generator'}), '(num_hospitals, num_doctors, outside_score\n =None, random_type=random_type, random_generator=random_generator)\n', (7079, 7192), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((7488, 7608), 'MatchingMarkets.util.generate_prefs_from_random_scores', 'generate_prefs_from_random_scores', (['num_hospitals', 'num_doctors', 'outside_score_hospital', 'random_type', 'random_generator'], {}), '(num_hospitals, num_doctors,\n outside_score_hospital, random_type, random_generator)\n', (7521, 7608), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((13230, 13260), 'MatchingMarkets.util.MaxHeap', 'MaxHeap', (['self.hospital_caps[h]'], {}), '(self.hospital_caps[h])\n', (13237, 13260), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((20067, 20093), 'numpy.sum', 'np.sum', (['self.hospital_caps'], {}), '(self.hospital_caps)\n', (20073, 20093), True, 'import numpy as np\n'), ((23718, 23741), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23739, 23741), False, 'import datetime\n'), ((23914, 23937), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23935, 23937), False, 'import datetime\n'), ((2785, 2823), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""doctor_prefs"""'], {}), "(msg, 'doctor_prefs')\n", 
(2802, 2823), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((3007, 3045), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""doctor_prefs"""'], {}), "(msg, 'doctor_prefs')\n", (3024, 3045), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((3058, 3088), 'numpy.min', 'np.min', (['converted_doctor_prefs'], {}), '(converted_doctor_prefs)\n', (3064, 3088), True, 'import numpy as np\n'), ((3110, 3140), 'numpy.max', 'np.max', (['converted_doctor_prefs'], {}), '(converted_doctor_prefs)\n', (3116, 3140), True, 'import numpy as np\n'), ((3773, 3813), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""hospital_prefs"""'], {}), "(msg, 'hospital_prefs')\n", (3790, 3813), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((4000, 4040), 'MatchingMarkets.util.InvalidPrefsError', 'InvalidPrefsError', (['msg', '"""hospital_prefs"""'], {}), "(msg, 'hospital_prefs')\n", (4017, 4040), False, 'from MatchingMarkets.util import InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n'), ((4053, 4085), 'numpy.min', 'np.min', (['converted_hospital_prefs'], {}), '(converted_hospital_prefs)\n', (4059, 4085), True, 'import numpy as np\n'), ((4107, 4139), 'numpy.max', 'np.max', (['converted_hospital_prefs'], {}), '(converted_hospital_prefs)\n', (4113, 4139), True, 'import numpy as np\n'), ((4928, 4966), 'MatchingMarkets.util.InvalidCapsError', 'InvalidCapsError', (['msg', '"""hospital_caps"""'], {}), "(msg, 'hospital_caps')\n", (4944, 4966), False, 'from MatchingMarkets.util import 
InvalidPrefsError, InvalidCapsError, MaxHeap, generate_prefs_from_random_scores, generate_caps_given_sum, round_caps_to_meet_sum\n')] |
# License: BSD 3 clause
# -*- coding: utf8 -*-
import unittest
from tick.base.build.base import standard_normal_cdf, \
standard_normal_inv_cdf
from scipy.stats import norm
import numpy as np
from numpy.random import normal, uniform
class Test(unittest.TestCase):
    """Compare the C-implemented normal CDF helpers against scipy.stats."""

    def setUp(self):
        self.size = 10

    def test_standard_normal_cdf(self):
        """...Test normal cumulative distribution function
        """
        sample = normal(size=self.size)
        computed = np.array([standard_normal_cdf(value) for value in sample])
        reference = norm.cdf(sample)
        np.testing.assert_almost_equal(computed, reference, decimal=7)

    def test_standard_normal_inv_cdf(self):
        """...Test inverse of normal cumulative distribution function
        """
        sample = uniform(size=self.size)
        computed = np.array([standard_normal_inv_cdf(value) for value in sample])
        reference = norm.ppf(sample)
        np.testing.assert_almost_equal(computed, reference, decimal=7)

        # Vectorized overload: fills a preallocated output array in place.
        vector_result = np.empty(self.size)
        standard_normal_inv_cdf(sample, vector_result)
        np.testing.assert_almost_equal(vector_result, reference, decimal=7)
| [
"numpy.random.normal",
"tick.base.build.base.standard_normal_cdf",
"scipy.stats.norm.ppf",
"tick.base.build.base.standard_normal_inv_cdf",
"numpy.testing.assert_almost_equal",
"numpy.empty",
"numpy.random.uniform",
"scipy.stats.norm.cdf"
] | [((452, 474), 'numpy.random.normal', 'normal', ([], {'size': 'self.size'}), '(size=self.size)\n', (458, 474), False, 'from numpy.random import normal, uniform\n'), ((569, 592), 'scipy.stats.norm.cdf', 'norm.cdf', (['tested_sample'], {}), '(tested_sample)\n', (577, 592), False, 'from scipy.stats import norm\n'), ((602, 661), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual', 'expected'], {'decimal': '(7)'}), '(actual, expected, decimal=7)\n', (632, 661), True, 'import numpy as np\n'), ((813, 836), 'numpy.random.uniform', 'uniform', ([], {'size': 'self.size'}), '(size=self.size)\n', (820, 836), False, 'from numpy.random import normal, uniform\n'), ((935, 958), 'scipy.stats.norm.ppf', 'norm.ppf', (['tested_sample'], {}), '(tested_sample)\n', (943, 958), False, 'from scipy.stats import norm\n'), ((967, 1026), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual', 'expected'], {'decimal': '(7)'}), '(actual, expected, decimal=7)\n', (997, 1026), True, 'import numpy as np\n'), ((1051, 1070), 'numpy.empty', 'np.empty', (['self.size'], {}), '(self.size)\n', (1059, 1070), True, 'import numpy as np\n'), ((1079, 1131), 'tick.base.build.base.standard_normal_inv_cdf', 'standard_normal_inv_cdf', (['tested_sample', 'actual_array'], {}), '(tested_sample, actual_array)\n', (1102, 1131), False, 'from tick.base.build.base import standard_normal_cdf, standard_normal_inv_cdf\n'), ((1140, 1205), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual_array', 'expected'], {'decimal': '(7)'}), '(actual_array, expected, decimal=7)\n', (1170, 1205), True, 'import numpy as np\n'), ((502, 524), 'tick.base.build.base.standard_normal_cdf', 'standard_normal_cdf', (['s'], {}), '(s)\n', (521, 524), False, 'from tick.base.build.base import standard_normal_cdf, standard_normal_inv_cdf\n'), ((864, 890), 'tick.base.build.base.standard_normal_inv_cdf', 'standard_normal_inv_cdf', (['s'], {}), '(s)\n', (887, 890), 
False, 'from tick.base.build.base import standard_normal_cdf, standard_normal_inv_cdf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Deals with kpoints.
"""
from typing import Union
import numpy as np
from twinpy.properties.hexagonal import check_hexagonal_lattice
from twinpy.structure.lattice import CrystalLattice
class Kpoints():
    """
    This class deals with kpoints.

    The reciprocal lattice, its basis norms and its volume are computed
    once from the input direct lattice. Hexagonal lattices are detected so
    that symmetry-aware meshes and offsets can be generated.
    """

    def __init__(
            self,
            lattice:np.array,
            ):
        """
        Args:
            lattice: Lattice matrix.
        """
        self._lattice = lattice
        # Filled in by _set_properties() below.
        self._reciprocal_lattice = None
        self._reciprocal_abc = None
        self._reciprocal_volume = None
        self._is_hexagonal = False
        self._set_properties()

    def _set_properties(self):
        """
        Set properties (reciprocal lattice, norms, volume, hexagonality).
        """
        cry_lat = CrystalLattice(lattice=self._lattice)
        self._reciprocal_lattice = cry_lat.reciprocal_lattice
        recip_cry_lat = CrystalLattice(lattice=self._reciprocal_lattice)
        self._reciprocal_abc = recip_cry_lat.abc
        self._reciprocal_volume = recip_cry_lat.volume
        # check_hexagonal_lattice raises AssertionError for non-hexagonal
        # lattices, so a failure simply leaves the flag False.
        try:
            check_hexagonal_lattice(self._lattice)
            self._is_hexagonal = True
        except AssertionError:
            pass

    def get_mesh_from_interval(self,
                               interval:Union[float,np.array],
                               decimal_handling:str=None,
                               include_two_pi:bool=True) -> list:
        """
        Get mesh from interval.

        Args:
            interval: Grid interval.
            decimal_handling: Decimal handling. Available choices are 'floor',
                              'ceil' and 'round'. If 'decimal_handling' is not
                              'floor' and 'ceil', 'round' is set automatically.
            include_two_pi: If True, include 2 * pi.

        Returns:
            list: Sampling mesh.

        Note:
            The basis norms of the reciprocal lattice are divided by interval
            and made int from float using the rule specified with
            'decimal_handling'. If the final mesh includes 0, fix 0 to 1.
        """
        if isinstance(interval, np.ndarray):
            assert interval.shape == (3,), \
                "Shape of interval is {}, which must be (3,)".format(
                        interval.shape)
        recip_abc = self._reciprocal_abc.copy()
        if include_two_pi:
            recip_abc *= 2 * np.pi
        # Pre-round to 5 decimals so floating-point noise does not flip the
        # subsequent floor/ceil across an integer boundary.
        mesh_float = np.round(recip_abc / interval, decimals=5)
        if decimal_handling == 'floor':
            mesh = np.int64(np.floor(mesh_float))
        elif decimal_handling == 'ceil':
            mesh = np.int64(np.ceil(mesh_float))
        else:
            mesh = np.int64(np.round(mesh_float))
        # A zero division count would be meaningless: clamp to 1.
        fixed_mesh = np.where(mesh==0, 1, mesh)
        return fixed_mesh.tolist()

    def get_intervals_from_mesh(self,
                                mesh:list,
                                include_two_pi:bool=True) -> np.array:
        """
        Get intervals from mesh.

        Args:
            mesh: Sampling mesh.
            include_two_pi: If True, include 2 * pi.

        Returns:
            np.array: Get intervals for each axis.
        """
        recip_abc = self._reciprocal_abc.copy()
        if include_two_pi:
            recip_abc *= 2 * np.pi
        intervals = recip_abc / np.array(mesh)
        return intervals

    def fix_mesh_based_on_symmetry(self, mesh:list) -> list:
        """
        Fix mesh based on lattice symmetry.

        Args:
            mesh: Sampling mesh.

        Returns:
            list: Fixed sampling mesh.

        Note:
            Currently, check only hexagonal lattice.
            If crystal lattice is hexagonal,
            mesh is fixed as: (odd odd even).
            Else mesh: (even even even).
            But '1' is kept fixed during this operation.
        """
        if self._is_hexagonal:
            # Add 1 to each even x/y count (making it odd); for z, add 1 to
            # odd counts (making them even) but leave a count of 1 untouched.
            condition = lambda x: int(x%2==0) # If True, get 1, else get 0.
            arr = [ condition(m) for m in mesh[:2] ]
            if (mesh[2]!=1 and mesh[2]%2==1):
                arr.append(1)
            else:
                arr.append(0)
            arr = np.array(arr)
        else:
            # NOTE(review): odd counts are bumped to even here, including 1
            # -> 2; this seems to contradict "'1' is kept fixed" above --
            # confirm intended behavior.
            condition = lambda x: int(x%2==1)
            arr = np.array([ condition(m) for m in mesh ])
        fixed_mesh = np.array(mesh) + arr
        return fixed_mesh.tolist()

    def get_offset(self, use_gamma_center:bool=False, mesh:list=None) -> list:
        """
        Get offset.

        Args:
            use_gamma_center: If use_gamma_center is True, return [0., 0., 0.].
            mesh: Optional. If mesh is parsed,
                  set offset element 0 where mesh is 1.

        Returns:
            list: Offset from origin centered mesh grids.
        """
        if use_gamma_center:
            return [0., 0., 0.]

        if self._is_hexagonal:
            offset = [0., 0., 0.5]
        else:
            offset = [0.5, 0.5, 0.5]

        if mesh is not None:
            # A single grid point on an axis needs no half-step shift.
            offset = np.array(offset)
            offset[np.where(np.array(mesh)==1)] = 0
            offset = offset.tolist()

        return offset

    def get_mesh_offset_auto(self,
                             interval:Union[float,np.array]=None,
                             mesh:list=None,
                             include_two_pi:bool=True,
                             decimal_handling:str='round',
                             use_symmetry:bool=True,
                             use_gamma_center:bool=False):
        """
        Get mesh and offset.

        Args:
            interval: Grid interval.
            mesh: Sampling mesh.
            include_two_pi: If True, include 2 * pi.
            decimal_handling: Decimal handling. Available choices are 'floor',
                              'ceil' and 'round'. If 'decimal_handling' is not
                              'floor' and 'ceil', 'round' is set automatically.
            use_symmetry: When 'mesh' is None, 'use_symmetry' is called.
                          If True, run 'fix_mesh_based_on_symmetry'.
            use_gamma_center: If use_gamma_center is True,
                              offset becomes [0., 0., 0.].

        Raises:
            ValueError: Both mesh and interval are not specified.
            ValueError: Both mesh and interval are specified.

        Returns:
            tuple: (mesh, offset).
        """
        # Exactly one of 'interval' and 'mesh' must be given.
        if interval is None and mesh is None:
            raise ValueError("both mesh and interval are not specified")
        if interval is not None and mesh is not None:
            raise ValueError("both mesh and interval are specified")

        if mesh is None:
            _mesh = self.get_mesh_from_interval(
                    interval=interval,
                    decimal_handling=decimal_handling,
                    include_two_pi=include_two_pi)
            if use_symmetry:
                _mesh = self.fix_mesh_based_on_symmetry(mesh=_mesh)
        else:
            _mesh = mesh

        offset = self.get_offset(use_gamma_center=use_gamma_center,
                                 mesh=_mesh)

        return (_mesh, offset)

    def get_dict(self,
                 interval:Union[float,np.array]=None,
                 mesh:list=None,
                 include_two_pi:bool=True,
                 decimal_handling:str='round',
                 use_symmetry:bool=True):
        """
        Get dict including all properties and settings.

        Args:
            interval: Grid interval.
            mesh: Sampling mesh.
            include_two_pi: If True, include 2 * pi.
            decimal_handling: Decimal handling. Available choices are 'floor',
                              'ceil' and 'round'. If 'decimal_handling' is not
                              'floor' and 'ceil', 'round' is set automatically.
            use_symmetry: If True, run 'fix_mesh_based_on_symmetry'.

        Raises:
            ValueError: Both mesh and interval are not specified.
            ValueError: Both mesh and interval are specified.

        Returns:
            dict: All properties and settings.
        """
        mesh, offset = self.get_mesh_offset_auto(
                interval=interval,
                mesh=mesh,
                include_two_pi=include_two_pi,
                decimal_handling=decimal_handling,
                use_symmetry=use_symmetry)
        intervals = self.get_intervals_from_mesh(
                mesh=mesh,
                include_two_pi=include_two_pi,
                )
        recip_lattice = self._reciprocal_lattice.copy()
        recip_abc = self._reciprocal_abc.copy()
        recip_vol = self._reciprocal_volume
        total_mesh = mesh[0] * mesh[1] * mesh[2]
        if include_two_pi:
            # Stored values exclude 2*pi; scale them on demand.
            recip_lattice *= 2 * np.pi
            recip_abc *= 2 * np.pi
            recip_vol *= (2 * np.pi)**3
        dic = {
                'reciprocal_lattice': recip_lattice,
                'reciprocal_abc': recip_abc,
                'reciprocal_volume': recip_vol,
                'total_mesh': total_mesh,
                'mesh': mesh,
                'offset': offset,
                'input_interval': interval,
                'intervals': intervals,
                'include_two_pi': include_two_pi,
                'decimal_handling': decimal_handling,
                'use_symmetry': use_symmetry,
              }
        return dic
| [
"numpy.ceil",
"numpy.where",
"numpy.round",
"numpy.floor",
"numpy.array",
"twinpy.structure.lattice.CrystalLattice",
"twinpy.properties.hexagonal.check_hexagonal_lattice"
] | [((771, 808), 'twinpy.structure.lattice.CrystalLattice', 'CrystalLattice', ([], {'lattice': 'self._lattice'}), '(lattice=self._lattice)\n', (785, 808), False, 'from twinpy.structure.lattice import CrystalLattice\n'), ((895, 943), 'twinpy.structure.lattice.CrystalLattice', 'CrystalLattice', ([], {'lattice': 'self._reciprocal_lattice'}), '(lattice=self._reciprocal_lattice)\n', (909, 943), False, 'from twinpy.structure.lattice import CrystalLattice\n'), ((2444, 2486), 'numpy.round', 'np.round', (['(recip_abc / interval)'], {'decimals': '(5)'}), '(recip_abc / interval, decimals=5)\n', (2452, 2486), True, 'import numpy as np\n'), ((2753, 2781), 'numpy.where', 'np.where', (['(mesh == 0)', '(1)', 'mesh'], {}), '(mesh == 0, 1, mesh)\n', (2761, 2781), True, 'import numpy as np\n'), ((1073, 1111), 'twinpy.properties.hexagonal.check_hexagonal_lattice', 'check_hexagonal_lattice', (['self._lattice'], {}), '(self._lattice)\n', (1096, 1111), False, 'from twinpy.properties.hexagonal import check_hexagonal_lattice\n'), ((3339, 3353), 'numpy.array', 'np.array', (['mesh'], {}), '(mesh)\n', (3347, 3353), True, 'import numpy as np\n'), ((4174, 4187), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (4182, 4187), True, 'import numpy as np\n'), ((4328, 4342), 'numpy.array', 'np.array', (['mesh'], {}), '(mesh)\n', (4336, 4342), True, 'import numpy as np\n'), ((5023, 5039), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (5031, 5039), True, 'import numpy as np\n'), ((2555, 2575), 'numpy.floor', 'np.floor', (['mesh_float'], {}), '(mesh_float)\n', (2563, 2575), True, 'import numpy as np\n'), ((2646, 2665), 'numpy.ceil', 'np.ceil', (['mesh_float'], {}), '(mesh_float)\n', (2653, 2665), True, 'import numpy as np\n'), ((2709, 2729), 'numpy.round', 'np.round', (['mesh_float'], {}), '(mesh_float)\n', (2717, 2729), True, 'import numpy as np\n'), ((5068, 5082), 'numpy.array', 'np.array', (['mesh'], {}), '(mesh)\n', (5076, 5082), True, 'import numpy as np\n')] |
import numpy.testing as npt
import sys
from dipy.workflows.base import IntrospectiveArgumentParser
from dipy.workflows.flow_runner import run_flow
from dipy.workflows.tests.workflow_tests_utils import TestFlow, \
DummyCombinedWorkflow
def test_iap():
    """Check that IntrospectiveArgumentParser maps CLI args onto flow params."""
    sys.argv = [sys.argv[0]]
    pos_keys = ['positional_str', 'positional_bool', 'positional_int',
                'positional_float']
    opt_keys = ['optional_str', 'optional_bool', 'optional_int',
                'optional_float']

    pos_results = ['test', 0, 10, 10.2]
    opt_results = ['opt_test', True, 20, 20.2]

    cli_args = inputs_from_results(opt_results, opt_keys, optional=True)
    cli_args += inputs_from_results(pos_results)
    sys.argv.extend(cli_args)

    parser = IntrospectiveArgumentParser()
    dummy_flow = TestFlow()
    parser.add_workflow(dummy_flow)
    args = parser.get_flow_args()

    # Types and ordering must survive the round trip through the parser.
    for key, expected in zip(pos_keys + opt_keys, pos_results + opt_results):
        npt.assert_equal(args[key], expected)

    # **args must match dummy_flow's run() signature exactly.
    return_values = dummy_flow.run(**args)
    npt.assert_array_equal(return_values, pos_results + opt_results + [2.0])
def test_flow_runner():
    """Run a combined workflow end to end and verify parameter routing."""
    old_argv = sys.argv
    sys.argv = [sys.argv[0]]

    opt_keys = ['param_combined', 'dwf1.param1', 'dwf2.param2', 'force', 'out_strat',
                'mix_names']
    pos_results = ['dipy.txt']
    opt_results = [30, 10, 20, True, 'absolute', True]

    cli_args = inputs_from_results(opt_results, opt_keys, optional=True)
    cli_args.extend(inputs_from_results(pos_results))
    sys.argv.extend(cli_args)

    dcwf = DummyCombinedWorkflow()
    param1, param2, combined = run_flow(dcwf)

    # Generic flow parameters.
    assert dcwf._force_overwrite
    assert dcwf._output_strategy == 'absolute'
    assert dcwf._mix_names

    # Parameters forwarded to the two sub-workflows.
    assert param1 == 10
    assert param2 == 20

    # Parameter consumed by the parent workflow itself.
    assert combined == 30

    sys.argv = old_argv
def inputs_from_results(results, keys=None, optional=False):
    """Build a flat argv-style list from expected result values.

    Each value is stringified; when ``keys`` is given, every value is
    preceded by its ``--key`` flag. With ``optional=True``, boolean values
    are emitted as bare flags (the stringified value itself is omitted).
    """
    argv = []
    for position, value in enumerate(results):
        if keys is not None:
            argv.append('--' + keys[position])
        rendered = str(value)
        # Booleans act as on/off switches: the flag alone is enough.
        if optional and rendered in ('True', 'False'):
            continue
        argv.append(rendered)
    return argv
if __name__ == '__main__':
    # Allow running this test module directly, without a test runner.
    test_iap()
    test_flow_runner()
| [
"dipy.workflows.base.IntrospectiveArgumentParser",
"numpy.testing.assert_equal",
"dipy.workflows.tests.workflow_tests_utils.TestFlow",
"dipy.workflows.tests.workflow_tests_utils.DummyCombinedWorkflow",
"sys.argv.extend",
"dipy.workflows.flow_runner.run_flow",
"numpy.testing.assert_array_equal"
] | [((711, 734), 'sys.argv.extend', 'sys.argv.extend', (['inputs'], {}), '(inputs)\n', (726, 734), False, 'import sys\n'), ((748, 777), 'dipy.workflows.base.IntrospectiveArgumentParser', 'IntrospectiveArgumentParser', ([], {}), '()\n', (775, 777), False, 'from dipy.workflows.base import IntrospectiveArgumentParser\n'), ((795, 805), 'dipy.workflows.tests.workflow_tests_utils.TestFlow', 'TestFlow', ([], {}), '()\n', (803, 805), False, 'from dipy.workflows.tests.workflow_tests_utils import TestFlow, DummyCombinedWorkflow\n'), ((1185, 1243), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['return_values', '(all_results + [2.0])'], {}), '(return_values, all_results + [2.0])\n', (1207, 1243), True, 'import numpy.testing as npt\n'), ((1655, 1678), 'sys.argv.extend', 'sys.argv.extend', (['inputs'], {}), '(inputs)\n', (1670, 1678), False, 'import sys\n'), ((1691, 1714), 'dipy.workflows.tests.workflow_tests_utils.DummyCombinedWorkflow', 'DummyCombinedWorkflow', ([], {}), '()\n', (1712, 1714), False, 'from dipy.workflows.tests.workflow_tests_utils import TestFlow, DummyCombinedWorkflow\n'), ((1746, 1760), 'dipy.workflows.flow_runner.run_flow', 'run_flow', (['dcwf'], {}), '(dcwf)\n', (1754, 1760), False, 'from dipy.workflows.flow_runner import run_flow\n'), ((1052, 1080), 'numpy.testing.assert_equal', 'npt.assert_equal', (['args[k]', 'v'], {}), '(args[k], v)\n', (1068, 1080), True, 'import numpy.testing as npt\n')] |
from skimage import io
import pyopencl as cl
import numpy as np
import sys
# VISIONGL IMPORTS
from vglShape import *
from vglStrEl import *
import vglConst as vc
"""
img:
is the input image
cl_shape:
3D Images:
			The OpenCL's default is to be (img_width, img_height, img_depth)
2D Images:
			The OpenCL's default is to be (img_width, img_height)
cl_pitch:
3D Images (needed):
The OpenCL's default is to be (img_width*bytes_per_pixel, img_height*img_width*bytes_per_pixel)
and it is assumed when pitch=(0, 0) is given
2D Images (optional):
The OpenCL's default is to be (img_width*bytes_per_pixel)
and it is assumed when pitch=(0) is given
cl_origin
Is the origin of the image, where to start copying the image.
2D images must have 0 in the z-axis
cl_region
Is where to end the copying of the image.
2D images must have 1 in the z-axis
"""
class VglImage(object):
	def __init__(self, imgPath, imgDim=vc.VGL_IMAGE_2D_IMAGE()):
		"""Load the image at imgPath and initialize host/device bookkeeping.

		Args:
			imgPath: path of the image file to read from disk.
			imgDim: vc.VGL_IMAGE_2D_IMAGE() or vc.VGL_IMAGE_3D_IMAGE().
		"""
		# IF THE IMAGE TYPE IS NOT SPECIFIED, A 2D IMAGE WILL BE ASSUMED
		# NOTE(review): the default imgDim is evaluated once at definition
		# time; fine as long as VGL_IMAGE_2D_IMAGE() is a constant -- confirm.

		# INICIALIZING DATA
		self.imgDim = imgDim
		self.img_host = None          # numpy ndarray on the host side
		self.img_device = None        # pyopencl Image on the device side
		self.img_sync = False         # True when host and device copies agree
		self.last_changed_host = False
		self.last_changed_device = False

		if(self.imgDim == vc.VGL_IMAGE_2D_IMAGE()):
			print("Creating 2D Image!")
		elif(self.imgDim == vc.VGL_IMAGE_3D_IMAGE()):
			print("Creating 3D Image!")

		# OPENING IMAGE
		self.set_image_host(imgPath)
	def create_vglShape(self):
		"""Build the VglShape descriptor from the host image's ndarray shape.

		For 2D images: a 2-dim ndarray is treated as single-channel
		(luminance), a 3-dim ndarray as multi-channel (RGB/RGBA).
		For 3D images: 3-dim means single-channel volume, 4-dim means
		multi-channel volume. Also marks the host copy as the most
		recently changed side.
		"""
		if(self.img_host is not None):
			print("The image was founded. Creating vglShape.")

			self.vglshape = VglShape()
			if( self.imgDim == vc.VGL_IMAGE_2D_IMAGE() ):
				print("2D Image")
				if( len(self.img_host.shape) == 2 ):
					# SHADES OF GRAY IMAGE
					print("VglImage LUMINANCE")
					# ndarray is (height, width); VglShape wants (channels, width, height).
					self.vglshape.constructor2DShape(1, self.img_host.shape[1], self.img_host.shape[0])
				elif(len(self.img_host.shape) == 3):
					# MORE THAN ONE COLOR CHANNEL
					print("VglImage RGB")
					self.vglshape.constructor2DShape(self.img_host.shape[2], self.img_host.shape[1], self.img_host.shape[0])
			elif( self.imgDim == vc.VGL_IMAGE_3D_IMAGE() ):
				print("3D Image")
				if( len(self.img_host.shape) == 3 ):
					# SHADES OF GRAY IMAGE
					print("VglImage LUMINANCE")
					# ndarray is (frames, height, width).
					self.vglshape.constructor3DShape( 1, self.img_host.shape[2], self.img_host.shape[1], self.img_host.shape[0] )
				elif(len(self.img_host.shape) == 4):
					# MORE THAN ONE COLOR CHANNEL
					print("VglImage RGB")
					self.vglshape.constructor3DShape( self.img_host.shape[3], self.img_host.shape[2], self.img_host.shape[1], self.img_host.shape[0] )

			# Host copy is now authoritative; any device copy is stale.
			self.img_sync = False
			self.last_changed_host = True
			self.last_changed_device = False
		else:
			print("Impossible to create a vglImage object. host_image is None.")
	def set_image_host(self, imgPath):
		"""Read the image file into img_host and refresh shape/sync flags.

		Errors are reported on stdout and deliberately not re-raised
		(best effort: img_host keeps its previous value on failure, and
		create_vglShape() then reports if it is still None).
		"""
		try:
			self.img_host = io.imread(imgPath)
		except FileNotFoundError as fnf:
			print("Image wasn't found. ")
			print(str(fnf))
		except Exception as e:
			print("Unrecognized error:")
			print(str(e))

		# Host copy is now the authoritative version; device copy is stale.
		self.img_sync = False
		self.last_changed_host = True
		self.last_changed_device = False

		self.create_vglShape()
def rgb_to_rgba(self):
print("[RGB -> RGBA]")
img_host_rgba = np.empty((self.vglshape.getHeight(), self.vglshape.getWidth(), 4), self.img_host.dtype)
img_host_rgba[:,:,0] = self.img_host[:,:,0]
img_host_rgba[:,:,1] = self.img_host[:,:,1]
img_host_rgba[:,:,2] = self.img_host[:,:,2]
img_host_rgba[:,:,3] = 255
self.img_host = img_host_rgba
self.create_vglShape()
def rgba_to_rgb(self):
print("[RGBA -> RGB]")
if( (self.img_host[0,0,:].size < 4) | (self.img_host[0,0,:].size > 4) ):
print("IMAGE IS NOT RGBA")
else:
img_host_rgb = np.empty((self.vglshape.getHeight(), self.vglshape.getWidth(), 3), self.img_host.dtype)
img_host_rgb[:,:,0] = self.img_host[:,:,0]
img_host_rgb[:,:,1] = self.img_host[:,:,1]
img_host_rgb[:,:,2] = self.img_host[:,:,2]
self.img_host = img_host_rgb
self.create_vglShape()
	def vglImageUpload(self, ctx, queue):
		"""Allocate a READ_ONLY OpenCL image on the device and copy img_host into it.

		:param ctx:   pyopencl context used to allocate the device image.
		:param queue: pyopencl command queue used for the blocking copy.
		"""
		# IMAGE VARS
		print("Uploading image to device.")
		if( self.getVglShape().getNFrames() == 1 ):
			# 2D image: copy region depth is 1, image shape is (width, height).
			origin = ( 0, 0, 0 )
			region = ( self.getVglShape().getWidth(), self.getVglShape().getHeight(), 1 )
			shape = ( self.getVglShape().getWidth(), self.getVglShape().getHeight() )
			mf = cl.mem_flags
			imgFormat = cl.ImageFormat(self.get_toDevice_channel_order(), self.get_toDevice_dtype())
			self.img_device = cl.Image(ctx, mf.READ_ONLY, imgFormat, shape)
		elif( self.getVglShape().getNFrames() > 1 ):
			# 3D image: depth equals the number of frames.
			origin = ( 0, 0, 0 )
			region = ( self.getVglShape().getWidth(), self.getVglShape().getHeight(), self.getVglShape().getNFrames() )
			shape = ( self.getVglShape().getWidth(), self.getVglShape().getHeight(), self.getVglShape().getNFrames() )
			mf = cl.mem_flags
			imgFormat = cl.ImageFormat(self.get_toDevice_channel_order(), self.get_toDevice_dtype())
			self.img_device = cl.Image(ctx, mf.READ_ONLY, imgFormat, shape)
		else:
			print("VglImage NFrames wrong. NFrames returns:", self.getVglShape().getNFrames() )
			return
		# COPYING NDARRAY IMAGE TO OPENCL IMAGE OBJECT
		cl.enqueue_copy(queue, self.img_device, self.img_host, origin=origin, region=region, is_blocking=True)
		# After the upload the device holds the latest copy.
		self.img_sync = False
		self.last_changed_host = False
		self.last_changed_device = True
def vglImageDownload(self, ctx, queue):
# MAKE IMAGE DOWNLOAD HERE
print("Downloading Image from device.")
if( self.getVglShape().getNFrames() == 1 ):
origin = ( 0, 0, 0 )
region = ( self.getVglShape().getWidth(), self.getVglShape().getHeight(), 1 )
totalSize = self.getVglShape().getHeight() * self.getVglShape().getWidth() * self.getVglShape().getNChannels()
buffer = np.zeros(totalSize, self.img_host.dtype)
cl.enqueue_copy(queue, buffer, self.img_device, origin=origin, region=region, is_blocking=True)
if( self.getVglShape().getNChannels() == 1 ):
buffer = np.frombuffer( buffer, self.img_host.dtype ).reshape( self.getVglShape().getHeight(), self.getVglShape().getWidth() )
elif( (self.getVglShape().getNChannels() == 3) or (self.getVglShape().getNChannels() == 4) ):
buffer = np.frombuffer( buffer, self.img_host.dtype ).reshape( self.getVglShape().getHeight(), self.getVglShape().getWidth(), self.getVglShape().getNChannels() )
elif( self.getVglShape().getNFrames() > 1 ):
pitch = (0, 0)
origin = ( 0, 0, 0 )
region = ( self.getVglShape().getWidth(), self.getVglShape().getHeight(), self.getVglShape().getNFrames() )
totalSize = self.getVglShape().getHeight() * self.getVglShape().getWidth() * self.getVglShape().getNFrames()
buffer = np.zeros(totalSize, self.img_host.dtype)
cl.enqueue_copy(queue, buffer, self.img_device, origin=origin, region=region, is_blocking=True)
if( self.getVglShape().getNChannels() == 1 ):
buffer = np.frombuffer( buffer, self.img_host.dtype ).reshape( self.getVglShape().getNFrames(), self.getVglShape().getHeight(), self.getVglShape().getWidth() )
elif( (self.getVglShape().getNChannels() == 3) or (self.getVglShape().getNChannels() == 4) ):
buffer = np.frombuffer( buffer, self.img_host.dtype ).reshape( self.getVglShape().getNFrames(), self.getVglShape().getHeight(), self.getVglShape().getWidth(), self.getVglShape().getNChannels() )
self.img_host = buffer
self.create_vglShape()
self.img_sync = False
self.last_changed_device = False
self.last_changed_host = True
def sync(self, ctx, queue):
if( not self.img_sync ):
if( self.last_changed_device ):
self.vglImageDownload(ctx, queue)
elif( self.last_changed_host ):
self.vglImageUpload(ctx, queue)
else:
print("Already synced")
	def img_save(self, name):
		"""Persist the host-side image to disk via skimage.io.imsave.

		:param name: destination file path (format inferred from extension).
		"""
		print("Saving Picture in Hard Drive")
		io.imsave(name, self.img_host)
	def get_similar_device_image_object(self, ctx, queue):
		"""Allocate and return a new WRITE_ONLY cl.Image matching this image's
		dimensions and format (e.g. to serve as a kernel output buffer).

		:param ctx:   pyopencl context used for the allocation.
		:param queue: unused; kept for interface symmetry with the transfers.

		NOTE(review): when imgDim is neither 2D nor 3D, img_copy is never
		bound and the final print/return raises NameError.
		"""
		if(self.imgDim == vc.VGL_IMAGE_2D_IMAGE()):
			shape = ( self.vglshape.getWidth(), self.vglshape.getHeight() )
			mf = cl.mem_flags
			imgFormat = cl.ImageFormat(self.get_toDevice_channel_order(), self.get_toDevice_dtype())
			img_copy = cl.Image(ctx, mf.WRITE_ONLY, imgFormat, shape)
		elif(self.imgDim == vc.VGL_IMAGE_3D_IMAGE()):
			shape = ( self.vglshape.getWidth(), self.vglshape.getHeight(), self.vglshape.getNFrames() )
			mf = cl.mem_flags
			imgFormat = cl.ImageFormat(self.get_toDevice_channel_order(), self.get_toDevice_dtype())
			img_copy = cl.Image(ctx, mf.WRITE_ONLY, imgFormat, shape)
		# Debug output: compare source and copy dimensions.
		print("--> Orig:", self.get_device_image().width, self.get_device_image().height, self.get_device_image().depth)
		print("--> Copy:", img_copy.width, img_copy.height, img_copy.depth)
		return img_copy
def set_device_image(self, img):
if( isinstance(img, cl.Image) ):
self.img_device = img
self.img_sync = False
self.last_changed_device = True
self.last_changed_host = False
else:
print("Invalid object. cl.Image objects only.")
	def getVglShape(self):
		# Accessor for the VglShape metadata object.
		return self.vglshape
	def get_device_image(self):
		# Accessor for the device-side (OpenCL) image object.
		return self.img_device
	def get_host_image(self):
		# Accessor for the host-side ndarray image.
		return self.img_host
def get_toDevice_dtype(self):
img_device_dtype = None
if( self.img_host.dtype == np.uint8 ):
img_device_dtype = cl.channel_type.UNORM_INT8
print("8bit Channel Size!")
elif( self.img_host.dtype == np.uint16 ):
img_device_dtype = cl.channel_type.UNORM_INT16
print("16bit Channel Size!")
return img_device_dtype
def get_toDevice_channel_order(self):
img_device_channel_order = None
if( self.getVglShape().getNChannels() == 1 ):
img_device_channel_order = cl.channel_order.LUMINANCE
elif( self.getVglShape().getNChannels() == 2 ):
img_device_channel_order = cl.channel_order.RG
elif( self.getVglShape().getNChannels() == 3 ):
img_device_channel_order = cl.channel_order.RGB
elif( self.getVglShape().getNChannels() == 4 ):
img_device_channel_order = cl.channel_order.RGBA
return img_device_channel_order
| [
"pyopencl.enqueue_copy",
"vglConst.VGL_IMAGE_2D_IMAGE",
"skimage.io.imread",
"numpy.zeros",
"vglConst.VGL_IMAGE_3D_IMAGE",
"skimage.io.imsave",
"numpy.frombuffer",
"pyopencl.Image"
] | [((943, 966), 'vglConst.VGL_IMAGE_2D_IMAGE', 'vc.VGL_IMAGE_2D_IMAGE', ([], {}), '()\n', (964, 966), True, 'import vglConst as vc\n'), ((5103, 5209), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'self.img_device', 'self.img_host'], {'origin': 'origin', 'region': 'region', 'is_blocking': '(True)'}), '(queue, self.img_device, self.img_host, origin=origin,\n region=region, is_blocking=True)\n', (5118, 5209), True, 'import pyopencl as cl\n'), ((7699, 7729), 'skimage.io.imsave', 'io.imsave', (['name', 'self.img_host'], {}), '(name, self.img_host)\n', (7708, 7729), False, 'from skimage import io\n'), ((1243, 1266), 'vglConst.VGL_IMAGE_2D_IMAGE', 'vc.VGL_IMAGE_2D_IMAGE', ([], {}), '()\n', (1264, 1266), True, 'import vglConst as vc\n'), ((2820, 2838), 'skimage.io.imread', 'io.imread', (['imgPath'], {}), '(imgPath)\n', (2829, 2838), False, 'from skimage import io\n'), ((4427, 4472), 'pyopencl.Image', 'cl.Image', (['ctx', 'mf.READ_ONLY', 'imgFormat', 'shape'], {}), '(ctx, mf.READ_ONLY, imgFormat, shape)\n', (4435, 4472), True, 'import pyopencl as cl\n'), ((5690, 5730), 'numpy.zeros', 'np.zeros', (['totalSize', 'self.img_host.dtype'], {}), '(totalSize, self.img_host.dtype)\n', (5698, 5730), True, 'import numpy as np\n'), ((5734, 5834), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'buffer', 'self.img_device'], {'origin': 'origin', 'region': 'region', 'is_blocking': '(True)'}), '(queue, buffer, self.img_device, origin=origin, region=\n region, is_blocking=True)\n', (5749, 5834), True, 'import pyopencl as cl\n'), ((7809, 7832), 'vglConst.VGL_IMAGE_2D_IMAGE', 'vc.VGL_IMAGE_2D_IMAGE', ([], {}), '()\n', (7830, 7832), True, 'import vglConst as vc\n'), ((8030, 8076), 'pyopencl.Image', 'cl.Image', (['ctx', 'mf.WRITE_ONLY', 'imgFormat', 'shape'], {}), '(ctx, mf.WRITE_ONLY, imgFormat, shape)\n', (8038, 8076), True, 'import pyopencl as cl\n'), ((1322, 1345), 'vglConst.VGL_IMAGE_3D_IMAGE', 'vc.VGL_IMAGE_3D_IMAGE', ([], {}), '()\n', (1343, 1345), True, 'import 
vglConst as vc\n'), ((1598, 1621), 'vglConst.VGL_IMAGE_2D_IMAGE', 'vc.VGL_IMAGE_2D_IMAGE', ([], {}), '()\n', (1619, 1621), True, 'import vglConst as vc\n'), ((4900, 4945), 'pyopencl.Image', 'cl.Image', (['ctx', 'mf.READ_ONLY', 'imgFormat', 'shape'], {}), '(ctx, mf.READ_ONLY, imgFormat, shape)\n', (4908, 4945), True, 'import pyopencl as cl\n'), ((6599, 6639), 'numpy.zeros', 'np.zeros', (['totalSize', 'self.img_host.dtype'], {}), '(totalSize, self.img_host.dtype)\n', (6607, 6639), True, 'import numpy as np\n'), ((6643, 6743), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'buffer', 'self.img_device'], {'origin': 'origin', 'region': 'region', 'is_blocking': '(True)'}), '(queue, buffer, self.img_device, origin=origin, region=\n region, is_blocking=True)\n', (6658, 6743), True, 'import pyopencl as cl\n'), ((8099, 8122), 'vglConst.VGL_IMAGE_3D_IMAGE', 'vc.VGL_IMAGE_3D_IMAGE', ([], {}), '()\n', (8120, 8122), True, 'import vglConst as vc\n'), ((8348, 8394), 'pyopencl.Image', 'cl.Image', (['ctx', 'mf.WRITE_ONLY', 'imgFormat', 'shape'], {}), '(ctx, mf.WRITE_ONLY, imgFormat, shape)\n', (8356, 8394), True, 'import pyopencl as cl\n'), ((2075, 2098), 'vglConst.VGL_IMAGE_3D_IMAGE', 'vc.VGL_IMAGE_3D_IMAGE', ([], {}), '()\n', (2096, 2098), True, 'import vglConst as vc\n'), ((5893, 5935), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'self.img_host.dtype'], {}), '(buffer, self.img_host.dtype)\n', (5906, 5935), True, 'import numpy as np\n'), ((6121, 6163), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'self.img_host.dtype'], {}), '(buffer, self.img_host.dtype)\n', (6134, 6163), True, 'import numpy as np\n'), ((6803, 6845), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'self.img_host.dtype'], {}), '(buffer, self.img_host.dtype)\n', (6816, 6845), True, 'import numpy as np\n'), ((7064, 7106), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'self.img_host.dtype'], {}), '(buffer, self.img_host.dtype)\n', (7077, 7106), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Yizhong
# created_at: 10/27/2016 下午8:34
import numpy
class Performance(object):
    """Accumulator for one evaluation level: per-document precision/recall
    samples plus a running global hit count.

    NOTE: the attribute name ``percision`` (sic) is kept because consumer
    code reads it directly.
    """

    def __init__(self, percision, recall, hit_num):
        self.percision, self.recall, self.hit_num = percision, recall, hit_num
class Metrics(object):
    """Accumulates span / nuclearity / relation agreement between gold and
    predicted RST trees and prints per-level and per-relation statistics.
    """

    def __init__(self, levels=None):
        """ Initialization
        :type levels: list of string
        :param levels: eval levels, the possible values are only
                       'span','nuclearity','relation'
        """
        # None default avoids the mutable-default-argument pitfall of the
        # original signature; behavior for callers is unchanged.
        self.levels = ['span', 'nuclearity', 'relation'] if levels is None else levels
        self.span_perf = Performance([], [], 0)
        self.nuc_perf = Performance([], [], 0)
        self.rela_perf = Performance([], [], 0)
        self.span_num = 0
        # Per-relation tallies, filled only when the 'relation' level is evaluated.
        self.hit_num_each_relation = {}
        self.pred_num_each_relation = {}
        self.gold_num_each_relation = {}

    def eval(self, goldtree, predtree):
        """ Evaluate performance on one pair of RST trees.
        :type goldtree: RSTTree class
        :param goldtree: gold RST tree
        :type predtree: RSTTree class
        :param predtree: RST tree from the parsing algorithm
        """
        goldbrackets = goldtree.bracketing()
        predbrackets = predtree.bracketing()
        self.span_num += len(goldbrackets)
        idx_of_level = {'span': 1, 'nuclearity': 2, 'relation': 3}
        for level in self.levels:
            if level not in idx_of_level:
                raise ValueError("Unrecognized eval level: {}".format(level))
            self._eval(goldbrackets, predbrackets, idx=idx_of_level[level])

    def _eval(self, goldbrackets, predbrackets, idx):
        """ Evaluate one level on each discourse span.

        idx selects which bracket fields are compared:
        1 -> span only, 2 -> span+nuclearity, 3 -> (span, relation).
        """
        if idx == 1 or idx == 2:
            goldspan = [item[:idx] for item in goldbrackets]
            predspan = [item[:idx] for item in predbrackets]
        elif idx == 3:
            goldspan = [(item[0], item[2]) for item in goldbrackets]
            predspan = [(item[0], item[2]) for item in predbrackets]
        else:
            raise ValueError('Undefined idx for evaluation')
        hitspan = [span for span in goldspan if span in predspan]
        # Every hit is, by construction, in both goldspan and predspan, so the
        # original double-count loop always yielded p == r == len(hitspan).
        hit = float(len(hitspan))
        perf = {1: self.span_perf, 2: self.nuc_perf, 3: self.rela_perf}[idx]
        perf.hit_num += hit
        # NOTE(review): hits/len(goldspan) is conventionally *recall* and
        # hits/len(predspan) *precision*; the attribute names are swapped
        # here.  Kept as-is so reported numbers do not change.
        perf.percision.append(hit / len(goldspan))
        perf.recall.append(hit / len(predspan))
        if idx == 3:
            # Per-relation tallies for the detailed report.
            for span in hitspan:
                relation = span[-1]
                self.hit_num_each_relation[relation] = self.hit_num_each_relation.get(relation, 0) + 1
            for span in goldspan:
                relation = span[-1]
                self.gold_num_each_relation[relation] = self.gold_num_each_relation.get(relation, 0) + 1
            for span in predspan:
                relation = span[-1]
                self.pred_num_each_relation[relation] = self.pred_num_each_relation.get(relation, 0) + 1

    def report(self):
        """ Compute and print the average and global precision for each eval
        level, then a per-relation precision/recall/F1 table.
        """
        perf_of_level = {'span': self.span_perf,
                         'nuclearity': self.nuc_perf,
                         'relation': self.rela_perf}
        for level in self.levels:
            if level not in perf_of_level:
                raise ValueError("Unrecognized eval level")
            perf = perf_of_level[level]
            p = numpy.array(perf.percision).mean()
            print('Average precision on {} level is {:.4f}'.format(level, p))
            print('Global precision on {} level is {:.4f}'.format(level, perf.hit_num / self.span_num))
        for relation in sorted(self.gold_num_each_relation.keys()):
            hit_num = self.hit_num_each_relation.get(relation, 0)
            gold_num = self.gold_num_each_relation[relation]
            pred_num = self.pred_num_each_relation.get(relation, 0)
            precision = hit_num / pred_num if pred_num > 0 else 0
            recall = hit_num / gold_num
            denom = precision + recall
            # Guard replaces the original try/except ZeroDivisionError.
            f1 = 2 * precision * recall / denom if denom > 0 else 0
            print(
                'Relation\t{:20}\tgold_num\t{:4d}\tprecision\t{:05.4f}\trecall\t{:05.4f}\tf1\t{:05.4f}'.format(relation,
                                                                                                              gold_num,
                                                                                                              precision,
                                                                                                              recall,
                                                                                                              f1))
| [
"numpy.array"
] | [((4236, 4273), 'numpy.array', 'numpy.array', (['self.span_perf.percision'], {}), '(self.span_perf.percision)\n', (4247, 4273), False, 'import numpy\n'), ((4301, 4335), 'numpy.array', 'numpy.array', (['self.span_perf.recall'], {}), '(self.span_perf.recall)\n', (4312, 4335), False, 'import numpy\n'), ((4779, 4815), 'numpy.array', 'numpy.array', (['self.nuc_perf.percision'], {}), '(self.nuc_perf.percision)\n', (4790, 4815), False, 'import numpy\n'), ((4843, 4876), 'numpy.array', 'numpy.array', (['self.nuc_perf.recall'], {}), '(self.nuc_perf.recall)\n', (4854, 4876), False, 'import numpy\n'), ((5341, 5378), 'numpy.array', 'numpy.array', (['self.rela_perf.percision'], {}), '(self.rela_perf.percision)\n', (5352, 5378), False, 'import numpy\n'), ((5406, 5440), 'numpy.array', 'numpy.array', (['self.rela_perf.recall'], {}), '(self.rela_perf.recall)\n', (5417, 5440), False, 'import numpy\n')] |
import tensorflow as tf
import numpy as np
import random
class GaussianNoise():
    """Epsilon-gated Gaussian exploration noise.

    With probability ``epsilon`` a call to :meth:`noise` returns zero-mean
    Gaussian noise scaled by ``sigma``; otherwise zeros.  ``epsilon`` is
    annealed linearly from ``epsilon_init`` to ``epsilon_end`` by
    :meth:`reset`.
    """

    def __init__(self, action_dimension, epsilon_init=0.7, epsilon_end=0.3,
                 mu=0, theta=0.15, sigma=0.25):
        self.action_dimension = action_dimension
        self.mu = mu
        self.theta = theta  # kept for interface compatibility; not used by the Gaussian noise
        self.sigma = sigma
        self.state = np.ones(self.action_dimension) * self.mu
        self.epsilon_decay = 0.9995  # kept for interface compatibility; the linear decay below is used
        self.epsilon = epsilon_init
        self.epsilon_end = epsilon_end
        # Linear annealing step: epsilon reaches epsilon_end after 10k resets.
        self.decay = (epsilon_init - epsilon_end) / 10000.

    def reset(self):
        """Anneal epsilon by one linear step, clipped at epsilon_end."""
        self.epsilon = np.maximum(self.epsilon - self.decay, self.epsilon_end)

    def noise(self, step):
        """Return a (1, action_dimension) noise sample, zeroed with prob. 1 - epsilon.

        ``step`` is accepted for interface compatibility but not used.
        """
        self.is_noise = np.random.uniform() < self.epsilon
        sample = np.random.normal(size=[1, self.action_dimension]) * self.sigma
        return sample * self.is_noise
class Actor_USL():
    """Actor network for the USL trainer (TF1 graph mode).

    Builds a deep fully-connected network under ``scope``; variables are
    shared across repeated forward() calls via tf.AUTO_REUSE.
    """
    def __init__(self, action_size, scope = 'DDPG_Actor'):
        # action_size: dimensionality of the produced action vector.
        self.output_size = action_size
        self.scope = scope
    def forward(self,state):
        """Build (or reuse) the graph mapping ``state`` to an action tensor.

        Returns a tensor of shape [batch, output_size]; the 1 + elu(x) head
        keeps every component strictly positive.
        """
        with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
            self.state = state
            # Stack of fully connected leaky-ReLU layers, 2048 down to 128 units.
            self.fcn1 = tf.contrib.layers.fully_connected(self.state, 2048, activation_fn = tf.nn.leaky_relu)
            self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn = tf.nn.leaky_relu)
            self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 512, activation_fn =tf.nn.leaky_relu)
            self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 256, activation_fn =tf.nn.leaky_relu)
            self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 128, activation_fn =tf.nn.leaky_relu)
            # Strictly positive output: elu(x) > -1, so 1 + elu(x) > 0.
            self.action = 1+tf.nn.elu(tf.contrib.layers.fully_connected(self.fcn12,self.output_size, activation_fn = None))
        return self.action
class Actor():
    """DDPG actor network (TF1 graph mode).

    The input state is split at index ``thr`` into two parts, each embedded
    separately, then concatenated and passed through a deep MLP.  Output is
    tanh-squashed into [-1, 1].
    """
    def __init__(self, action_size, thr, scope = 'DDPG_Actor', is_tanh = True):
        # thr: split point inside the state vector between the two input groups.
        self.output_size = action_size
        self.scope = scope
        # is_tanh is stored but the forward pass below always applies tanh.
        self.is_tanh = is_tanh
        self.thr = thr
    def forward(self,state):
        """Build (or reuse) the graph mapping ``state`` to a tanh action.

        Returns a tensor of shape [batch, output_size] with values in [-1, 1].
        """
        with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
            ## Actor
            # Split the state into two feature groups and embed each separately.
            state_part1 = state[:,0:self.thr]
            state_part2 = state[:, self.thr::]
            state_part1_ = tf.contrib.layers.fully_connected(state_part1, 512, activation_fn = tf.nn.leaky_relu)
            state_part2_ = tf.contrib.layers.fully_connected(state_part2, 512, activation_fn = tf.nn.leaky_relu)
            state_post = tf.concat([state_part1_, state_part2_],axis=1)
            # Deep MLP trunk, 2048 down to 512 units.
            self.fcn1 = tf.contrib.layers.fully_connected(state_post, 2048, activation_fn = tf.nn.leaky_relu)
            self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn = tf.nn.leaky_relu)
            self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 512, activation_fn =tf.nn.leaky_relu)
            self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 512, activation_fn =tf.nn.leaky_relu)
            self.action = tf.tanh(tf.contrib.layers.fully_connected(self.fcn12,self.output_size, activation_fn = None))
        return self.action
class Critic():
    """DDPG critic network (TF1 graph mode).

    Instead of feeding raw (state, action) pairs, the critic first converts
    them into hand-crafted features via state_action_to_PCstate and scores
    those with a deep MLP.
    """
    def __init__(self, reward_size, BS, UE, scope = 'DDPG_Critic'):
        # BS: number of base stations, UE: number of user terminals.
        self.scope = scope
        self.reward_size = reward_size
        self.BS = BS
        self.UE = UE
        # Mask that is 1 for every BS except the last one (presumably the
        # renewable-powered stations - TODO confirm against caller).
        self.renew = (np.arange(self.BS) != self.BS-1).astype(int) #np.array([1,1,1,0])
    def state_action_to_PCstate(self, state, action):
        """Turn the raw (state, action) pair into two critic feature tensors.

        NOTE(review): the slicing below assumes the state layout
        [SNR(UE*BS) | QoS(UE) | per-BS fields (5*BS)] - confirm against the
        environment that produces the state vector.
        """
        # Map the tanh action in [-1, 1] to a transmit power fraction; the
        # scaling differs for 'renew'-masked and remaining stations.
        P = tf.reshape((self.renew * (0.01 + 0.69 * (action+1)/2) + (1-self.renew) * (0.01 + 0.99 * (action+1)/2)), [-1, 1, self.BS])
        # Per-(UE, BS) received power, then SINR and a Shannon-style rate.
        SNR_p = 2000*tf.reshape(state[:,0:self.UE*self.BS],[-1, self.UE,self.BS]) * P
        SINR = SNR_p/ ( 1+ tf.reduce_sum(SNR_p,axis=2,keepdims=True)- SNR_p)
        Rate = tf.log(1+SINR)/tf.log(2.0)*18 + 0.001
        QoS = tf.reshape(state[:,self.UE*self.BS:self.UE*self.BS + self.UE ], [-1, self.UE, 1])
        # Remaining per-BS fields of the state vector.
        Avail_energy = state[:,self.UE*self.BS + self.UE : self.UE*self.BS + self.UE + self.BS]
        grid_power= state[:, self.UE*self.BS + self.UE + 2 * self.BS : self.UE*self.BS + self.UE + 3 *self.BS]
        RES= state[:, self.UE*self.BS + self.UE + 3 * self.BS : self.UE*self.BS + self.UE + 4 *self.BS]
        Backhaul = state[:, self.UE*self.BS + self.UE + 4 * self.BS : self.UE*self.BS + self.UE + 5 *self.BS]
        state_1 = tf.reshape(-tf.log(QoS/Rate), [-1, self.BS * self.UE]) # QoS-Rate Ratio [-1, BS*UE]
        state_2 = tf.reshape( -tf.log(QoS / 10 /tf.reshape(Backhaul,[-1, 1, self.BS])), [-1, self.BS * self.UE]) # QoS-Bh Ratio [-1, BS * UE]
        state_3 = -tf.log(self.renew * Avail_energy * tf.reshape(1-P, [-1,self.BS]) +RES + grid_power) # Remaining energy [-1, BS]
        state_4 = tf.reduce_max(Rate, axis=1)/100.0 # Max_Rate [-1,BS]
        state_5 = RES + 0.0 # RES [-1, BS]
        return tf.concat([state_1, state_2],axis=1), tf.concat([state_3, state_4, state_5], axis=1)
    def forward(self,state, action):
        """Build (or reuse) the graph scoring (state, action) -> Q-value.

        Returns a tensor of shape [batch, reward_size].
        """
        with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
            # Embed the two feature groups separately, then concatenate.
            state_part1, state_part2 = self.state_action_to_PCstate(state, action)
            state_part1_ = tf.contrib.layers.fully_connected(state_part1, 512, activation_fn = tf.nn.leaky_relu)
            state_part2_ = tf.contrib.layers.fully_connected(state_part2, 512, activation_fn = tf.nn.leaky_relu)
            state_post = tf.concat([state_part1_, state_part2_],axis=1)
            # Deep MLP trunk, 2048 down to 512 units.
            self.fcn1 = tf.contrib.layers.fully_connected(state_post, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
            self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 1024, activation_fn =tf.nn.leaky_relu)
            self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 512, activation_fn =tf.nn.leaky_relu)
            self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 512, activation_fn =tf.nn.leaky_relu)
            self.Qval = tf.contrib.layers.fully_connected(self.fcn12,self.reward_size,activation_fn = None)
        return self.Qval
class DDPG():
    """Deep Deterministic Policy Gradient agent (TF1 graph mode).

    Wires actor/critic networks (plus their target copies) into one graph,
    builds the training ops, and exposes helpers for action selection,
    experience storage and loss inspection.
    """
    def __init__(self, scope, sess, BS, UE, Actor , Critic,Actor_target , Critic_target, OUNoise, replay_buffer, state_size, action_size,reward_size, gamma, lr_actor, lr_critic, batch_size,tau,is_tanh):
        self.sess = sess
        self.batch_size = batch_size
        self.lr_actor = lr_actor
        self.lr_critic = lr_critic
        self.scope = scope
        self.is_tanh = is_tanh
        self.gamma = gamma
        self.Actor = Actor
        self.Critic = Critic
        self.Actor_target = Actor_target
        self.Critic_target = Critic_target
        self.noise = OUNoise
        self.replay_buffer = replay_buffer
        self.state_size = state_size
        self.action_size = action_size
        self.tau = tau
        self.reward_size = reward_size
        # Scratch buffers for the current transition.
        self.state = np.zeros([1,state_size])
        self.action = np.zeros([1, action_size])
        self.state_next = np.zeros([1,state_size])
        self.reward = np.zeros([1,self.reward_size])
        # Graph placeholders for batched training.
        self.state_ph = tf.placeholder(shape = [None,state_size], dtype = tf.float32)
        self.action_ph = tf.placeholder(shape = [None,action_size], dtype = tf.float32)
        self.state_ph_next = tf.placeholder(shape = [None,state_size], dtype= tf.float32)
        self.reward_ph = tf.placeholder(shape = [None,self.reward_size], dtype = tf.float32)
        self.BS = BS
        self.UE = UE
        # Network models + Actor network update.
        self.action_tf = self.Actor.forward(self.state_ph)
        self.qval = self.Critic.forward(self.state_ph, self.action_tf)
        # dQ/da: direction in action space that increases the critic's value.
        self.gradient_action = tf.reshape(tf.gradients(tf.reduce_sum(self.qval),self.action_tf),[-1,self.action_size])
        # Actor is trained to regress toward the action nudged along dQ/da
        # (a target-action variant of the deterministic policy gradient).
        self.target_action = tf.clip_by_value(tf.stop_gradient(self.action_tf + 0.03*self.gradient_action),-0.99,0.99)
        # Per-sample importance weights (from prioritized replay).
        self.loss_weight = tf.placeholder(shape= [None,1], dtype = tf.float32)
        self.policy_loss = tf.reduce_mean(self.loss_weight*tf.reduce_mean((self.target_action-self.action_tf)**2,axis=1,keepdims=True))
        self.train_policy = tf.train.AdamOptimizer(learning_rate = self.lr_actor).minimize(self.policy_loss)
        ## Critic network update (standard TD target via target networks).
        self.action_next_tf = self.Actor_target.forward(self.state_ph_next)
        self.target_qval = tf.stop_gradient(self.Critic_target.forward(self.state_ph_next, self.action_next_tf))
        self.target_critic = self.reward_ph + self.gamma * self.target_qval
        self.loss_critic = tf.reduce_mean(self.loss_weight * tf.reduce_mean((self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
        # Per-sample TD error magnitude, used to update replay priorities.
        self.TD_error = tf.sqrt(tf.reduce_sum(tf.abs(self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
        self.loss_critic_wo_noise = tf.reduce_mean(tf.reduce_mean((self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
        self.train_critic = tf.train.AdamOptimizer(learning_rate = self.lr_critic).minimize(self.loss_critic)
        # Deterministic action from the target actor (used at test time).
        self.Actor_noiseless_tf = self.Actor_target.forward(self.state_ph)
        # Soft update ops: assumes the first half of the trainable variables in
        # `scope` are the online nets and the second half the target nets
        # (relies on construction order above - TODO confirm).
        tfVars = tf.trainable_variables(scope = self.scope )
        tau = self.tau
        total_vars = len(tfVars)
        self.op_holder =[]
        for index, var in enumerate(tfVars[0:int(total_vars/2)]):
            self.op_holder.append(tfVars[index+int(total_vars/2)].assign((var.value()*tau)+((1-tau)*tfVars[index+int(total_vars/2)].value())))
    def add_exp(self, state, state_next, action, reward):
        # Store one transition in the (prioritized) replay buffer.
        self.replay_buffer.add(state, state_next, action, reward)
    def forward_test_action(self,state):
        # Deterministic action from the target actor (no exploration noise).
        return self.sess.run(self.Actor_noiseless_tf, feed_dict = {self.state_ph : state})
    def forward_noiseless_action(self,state):
        # Deterministic action from the online actor.
        return self.sess.run(self.action_tf, feed_dict = {self.state_ph : state})
    def forward_noise_action(self,state, step):
        # Exploratory action: actor output plus exploration noise, clipped to
        # the valid action range.
        # NOTE(review): the non-tanh branch calls self.noise.noise() without
        # the `step` argument, which GaussianNoise.noise requires - confirm
        # which noise class is used in that configuration.
        if self.is_tanh == True:
            output = np.clip(self.sess.run(self.action_tf, feed_dict = {self.state_ph : state}) + self.noise.noise(step), -1., 1.)
        else:
            output = np.clip(self.sess.run(self.action_tf, feed_dict = {self.state_ph : state}) + self.noise.noise(), 0.00, 1000.)
        return output
    def forward_loss(self,s,s_1,a,r):
        # Critic loss on a batch, without importance weighting.
        return self.sess.run(self.loss_critic_wo_noise, feed_dict = {self.state_ph : s, self.action_ph: a, self.state_ph_next: s_1, self.reward_ph : r})
class PER():
    """Proportional Prioritized Experience Replay buffer.

    Transitions are sampled with probability proportional to
    ``priority ** alpha`` and paired with importance-sampling weights
    governed by ``beta``.  ``alpha`` decays and ``beta`` grows linearly
    over 15k calls to :meth:`sample`.
    """

    def __init__(self, buffer_size=10000, alpha=0.4, epsilon_per=0.001, beta=0.7):
        self.alpha = alpha
        self.beta = beta
        self.epsilon = epsilon_per  # priority floor so every entry stays sampleable
        self.buffer_size = buffer_size
        self.buffer = []
        self.prob_bean = np.zeros([0])  # per-entry priorities, parallel to buffer
        self.alpha_decay = (self.alpha - 0.0) / 15000
        self.beta_increasing = (1.0 - self.beta) / 15000

    def add(self, s, s_1, a, r):
        """Store one transition; new entries get the current maximum priority."""
        self.buffer.append((s, s_1, a, r))
        if self.prob_bean.shape[0] == 0:
            new_priority = self.epsilon
        else:
            new_priority = max(self.prob_bean)
        self.prob_bean = np.concatenate([self.prob_bean, [new_priority]], axis=0)
        if len(self.buffer) == self.buffer_size + 1:
            # Evict the oldest entry once capacity is exceeded.
            self.prob_bean = self.prob_bean[1:self.buffer_size + 1]
            del self.buffer[0]

    def sample(self, batch_size):
        """Draw ``batch_size`` distinct transitions by priority.

        Returns (s, s_1, a, r, loss_weight, idx) where loss_weight are the
        max-normalised importance-sampling weights and idx the sampled
        buffer positions (pass both back to update_weight after training).
        """
        self.alpha = np.maximum(self.alpha - self.alpha_decay, 0.0)
        self.beta = np.minimum(self.beta + self.beta_increasing, 1.0)
        scaled = self.prob_bean ** self.alpha
        idx = np.random.choice(range(len(self.buffer)), size=batch_size, replace=False, p=scaled / sum(scaled))
        s, s_1, a, r = zip(*[self.buffer[i] for i in idx])
        s = np.concatenate(s)
        s_1 = np.concatenate(s_1)
        a = np.concatenate(a)
        r = np.concatenate(r)
        # Importance-sampling correction, normalised by its maximum.
        loss_weight = (1 / self.prob_bean[idx] ** self.alpha * sum(scaled) / len(self.buffer)) ** self.beta
        loss_weight = loss_weight / max(loss_weight)
        return s, s_1, a, r, loss_weight, idx

    def update_weight(self, idx, TD_error):
        """Refresh priorities of entries ``idx`` from new TD errors (+floor)."""
        self.prob_bean[idx] = TD_error.reshape([-1]) + self.epsilon
class USL():
    """Trainer that fits an Actor to externally supplied target actions via
    mean-squared error (TF1 graph mode).
    """
    def __init__(self, scope, sess, BS, UE, Actor , replay_buffer, state_size, action_size, lr_actor, batch_size, alpha_init):
        self.sess = sess
        self.batch_size = batch_size
        self.lr_actor = lr_actor
        self.scope = scope
        self.Actor = Actor
        self.replay_buffer = replay_buffer
        self.state_size = state_size
        self.action_size = action_size
        # Scratch buffers for the current sample.
        self.state = np.zeros([1,state_size])
        self.action = np.zeros([1, action_size])
        self.state_ph = tf.placeholder(shape = [None,state_size], dtype = tf.float32)
        self.BS = BS
        self.UE = UE
        self.radius = 4 * self.BS**0.5
        # Indicator vector: ones for the first BS entries, zeros for the rest.
        self.mu_ind = np.concatenate([np.ones([1,self.BS]), np.zeros([1,self.BS])],axis=1)
        # Network model + supervised regression loss on target actions.
        self.action_tf = self.Actor.forward(self.state_ph)
        self.target_action= tf.placeholder(shape = [None,action_size], dtype = tf.float32)
        self.loss = tf.reduce_mean((self.target_action - self.action_tf)**2)
        self.train_weights = tf.train.AdamOptimizer(learning_rate = self.lr_actor).minimize(self.loss)
        self.alpha = alpha_init
    def add_exp(self, state, Rate, QoS, Backhaul):
        # Store one training sample in the replay buffer.
        self.replay_buffer.add(state, Rate, QoS, Backhaul)
    def forward_action(self,state):
        # Deterministic actor output for the given batch of states.
        return self.sess.run(self.action_tf, feed_dict = {self.state_ph : state})
class USL_replay():
    """FIFO experience buffer for USL training samples."""

    def __init__(self, buffer_size=10000):
        self.buffer_size = buffer_size
        self.buffer = []

    def add(self, State, Rate, QoS, Backhaul):
        """Store one sample; Rate/QoS/Backhaul gain a leading batch axis."""
        entry = (State,
                 np.expand_dims(Rate, 0),
                 np.expand_dims(QoS, 0),
                 np.expand_dims(Backhaul, 0))
        self.buffer.append(entry)
        if len(self.buffer) == self.buffer_size + 1:
            # Drop the oldest sample once capacity is exceeded.
            del self.buffer[0]

    def sample(self, batch_size):
        """Return ``batch_size`` uniformly drawn samples, batched on axis 0."""
        idx = np.random.choice(range(len(self.buffer)), size=batch_size, replace=False)
        chosen = [self.buffer[i] for i in idx]
        State, Rate, QoS, Backhaul = (np.concatenate(part) for part in zip(*chosen))
        return State, Rate, QoS, Backhaul
| [
"tensorflow.reduce_sum",
"tensorflow.reduce_mean",
"tensorflow.log",
"numpy.arange",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.concat",
"numpy.concatenate",
"tensorflow.trainable_variables",
"numpy.maximum",
"tensorflow.train.AdamOptimizer",
"numpy.rand... | [((591, 646), 'numpy.maximum', 'np.maximum', (['(self.epsilon - self.decay)', 'self.epsilon_end'], {}), '(self.epsilon - self.decay, self.epsilon_end)\n', (601, 646), True, 'import numpy as np\n'), ((5112, 5245), 'tensorflow.reshape', 'tf.reshape', (['(self.renew * (0.01 + 0.69 * (action + 1) / 2) + (1 - self.renew) * (0.01 +\n 0.99 * (action + 1) / 2))', '[-1, 1, self.BS]'], {}), '(self.renew * (0.01 + 0.69 * (action + 1) / 2) + (1 - self.renew) *\n (0.01 + 0.99 * (action + 1) / 2), [-1, 1, self.BS])\n', (5122, 5245), True, 'import tensorflow as tf\n'), ((5474, 5563), 'tensorflow.reshape', 'tf.reshape', (['state[:, self.UE * self.BS:self.UE * self.BS + self.UE]', '[-1, self.UE, 1]'], {}), '(state[:, self.UE * self.BS:self.UE * self.BS + self.UE], [-1,\n self.UE, 1])\n', (5484, 5563), True, 'import tensorflow as tf\n'), ((9343, 9368), 'numpy.zeros', 'np.zeros', (['[1, state_size]'], {}), '([1, state_size])\n', (9351, 9368), True, 'import numpy as np\n'), ((9390, 9416), 'numpy.zeros', 'np.zeros', (['[1, action_size]'], {}), '([1, action_size])\n', (9398, 9416), True, 'import numpy as np\n'), ((9443, 9468), 'numpy.zeros', 'np.zeros', (['[1, state_size]'], {}), '([1, state_size])\n', (9451, 9468), True, 'import numpy as np\n'), ((9490, 9521), 'numpy.zeros', 'np.zeros', (['[1, self.reward_size]'], {}), '([1, self.reward_size])\n', (9498, 9521), True, 'import numpy as np\n'), ((9545, 9603), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, state_size]', 'dtype': 'tf.float32'}), '(shape=[None, state_size], dtype=tf.float32)\n', (9559, 9603), True, 'import tensorflow as tf\n'), ((9632, 9691), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, action_size]', 'dtype': 'tf.float32'}), '(shape=[None, action_size], dtype=tf.float32)\n', (9646, 9691), True, 'import tensorflow as tf\n'), ((9724, 9782), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, state_size]', 'dtype': 'tf.float32'}), '(shape=[None, 
state_size], dtype=tf.float32)\n', (9738, 9782), True, 'import tensorflow as tf\n'), ((9810, 9874), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.reward_size]', 'dtype': 'tf.float32'}), '(shape=[None, self.reward_size], dtype=tf.float32)\n', (9824, 9874), True, 'import tensorflow as tf\n'), ((10377, 10426), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 1]', 'dtype': 'tf.float32'}), '(shape=[None, 1], dtype=tf.float32)\n', (10391, 10426), True, 'import tensorflow as tf\n'), ((11691, 11731), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'self.scope'}), '(scope=self.scope)\n', (11713, 11731), True, 'import tensorflow as tf\n'), ((13313, 13326), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (13321, 13326), True, 'import numpy as np\n'), ((13991, 14037), 'numpy.maximum', 'np.maximum', (['(self.alpha - self.alpha_decay)', '(0.0)'], {}), '(self.alpha - self.alpha_decay, 0.0)\n', (14001, 14037), True, 'import numpy as np\n'), ((14056, 14105), 'numpy.minimum', 'np.minimum', (['(self.beta + self.beta_increasing)', '(1.0)'], {}), '(self.beta + self.beta_increasing, 1.0)\n', (14066, 14105), True, 'import numpy as np\n'), ((14420, 14437), 'numpy.concatenate', 'np.concatenate', (['s'], {}), '(s)\n', (14434, 14437), True, 'import numpy as np\n'), ((14452, 14471), 'numpy.concatenate', 'np.concatenate', (['s_1'], {}), '(s_1)\n', (14466, 14471), True, 'import numpy as np\n'), ((14484, 14501), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (14498, 14501), True, 'import numpy as np\n'), ((14514, 14531), 'numpy.concatenate', 'np.concatenate', (['r'], {}), '(r)\n', (14528, 14531), True, 'import numpy as np\n'), ((15309, 15334), 'numpy.zeros', 'np.zeros', (['[1, state_size]'], {}), '([1, state_size])\n', (15317, 15334), True, 'import numpy as np\n'), ((15356, 15382), 'numpy.zeros', 'np.zeros', (['[1, action_size]'], {}), '([1, action_size])\n', (15364, 15382), True, 'import numpy as np\n'), 
((15407, 15465), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, state_size]', 'dtype': 'tf.float32'}), '(shape=[None, state_size], dtype=tf.float32)\n', (15421, 15465), True, 'import tensorflow as tf\n'), ((15785, 15844), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, action_size]', 'dtype': 'tf.float32'}), '(shape=[None, action_size], dtype=tf.float32)\n', (15799, 15844), True, 'import tensorflow as tf\n'), ((15868, 15926), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.target_action - self.action_tf) ** 2)'], {}), '((self.target_action - self.action_tf) ** 2)\n', (15882, 15926), True, 'import tensorflow as tf\n'), ((16521, 16544), 'numpy.expand_dims', 'np.expand_dims', (['Rate', '(0)'], {}), '(Rate, 0)\n', (16535, 16544), True, 'import numpy as np\n'), ((16558, 16580), 'numpy.expand_dims', 'np.expand_dims', (['QoS', '(0)'], {}), '(QoS, 0)\n', (16572, 16580), True, 'import numpy as np\n'), ((16599, 16626), 'numpy.expand_dims', 'np.expand_dims', (['Backhaul', '(0)'], {}), '(Backhaul, 0)\n', (16613, 16626), True, 'import numpy as np\n'), ((17085, 17106), 'numpy.concatenate', 'np.concatenate', (['State'], {}), '(State)\n', (17099, 17106), True, 'import numpy as np\n'), ((17122, 17142), 'numpy.concatenate', 'np.concatenate', (['Rate'], {}), '(Rate)\n', (17136, 17142), True, 'import numpy as np\n'), ((17157, 17176), 'numpy.concatenate', 'np.concatenate', (['QoS'], {}), '(QoS)\n', (17171, 17176), True, 'import numpy as np\n'), ((17196, 17220), 'numpy.concatenate', 'np.concatenate', (['Backhaul'], {}), '(Backhaul)\n', (17210, 17220), True, 'import numpy as np\n'), ((340, 370), 'numpy.ones', 'np.ones', (['self.action_dimension'], {}), '(self.action_dimension)\n', (347, 370), True, 'import numpy as np\n'), ((707, 726), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (724, 726), True, 'import numpy as np\n'), ((1063, 1113), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {'reuse': 
'tf.AUTO_REUSE'}), '(self.scope, reuse=tf.AUTO_REUSE)\n', (1080, 1113), True, 'import tensorflow as tf\n'), ((1172, 1260), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.state', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.state, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (1205, 1260), True, 'import tensorflow as tf\n'), ((1281, 1368), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn1', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn1, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (1314, 1368), True, 'import tensorflow as tf\n'), ((1389, 1476), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn2', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn2, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (1422, 1476), True, 'import tensorflow as tf\n'), ((1496, 1583), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn3', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn3, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (1529, 1583), True, 'import tensorflow as tf\n'), ((1603, 1690), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn4', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn4, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (1636, 1690), True, 'import tensorflow as tf\n'), ((1710, 1797), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn5', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn5, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (1743, 1797), True, 'import tensorflow as tf\n'), ((1817, 1904), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn6', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn6, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (1850, 1904), True, 'import tensorflow as 
tf\n'), ((1924, 2011), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn7', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn7, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (1957, 2011), True, 'import tensorflow as tf\n'), ((2031, 2118), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn8', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn8, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (2064, 2118), True, 'import tensorflow as tf\n'), ((2139, 2225), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn9', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn9, 512, activation_fn=tf.nn.\n leaky_relu)\n', (2172, 2225), True, 'import tensorflow as tf\n'), ((2246, 2333), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn10', '(256)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn10, 256, activation_fn=tf.nn.\n leaky_relu)\n', (2279, 2333), True, 'import tensorflow as tf\n'), ((2354, 2441), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn11', '(128)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn11, 128, activation_fn=tf.nn.\n leaky_relu)\n', (2387, 2441), True, 'import tensorflow as tf\n'), ((2856, 2906), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {'reuse': 'tf.AUTO_REUSE'}), '(self.scope, reuse=tf.AUTO_REUSE)\n', (2873, 2906), True, 'import tensorflow as tf\n'), ((3051, 3139), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_part1', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(state_part1, 512, activation_fn=tf.nn.\n leaky_relu)\n', (3084, 3139), True, 'import tensorflow as tf\n'), ((3164, 3252), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_part2', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), 
'(state_part2, 512, activation_fn=tf.nn.\n leaky_relu)\n', (3197, 3252), True, 'import tensorflow as tf\n'), ((3277, 3324), 'tensorflow.concat', 'tf.concat', (['[state_part1_, state_part2_]'], {'axis': '(1)'}), '([state_part1_, state_part2_], axis=1)\n', (3286, 3324), True, 'import tensorflow as tf\n'), ((3348, 3436), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_post', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(state_post, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (3381, 3436), True, 'import tensorflow as tf\n'), ((3457, 3544), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn1', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn1, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (3490, 3544), True, 'import tensorflow as tf\n'), ((3565, 3652), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn2', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn2, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (3598, 3652), True, 'import tensorflow as tf\n'), ((3672, 3759), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn3', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn3, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (3705, 3759), True, 'import tensorflow as tf\n'), ((3779, 3866), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn4', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn4, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (3812, 3866), True, 'import tensorflow as tf\n'), ((3886, 3973), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn5', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn5, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (3919, 3973), True, 'import tensorflow as tf\n'), ((3993, 4080), 'tensorflow.contrib.layers.fully_connected', 
'tf.contrib.layers.fully_connected', (['self.fcn6', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn6, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (4026, 4080), True, 'import tensorflow as tf\n'), ((4100, 4187), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn7', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn7, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (4133, 4187), True, 'import tensorflow as tf\n'), ((4207, 4294), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn8', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn8, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (4240, 4294), True, 'import tensorflow as tf\n'), ((4315, 4402), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn9', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn9, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (4348, 4402), True, 'import tensorflow as tf\n'), ((4423, 4510), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn10', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn10, 512, activation_fn=tf.nn.\n leaky_relu)\n', (4456, 4510), True, 'import tensorflow as tf\n'), ((4531, 4618), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn11', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn11, 512, activation_fn=tf.nn.\n leaky_relu)\n', (4564, 4618), True, 'import tensorflow as tf\n'), ((6398, 6425), 'tensorflow.reduce_max', 'tf.reduce_max', (['Rate'], {'axis': '(1)'}), '(Rate, axis=1)\n', (6411, 6425), True, 'import tensorflow as tf\n'), ((6518, 6555), 'tensorflow.concat', 'tf.concat', (['[state_1, state_2]'], {'axis': '(1)'}), '([state_1, state_2], axis=1)\n', (6527, 6555), True, 'import tensorflow as tf\n'), ((6556, 6602), 'tensorflow.concat', 'tf.concat', (['[state_3, state_4, state_5]'], {'axis': '(1)'}), 
'([state_3, state_4, state_5], axis=1)\n', (6565, 6602), True, 'import tensorflow as tf\n'), ((6662, 6712), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {'reuse': 'tf.AUTO_REUSE'}), '(self.scope, reuse=tf.AUTO_REUSE)\n', (6679, 6712), True, 'import tensorflow as tf\n'), ((6827, 6915), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_part1', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(state_part1, 512, activation_fn=tf.nn.\n leaky_relu)\n', (6860, 6915), True, 'import tensorflow as tf\n'), ((6940, 7028), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_part2', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(state_part2, 512, activation_fn=tf.nn.\n leaky_relu)\n', (6973, 7028), True, 'import tensorflow as tf\n'), ((7053, 7100), 'tensorflow.concat', 'tf.concat', (['[state_part1_, state_part2_]'], {'axis': '(1)'}), '([state_part1_, state_part2_], axis=1)\n', (7062, 7100), True, 'import tensorflow as tf\n'), ((7124, 7212), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['state_post', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(state_post, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (7157, 7212), True, 'import tensorflow as tf\n'), ((7232, 7319), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn1', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn1, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (7265, 7319), True, 'import tensorflow as tf\n'), ((7339, 7426), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn2', '(2048)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn2, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (7372, 7426), True, 'import tensorflow as tf\n'), ((7446, 7533), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn3', '(2048)'], {'activation_fn': 
'tf.nn.leaky_relu'}), '(self.fcn3, 2048, activation_fn=tf.nn.\n leaky_relu)\n', (7479, 7533), True, 'import tensorflow as tf\n'), ((7553, 7640), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn4', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn4, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (7586, 7640), True, 'import tensorflow as tf\n'), ((7660, 7747), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn5', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn5, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (7693, 7747), True, 'import tensorflow as tf\n'), ((7767, 7854), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn6', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn6, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (7800, 7854), True, 'import tensorflow as tf\n'), ((7874, 7961), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn7', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn7, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (7907, 7961), True, 'import tensorflow as tf\n'), ((7981, 8068), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn8', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn8, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (8014, 8068), True, 'import tensorflow as tf\n'), ((8089, 8176), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn9', '(1024)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn9, 1024, activation_fn=tf.nn.\n leaky_relu)\n', (8122, 8176), True, 'import tensorflow as tf\n'), ((8197, 8284), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn10', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn10, 512, activation_fn=tf.nn.\n leaky_relu)\n', (8230, 8284), True, 
'import tensorflow as tf\n'), ((8305, 8392), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn11', '(512)'], {'activation_fn': 'tf.nn.leaky_relu'}), '(self.fcn11, 512, activation_fn=tf.nn.\n leaky_relu)\n', (8338, 8392), True, 'import tensorflow as tf\n'), ((8413, 8500), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn12', 'self.reward_size'], {'activation_fn': 'None'}), '(self.fcn12, self.reward_size,\n activation_fn=None)\n', (8446, 8500), True, 'import tensorflow as tf\n'), ((10264, 10326), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(self.action_tf + 0.03 * self.gradient_action)'], {}), '(self.action_tf + 0.03 * self.gradient_action)\n', (10280, 10326), True, 'import tensorflow as tf\n'), ((13582, 13638), 'numpy.concatenate', 'np.concatenate', (['[self.prob_bean, [self.epsilon]]'], {'axis': '(0)'}), '([self.prob_bean, [self.epsilon]], axis=0)\n', (13596, 13638), True, 'import numpy as np\n'), ((758, 807), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[1, self.action_dimension]'}), '(size=[1, self.action_dimension])\n', (774, 807), True, 'import numpy as np\n'), ((4649, 4736), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn12', 'self.output_size'], {'activation_fn': 'None'}), '(self.fcn12, self.output_size,\n activation_fn=None)\n', (4682, 4736), True, 'import tensorflow as tf\n'), ((5256, 5321), 'tensorflow.reshape', 'tf.reshape', (['state[:, 0:self.UE * self.BS]', '[-1, self.UE, self.BS]'], {}), '(state[:, 0:self.UE * self.BS], [-1, self.UE, self.BS])\n', (5266, 5321), True, 'import tensorflow as tf\n'), ((6035, 6053), 'tensorflow.log', 'tf.log', (['(QoS / Rate)'], {}), '(QoS / Rate)\n', (6041, 6053), True, 'import tensorflow as tf\n'), ((10153, 10177), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.qval'], {}), '(self.qval)\n', (10166, 10177), True, 'import tensorflow as tf\n'), ((10488, 10574), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.target_action - self.action_tf) ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '((self.target_action - self.action_tf) ** 2, axis=1, keepdims\n =True)\n', (10502, 10574), True, 'import tensorflow as tf\n'), ((10593, 10644), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr_actor'}), '(learning_rate=self.lr_actor)\n', (10615, 10644), True, 'import tensorflow as tf\n'), ((11517, 11569), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr_critic'}), '(learning_rate=self.lr_critic)\n', (11539, 11569), True, 'import tensorflow as tf\n'), ((15588, 15609), 'numpy.ones', 'np.ones', (['[1, self.BS]'], {}), '([1, self.BS])\n', (15595, 15609), True, 'import numpy as np\n'), ((15610, 15632), 'numpy.zeros', 'np.zeros', (['[1, self.BS]'], {}), '([1, self.BS])\n', (15618, 15632), True, 'import numpy as np\n'), ((15954, 16005), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr_actor'}), '(learning_rate=self.lr_actor)\n', (15976, 16005), True, 'import tensorflow as tf\n'), ((2476, 2563), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.fcn12', 'self.output_size'], {'activation_fn': 'None'}), '(self.fcn12, self.output_size,\n activation_fn=None)\n', (2509, 2563), True, 'import tensorflow as tf\n'), ((4980, 4998), 'numpy.arange', 'np.arange', (['self.BS'], {}), '(self.BS)\n', (4989, 4998), True, 'import numpy as np\n'), ((5348, 5391), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['SNR_p'], {'axis': '(2)', 'keepdims': '(True)'}), '(SNR_p, axis=2, keepdims=True)\n', (5361, 5391), True, 'import tensorflow as tf\n'), ((5413, 5429), 'tensorflow.log', 'tf.log', (['(1 + SINR)'], {}), '(1 + SINR)\n', (5419, 5429), True, 'import tensorflow as tf\n'), ((5428, 5439), 'tensorflow.log', 'tf.log', (['(2.0)'], {}), '(2.0)\n', (5434, 5439), True, 'import tensorflow as tf\n'), ((6155, 
6193), 'tensorflow.reshape', 'tf.reshape', (['Backhaul', '[-1, 1, self.BS]'], {}), '(Backhaul, [-1, 1, self.BS])\n', (6165, 6193), True, 'import tensorflow as tf\n'), ((6303, 6335), 'tensorflow.reshape', 'tf.reshape', (['(1 - P)', '[-1, self.BS]'], {}), '(1 - P, [-1, self.BS])\n', (6313, 6335), True, 'import tensorflow as tf\n')] |
"""
Welcome to your first Halite-II bot!
This bot's name is Settler. Its purpose is simple (don't expect it to win complex games :) ):
1. Initialize game
2. If a ship is not docked and there are unowned planets
2.a. Try to Dock in the planet if close enough
2.b If not, go towards the planet
Note: Please do not place print statements here as they are used to communicate with the Halite engine. If you need
to log anything use the logging module.
"""
# Let's start by importing the Halite Starter Kit so we can interface with the Halite engine
import hlt
import numpy
import math
import gc
import hlt.entity
import hlt.collision
import logging
import time
import random
# GAME START
# Initialize the game as "MyBot16" and open communication with the Halite engine.
game = hlt.Game("MyBot16")
initialized = False  # per-game state (my_id, map size, ...) is filled in on the first turn of the main loop
first_dock = False
# Precomputed cosine/sine lookup tables for whole-degree angles (0-359).
# Navigation indexes these instead of calling math.radians/cos/sin per ship per turn.
cos = [math.cos(math.radians(x)) for x in range(360)]
sin = [math.sin(math.radians(x)) for x in range(360)]
def compute_dist(dx, dy):
    """Return the Euclidean length of the vector (dx, dy).

    Works on scalars or numpy arrays. Uses numpy.hypot instead of
    sqrt(dx*dx + dy*dy): same result for ordinary inputs, but it avoids
    intermediate overflow/underflow when the components are extreme.
    """
    return numpy.hypot(dx, dy)
def compute_square_dist(dx, dy):
    """Squared Euclidean length of the vector (dx, dy).

    Cheaper than compute_dist when the caller only compares against a
    squared threshold (no square root taken).
    """
    return dx ** 2 + dy ** 2
def custom_intersect_segment_circle(start, end, circle, *, fudge=0.5):
    """Return True if the segment from *start* to *end* passes within
    (circle.radius + fudge) of *circle*'s center.

    :param start: entity with .x/.y (segment start; its
        calculate_distance_between is used for the degenerate case)
    :param end: entity with .x/.y (segment end)
    :param circle: entity with .x/.y/.radius to test against
    :param fudge: extra clearance added to the circle's radius
    """
    dx = end.x - start.x
    dy = end.y - start.y
    a = dx**2 + dy**2
    if a == 0.0:
        # Start and end are the same point: plain point-vs-circle distance test.
        return start.calculate_distance_between(circle) <= circle.radius + fudge
    # Linear coefficient of the quadratic distance(t)^2 along the segment.
    # (The constant term is not needed: we evaluate the actual distance at
    # the closest point instead of solving the quadratic.)
    b = -2 * (start.x**2 - start.x*end.x - start.x*circle.x + end.x*circle.x +
              start.y**2 - start.y*end.y - start.y*circle.y + end.y*circle.y)
    # Time along segment when closest to the circle (vertex of the quadratic),
    # clamped to the segment's far end.
    t = min(-b / (2 * a), 1.0)
    if t < 0:
        # Closest approach lies behind the start point; treated as no collision.
        return False
    closest_x = start.x + dx * t
    closest_y = start.y + dy * t
    closest_distance = hlt.entity.Position(closest_x, closest_y).calculate_distance_between(circle)
    return closest_distance <= circle.radius + fudge
# Squared-distance pruning radius for ship obstacle checks: a ship farther
# than one turn's maximum travel (plus a small margin) from us is skipped in
# exists_obstacles_between. Compared against compute_square_dist results.
SKIP_THRESHOLD = (hlt.constants.MAX_SPEED + 1.1) ** 2
def exists_obstacles_between(ship, target, all_planets, all_ships, all_my_ships_moves, ignore=()):
    """Return True if any planet or ship blocks the straight path ship -> target.

    :param all_my_ships_moves: planned end-of-turn positions of allied ships,
        checked alongside current ship positions
    :param ignore: tuple of entity classes to skip (default () skips nothing,
        since issubclass(X, ()) is always False)

    Note: removed the dead `obstacles`/`entities` locals the original built —
    they were computed on every call and never read.
    """
    # Planets: no distance pruning — they are large enough that a far planet
    # can still intersect the segment.
    if not issubclass(hlt.entity.Planet, ignore):
        for foreign_entity in all_planets:
            if foreign_entity == ship or foreign_entity == target:
                continue
            if custom_intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):
                return True
    # Ships (current positions and planned moves): prune anything farther than
    # one turn's travel (SKIP_THRESHOLD is a squared distance) before the
    # exact segment-circle test.
    if not issubclass(hlt.entity.Ship, ignore):
        for foreign_entity in all_ships + all_my_ships_moves:
            if foreign_entity == ship or foreign_entity == target:
                continue
            if compute_square_dist(foreign_entity.x - ship.x, foreign_entity.y - ship.y) > SKIP_THRESHOLD:
                continue
            if custom_intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):
                return True
    return False
def custom_navigate(ship, target, game_map, max_speed, min_speed, speed_decay, step, all_planets, all_ships, all_my_ships_moves,
                    avoid_obstacles=True, max_corrections=90, angular_step=1,
                    ignore_ships=False, ignore_planets=False, suicide=False):
    """Recursively search for a clear heading from *ship* toward *target*.

    Each time the straight path is blocked, the heading is rotated by
    *angular_step* degrees and the search recurses with one less correction.

    Returns a 3-tuple (steps_taken, final_target_position, thrust_command),
    or (999999, None, None) when the correction budget is exhausted.
    """
    # Assumes a position, not planet (as it would go to the center of the planet otherwise)
    if max_corrections <= 0:
        # Correction budget exhausted: sentinel step count signals failure.
        return 999999, None, None
    if not suicide:
        # Stop short of the target's hull, accounting for both radii.
        distance = ship.calculate_distance_between(target) - target.radius - ship.radius
    else:
        # Suicide run: aim at the exact center — ramming is intended.
        distance = ship.calculate_distance_between(target)
    angle = int(ship.calculate_angle_between(target))
    # Entity classes to exclude from the obstacle check.
    ignore = () if not (ignore_ships or ignore_planets) \
        else hlt.entity.Ship if (ignore_ships and not ignore_planets) \
        else hlt.entity.Planet if (ignore_planets and not ignore_ships) \
        else hlt.entity.Entity
    if avoid_obstacles and exists_obstacles_between(ship, target, all_planets, all_ships, all_my_ships_moves, ignore):
        # Path blocked: rotate by angular_step, normalize to [0, 360), retry.
        new_angle = angle + angular_step
        while new_angle >= 360:
            new_angle -= 360
        while new_angle < 0:
            new_angle += 360
        new_target_dx = cos[int(new_angle)] * distance
        new_target_dy = sin[int(new_angle)] * distance
        new_target = hlt.entity.Position(ship.x + new_target_dx, ship.y + new_target_dy)
        return custom_navigate(ship, new_target, game_map, max_speed, min_speed, speed_decay, step + 1, all_planets, all_ships, all_my_ships_moves, True, max_corrections - 1, angular_step, ignore_ships, ignore_planets, suicide)
    # TODO formulize this better
    # Speed decays with the number of corrections already taken, floored at
    # min_speed; never overshoot the remaining distance.
    speed = max(max_speed - step * speed_decay, min_speed)
    speed = speed if (distance >= speed) else distance - 0.1
    final_target_dx = cos[int(angle)] * speed
    final_target_dy = sin[int(angle)] * speed
    final_target = hlt.entity.Position(ship.x + final_target_dx, ship.y + final_target_dy)
    final_target.radius = ship.radius
    return step, final_target, ship.thrust(speed, angle)
# parameters
# Hand-tuned strategy constants (values presumably from self-play tuning — TODO confirm).
# --- navigation ---
ANGULAR_STEP = 6  # NOTE(review): presumably passed as custom_navigate's angular_step — confirm at call site
MAX_SPEED = hlt.constants.MAX_SPEED
MIN_SPEED = hlt.constants.MAX_SPEED * 0.5
SPEED_DECAY = 0.0  # per-correction speed reduction in custom_navigate; 0.0 disables decay
MAX_CORRECTIONS = 30  # NOTE(review): presumably the max_corrections budget for custom_navigate
# --- distance thresholds (map units) ---
MIN_OPPONENT_DIST_TO_DOCK = 25.0  # radius used when counting opponents near docked/docking allies
MIN_OPPONENT_DIST_TO_TARGET_PLANET = 25.0
# --- target-scoring weights (consumed later in the main loop, beyond this chunk) ---
DOCKED_BONUS = 0.0
PLANET_BONUS = 10.0
UNDOCKED_BONUS = -100.0
MAX_OPPONENT_SHIP_TARGET_CNT = 4
MAX_MY_SHIP_TARGET_CNT = 4
PLANET_DOCKED_ALLIES_BONUS = 40.0
OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS = 40.0
MAX_DIST_TO_TARGET_OPPONENT_UNDOCKED_SHIP = 15.0
#PLANET_CAPACITY_BONUS =
#UNDOCKED_OPPONENT_CLOSE_TO_MY_DOCKED_BONUS = 10.0
# Inputs to planet_nearby_empty_planet_score (bonus for clusters of empty planets):
PLANET_NEARBY_PLANET_MAX_BONUS = 36.0  # cap on the accumulated nearby-planet bonus
PLANET_NEARBY_PLANET_BIAS = 3.0
PLANET_NEARBY_PLANET_SLOPE = 0.25
SUICIDE_UNDOCKED_OPPONENT_DIST = 15.0
ALL_IN_DIST = 50.0
PLANET_FAR_FROM_CENTER_BONUS = 1.0
MAX_PLANET_FAR_FROM_CENTER_BONUS = 70.0
SUICIDE_HEALTH_MULT = 1.0
CLOSE_OPPONENT_DIST = 12.0  # radius for counting nearby undocked opponent ships
CLOSE_ALLY_DIST = 5.0  # radius for counting nearby allied ships
DOUBLE_NAVIGATE_SHIP_CNT = 999
def planet_nearby_empty_planet_score(dist_matrix, planet_owner, planet_capacity):
    """Per-planet bonus for having other unowned, dockable planets nearby.

    :param dist_matrix: (P, P) pairwise planet distances
    :param planet_owner: (P,) owner id per planet, -1 for unowned
    :param planet_capacity: (P,) docking capacity per planet
    :returns: (P,) bonus, capped at PLANET_NEARBY_PLANET_MAX_BONUS
    """
    # Linear falloff with distance, clipped at zero.
    proximity = numpy.maximum(0.0, PLANET_NEARBY_PLANET_BIAS - dist_matrix * PLANET_NEARBY_PLANET_SLOPE)
    # Capacity of each planet, masked to unowned planets only.
    empty_capacity = (planet_owner == -1) * planet_capacity
    # Weight each planet pair by both planets' (masked) capacities.
    weighted = empty_capacity[numpy.newaxis, :] * empty_capacity[:, numpy.newaxis] * proximity
    return numpy.minimum(PLANET_NEARBY_PLANET_MAX_BONUS, numpy.sum(weighted, axis=0))
#PLANET_DOCK_SYNERGE_BONUS = 5.0
# TODOs
# 2. parameter tuning
# 5. collide with planets?
# 6. on timeout risk, move ships toward the center of enemies or allies?
# 7. add our own planets as targets to be more defensive
# 8. compare my ship count with the opponents' to estimate who is winning; if even, be more defensive
# 9. if I have more ships, collide with opponent planets
# 10. regroup with allies when there are more enemies
# 11. if a lone warrior is far from our docked ships, with many enemies near its target and no allies, retreat
# 12. in a 4-player game, be more defensive
# 13. defend against early-game rushes
# 14. create a pivot
# Early-game all-in rush state — presumably set/consumed inside the main loop
# below this chunk; verify against the rest of the file.
early_game_all_in = 0
while True:
# TURN START
st = time.time()
# Update the map for the new turn and get the latest version
game_map = game.update_map()
# Here we define the set of commands to be sent to the Halite engine at the end of the turn
command_queue = []
# initialize game info
if not initialized:
my_id = game_map.my_id
me = game_map.get_me()
width = game_map.width
height = game_map.height
initialized = True
# cache players, planets and ships
all_players_ids = game_map._players.keys()
num_players = len(all_players_ids)
all_planets = game_map.all_planets()
all_my_ships = game_map.get_me().all_ships()
num_my_ships = len(all_my_ships)
all_opponent_ships = []
for pid in all_players_ids:
if my_id != pid:
all_opponent_ships += game_map.get_player(pid).all_ships()
num_opponent_ships = len(all_opponent_ships)
all_ships = all_my_ships + all_opponent_ships
# cache coordinates and misc
all_my_ships_x = numpy.array([v.x for v in all_my_ships])
all_my_ships_y = numpy.array([v.y for v in all_my_ships])
all_my_ships_center_x = numpy.mean(all_my_ships_x)
all_my_ships_center_y = numpy.mean(all_my_ships_y)
all_opponent_ships_x = numpy.array([v.x for v in all_opponent_ships])
all_opponent_ships_y = numpy.array([v.y for v in all_opponent_ships])
all_opponent_ships_center_x = numpy.mean(all_opponent_ships_x)
all_opponent_ships_center_y = numpy.mean(all_opponent_ships_y)
all_planets_x = numpy.array([v.x for v in all_planets])
all_planets_y = numpy.array([v.y for v in all_planets])
my_ships_status = numpy.array([v.docking_status for v in all_my_ships])
num_my_undocked_ships = numpy.sum(my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)
opponent_ships_status = numpy.array([v.docking_status for v in all_opponent_ships])
num_opponent_undocked_ships = numpy.sum(opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)
planet_owner = numpy.array([-1 if v.owner is None else v.owner.id for v in all_planets])
def compute_dist_matrix(x1, y1, x2, y2):
dx = x1[:,numpy.newaxis] - x2[numpy.newaxis,:]
dy = y1[:,numpy.newaxis] - y2[numpy.newaxis,:]
return numpy.sqrt(dx * dx + dy * dy)
my_ship_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_my_ships_x, all_my_ships_y)
ship_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_opponent_ships_x, all_opponent_ships_y)
planet_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_planets_x, all_planets_y)
planet_planet_dist_matrix = compute_dist_matrix(all_planets_x, all_planets_y, all_planets_x, all_planets_y)
closest_opponent_ship = numpy.min(ship_dist_matrix, axis=1)
closest_undocked_opponent_ship = numpy.min(ship_dist_matrix + 99999999.0 * (opponent_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis,:], axis=1)
cnt_too_close_to_dock_opponent = numpy.sum((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:,numpy.newaxis], axis=0)
cnt_too_close_to_dock_ally = numpy.sum((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:,numpy.newaxis], axis=1)
close_opponent_ship_cnt = numpy.sum((ship_dist_matrix < CLOSE_OPPONENT_DIST) * (opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis,:], axis=1)
close_ally_ship_cnt = numpy.sum((my_ship_dist_matrix < CLOSE_ALLY_DIST), axis=1)
cnt_too_close_to_dock_closest_ally = numpy.zeros(len(all_my_ships), dtype=numpy.int)
for i in range(len(all_opponent_ships)):
if opponent_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED:
# TODO optimize this
k = numpy.argmin(ship_dist_matrix[:,i] + 99999999.0 * ((my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKING)))
if ship_dist_matrix[k][i] < MIN_OPPONENT_DIST_TO_DOCK:
cnt_too_close_to_dock_closest_ally[k] += 1
planet_capacity = numpy.array([p.num_docking_spots for p in all_planets])
planet_docked_cnt = numpy.array([len(p._docked_ship_ids) for p in all_planets]) #TODO does this include docking ships?
planet_remaining_cnt = planet_capacity - planet_docked_cnt
# my ship target scores
my_ship_score = numpy.array([0.0] * len(all_my_ships))
my_ship_score += OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS * cnt_too_close_to_dock_closest_ally
my_ship_score += -99999999.0 * (cnt_too_close_to_dock_closest_ally == 0)
my_ship_max_target_cnt = numpy.minimum(MAX_MY_SHIP_TARGET_CNT, cnt_too_close_to_dock_closest_ally)
# opponent ship target scores
opponent_ship_score = numpy.array([0.0] * len(all_opponent_ships))
opponent_ship_score += OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS * cnt_too_close_to_dock_opponent
opponent_ship_score += UNDOCKED_BONUS * \
((opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED) | (opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKING))
opponent_ship_score += DOCKED_BONUS * \
((opponent_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (opponent_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))
opponent_ship_max_target_cnt = numpy.array([MAX_OPPONENT_SHIP_TARGET_CNT] * len(all_opponent_ships))
# planet target scores
planet_score = numpy.array([PLANET_BONUS] * len(all_planets))
if not first_dock and num_players == 2:
planet_score[numpy.argmin(planet_dist_matrix[0])] += 20.0 # so that all ships go to the same planet at the beginning
planet_score[(planet_owner == my_id)] += PLANET_DOCKED_ALLIES_BONUS
if num_players == 2:
planet_score += planet_nearby_empty_planet_score(planet_planet_dist_matrix, planet_owner, planet_capacity)
elif num_players > 2:
planet_score += numpy.minimum(MAX_PLANET_FAR_FROM_CENTER_BONUS, PLANET_FAR_FROM_CENTER_BONUS * (compute_dist(all_planets_x - width / 2.0, all_planets_y - height / 2.0)))
planet_max_target_cnt = planet_remaining_cnt.copy()
my_ship_target_cnt = numpy.array([0] * len(all_my_ships))
opponent_ship_target_cnt = numpy.array([0] * len(all_opponent_ships))
planet_target_cnt = numpy.array([0] * len(all_planets))
my_ship_target_available = my_ship_target_cnt < my_ship_max_target_cnt
opponent_ship_target_available = opponent_ship_target_cnt < opponent_ship_max_target_cnt
planet_target_available = planet_target_cnt < planet_max_target_cnt
# Early game exception
if early_game_all_in == 0:
if len(all_my_ships) != 3 or len(all_opponent_ships) != 3 or num_players > 2 or numpy.sum(my_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED) == 3:
early_game_all_in = 2
if numpy.min(ship_dist_matrix) < ALL_IN_DIST:
early_game_all_in = 1
if early_game_all_in == 1:
opponent_ship_score += 1.0e9
# compute scores of all edges
scores = [0.0] * (len(all_my_ships) * (1 + len(all_planets) + len(all_opponent_ships) + len(all_my_ships)))
len_scores = 0
for k in range(len(all_my_ships)):
ed = time.time()
if ed - st > 1.7:
break
ship = all_my_ships[k]
if ship.docking_status != ship.DockingStatus.UNDOCKED:
continue
if not early_game_all_in == 1:
opponent_too_close_to_target_planet = False if closest_undocked_opponent_ship[k] > MIN_OPPONENT_DIST_TO_TARGET_PLANET else True
opponent_too_close_to_dock = False if closest_undocked_opponent_ship[k] > MIN_OPPONENT_DIST_TO_DOCK else True
for i in range(len(all_planets)):
planet = all_planets[i]
if planet.owner == None or planet.owner.id == my_id:
dist_score = -(planet_dist_matrix[k][i] - planet.radius)
# TODO move this to planet_score
opponent_score = -99999999.0 if opponent_too_close_to_target_planet else 0.0 # TODO opponent_score # TODO geographical_score
total_score = planet_score[i] + dist_score + opponent_score
scores[len_scores] = (total_score, k, i, 'planet')
len_scores += 1
if ship.can_dock(planet) and not opponent_too_close_to_dock:
total_score = 99999999.0
scores[len_scores] = (total_score, k, i, 'dock')
len_scores += 1
else:
# TODO: suicide to opponent planet when I got more ships
pass
for i in range(len(all_my_ships)):
if my_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED or my_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKING:
continue
mship = all_my_ships[i]
dist_score = -(my_ship_dist_matrix[k][i] - mship.radius)
total_score = my_ship_score[i] + dist_score
scores[len_scores] = (total_score, k, i, 'my_ship')
len_scores += 1
for i in range(len(all_opponent_ships)):
if ship_dist_matrix[k][i] > MAX_DIST_TO_TARGET_OPPONENT_UNDOCKED_SHIP and opponent_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED and not early_game_all_in == 1:
continue
oship = all_opponent_ships[i]
dist_score = -(ship_dist_matrix[k][i] - oship.radius)
# TODO geograpihcal_score
total_score = opponent_ship_score[i] + dist_score
scores[len_scores] = (total_score, k, i, 'opponent_ship')
len_scores += 1
# choose action in decreasing score order
all_my_ships_moves_from = []
all_my_ships_moves_to = []
ship_used = numpy.array([False] * len(all_my_ships))
scores = sorted(scores[:len_scores], reverse=True)
for i in range(len(scores)):
ed = time.time()
if ed - st > 1.7:
break
ship_idx = scores[i][1]
my_ship = all_my_ships[ship_idx]
target_idx = scores[i][2]
action = scores[i][3]
if ship_used[ship_idx]:
continue
command = None
if action == 'dock':
if not planet_target_available[target_idx]:
continue
target = all_planets[target_idx]
command = my_ship.dock(target)
first_dock = True
planet_target_cnt[target_idx] += 1
if planet_target_cnt[target_idx] >= planet_max_target_cnt[target_idx]:
planet_target_available[target_idx] = False
elif action == 'planet':
if not planet_target_available[target_idx]:
continue
target = all_planets[target_idx]
# rand_angle = random.randint(0, 359)
# rand_dist = random.uniform(0.0, radius
# rand_target = hlt.entity.Position(target.x +
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=False)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=False)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
# TODO refactor this
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
planet_target_cnt[target_idx] += 1
if planet_target_cnt[target_idx] >= planet_max_target_cnt[target_idx]:
planet_target_available[target_idx] = False
else:
command = None
ship_move = None
elif action == 'my_ship':
if not my_ship_target_available[target_idx]:
continue
target = all_my_ships[target_idx]
suicide = False
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=suicide)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=suicide)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
my_ship_target_cnt[target_idx] += 1
if my_ship_target_cnt[target_idx] >= my_ship_max_target_cnt[target_idx]:
my_ship_target_available[target_idx] = False
else:
command = None
ship_move = None
elif action == 'opponent_ship':
if not opponent_ship_target_available[target_idx]:
continue
target = all_opponent_ships[target_idx]
suicide = False
ignore_ships = False
if not early_game_all_in == 1:
if my_ship.health <= SUICIDE_HEALTH_MULT * hlt.constants.WEAPON_DAMAGE * float(close_opponent_ship_cnt[ship_idx]) / float(close_ally_ship_cnt[ship_idx]) or \
(opponent_ships_status[target_idx] == hlt.entity.Ship.DockingStatus.DOCKED and closest_undocked_opponent_ship[ship_idx] < SUICIDE_UNDOCKED_OPPONENT_DIST):
suicide = True
ignore_ships = True
else:
if my_ship.health <= SUICIDE_HEALTH_MULT * hlt.constants.WEAPON_DAMAGE * float(close_opponent_ship_cnt[ship_idx]) / float(close_ally_ship_cnt[ship_idx]):
suicide = True
ignore_ships = True
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=ignore_ships, ignore_planets=False, suicide=suicide)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=ignore_ships, ignore_planets=False, suicide=suicide)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
opponent_ship_target_cnt[target_idx] += 1
if opponent_ship_target_cnt[target_idx] >= opponent_ship_max_target_cnt[target_idx]:
opponent_ship_target_available[target_idx] = False
else:
command = None
ship_move = None
else:
assert False
if command is not None:
ship_used[ship_idx] = True
command_queue.append(command)
# logging.info('my_id ' + str(my_id))
# for i in range(len(all_planets)):
# planet = all_planets[i]
# logging.info(planet.owner)
# Send our set of commands to the Halite engine for this turn
game.send_command_queue(command_queue)
# TURN END
# GAME END
| [
"numpy.mean",
"numpy.sqrt",
"numpy.minimum",
"hlt.Game",
"math.radians",
"numpy.array",
"numpy.sum",
"hlt.entity.Position",
"numpy.min",
"numpy.argmin",
"numpy.maximum",
"time.time"
] | [((811, 830), 'hlt.Game', 'hlt.Game', (['"""MyBot16"""'], {}), "('MyBot16')\n", (819, 830), False, 'import hlt\n'), ((1017, 1046), 'numpy.sqrt', 'numpy.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (1027, 1046), False, 'import numpy\n'), ((5262, 5333), 'hlt.entity.Position', 'hlt.entity.Position', (['(ship.x + final_target_dx)', '(ship.y + final_target_dy)'], {}), '(ship.x + final_target_dx, ship.y + final_target_dy)\n', (5281, 5333), False, 'import hlt\n'), ((6424, 6516), 'numpy.maximum', 'numpy.maximum', (['(0.0)', '(PLANET_NEARBY_PLANET_BIAS - dist_matrix * PLANET_NEARBY_PLANET_SLOPE)'], {}), '(0.0, PLANET_NEARBY_PLANET_BIAS - dist_matrix *\n PLANET_NEARBY_PLANET_SLOPE)\n', (6437, 6516), False, 'import numpy\n'), ((7400, 7411), 'time.time', 'time.time', ([], {}), '()\n', (7409, 7411), False, 'import time\n'), ((8399, 8439), 'numpy.array', 'numpy.array', (['[v.x for v in all_my_ships]'], {}), '([v.x for v in all_my_ships])\n', (8410, 8439), False, 'import numpy\n'), ((8461, 8501), 'numpy.array', 'numpy.array', (['[v.y for v in all_my_ships]'], {}), '([v.y for v in all_my_ships])\n', (8472, 8501), False, 'import numpy\n'), ((8530, 8556), 'numpy.mean', 'numpy.mean', (['all_my_ships_x'], {}), '(all_my_ships_x)\n', (8540, 8556), False, 'import numpy\n'), ((8585, 8611), 'numpy.mean', 'numpy.mean', (['all_my_ships_y'], {}), '(all_my_ships_y)\n', (8595, 8611), False, 'import numpy\n'), ((8639, 8685), 'numpy.array', 'numpy.array', (['[v.x for v in all_opponent_ships]'], {}), '([v.x for v in all_opponent_ships])\n', (8650, 8685), False, 'import numpy\n'), ((8713, 8759), 'numpy.array', 'numpy.array', (['[v.y for v in all_opponent_ships]'], {}), '([v.y for v in all_opponent_ships])\n', (8724, 8759), False, 'import numpy\n'), ((8794, 8826), 'numpy.mean', 'numpy.mean', (['all_opponent_ships_x'], {}), '(all_opponent_ships_x)\n', (8804, 8826), False, 'import numpy\n'), ((8861, 8893), 'numpy.mean', 'numpy.mean', (['all_opponent_ships_y'], {}), 
'(all_opponent_ships_y)\n', (8871, 8893), False, 'import numpy\n'), ((8914, 8953), 'numpy.array', 'numpy.array', (['[v.x for v in all_planets]'], {}), '([v.x for v in all_planets])\n', (8925, 8953), False, 'import numpy\n'), ((8974, 9013), 'numpy.array', 'numpy.array', (['[v.y for v in all_planets]'], {}), '([v.y for v in all_planets])\n', (8985, 9013), False, 'import numpy\n'), ((9036, 9089), 'numpy.array', 'numpy.array', (['[v.docking_status for v in all_my_ships]'], {}), '([v.docking_status for v in all_my_ships])\n', (9047, 9089), False, 'import numpy\n'), ((9118, 9186), 'numpy.sum', 'numpy.sum', (['(my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)'], {}), '(my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)\n', (9127, 9186), False, 'import numpy\n'), ((9215, 9274), 'numpy.array', 'numpy.array', (['[v.docking_status for v in all_opponent_ships]'], {}), '([v.docking_status for v in all_opponent_ships])\n', (9226, 9274), False, 'import numpy\n'), ((9309, 9383), 'numpy.sum', 'numpy.sum', (['(opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)'], {}), '(opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)\n', (9318, 9383), False, 'import numpy\n'), ((9403, 9478), 'numpy.array', 'numpy.array', (['[(-1 if v.owner is None else v.owner.id) for v in all_planets]'], {}), '([(-1 if v.owner is None else v.owner.id) for v in all_planets])\n', (9414, 9478), False, 'import numpy\n'), ((10159, 10194), 'numpy.min', 'numpy.min', (['ship_dist_matrix'], {'axis': '(1)'}), '(ship_dist_matrix, axis=1)\n', (10168, 10194), False, 'import numpy\n'), ((10232, 10371), 'numpy.min', 'numpy.min', (['(ship_dist_matrix + 99999999.0 * (opponent_ships_status != hlt.entity.Ship.\n DockingStatus.UNDOCKED)[numpy.newaxis, :])'], {'axis': '(1)'}), '(ship_dist_matrix + 99999999.0 * (opponent_ships_status != hlt.\n entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis, :], axis=1)\n', (10241, 10371), False, 'import numpy\n'), ((10403, 10624), 'numpy.sum', 
'numpy.sum', (['((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.\n entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship\n .DockingStatus.DOCKING))[:, numpy.newaxis])'], {'axis': '(0)'}), '((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((\n my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (\n my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:, numpy.\n newaxis], axis=0)\n', (10412, 10624), False, 'import numpy\n'), ((10642, 10863), 'numpy.sum', 'numpy.sum', (['((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.\n entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship\n .DockingStatus.DOCKING))[:, numpy.newaxis])'], {'axis': '(1)'}), '((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((\n my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (\n my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:, numpy.\n newaxis], axis=1)\n', (10651, 10863), False, 'import numpy\n'), ((10879, 11028), 'numpy.sum', 'numpy.sum', (['((ship_dist_matrix < CLOSE_OPPONENT_DIST) * (opponent_ships_status == hlt.\n entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis, :])'], {'axis': '(1)'}), '((ship_dist_matrix < CLOSE_OPPONENT_DIST) * (opponent_ships_status ==\n hlt.entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis, :], axis=1)\n', (10888, 11028), False, 'import numpy\n'), ((11050, 11106), 'numpy.sum', 'numpy.sum', (['(my_ship_dist_matrix < CLOSE_ALLY_DIST)'], {'axis': '(1)'}), '(my_ship_dist_matrix < CLOSE_ALLY_DIST, axis=1)\n', (11059, 11106), False, 'import numpy\n'), ((11697, 11752), 'numpy.array', 'numpy.array', (['[p.num_docking_spots for p in all_planets]'], {}), '([p.num_docking_spots for p in all_planets])\n', (11708, 11752), False, 'import numpy\n'), ((12231, 12304), 'numpy.minimum', 'numpy.minimum', (['MAX_MY_SHIP_TARGET_CNT', 'cnt_too_close_to_dock_closest_ally'], {}), '(MAX_MY_SHIP_TARGET_CNT, cnt_too_close_to_dock_closest_ally)\n', (12244, 12304), False, 
'import numpy\n'), ((887, 902), 'math.radians', 'math.radians', (['x'], {}), '(x)\n', (899, 902), False, 'import math\n'), ((941, 956), 'math.radians', 'math.radians', (['x'], {}), '(x)\n', (953, 956), False, 'import math\n'), ((4701, 4768), 'hlt.entity.Position', 'hlt.entity.Position', (['(ship.x + new_target_dx)', '(ship.y + new_target_dy)'], {}), '(ship.x + new_target_dx, ship.y + new_target_dy)\n', (4720, 4768), False, 'import hlt\n'), ((6708, 6732), 'numpy.sum', 'numpy.sum', (['score'], {'axis': '(0)'}), '(score, axis=0)\n', (6717, 6732), False, 'import numpy\n'), ((9648, 9677), 'numpy.sqrt', 'numpy.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (9658, 9677), False, 'import numpy\n'), ((14803, 14814), 'time.time', 'time.time', ([], {}), '()\n', (14812, 14814), False, 'import time\n'), ((17560, 17571), 'time.time', 'time.time', ([], {}), '()\n', (17569, 17571), False, 'import time\n'), ((2036, 2077), 'hlt.entity.Position', 'hlt.entity.Position', (['closest_x', 'closest_y'], {}), '(closest_x, closest_y)\n', (2055, 2077), False, 'import hlt\n'), ((11372, 11558), 'numpy.argmin', 'numpy.argmin', (['(ship_dist_matrix[:, i] + 99999999.0 * ((my_ships_status == hlt.entity.Ship\n .DockingStatus.UNDOCKED) | (my_ships_status == hlt.entity.Ship.\n DockingStatus.UNDOCKING)))'], {}), '(ship_dist_matrix[:, i] + 99999999.0 * ((my_ships_status == hlt\n .entity.Ship.DockingStatus.UNDOCKED) | (my_ships_status == hlt.entity.\n Ship.DockingStatus.UNDOCKING)))\n', (11384, 11558), False, 'import numpy\n'), ((13158, 13193), 'numpy.argmin', 'numpy.argmin', (['planet_dist_matrix[0]'], {}), '(planet_dist_matrix[0])\n', (13170, 13193), False, 'import numpy\n'), ((14439, 14466), 'numpy.min', 'numpy.min', (['ship_dist_matrix'], {}), '(ship_dist_matrix)\n', (14448, 14466), False, 'import numpy\n'), ((14319, 14387), 'numpy.sum', 'numpy.sum', (['(my_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED)'], {}), '(my_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED)\n', 
(14328, 14387), False, 'import numpy\n'), ((19920, 20098), 'hlt.entity.Position', 'hlt.entity.Position', (['(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x))', '(ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))'], {}), '(ship_move.x - (all_my_ships_moves_to[j].x -\n all_my_ships_moves_from[j].x), ship_move.y - (all_my_ships_moves_to[j].\n y - all_my_ships_moves_from[j].y))\n', (19939, 20098), False, 'import hlt\n'), ((22334, 22512), 'hlt.entity.Position', 'hlt.entity.Position', (['(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x))', '(ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))'], {}), '(ship_move.x - (all_my_ships_moves_to[j].x -\n all_my_ships_moves_from[j].x), ship_move.y - (all_my_ships_moves_to[j].\n y - all_my_ships_moves_from[j].y))\n', (22353, 22512), False, 'import hlt\n'), ((25556, 25734), 'hlt.entity.Position', 'hlt.entity.Position', (['(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x))', '(ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))'], {}), '(ship_move.x - (all_my_ships_moves_to[j].x -\n all_my_ships_moves_from[j].x), ship_move.y - (all_my_ships_moves_to[j].\n y - all_my_ships_moves_from[j].y))\n', (25575, 25734), False, 'import hlt\n')] |
from sumo.constants import RUN_DEFAULTS
from sumo.modes.run.run import SumoRun
from sumo.utils import save_arrays_to_npz
import numpy as np
import os
import pytest
def _get_args(infile: str, k: list, outdir: str):
    """Return a SumoRun argument dict: the package defaults with the
    input file, k value(s) and output directory overridden."""
    params = RUN_DEFAULTS.copy()
    params.update({'outdir': outdir, 'k': k, 'infile': infile})
    return params
return args
def test_init(tmpdir):
    """SumoRun constructor must validate every argument before running."""
    # no arguments at all
    with pytest.raises(AttributeError):
        SumoRun()

    npz_path = os.path.join(tmpdir, "data.npz")
    out_dir = os.path.join(tmpdir, "outdir")
    n_samples = 10
    labels = ['sample_{}'.format(i) for i in range(n_samples)]

    run_args = _get_args(npz_path, [2], out_dir)
    # input file does not exist yet
    with pytest.raises(FileNotFoundError):
        SumoRun(**run_args)

    save_arrays_to_npz({'0': np.random.random((n_samples, n_samples)),
                        '1': np.random.random((n_samples, n_samples)),
                        'samples': np.array(labels)}, npz_path)

    # negative number of repetitions
    run_args['n'] = -1
    with pytest.raises(ValueError):
        SumoRun(**run_args)

    # negative number of threads
    run_args = _get_args(npz_path, [2], out_dir)
    run_args['t'] = -1
    with pytest.raises(ValueError):
        SumoRun(**run_args)

    # output path points at a file, not a directory
    run_args = _get_args(npz_path, [2], npz_path)
    with pytest.raises(NotADirectoryError):
        SumoRun(**run_args)

    # more than two values of k
    run_args = _get_args(npz_path, [2, 3, 4], out_dir)
    with pytest.raises(ValueError):
        SumoRun(**run_args)

    # a single k and a [min, max] range are both accepted
    SumoRun(**_get_args(npz_path, [2], out_dir))
    SumoRun(**_get_args(npz_path, [2, 5], out_dir))

    # reversed k range
    run_args = _get_args(npz_path, [5, 2], out_dir)
    with pytest.raises(ValueError):
        SumoRun(**run_args)

    # subsample fraction outside the allowed range (too low, then too high)
    run_args = _get_args(npz_path, [2], out_dir)
    run_args['subsample'] = -0.1
    with pytest.raises(ValueError):
        SumoRun(**run_args)
    run_args['subsample'] = 0.9
    with pytest.raises(ValueError):
        SumoRun(**run_args)
SumoRun(**args)
def test_run(tmpdir):
    """SumoRun.run must validate the .npz contents and produce result files."""
    npz_path = os.path.join(tmpdir, "data.npz")
    out_dir = os.path.join(tmpdir, "outdir")
    n_samples = 10

    # two random symmetric adjacency matrices
    adj0 = np.random.random((n_samples, n_samples))
    adj0 = (adj0 * adj0.T) / 2
    adj1 = np.random.random((n_samples, n_samples))
    adj1 = (adj1 * adj1.T) / 2
    labels = ['sample_{}'.format(i) for i in range(n_samples)]

    run_args = _get_args(npz_path, [2], out_dir)

    # input file without a 'samples' array
    save_arrays_to_npz({'0': adj0, '1': adj1}, npz_path)
    with pytest.raises(ValueError):
        SumoRun(**run_args).run()

    # 'samples' array shorter than the matrices
    save_arrays_to_npz({'0': adj0, '1': adj1, 'samples': np.array(labels[1:])}, npz_path)
    with pytest.raises(ValueError):
        SumoRun(**run_args).run()

    # no adjacency matrices at all
    save_arrays_to_npz({'samples': np.array(labels)}, npz_path)
    with pytest.raises(ValueError):
        SumoRun(**run_args).run()

    # h_init outside the valid range (negative, then too large)
    save_arrays_to_npz({'0': adj0, '1': adj1, 'samples': np.array(labels)}, npz_path)
    run_args = _get_args(npz_path, [2], out_dir)
    run_args['h_init'] = -1
    with pytest.raises(ValueError):
        SumoRun(**run_args).run()
    run_args['h_init'] = 3
    with pytest.raises(ValueError):
        SumoRun(**run_args).run()

    # a valid configuration must run and create the expected output layout
    run_args = _get_args(npz_path, [2], out_dir)
    run_args['sparsity'] = [10]
    run_args['n'] = 10  # makes test run quicker
    SumoRun(**run_args).run()

    expected = ['k2', 'plots',
                os.path.join('plots', 'consensus_k2.png'),
                os.path.join('k2', 'sumo_results.npz')]
    assert all(os.path.exists(os.path.join(out_dir, p)) for p in expected)
| [
"sumo.constants.RUN_DEFAULTS.copy",
"numpy.random.random",
"os.path.join",
"sumo.modes.run.run.SumoRun",
"numpy.array",
"pytest.raises",
"sumo.utils.save_arrays_to_npz"
] | [((227, 246), 'sumo.constants.RUN_DEFAULTS.copy', 'RUN_DEFAULTS.copy', ([], {}), '()\n', (244, 246), False, 'from sumo.constants import RUN_DEFAULTS\n'), ((460, 492), 'os.path.join', 'os.path.join', (['tmpdir', '"""data.npz"""'], {}), "(tmpdir, 'data.npz')\n", (472, 492), False, 'import os\n'), ((506, 536), 'os.path.join', 'os.path.join', (['tmpdir', '"""outdir"""'], {}), "(tmpdir, 'outdir')\n", (518, 536), False, 'import os\n'), ((1531, 1546), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1538, 1546), False, 'from sumo.modes.run.run import SumoRun\n'), ((1596, 1611), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1603, 1611), False, 'from sumo.modes.run.run import SumoRun\n'), ((2031, 2063), 'os.path.join', 'os.path.join', (['tmpdir', '"""data.npz"""'], {}), "(tmpdir, 'data.npz')\n", (2043, 2063), False, 'import os\n'), ((2077, 2107), 'os.path.join', 'os.path.join', (['tmpdir', '"""outdir"""'], {}), "(tmpdir, 'outdir')\n", (2089, 2107), False, 'import os\n'), ((2134, 2170), 'numpy.random.random', 'np.random.random', (['(samples, samples)'], {}), '((samples, samples))\n', (2150, 2170), True, 'import numpy as np\n'), ((2205, 2241), 'numpy.random.random', 'np.random.random', (['(samples, samples)'], {}), '((samples, samples))\n', (2221, 2241), True, 'import numpy as np\n'), ((2404, 2449), 'sumo.utils.save_arrays_to_npz', 'save_arrays_to_npz', (["{'0': a0, '1': a1}", 'fname'], {}), "({'0': a0, '1': a1}, fname)\n", (2422, 2449), False, 'from sumo.utils import save_arrays_to_npz\n'), ((3417, 3432), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (3424, 3432), False, 'from sumo.modes.run.run import SumoRun\n'), ((398, 427), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (411, 427), False, 'import pytest\n'), ((437, 446), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '()\n', (444, 446), False, 'from sumo.modes.run.run import SumoRun\n'), ((693, 725), 'pytest.raises', 
'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (706, 725), False, 'import pytest\n'), ((735, 750), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (742, 750), False, 'from sumo.modes.run.run import SumoRun\n'), ((1021, 1046), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1034, 1046), False, 'import pytest\n'), ((1056, 1071), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1063, 1071), False, 'from sumo.modes.run.run import SumoRun\n'), ((1176, 1201), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1189, 1201), False, 'import pytest\n'), ((1211, 1226), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1218, 1226), False, 'from sumo.modes.run.run import SumoRun\n'), ((1300, 1333), 'pytest.raises', 'pytest.raises', (['NotADirectoryError'], {}), '(NotADirectoryError)\n', (1313, 1333), False, 'import pytest\n'), ((1343, 1358), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1350, 1358), False, 'from sumo.modes.run.run import SumoRun\n'), ((1434, 1459), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1447, 1459), False, 'import pytest\n'), ((1469, 1484), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1476, 1484), False, 'from sumo.modes.run.run import SumoRun\n'), ((1690, 1715), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1703, 1715), False, 'import pytest\n'), ((1725, 1740), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1732, 1740), False, 'from sumo.modes.run.run import SumoRun\n'), ((1856, 1881), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1869, 1881), False, 'import pytest\n'), ((1891, 1906), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1898, 1906), False, 'from sumo.modes.run.run import SumoRun\n'), ((1944, 1969), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (1957, 1969), False, 'import pytest\n'), ((1979, 1994), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (1986, 1994), False, 'from sumo.modes.run.run import SumoRun\n'), ((2459, 2484), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2472, 2484), False, 'import pytest\n'), ((2499, 2514), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (2506, 2514), False, 'from sumo.modes.run.run import SumoRun\n'), ((2661, 2686), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2674, 2686), False, 'import pytest\n'), ((2701, 2716), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (2708, 2716), False, 'from sumo.modes.run.run import SumoRun\n'), ((2847, 2872), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2860, 2872), False, 'import pytest\n'), ((2887, 2902), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (2894, 2902), False, 'from sumo.modes.run.run import SumoRun\n'), ((3114, 3139), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3127, 3139), False, 'import pytest\n'), ((3154, 3169), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (3161, 3169), False, 'from sumo.modes.run.run import SumoRun\n'), ((3220, 3245), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3233, 3245), False, 'import pytest\n'), ((3260, 3275), 'sumo.modes.run.run.SumoRun', 'SumoRun', ([], {}), '(**args)\n', (3267, 3275), False, 'from sumo.modes.run.run import SumoRun\n'), ((781, 817), 'numpy.random.random', 'np.random.random', (['(samples, samples)'], {}), '((samples, samples))\n', (797, 817), True, 'import numpy as np\n'), ((848, 884), 'numpy.random.random', 'np.random.random', (['(samples, samples)'], {}), '((samples, samples))\n', (864, 884), True, 'import numpy as np\n'), ((921, 944), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (929, 944), True, 
'import numpy as np\n'), ((2615, 2642), 'numpy.array', 'np.array', (['sample_labels[1:]'], {}), '(sample_labels[1:])\n', (2623, 2642), True, 'import numpy as np\n'), ((2805, 2828), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (2813, 2828), True, 'import numpy as np\n'), ((3006, 3029), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (3014, 3029), True, 'import numpy as np\n'), ((3478, 3501), 'os.path.join', 'os.path.join', (['outdir', 'x'], {}), '(outdir, x)\n', (3490, 3501), False, 'import os\n'), ((3594, 3635), 'os.path.join', 'os.path.join', (['"""plots"""', '"""consensus_k2.png"""'], {}), "('plots', 'consensus_k2.png')\n", (3606, 3635), False, 'import os\n'), ((3703, 3741), 'os.path.join', 'os.path.join', (['"""k2"""', '"""sumo_results.npz"""'], {}), "('k2', 'sumo_results.npz')\n", (3715, 3741), False, 'import os\n')] |
import argparse
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from torch.autograd import Variable
from torch.utils.data import Dataset
import math
from model import HierachyVAE
from read_data import *
from utils import *
# Command-line configuration for the Hierarchy-VAE semi-supervised trainer.
# All hyperparameters below are read through the module-level `args` object.
parser = argparse.ArgumentParser(description='Hierachy VAE')
parser.add_argument('--epochs', type=int, default=100)
# Labeled / unlabeled minibatch sizes (unlabeled batches are larger).
parser.add_argument('--batch-size', type=int, default = 6)
parser.add_argument('--batch-size-u', type=int, default=32)
# Number of optimization steps per epoch (see train()).
parser.add_argument('--val-iteration', type=int, default=120)
parser.add_argument('--n-highway-layers', type=int, default=0)
parser.add_argument('--encoder-layers', type=int, default=1)
parser.add_argument('--generator-layers', type=int, default=1)
# NOTE(review): argparse `type=bool` converts any non-empty string to True,
# so `--bidirectional False` would still yield True — confirm intended usage.
parser.add_argument('--bidirectional', type=bool, default=False)
parser.add_argument('--embedding-size', type=int, default=128)
parser.add_argument('--encoder-hidden-size', type=int, default=128)
parser.add_argument('--generator-hidden-size', type=int, default=128)
# Dimensionality of the latent variable z.
parser.add_argument('--z-size', type=int, default=64)
parser.add_argument('--gpu', default='2,3', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--n-labeled-data', type=int, default=100,
                    help='Number of labeled data')
# -1 means "use all available unlabeled data" (interpreted by read_data).
parser.add_argument('--n-unlabeled-data', type=int, default=-
1, help='Number of unlabeled data')
parser.add_argument('--data-path', type=str,
                    default='./borrow/', help='path to data folders')
parser.add_argument('--max-seq-num', type=int, default=6,
                    help='max sentence num in a message')
parser.add_argument('--max-seq-len', type=int, default=64,
                    help='max sentence length')
# Probability threshold used in create_generator_inputs() word dropout.
parser.add_argument('--word-dropout', type=float, default=0.8)
parser.add_argument('--lr', type=float, default=0.001)
# Loss-term weights (see train() / HierachyVAELoss).
parser.add_argument('--rec-coef', type=float, default=1)
parser.add_argument('--predict-weight', type=float, default=1)
parser.add_argument('--class-weight', type=float, default=5)
parser.add_argument('--kld-weight-y', type=float, default=1)
parser.add_argument('--kld-weight-z', type=float, default=1)
# KL(y) below this threshold is not penalized (weight zeroed in train()).
parser.add_argument('--kld-y-thres', type=float, default=1.4)
# String flags; converted to real booleans below.
parser.add_argument('--warm-up', default='False', type=str)
parser.add_argument('--hard', type=str, default='False')
# Gumbel-softmax temperature and its annealing schedule.
parser.add_argument('--tau', type=float, default=1)
parser.add_argument('--tau-min', type=float, default=0.4)
parser.add_argument('--anneal-rate', type=float, default=0.01)
# Training Signal Annealing schedule type: 'exp' | 'linear' | 'log'.
parser.add_argument('--tsa-type', type=str, default='exp')
args = parser.parse_args()
# Restrict visible GPUs before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_cuda = torch.cuda.is_available()
devices = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("gpu num: ", n_gpu)
# Convert the string flags parsed above into proper booleans.
if args.warm_up == 'False':
    args.warm_up = False
else:
    args.warm_up = True
if args.hard == 'False':
    args.hard = False
else:
    args.hard = True
# Best validation accuracy seen so far (updated globally by main()).
best_acc = 0
total_steps = 0
def main():
    """Train the Hierarchy-VAE on labeled + unlabeled data with early stopping.

    Builds the data loaders, model and optimizer, then alternates
    train()/validate() per epoch. Saves the model and records test metrics
    whenever validation accuracy improves; stops after 20 epochs without
    improvement. Mutates the module-level global `best_acc`.
    """
    global best_acc
    # read_data returns datasets plus the vocabulary, sentence-level label
    # count and document-level label set.
    train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, vocab, n_labels, doc_labels = read_data(
        data_path=args.data_path, n_labeled_data=args.n_labeled_data, n_unlabeled_data=args.n_unlabeled_data, max_seq_num=args.max_seq_num, max_seq_len=args.max_seq_len, embedding_size=args.embedding_size)
    # Empirical label distribution of the labeled set (prior for KL(y)).
    dist = train_labeled_dataset.esit_dist
    labeled_trainloader = Data.DataLoader(
        dataset=train_labeled_dataset, batch_size=args.batch_size, shuffle=True)
    unlabeled_trainloader = Data.DataLoader(
        dataset=train_unlabeled_dataset, batch_size=args.batch_size_u, shuffle=True)
    val_loader = Data.DataLoader(
        dataset=val_dataset, batch_size=16, shuffle=False)
    test_loader = Data.DataLoader(
        dataset=test_dataset, batch_size=16, shuffle=False)
    model = HierachyVAE(vocab.vocab_size, args.embedding_size, args.n_highway_layers, args.encoder_hidden_size, args.encoder_layers, args.generator_hidden_size, args.generator_layers, args.z_size, n_labels, doc_labels, args.bidirectional, vocab.embed, args.hard).cuda()
    model = nn.DataParallel(model)
    train_criterion = HierachyVAELoss()
    criterion = nn.CrossEntropyLoss()
    # Only parameters that require grad are optimized.
    optimizer = torch.optim.AdamW(params = filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr)
    test_accs = []
    # Early-stopping patience counter (reset on every validation improvement).
    count = 20
    for epoch in range(args.epochs):
        # Anneal the Gumbel-softmax temperature every 10 epochs, floored at tau_min.
        if epoch % 10 == 0:
            args.tau = np.maximum(args.tau * np.exp(-args.anneal_rate*epoch), args.tau_min)
        train(labeled_trainloader, unlabeled_trainloader, vocab, optimizer, model, train_criterion, epoch, n_labels, dist)
        _, train_acc, total, macro_f1 = validate(labeled_trainloader, model, criterion, epoch, n_labels, vocab)
        print("epoch {}, train acc {}, train amount {}, micro_f1 {}".format(
            epoch, train_acc, total, macro_f1))
        val_loss, val_acc, total, macro_f1 = validate(val_loader, model, criterion, epoch, n_labels, vocab)
        print("epoch {}, val acc {}, val_loss {}, micro_f1 {}".format(
            epoch, val_acc, val_loss, macro_f1))
        count = count -1
        if val_acc >= best_acc:
            # New best validation accuracy: evaluate on test, checkpoint,
            # and reset the patience counter.
            count = 20
            best_acc = val_acc
            test_loss, test_acc, total, macro_f1 = validate(test_loader, model, criterion, epoch, n_labels, vocab)
            test_accs.append((test_acc, macro_f1))
            torch.save(model, args.data_path + 'model.pkl')
            print("epoch {}, test acc {},test loss {}".format(
                epoch, test_acc, test_loss))
            print('Best acc:')
            print(best_acc)
            print('Test acc:')
            print(test_accs)
        if count < 0:
            print("early stop")
            break
    print('Best acc:')
    print(best_acc)
    print('Test acc:')
    print(test_accs)
def create_generator_inputs(x, vocab, train = True):
    """Build generator/decoder input ids from token-id matrix `x`.

    Tokens equal to id 3 are removed from every row. In training mode,
    each token after the first is replaced by <unk> with probability
    ``args.word_dropout`` (word dropout), unless it is <pad> or <eos>.
    Returns a tensor of the filtered (and possibly corrupted) token ids.
    """
    # Drop every occurrence of token id 3, row by row.
    filtered = []
    for row_idx in range(0, x.shape[0]):
        kept = [x[row_idx][col] for col in range(0, x.shape[1]) if x[row_idx][col] != 3]
        filtered.append(kept)
    out = torch.tensor(filtered)
    if train == False:
        return out
    # One uniform draw per position decides whether that token is dropped.
    noise = np.random.rand(out.shape[0], out.shape[1])
    special = [vocab.word2id['<pad>'], vocab.word2id['<eos>']]
    unk_id = vocab.word2id['<unk>']
    for row_idx in range(0, out.shape[0]):
        # Column 0 (sentence start) is never dropped.
        for col in range(1, out.shape[1]):
            if noise[row_idx, col] < args.word_dropout and out[row_idx, col] not in special:
                out[row_idx, col] = unk_id
    return out
def train(labeled_trainloader, unlabeled_trainloader, vocab, optimizer, model, criterion, epoch, n_labels, dist):
    """Run one epoch (args.val_iteration steps) of semi-supervised training.

    Each step concatenates a labeled and an unlabeled minibatch, builds
    one-hot sentence labels, applies word dropout to the generator inputs,
    and optimizes a weighted sum of prediction, reconstruction,
    classification and KL losses produced by `criterion`
    (a HierachyVAELoss instance).
    """
    labeled_train_iter = iter(labeled_trainloader)
    unlabeled_train_iter = iter(unlabeled_trainloader)
    model.train()
    tau = args.tau
    for batch_idx in range(args.val_iteration):
        # Fetch next labeled batch; restart the iterator when exhausted.
        # NOTE(review): `.next()` is the old iterator protocol — this relies
        # on the DataLoader iterator exposing it (older PyTorch); confirm.
        try:
            x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len = labeled_train_iter.next()
        except:
            labeled_train_iter = iter(labeled_trainloader)
            x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len = labeled_train_iter.next()
        # Same for the unlabeled batch.
        try:
            x_u, l_u, y_u, mask1_u, mask2_u, mask3_u, mask4_u, mid_u, sent_len_u, doc_len_u = unlabeled_train_iter.next()
        except:
            unlabeled_train_iter = iter(unlabeled_trainloader)
            x_u, l_u, y_u, mask1_u, mask2_u, mask3_u, mask4_u, mid_u, sent_len_u, doc_len_u = unlabeled_train_iter.next()
        # Stack labeled and unlabeled examples along the batch dimension.
        x = torch.cat([x, x_u], dim = 0)
        l = torch.cat([l, l_u], dim = 0)
        y = torch.cat([y.long(), y_u.long()], dim = 0)
        mask1 = torch.cat([mask1, mask1_u], dim = 0)
        mask2 = torch.cat([mask2, mask2_u], dim = 0)
        mask3 = torch.cat([mask3, mask3_u], dim = 0)
        mask4 = torch.cat([mask4, mask4_u], dim = 0)
        doc_len = torch.cat([doc_len, doc_len_u], dim = 0)
        sent_len = torch.cat([sent_len, sent_len_u], dim = 0)
        batch_size = l.shape[0]
        seq_num = x.shape[1]
        seq_len = x.shape[2]
        # One-hot encode sentence labels; label value 10 marks "no label"
        # (unlabeled sentences) and is left as an all-zero row.
        temp = l.view(-1, 1).long()
        l_one_hot = torch.zeros(batch_size*seq_num, n_labels).cuda()
        for i in range(0, len(temp)):
            if temp[i] != 10:
                l_one_hot[i][temp[i]] = 1
        l_one_hot = l_one_hot.view(batch_size, seq_num, n_labels)
        # Anneal the local temperature every 30 steps, floored at tau_min.
        if batch_idx % 30 == 1:
            tau = np.maximum(tau * np.exp(-args.anneal_rate*batch_idx), args.tau_min)
        xs, ys = (x.view(batch_size*seq_num, seq_len), l.view(batch_size*seq_num))
        # Word-dropout-corrupted decoder inputs.
        prob = create_generator_inputs(xs, vocab, train = True)
        x, prob, l_one_hot, y, l = x.cuda(), prob.cuda(), l_one_hot.cuda(), y.cuda(), l.cuda()
        mask1, mask2 = mask1.cuda(), mask2.cuda()
        logits, kld_z, q_y, q_y_softmax, t, strategy_embedding = model(x, prob, args.tau, mask1, mask2, args.hard, l_one_hot, doc_len = doc_len, sent_len = sent_len)
        mse_loss, likelihood, kld_z, log_prior, classification_loss, kld_y, kld_weight_y, kld_weight_z = criterion(logits, kld_z, q_y, q_y_softmax, t, mask1, mask2, mask3, mask4, x, l, y, l_one_hot, epoch + batch_idx/args.val_iteration, n_labels, dist, tsa_type = args.tsa_type)
        # Free-bits style gate: ignore KL(y) until it exceeds the threshold.
        if kld_y < args.kld_y_thres:
            kld_weight_y = 0
        else:
            kld_weight_y = kld_weight_y
        # NOTE(review): both branches assign the same value — the intended
        # special-casing of small classification loss appears unfinished.
        if classification_loss < 0.001:
            class_weight = args.class_weight
        else:
            class_weight = args.class_weight
        # Optional linear warm-up of the prediction and reconstruction weights.
        if args.warm_up:
            predict_weight = linear_rampup(epoch+batch_idx/args.val_iteration) * args.predict_weight
        else:
            predict_weight = args.predict_weight
        if args.warm_up:
            rec_coef = linear_rampup(epoch+batch_idx/args.val_iteration) * args.rec_coef
        else:
            rec_coef = args.rec_coef
        # Total objective: weighted sum of all loss components.
        loss = predict_weight * mse_loss + rec_coef * likelihood + class_weight * classification_loss + kld_weight_y * (kld_y + log_prior) + kld_weight_z * kld_z
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx%100 == 0:
            print("epoch {}, step {}, loss {}, mse loss {}, reconstruct {}, classification {}, kld y {}. kld z {}".format(epoch, batch_idx, loss, mse_loss, likelihood, classification_loss, kld_y, kld_z))
def validate(val_loader, model, criterion, epoch, n_labels, vocab):
    """Evaluate sentence-level classification on `val_loader`.

    Computes per-class precision/recall bookkeeping over the positions
    selected by mask1, then derives Macro-F1 and Micro-F1.
    Returns (mean loss, Macro-F1, number of evaluated positions, Micro-F1).
    NOTE(review): uses the alias `th` — assumes `torch` is also imported
    as `th` somewhere in this module; verify.
    """
    model.eval()
    with th.no_grad():
        loss_total = 0
        total_sample = 0
        acc_total = 0
        correct = 0
        # Per-class counters: predictions made, correct predictions,
        # and ground-truth occurrences.
        predict_dict = {}
        correct_dict = {}
        correct_total = {}
        for i in range(0, n_labels):
            predict_dict[i] = 0
            correct_dict[i] = 0
            correct_total[i] = 0
        p = 0
        r = 0
        for batch_idx, (x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len) in enumerate(val_loader):
            x, l = x.cuda(), l.cuda()
            batch_size = x.shape[0]
            seq_num = x.shape[1]
            seq_len = x.shape[2]
            # Flatten (doc, sentence) into one sentence-per-row batch.
            x = x.view(batch_size * seq_num, seq_len)
            l = l.view(batch_size * seq_num).long()
            sent_len = sent_len.view(batch_size * seq_num)
            # `.module` unwraps nn.DataParallel to reach the encoder.
            logits, ___ = model.module.encode(x, sent_len = sent_len)
            _, predicted = torch.max(logits.data, 1)
            # Only positions flagged by mask1 are evaluated.
            trainable_idx = torch.where(mask1.view(batch_size * seq_num) == 1)
            if len(trainable_idx[0]) <= 0:
                print("...")
                print(mask1.view(batch_size * seq_num))
                print(np.array(mask1.view(batch_size * seq_num)).sum())
                continue
            loss = criterion(logits[trainable_idx], l[trainable_idx])
            correct += (np.array(predicted.cpu())[trainable_idx] == np.array(l.cpu())[trainable_idx]).sum()
            input_size = np.array(mask1.view(batch_size * seq_num)).sum()
            # Weight the batch loss by the number of evaluated positions.
            loss_total += loss.item() * input_size
            total_sample += input_size
            #print(x.shape, mask1.shape)
            # Accumulate the per-class confusion counters.
            for i in range(0, len(trainable_idx[0])):
                correct_total[np.array(l[trainable_idx].cpu())[i]] += 1
                predict_dict[np.array(predicted[trainable_idx].cpu())[i]] += 1
                if np.array(l[trainable_idx].cpu())[i] == np.array(predicted[trainable_idx].cpu())[i]:
                    correct_dict[np.array(l[trainable_idx].cpu())[i]] += 1
        # Per-class F1 from precision (temp) and recall (temp2).
        f1 = []
        true_total_ = 0
        predict_total_ = 0
        correct_total_ = 0
        for (u, v) in correct_dict.items():
            if predict_dict[u] == 0:
                temp = 0
            else:
                temp = v/predict_dict[u]
            if correct_total[u] == 0:
                temp2 = 0
            else:
                temp2 = v/correct_total[u]
            if temp == 0 and temp2 == 0:
                f1.append(0)
            else:
                f1.append((2*temp*temp2)/(temp+temp2))
            true_total_ += correct_total[u]
            predict_total_ += predict_dict[u]
            correct_total_ += v
        # Macro-F1: unweighted mean of the per-class F1 scores.
        Marco_f1 = sum(f1)/(len(f1))
        # Micro-F1 from pooled precision/recall over all classes.
        p = correct_total_ / predict_total_
        r = correct_total_/ true_total_
        Micro_f1 = (2*p*r)/(p+r)
        print('true dist: ', correct_total)
        print('predict dist: ', predict_dict)
        print('correct pred: ', correct_dict)
        print('Macro: ', Marco_f1)
        print('Micro: ', Micro_f1)
        acc_total = correct / total_sample
        loss_total = loss_total / total_sample
        return loss_total, Marco_f1, total_sample, Micro_f1
def linear_rampup(current, rampup_length=args.epochs):
    """Linear warm-up weight: ramps from 0 to 1 over `rampup_length` epochs.

    Values of `current` outside [0, rampup_length] are clamped, so the
    weight saturates at 1.0 after the ramp. A zero-length ramp means
    "always fully on".
    """
    if rampup_length == 0:
        # Degenerate schedule: apply full weight immediately.
        return 1.0
    fraction = np.clip(current / rampup_length, 0.0, 1.0)
    return float(fraction)
def TSA(epoch, n_class, tsa_type = 'exp'):
    """Training Signal Annealing threshold for `n_class`-way classification.

    Returns a probability threshold that grows from 1/n_class toward 1 as
    training progresses (epoch relative to args.epochs), following an
    exponential, linear or logarithmic schedule. Any other `tsa_type`
    disables annealing (threshold fixed at 1).
    """
    step = math.floor(epoch)
    base = 1/n_class          # chance level: threshold floor
    span = 1 - base           # remaining range up to 1
    if tsa_type == 'exp':
        schedule = np.exp((step/args.epochs - 1) * 5)
    elif tsa_type == 'linear':
        schedule = step/args.epochs
    elif tsa_type == 'log':
        schedule = 1 - np.exp(-step/args.epochs * 5)
    else:
        return 1
    return schedule * span + base
class HierachyVAELoss(object):
    """Composite loss for the Hierarchy-VAE.

    Callable that returns the individual loss terms (document prediction,
    reconstruction likelihood, KL(z), label prior term, TSA-gated
    classification loss, KL(y)) plus the warm-up-scaled KL weights.
    Relies on module-level `standard_categorical`, `TSA`, `linear_rampup`
    and `args`.
    """
    def __call__(self, logits, kld_z, q_y, q_y_softmax, t, mask1, mask2, mask3, mask4, x, l, y, l_one_hot, epoch, n_labels, dist, tsa_type = 'exp'):
        # Document-level prediction loss (despite the name, cross-entropy).
        mse_loss = F.cross_entropy(t, y.long())
        batch_size = x.shape[0]
        seq_num = x.shape[1]
        seq_len = x.shape[2]
        n_class = l_one_hot.shape[-1]
        # Flatten (doc, sentence) into sentence-per-row tensors.
        xs, ys, ys_one_hot = (x.view(batch_size*seq_num, seq_len), l.view(batch_size*seq_num), l_one_hot.view(batch_size*seq_num, n_class))
        # Drop the first token (start symbol) from reconstruction targets.
        xs = xs[:, 1:xs.shape[1]]
        # mask4 selects sentences that contribute to reconstruction.
        trainable_idx = torch.where(mask4.view(batch_size*seq_num) == 1)
        logits_ = logits[trainable_idx].view(-1, logits.shape[-1])
        xs_ = xs[trainable_idx].contiguous().view(-1)
        # Zero weight for token id 0 so padding does not count in the loss.
        weight = torch.tensor([0.0] + [1.0]*(logits.shape[-1]-1)).cuda()
        likelihood = F.cross_entropy(logits_, xs_, weight = weight)
        kld_z = kld_z.mean()
        # mask1 selects labeled sentences for the supervised terms.
        trainable_idx = torch.where(mask1.view(batch_size * seq_num) == 1)
        prior = standard_categorical(ys_one_hot)
        log_prior = -torch.sum(ys_one_hot[trainable_idx] * torch.log(prior[trainable_idx] + 1e-8), dim = 1).mean()
        # TSA threshold: sentences the model already predicts confidently
        # (above `thres`) are excluded from the classification loss.
        thres = TSA(epoch, n_labels, tsa_type)
        q_y_log_softmax = F.log_softmax(q_y, dim = 1)
        if len(trainable_idx[0]) > 0:
            count = 0
            classification_loss = 0
            for i in range(0,len(trainable_idx[0])):
                try:
                    if q_y_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()] < thres:
                        classification_loss += (-1 * q_y_log_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()])
                        count += 1
                except:
                    # Dump the offending state and abort on indexing errors.
                    print(thres)
                    print(epoch)
                    print(q_y_softmax[trainable_idx[0][i]])
                    print(q_y_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()])
                    exit()
            if count > 0:
                classification_loss = classification_loss / count
            else:
                classification_loss = 0
        else:
            classification_loss = 0
        # mask2 selects sentences for KL(y) against the empirical prior `dist`.
        trainable_idx = torch.where(mask2.view(batch_size*seq_num) == 1)
        g = Variable(torch.log(dist)).cuda()
        log_qy = torch.log(q_y_softmax[trainable_idx] + 1e-8)
        kld_y = torch.sum(q_y_softmax[trainable_idx]*(log_qy - g), dim = -1).mean()
        return mse_loss, likelihood, kld_z, log_prior, classification_loss, kld_y, args.kld_weight_y * linear_rampup(epoch), args.kld_weight_z* linear_rampup(epoch)
# Script entry point: run the full training/evaluation loop.
if __name__ == '__main__':
    main()
| [
"numpy.clip",
"torch.nn.CrossEntropyLoss",
"numpy.random.rand",
"math.floor",
"torch.max",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.sum",
"model.HierachyVAE",
"argparse.ArgumentParser",
"numpy.exp",
"torch.nn.functional.log_softmax",
"torch.save",
"torch.cat",
"torch.... | [((341, 392), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hierachy VAE"""'}), "(description='Hierachy VAE')\n", (364, 392), False, 'import argparse\n'), ((2749, 2774), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2772, 2774), False, 'import torch\n'), ((2854, 2879), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2877, 2879), False, 'import torch\n'), ((3551, 3643), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'train_labeled_dataset', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_labeled_dataset, batch_size=args.batch_size,\n shuffle=True)\n', (3566, 3643), True, 'import torch.utils.data as Data\n'), ((3677, 3774), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'train_unlabeled_dataset', 'batch_size': 'args.batch_size_u', 'shuffle': '(True)'}), '(dataset=train_unlabeled_dataset, batch_size=args.\n batch_size_u, shuffle=True)\n', (3692, 3774), True, 'import torch.utils.data as Data\n'), ((3796, 3862), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': '(16)', 'shuffle': '(False)'}), '(dataset=val_dataset, batch_size=16, shuffle=False)\n', (3811, 3862), True, 'import torch.utils.data as Data\n'), ((3890, 3957), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(16)', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=16, shuffle=False)\n', (3905, 3957), True, 'import torch.utils.data as Data\n'), ((4250, 4272), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (4265, 4272), True, 'import torch.nn as nn\n'), ((4330, 4351), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4349, 4351), True, 'import torch.nn as nn\n'), ((6320, 6338), 'torch.tensor', 'torch.tensor', (['prob'], {}), '(prob)\n', (6332, 6338), False, 'import torch\n'), ((6396, 6440), 
'numpy.random.rand', 'np.random.rand', (['prob.shape[0]', 'prob.shape[1]'], {}), '(prob.shape[0], prob.shape[1])\n', (6410, 6440), True, 'import numpy as np\n'), ((14353, 14370), 'math.floor', 'math.floor', (['epoch'], {}), '(epoch)\n', (14363, 14370), False, 'import math\n'), ((2808, 2833), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2831, 2833), False, 'import torch\n'), ((7658, 7684), 'torch.cat', 'torch.cat', (['[x, x_u]'], {'dim': '(0)'}), '([x, x_u], dim=0)\n', (7667, 7684), False, 'import torch\n'), ((7699, 7725), 'torch.cat', 'torch.cat', (['[l, l_u]'], {'dim': '(0)'}), '([l, l_u], dim=0)\n', (7708, 7725), False, 'import torch\n'), ((7800, 7834), 'torch.cat', 'torch.cat', (['[mask1, mask1_u]'], {'dim': '(0)'}), '([mask1, mask1_u], dim=0)\n', (7809, 7834), False, 'import torch\n'), ((7853, 7887), 'torch.cat', 'torch.cat', (['[mask2, mask2_u]'], {'dim': '(0)'}), '([mask2, mask2_u], dim=0)\n', (7862, 7887), False, 'import torch\n'), ((7906, 7940), 'torch.cat', 'torch.cat', (['[mask3, mask3_u]'], {'dim': '(0)'}), '([mask3, mask3_u], dim=0)\n', (7915, 7940), False, 'import torch\n'), ((7959, 7993), 'torch.cat', 'torch.cat', (['[mask4, mask4_u]'], {'dim': '(0)'}), '([mask4, mask4_u], dim=0)\n', (7968, 7993), False, 'import torch\n'), ((8015, 8053), 'torch.cat', 'torch.cat', (['[doc_len, doc_len_u]'], {'dim': '(0)'}), '([doc_len, doc_len_u], dim=0)\n', (8024, 8053), False, 'import torch\n'), ((8075, 8115), 'torch.cat', 'torch.cat', (['[sent_len, sent_len_u]'], {'dim': '(0)'}), '([sent_len, sent_len_u], dim=0)\n', (8084, 8115), False, 'import torch\n'), ((10675, 10690), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10688, 10690), False, 'import torch\n'), ((14224, 14266), 'numpy.clip', 'np.clip', (['(current / rampup_length)', '(0.0)', '(1.0)'], {}), '(current / rampup_length, 0.0, 1.0)\n', (14231, 14266), True, 'import numpy as np\n'), ((15543, 15587), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_', 'xs_'], 
{'weight': 'weight'}), '(logits_, xs_, weight=weight)\n', (15558, 15587), True, 'import torch.nn.functional as F\n'), ((15943, 15968), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['q_y'], {'dim': '(1)'}), '(q_y, dim=1)\n', (15956, 15968), True, 'import torch.nn.functional as F\n'), ((17017, 17062), 'torch.log', 'torch.log', (['(q_y_softmax[trainable_idx] + 1e-08)'], {}), '(q_y_softmax[trainable_idx] + 1e-08)\n', (17026, 17062), False, 'import torch\n'), ((3980, 4243), 'model.HierachyVAE', 'HierachyVAE', (['vocab.vocab_size', 'args.embedding_size', 'args.n_highway_layers', 'args.encoder_hidden_size', 'args.encoder_layers', 'args.generator_hidden_size', 'args.generator_layers', 'args.z_size', 'n_labels', 'doc_labels', 'args.bidirectional', 'vocab.embed', 'args.hard'], {}), '(vocab.vocab_size, args.embedding_size, args.n_highway_layers,\n args.encoder_hidden_size, args.encoder_layers, args.\n generator_hidden_size, args.generator_layers, args.z_size, n_labels,\n doc_labels, args.bidirectional, vocab.embed, args.hard)\n', (3991, 4243), False, 'from model import HierachyVAE\n'), ((5593, 5640), 'torch.save', 'torch.save', (['model', "(args.data_path + 'model.pkl')"], {}), "(model, args.data_path + 'model.pkl')\n", (5603, 5640), False, 'import torch\n'), ((11618, 11643), 'torch.max', 'torch.max', (['logits.data', '(1)'], {}), '(logits.data, 1)\n', (11627, 11643), False, 'import torch\n'), ((8266, 8309), 'torch.zeros', 'torch.zeros', (['(batch_size * seq_num)', 'n_labels'], {}), '(batch_size * seq_num, n_labels)\n', (8277, 8309), False, 'import torch\n'), ((14412, 14449), 'numpy.exp', 'np.exp', (['((epoch / args.epochs - 1) * 5)'], {}), '((epoch / args.epochs - 1) * 5)\n', (14418, 14449), True, 'import numpy as np\n'), ((15466, 15518), 'torch.tensor', 'torch.tensor', (['([0.0] + [1.0] * (logits.shape[-1] - 1))'], {}), '([0.0] + [1.0] * (logits.shape[-1] - 1))\n', (15478, 15518), False, 'import torch\n'), ((17078, 17138), 'torch.sum', 'torch.sum', 
(['(q_y_softmax[trainable_idx] * (log_qy - g))'], {'dim': '(-1)'}), '(q_y_softmax[trainable_idx] * (log_qy - g), dim=-1)\n', (17087, 17138), False, 'import torch\n'), ((4629, 4662), 'numpy.exp', 'np.exp', (['(-args.anneal_rate * epoch)'], {}), '(-args.anneal_rate * epoch)\n', (4635, 4662), True, 'import numpy as np\n'), ((8585, 8622), 'numpy.exp', 'np.exp', (['(-args.anneal_rate * batch_idx)'], {}), '(-args.anneal_rate * batch_idx)\n', (8591, 8622), True, 'import numpy as np\n'), ((16967, 16982), 'torch.log', 'torch.log', (['dist'], {}), '(dist)\n', (16976, 16982), False, 'import torch\n'), ((14615, 14647), 'numpy.exp', 'np.exp', (['(-epoch / args.epochs * 5)'], {}), '(-epoch / args.epochs * 5)\n', (14621, 14647), True, 'import numpy as np\n'), ((15804, 15843), 'torch.log', 'torch.log', (['(prior[trainable_idx] + 1e-08)'], {}), '(prior[trainable_idx] + 1e-08)\n', (15813, 15843), False, 'import torch\n')] |
# Imports for the tree-LSTM feature/anomaly script.
import argparse
import collections
import time
import numpy as np
import torch as th
import torch.nn.functional as F
import torch.nn.init as INIT
import torch.optim as optim
from torch.utils.data import DataLoader
from gene_dataset import Datagenerator
import torch
# from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import networkx as nx
import spacy
# spaCy English model loaded at import time; used to embed node text attributes.
nlp = spacy.load('en')
import dgl
from wwl import *
from dgl.data.tree import SST, SSTBatch
from operator import itemgetter
from tree_lstm_feature import TreeLSTM
# NOTE(review): ConvLSTM is imported twice (here and below) — harmless but redundant.
from convtree_lstm_feature import ConvLSTM
import heapq
import networkx
from convtree_lstm_feature import ConvLSTM
# torch.set_printoptions(threshold='nan')
import sklearn as sk
# from minepy import MINE
import math
def euclidean(v1, v2):
    """Return the Euclidean (L2) distance between vectors `v1` and `v2`.

    Args:
        v1, v2: numeric numpy arrays (or array-likes) of the same shape.

    Returns:
        float: sqrt(sum((v1 - v2) ** 2)).
    """
    sq = np.square(v1 - v2)
    su = np.sum(sq)
    # Bug fix: the original applied the square root twice (np.sqrt followed
    # by math.sqrt), returning the 4th root of the squared-error sum rather
    # than the Euclidean distance the function name promises.
    return math.sqrt(su)
def getdistances(data):
    """Build the pairwise distance matrix between the rows of `data`.

    Each entry [i][j] is euclidean(row_i, row_j); the result is a square
    (n, n) numpy array where n is the number of rows.
    """
    points = np.array(data)
    n_rows = points.shape[0]
    dist_matrix = np.zeros([n_rows, n_rows])
    # Full n x n sweep, including the symmetric half and the zero diagonal.
    for row in range(n_rows):
        for col in range(n_rows):
            dist_matrix[row][col] = euclidean(points[row], points[col])
    return dist_matrix
def mape(y_true, y_pred):
    """Mean absolute percentage error of `y_pred` relative to `y_true`.

    Both arguments are torch tensors; `y_true` must contain no zeros.
    """
    relative_error = (y_pred - y_true) / y_true
    return th.mean(th.abs(relative_error))
def batcher(device):
    """Return a collate function that merges a list of dgl trees.

    The returned callable batches the trees with dgl.batch and packs the
    node-level 'mask', 'x' (word ids) and 'y' (labels) tensors — moved to
    `device` — into an SSTBatch.
    """
    def batcher_dev(batch):
        merged = dgl.batch(batch)
        node_data = merged.ndata
        return SSTBatch(graph=merged,
                        mask=node_data['mask'].to(device),
                        wordid=node_data['x'].to(device),
                        label=node_data['y'].to(device))
    return batcher_dev
def cos(x, y):
    """Cosine similarity between vectors `x` and `y` (neither may be zero)."""
    denominator = np.linalg.norm(x) * np.linalg.norm(y)
    return np.dot(x, y) / denominator
def main(args):
np.random.seed(args.seed)
th.manual_seed(args.seed)
th.cuda.manual_seed(args.seed)
best_epoch = -1
cuda = args.gpu >= 0
device = th.device('cuda:{}'.format(args.gpu)) if cuda else th.device('cpu')
if cuda:
th.cuda.set_device(args.gpu)
trainset = SST()
graphset,train_graphset,node_attrs,G,A,G0,g_wwl,rootid=Datagenerator()
model = TreeLSTM(trainset.num_vocabs,
args.x_size,
args.h_size,
trainset.num_classes,
args.dropout,
# cell_type='childsum' if args.child_sum else 'nary',
cell_type='childsum',
pretrained_emb = trainset.pretrained_emb).to(device)
params_ex_emb =[x for x in list(model.parameters()) if x.requires_grad and x.size(0)!=trainset.num_vocabs]
params_emb = list(model.embedding.parameters())
for p in params_ex_emb:
if p.dim() > 1:
INIT.xavier_uniform_(p)
optimizer = optim.Adam([
{'params':params_ex_emb, 'lr':args.lr, 'weight_decay':args.weight_decay},
{'params':params_emb, 'lr':0.1*args.lr}])
# optimizer = optim.Adam(model.parameters(), lr=0.01)
dur = []
#Reorganize the read dataframe into a list
label_duration = []
feature_name = []
feature_name_word = []
Roleinstance_name = []
ActivityStart=[]
NodeID=[]
RootActivityId=[]
ParentActivityId=[]
ActivityId=[]
labelclass=[]
Tid=[]
for k, v in node_attrs.items():
count = 0
vec = []
for k1, v1 in v.items():
# print("")
if len(v) == 2:
if count == 0:
label_duration.append(v1)
if count == 1:
doc = nlp(v1)
vec = doc.vector
feature_name.append(vec.tolist())
feature_name_word.append(v1)
vec = vec[0:25].tolist()
if count == 2:
ActivityStart.append(v1)
if count == 3:
NodeID.append(v1)
if count == 4:
RootActivityId.append(v1)
if count == 5:
ParentActivityId.append(v1)
if count == 6:
ActivityId.append(v1)
if count == 7:
Tid.append(v1)
count = count + 1
else:
if count == 1:
label_duration.append(v1)
if count == 2:
# print("2 v1", v1)
doc = nlp(v1)
vec1 = doc.vector
vec=vec1[0:20].tolist()
feature_name_word.append(v1)
if count == 3:
# print("3 v1",v1)
doc = nlp(v1)
vec1 = doc.vector
vec.extend(vec1[0:5].tolist())
# ActivityStart.append(v1)
if count == 4:
labelclass.append(int(v1))
if count == 6:
##cluster
doc = nlp(v1)
vec1 = doc.vector
Roleinstance_name.append(v1)
vec.extend(vec1[0:5].tolist())
if count == 7:
##cluster
doc = nlp(v1)
ActivityId.append(v1)
if count == 8:
labelclass.append(int(v1))
count = count + 1
feature_name.append(vec)
feature_name_np = np.array(feature_name)
kernel_matrix, node_representations = wwl(g_wwl, node_features=feature_name_np, num_iterations=1)
feature_name_np2 = np.column_stack((node_representations[0][0:feature_name_np.shape[0]], feature_name_np,))
feature_name_np_tensor = th.tensor(feature_name_np2, dtype=th.float32)
g = graphset[0]
n = g.number_of_nodes()
feature_name_np_tensor1 = feature_name_np_tensor
label_duration_tensor = th.tensor(label_duration, dtype=th.float32)
labelclass = th.tensor(labelclass, dtype=th.float32)
"""
train part
"""
label_duration_tensor1 = label_duration_tensor.type(th.FloatTensor)
label_duration_tensor1 = label_duration_tensor1.reshape(label_duration_tensor1.shape[0], 1)
feature_name_np_tensor_aggragte = np.zeros([feature_name_np.shape[0], 32])
feature_name_np_tensor_aggragte_2np = np.zeros([feature_name_np.shape[0], 50])
for i in range(feature_name_np.shape[1]-2):
path_all=networkx.shortest_path(G0,source=(i+1))
pathlist=list(path_all.values())[-1]
for k in range(len(pathlist)):
feature_name_np_tensor_aggragte[i]=feature_name_np_tensor1[pathlist[k]]+feature_name_np_tensor_aggragte[i]
feature_name_np_tensor_aggragte_2np[i][0:32] = feature_name_np_tensor1[i]
feature_name_np_tensor_aggragte_2np[i][32:50] = (feature_name_np_tensor_aggragte[i][0:18])
feature_name_np_tensor_aggragte_2 = torch.from_numpy(feature_name_np_tensor_aggragte_2np).type(torch.FloatTensor)
import pickle
picklefile1 = open("feature_name_np_tensor_aggragte_2np.pkl", "wb")
pickle.dump(feature_name_np_tensor_aggragte_2np, picklefile1)
picklefile1.close()
####################################################################
labelclass_session=labelclass[rootid]
# for epoch in range(1000):
# t_epoch = time.time()
# model.train()
#
# t0 = time.time() # tik
#
# h = th.zeros((feature_name_np_tensor1.shape[0], feature_name_np_tensor1.shape[1]))
# c = th.zeros((feature_name_np_tensor1.shape[0], feature_name_np_tensor1.shape[1]))
# # logits ,classlogits= model(g,G, h, c,feature_name_np_tensor1)
# logits, classlogits = model(g, G, h, c, feature_name_np_tensor_aggragte_2,rootid,epoch)
# logp=logits.type(th.FloatTensor)
#
#
# labelclass= labelclass_session.type(th.LongTensor)
# # logp=logp.reshape(k,1)
# labelclass = labelclass.reshape(len(rootid))
#
# loss = F.mse_loss(logp, labelclass, size_average=False)
#
# logp_class=F.log_softmax(classlogits, dim=1)
#
# logp_class=logp_class.type(th.FloatTensor)
#
# logp_class = logp_class.reshape([ len(rootid), 2])
#
# loss1 = F.nll_loss(logp_class, labelclass)
#
# labelclass =np.array(labelclass)
# labelclass=torch.from_numpy(labelclass).type(torch.LongTensor)
#
# optimizer.zero_grad()
# loss1.backward()
# optimizer.step()
# dur.append(time.time() - t0) # tok
# pred = logp_class.data.max(1, keepdim=True)[1]
# acc = pred.eq(labelclass.data.view_as(pred)).cpu().sum().item() / float(labelclass.size()[0])
#
# print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | Acc {:.4f} | Root Acc {:.4f} | Time(s) {:.4f}",
# epoch, loss1.item(),acc)
# file_handle1 = open(
# '1029_loss_sumVMOnCreate611_nodenumtrain_bin1.txt',
# mode='a')
# print(str(epoch), file=file_handle1)
# print(str(loss.item()), file=file_handle1)
# file_handle1.close()
#
# th.save(model.state_dict(), 'train.pkl'.format(args.seed))
###############################################################################################
"""
test part
"""
model.load_state_dict(th.load('train.pkl'.format(args.seed)))
accs = []
model.eval()
# label_duration_tensor_test = label_duration_tensor.type(th.FloatTensor)
label_duration_tensor_test = labelclass.type(th.FloatTensor)
feature_name_np_tensor_test = feature_name_np_tensor
feature_name_word_test = feature_name_word
for step in range(500):
g = graphset[0]
n = g.number_of_nodes()
with th.no_grad():
h = th.zeros((n, args.h_size)).to(device)
c = th.zeros((n, args.h_size)).to(device)
logits, classlogits = model(g, G, h, c, feature_name_np_tensor_aggragte_2,rootid,epoch)
# logp_class=classlogits
logp_class = F.log_softmax(classlogits, dim=1)
file_handle3 = open('logp_class.txt', mode='a')
logp_class.numpy()
import pickle
picklefile=open("logp_class_abnormal_normal.pkl","wb")
pickle.dump(logp_class,picklefile)
picklefile.close()
print("logp_class", logp_class.numpy().tolist(), file=file_handle3)
file_handle3.close()
logp_class = logp_class.type(th.FloatTensor)
logp = logits.type(th.FloatTensor)
# pred = logp_class.data.max(1, keepdim=True)[1]
import pandas as pd
logpnp=np.array(logp)
test_acc = 91
label_duration_tensor_test = th.tensor(label_duration_tensor_test, dtype=th.int)
label_duration_tensor_test = label_duration_tensor_test.reshape(len(rootid), 1)
"""
caculate mape
"""
loss_test = mape(logp, label_duration_tensor_test)
logp = logp.reshape([1, len(rootid)])
label_duration_tensor_test = label_duration_tensor_test.reshape([1, len(rootid)])
# label_duration_tensor_test = label_duration_tensor_test.reshape([1, 200])
print("label_duration_tensor_test", label_duration_tensor_test.shape)
print("logp", logp.shape)
# logp1.dtype='float32'
# print("logp", logp1.dtype)
label_duration_tensor_test1=np.array(label_duration_tensor_test,dtype=np.int32)
# label_duration_tensor_test.dtype='float32'
print("label_duration_tensor_test", label_duration_tensor_test.dtype)
label_duration_tensor_test1=label_duration_tensor_test1.tolist()[0]
print("label_duration_tensor_test1", len(label_duration_tensor_test1))
print("label_duration_tensor_test1",label_duration_tensor_test1)
distribution=torch.argmax(logp_class, dim=1)
print("distribution", distribution)
# logp1= distribution.reshape([4, 261])
logp1 = np.array(distribution, dtype=np.int32)
selector = SelectKBest(chi2, k=2)
input=[]
for i in range(len(feature_name_np_tensor_aggragte_2.numpy().tolist())):
input.append(list(map(abs, feature_name_np_tensor_aggragte_2.numpy().tolist()[i])))
X=feature_name_np_tensor_aggragte_2np
# print("X_new.scores_", selector.transform(X))
logp1 = logp1.tolist()
listlog = distribution.numpy().tolist()
label_duration_tensor_test1_np = np.array(label_duration_tensor_test1)
Abnormlist_np= np.where((distribution ==2) | ( distribution ==1) ,)
# K = cos(logp_class, logp_class.t())
K = getdistances(logp_class)
for i in range(Abnormlist_np[0].shape[0]):
causeroot=[]
similarity=[]
if i !=0:
path = networkx.shortest_path(G0, source=Abnormlist_np[0][i])
print("path",path)
list(path.values())
list_path = list(path.values())
print("list_path", list_path)
rootcausedep = []
for iii in range(len(list_path)):
for jjj in range(len(list_path[iii])):
if list_path[iii][jjj] not in rootcausedep and (list_path[iii][jjj]!=Abnormlist_np[0][i]):
rootcausedep.append(list_path[iii][jjj])
# similarity.append(K[Abnormlist_np[0][i]][list_path[iii][jjj]])
print("rootcausedep", rootcausedep)
# similarity
for j in range(len(rootcausedep)):
KJ=0
for jk in range(len(rootcausedep)):
if jk is not j:
KJ=K[rootcausedep[j]][rootcausedep[jk]]+KJ
KJ=KJ+K[rootcausedep[j]][Abnormlist_np[0][i]]
if KJ is not 0:
similarity.append(KJ)
print("similarity",similarity)
if len(similarity) >0 :
max_index = similarity.index(max(similarity, key=abs))
print("rootcausedep",rootcausedep, rootcausedep[max_index])
print("test 0", sum(distribution==0))
print("test 1", sum(distribution==1))
print("test 2", sum(distribution==2))
print("test 3", sum(distribution==3))
print("label 0", label_duration_tensor_test1.count(0))
print("label 1", label_duration_tensor_test1.count(1))
print("label 2", label_duration_tensor_test1.count(2))
print("label 3", label_duration_tensor_test1.count(3))
# logp1
print("logp1",len(logp1))
print("label_duration_tensor_test1", len(label_duration_tensor_test1))
f1score=sk.metrics.f1_score(logp1,label_duration_tensor_test1, average='micro')
print("f1score",f1score)
print("Epoch {:05d} | Test Acc {:.4f} | MAPE Loss {:.4f},f1score",
best_epoch, test_acc, loss_test, f1score)
# loss_test = mape(logp, label_duration_tensor_test[0:522])
abs_duration=abs(label_duration_tensor_test - logp)
# abs_duration = abs(label_duration_tensor_test[0:522] - logp)
abs_duration=abs_duration
id = th.where(abs_duration>0.05)
id1 = th.where(abs_duration > 0.1)
id11 = th.where(abs_duration >=1)
id4 = th.where(abs_duration > 0.4)
id44=np.array( id[0])
id44list=id44.tolist()
feature_name_wordkk=[]
ActivityStartkk=[]
ActivityIdkk = []
label_durationkk=[]
logpkk=[]
abs_duration = (abs_duration).numpy()
idk = heapq.nlargest(3000, range(len(abs_duration)), abs_duration.__getitem__)
idklist = idk
id44list = idklist
logpk=[]
print("len(idklist)",len(idklist))
print("len(feature_name_word_test)",len(feature_name_word_test))
for i in range(len(id44list)):
print("i",i)
feature_name_wordkk.append(feature_name_word_test[id44list[i]])
label_durationkk.append(label_duration[id44list[i]])
logpkk.append(abs_duration[id44list[i]])
logpk.append(logp[id44list[i]])
print("id0.05",id)
print("id0.05", len(id[0]))
print("id0.1", id1)
print("id0.1", len(id1[0]))
print("id0.01", id11)
print("id0.01", len(id11[0]))
print("id0.01", len(id11[0])/100)
print("AnomalyID>0.01", len(id44list))
"""
save result txt
"""
file_handle2 = open('1029sum_fristVMOnCreate611_nodenum_bin1.txt', mode='a')
from collections import Counter
import operator
# 进行统计
a = dict(Counter(feature_name_wordkk))
# 给得出的word进行排序
b = sorted(a.items(), key=operator.itemgetter(1), reverse=True)
for i in range(len(id44list)):
print("index", str(i), file=file_handle2)
print("indexcsv", str(id44list[i]), file=file_handle2)
print("activity name",str(feature_name_wordkk[i]), file=file_handle2)
# print("ActivityId",str(ActivityIdkk[i]), file=file_handle2)
print("label duration",str(label_durationkk[i]), file=file_handle2)
print("abs_duration",logpkk[i], file=file_handle2)
print("predict duration", logpk[i], file=file_handle2)
file_handle2.close()
file_handle3 = open('0127sumaccVMOnCreate_nodenum_bin1.txt', mode='a')
print("ActivityId", str(b), file=file_handle3)
file_handle3.close()
print('------------------------------------------------------------------------------------')
print("Epoch {:05d} | Test Acc {:.4f} | MAPE Loss {:.4f},f1score",
best_epoch, test_acc,loss_test,f1score)
file_handle4 = open(
'0127mean_mapeVMOnCreate611_nodenum_bin1.txt',
mode='a')
print("mape", file=file_handle4)
print(str(loss_test), file=file_handle4)
file_handle4.close()
file_handle1 = open(
'0127_loss_sumVMOnCreate611_nodenumtest.txt',
mode='a')
# print(str(epoch), file=file_handle1)
print(str(test_acc), file=file_handle1)
# print(str(loss.item()), file=file_handle1)
file_handle1.close()
# print(str(), file=file_handle1)
print("node_representations", node_representations)
print("rootid",rootid)
label_session=[]
for i in range(len(rootid)):
label_session.append(label_duration_tensor_test1[i])
print("sessionlabel", label_session)
if __name__ == '__main__':
    # CLI entry point: declare every hyper-parameter flag (in the original
    # order, so --help output is unchanged), then hand off to main().
    parser = argparse.ArgumentParser()
    _FLAGS = [
        ('--gpu', dict(type=int, default=-1)),
        ('--seed', dict(type=int, default=41)),
        ('--batch-size', dict(type=int, default=25)),
        ('--child-sum', dict(action='store_true')),
        ('--x-size', dict(type=int, default=35)),
        ('--h-size', dict(type=int, default=35)),
        ('--epochs', dict(type=int, default=100)),
        ('--log-every', dict(type=int, default=5)),
        ('--lr', dict(type=float, default=1)),
        ('--weight-decay', dict(type=float, default=1e-4)),
        ('--dropout', dict(type=float, default=0.5)),
    ]
    for flag, options in _FLAGS:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    main(args)
| [
"numpy.sqrt",
"math.sqrt",
"numpy.column_stack",
"dgl.data.tree.SST",
"torch.from_numpy",
"sklearn.feature_selection.SelectKBest",
"numpy.array",
"networkx.shortest_path",
"numpy.linalg.norm",
"operator.itemgetter",
"gene_dataset.Datagenerator",
"argparse.ArgumentParser",
"spacy.load",
"nu... | [((440, 456), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (450, 456), False, 'import spacy\n'), ((866, 884), 'numpy.square', 'np.square', (['(v1 - v2)'], {}), '(v1 - v2)\n', (875, 884), True, 'import numpy as np\n'), ((890, 900), 'numpy.sum', 'np.sum', (['sq'], {}), '(sq)\n', (896, 900), True, 'import numpy as np\n'), ((906, 917), 'numpy.sqrt', 'np.sqrt', (['su'], {}), '(su)\n', (913, 917), True, 'import numpy as np\n'), ((927, 939), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (936, 939), False, 'import math\n'), ((975, 989), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (983, 989), True, 'import numpy as np\n'), ((1000, 1040), 'numpy.zeros', 'np.zeros', (['[data.shape[0], data.shape[0]]'], {}), '([data.shape[0], data.shape[0]])\n', (1008, 1040), True, 'import numpy as np\n'), ((1753, 1778), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1767, 1778), True, 'import numpy as np\n'), ((1783, 1808), 'torch.manual_seed', 'th.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1797, 1808), True, 'import torch as th\n'), ((1813, 1843), 'torch.cuda.manual_seed', 'th.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1832, 1843), True, 'import torch as th\n'), ((2036, 2041), 'dgl.data.tree.SST', 'SST', ([], {}), '()\n', (2039, 2041), False, 'from dgl.data.tree import SST, SSTBatch\n'), ((2101, 2116), 'gene_dataset.Datagenerator', 'Datagenerator', ([], {}), '()\n', (2114, 2116), False, 'from gene_dataset import Datagenerator\n'), ((2767, 2906), 'torch.optim.Adam', 'optim.Adam', (["[{'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.weight_decay\n }, {'params': params_emb, 'lr': 0.1 * args.lr}]"], {}), "([{'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.\n weight_decay}, {'params': params_emb, 'lr': 0.1 * args.lr}])\n", (2777, 2906), True, 'import torch.optim as optim\n'), ((5404, 5426), 'numpy.array', 'np.array', (['feature_name'], {}), '(feature_name)\n', (5412, 5426), True, 
'import numpy as np\n'), ((5554, 5645), 'numpy.column_stack', 'np.column_stack', (['(node_representations[0][0:feature_name_np.shape[0]], feature_name_np)'], {}), '((node_representations[0][0:feature_name_np.shape[0]],\n feature_name_np))\n', (5569, 5645), True, 'import numpy as np\n'), ((5672, 5717), 'torch.tensor', 'th.tensor', (['feature_name_np2'], {'dtype': 'th.float32'}), '(feature_name_np2, dtype=th.float32)\n', (5681, 5717), True, 'import torch as th\n'), ((5847, 5890), 'torch.tensor', 'th.tensor', (['label_duration'], {'dtype': 'th.float32'}), '(label_duration, dtype=th.float32)\n', (5856, 5890), True, 'import torch as th\n'), ((5908, 5947), 'torch.tensor', 'th.tensor', (['labelclass'], {'dtype': 'th.float32'}), '(labelclass, dtype=th.float32)\n', (5917, 5947), True, 'import torch as th\n'), ((6190, 6230), 'numpy.zeros', 'np.zeros', (['[feature_name_np.shape[0], 32]'], {}), '([feature_name_np.shape[0], 32])\n', (6198, 6230), True, 'import numpy as np\n'), ((6273, 6313), 'numpy.zeros', 'np.zeros', (['[feature_name_np.shape[0], 50]'], {}), '([feature_name_np.shape[0], 50])\n', (6281, 6313), True, 'import numpy as np\n'), ((7013, 7074), 'pickle.dump', 'pickle.dump', (['feature_name_np_tensor_aggragte_2np', 'picklefile1'], {}), '(feature_name_np_tensor_aggragte_2np, picklefile1)\n', (7024, 7074), False, 'import pickle\n'), ((10615, 10629), 'numpy.array', 'np.array', (['logp'], {}), '(logp)\n', (10623, 10629), True, 'import numpy as np\n'), ((10682, 10733), 'torch.tensor', 'th.tensor', (['label_duration_tensor_test'], {'dtype': 'th.int'}), '(label_duration_tensor_test, dtype=th.int)\n', (10691, 10733), True, 'import torch as th\n'), ((11327, 11379), 'numpy.array', 'np.array', (['label_duration_tensor_test'], {'dtype': 'np.int32'}), '(label_duration_tensor_test, dtype=np.int32)\n', (11335, 11379), True, 'import numpy as np\n'), ((11740, 11771), 'torch.argmax', 'torch.argmax', (['logp_class'], {'dim': '(1)'}), '(logp_class, dim=1)\n', (11752, 11771), False, 
'import torch\n'), ((11870, 11908), 'numpy.array', 'np.array', (['distribution'], {'dtype': 'np.int32'}), '(distribution, dtype=np.int32)\n', (11878, 11908), True, 'import numpy as np\n'), ((11924, 11946), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['chi2'], {'k': '(2)'}), '(chi2, k=2)\n', (11935, 11946), False, 'from sklearn.feature_selection import SelectKBest\n'), ((12336, 12373), 'numpy.array', 'np.array', (['label_duration_tensor_test1'], {}), '(label_duration_tensor_test1)\n', (12344, 12373), True, 'import numpy as np\n'), ((12393, 12444), 'numpy.where', 'np.where', (['((distribution == 2) | (distribution == 1))'], {}), '((distribution == 2) | (distribution == 1))\n', (12401, 12444), True, 'import numpy as np\n'), ((14351, 14423), 'sklearn.metrics.f1_score', 'sk.metrics.f1_score', (['logp1', 'label_duration_tensor_test1'], {'average': '"""micro"""'}), "(logp1, label_duration_tensor_test1, average='micro')\n", (14370, 14423), True, 'import sklearn as sk\n'), ((14802, 14831), 'torch.where', 'th.where', (['(abs_duration > 0.05)'], {}), '(abs_duration > 0.05)\n', (14810, 14831), True, 'import torch as th\n'), ((14840, 14868), 'torch.where', 'th.where', (['(abs_duration > 0.1)'], {}), '(abs_duration > 0.1)\n', (14848, 14868), True, 'import torch as th\n'), ((14880, 14907), 'torch.where', 'th.where', (['(abs_duration >= 1)'], {}), '(abs_duration >= 1)\n', (14888, 14907), True, 'import torch as th\n'), ((14917, 14945), 'torch.where', 'th.where', (['(abs_duration > 0.4)'], {}), '(abs_duration > 0.4)\n', (14925, 14945), True, 'import torch as th\n'), ((14955, 14970), 'numpy.array', 'np.array', (['id[0]'], {}), '(id[0])\n', (14963, 14970), True, 'import numpy as np\n'), ((17906, 17931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17929, 17931), False, 'import argparse\n'), ((1267, 1301), 'torch.abs', 'th.abs', (['((y_pred - y_true) / y_true)'], {}), '((y_pred - y_true) / y_true)\n', (1273, 1301), True, 'import torch as th\n'), 
((1373, 1389), 'dgl.batch', 'dgl.batch', (['batch'], {}), '(batch)\n', (1382, 1389), False, 'import dgl\n'), ((1678, 1690), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (1684, 1690), True, 'import numpy as np\n'), ((1954, 1970), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (1963, 1970), True, 'import torch as th\n'), ((1992, 2020), 'torch.cuda.set_device', 'th.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (2010, 2020), True, 'import torch as th\n'), ((6380, 6420), 'networkx.shortest_path', 'networkx.shortest_path', (['G0'], {'source': '(i + 1)'}), '(G0, source=i + 1)\n', (6402, 6420), False, 'import networkx\n'), ((16133, 16161), 'collections.Counter', 'Counter', (['feature_name_wordkk'], {}), '(feature_name_wordkk)\n', (16140, 16161), False, 'from collections import Counter\n'), ((1694, 1711), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (1708, 1711), True, 'import numpy as np\n'), ((1714, 1731), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (1728, 1731), True, 'import numpy as np\n'), ((2130, 2293), 'tree_lstm_feature.TreeLSTM', 'TreeLSTM', (['trainset.num_vocabs', 'args.x_size', 'args.h_size', 'trainset.num_classes', 'args.dropout'], {'cell_type': '"""childsum"""', 'pretrained_emb': 'trainset.pretrained_emb'}), "(trainset.num_vocabs, args.x_size, args.h_size, trainset.\n num_classes, args.dropout, cell_type='childsum', pretrained_emb=\n trainset.pretrained_emb)\n", (2138, 2293), False, 'from tree_lstm_feature import TreeLSTM\n'), ((2727, 2750), 'torch.nn.init.xavier_uniform_', 'INIT.xavier_uniform_', (['p'], {}), '(p)\n', (2747, 2750), True, 'import torch.nn.init as INIT\n'), ((6841, 6894), 'torch.from_numpy', 'torch.from_numpy', (['feature_name_np_tensor_aggragte_2np'], {}), '(feature_name_np_tensor_aggragte_2np)\n', (6857, 6894), False, 'import torch\n'), ((9715, 9727), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (9725, 9727), True, 'import torch as th\n'), ((10001, 10034), 
'torch.nn.functional.log_softmax', 'F.log_softmax', (['classlogits'], {'dim': '(1)'}), '(classlogits, dim=1)\n', (10014, 10034), True, 'import torch.nn.functional as F\n'), ((10232, 10267), 'pickle.dump', 'pickle.dump', (['logp_class', 'picklefile'], {}), '(logp_class, picklefile)\n', (10243, 10267), False, 'import pickle\n'), ((12646, 12700), 'networkx.shortest_path', 'networkx.shortest_path', (['G0'], {'source': 'Abnormlist_np[0][i]'}), '(G0, source=Abnormlist_np[0][i])\n', (12668, 12700), False, 'import networkx\n'), ((16212, 16234), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (16231, 16234), False, 'import operator\n'), ((9745, 9771), 'torch.zeros', 'th.zeros', (['(n, args.h_size)'], {}), '((n, args.h_size))\n', (9753, 9771), True, 'import torch as th\n'), ((9799, 9825), 'torch.zeros', 'th.zeros', (['(n, args.h_size)'], {}), '((n, args.h_size))\n', (9807, 9825), True, 'import torch as th\n')] |
#! /usr/bin/env python
#
from __future__ import print_function
import time
import os
import numpy as np
import logging
import subprocess
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
ostype = None
class DtimeSimple(object):
    """ Class to help measuring the wall clock time between tagged events
        Typical usage:
        dt = Dtime('some_label')
        ...
        dt.tag('a')
        ...
        dt.tag('b')
        dt.end()

        Each timestamp is a 2-element numpy array: [cpu_time, wall_time].
    """
    def __init__(self, label=".", report=True):
        # `start` advances at each tag(); `init` is fixed for end().
        self.start = self.time()
        self.init = self.start
        self.label = label
        self.report = report
        self.dtimes = []  # list of (tag, delta) tuples recorded by tag()
        dt = self.init - self.init
        if self.report:
            logging.info("Dtime: %s BEGIN " % self.label + str(dt))

    def reset(self, report=True):
        """Restart the timer and discard previously recorded deltas."""
        self.start = self.time()
        self.report = report
        self.dtimes = []

    def tag(self, mytag):
        """Record and return the delta since the previous tag (or start)."""
        t0 = self.start
        t1 = self.time()
        dt = t1 - t0
        self.dtimes.append((mytag, dt))
        self.start = t1
        if self.report:
            logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
        return dt

    def show(self):
        """Log every recorded delta (when reporting) and return the list."""
        if self.report:
            for r in self.dtimes:
                logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1]))
        return self.dtimes

    def end(self):
        """Return (and optionally log) the total delta since construction."""
        t0 = self.init
        t1 = self.time()
        dt = t1 - t0
        if self.report:
            logging.info("Dtime: %s END " % self.label + str(dt))
        return dt

    def time(self):
        """ pick the actual OS routine that returns some kind of timer
        time.time   : wall clock time (include I/O and multitasking overhead)
        CPU time    : time.process_time() on Python 3.3+, time.clock() before.
                      time.clock() was removed in Python 3.8, so it can no
                      longer be called unconditionally.
        """
        # getattr short-circuits: time.clock is only looked up on old Pythons
        # where process_time is absent.
        cpu_clock = getattr(time, 'process_time', None) or time.clock
        return np.array([cpu_clock(), time.time()])

    def get_mem(self):
        """ Read memory usage info from /proc/pid/status
        Return Virtual and Resident memory size in MBytes.
        NOTE: not implemented here, see the ADMIT version if you need this.
        """
        return np.array([])  # NA yet
class Dtime(object):
    """ Class to help measuring the wall clock time between tagged events
        Typical usage:
        dt = Dtime()
        ...
        dt.tag('a')
        ...
        dt.tag('b')

        Each timestamp is a 2-element numpy array [cpu_time, wall_time];
        tag() appends [VmSize, VmRSS] in MB when memory info is readable.
    """
    # Cached lowercase OS name ('linux', 'darwin', ...), filled lazily by
    # get_mem().  Cached on the class instead of a module global so the
    # class is self-contained.
    _ostype = None

    def __init__(self, label=".", report=True):
        self.start = self.time()
        self.init = self.start
        self.label = label
        self.report = report
        self.dtimes = []  # list of (tag, delta) tuples recorded by tag()
        dt = self.init - self.init
        if self.report:
            logging.info("%s BEGIN " % self.label + str(dt))

    def reset(self, report=True):
        """Restart the timer and discard previously recorded deltas."""
        self.start = self.time()
        self.report = report
        self.dtimes = []

    def tag(self, mytag):
        """Record the delta since the previous tag, plus memory usage."""
        t0 = self.start
        t1 = self.time()
        dt = t1 - t0
        # get memory usage (Virtual and Resident) info; empty on failure
        mem = self.get_mem()
        if mem.size != 0:
            dt = np.append(dt, mem)
        self.dtimes.append((mytag, dt))
        self.start = t1
        if self.report:
            logging.info("%s " % self.label + mytag + " " + str(dt))
        return dt

    def show(self):
        """Log every recorded delta (when reporting) and return the list."""
        if self.report:
            for r in self.dtimes:
                logging.info("%s " % self.label + str(r[0]) + " " + str(r[1]))
        return self.dtimes

    def end(self):
        """Return (and optionally log) the total delta since construction."""
        t0 = self.init
        t1 = self.time()
        dt = t1 - t0
        if self.report:
            logging.info("%s END " % self.label + str(dt))
        return dt

    def time(self):
        """ pick the actual OS routine that returns some kind of timer
        time.time   : wall clock time (include I/O and multitasking overhead)
        CPU time    : time.process_time() on Python 3.3+, time.clock() before.
                      time.clock() was removed in Python 3.8.
        """
        cpu_clock = getattr(time, 'process_time', None) or time.clock
        return np.array([cpu_clock(), time.time()])

    def get_mem(self):
        """ Read memory usage info from /proc/pid/status
        Return Virtual and Resident memory size in MBytes,
        or an empty array when the information cannot be read.
        """
        if Dtime._ostype is None:
            Dtime._ostype = os.uname()[0].lower()
            logging.info("OSTYPE: %s" % Dtime._ostype)
        ostype = Dtime._ostype
        scale = {'MB': 1024.0}
        lines = []
        try:
            if ostype == 'linux':
                proc_status = '/proc/%d/status' % os.getpid()  # linux only
                # open pseudo file /proc/<pid>/status
                t = open(proc_status)
                # get value from line e.g. 'VmRSS:  9999  kB\n'
                for it in t.readlines():
                    if 'VmSize' in it or 'VmRSS' in it:
                        lines.append(it)
                t.close()
            else:
                # universal_newlines=True decodes the output, so split('\n')
                # works on Python 3 (communicate() returns bytes otherwise)
                proc = subprocess.Popen(
                    ['ps', '-o', 'rss', '-o', 'vsz', '-o', 'pid', '-p',
                     str(os.getpid())],
                    stdout=subprocess.PIPE, universal_newlines=True)
                proc_output = proc.communicate()[0].split('\n')
                proc_output_memory = proc_output[1].split()
                # ps reports KB; divide by 1024 for MB (fixed 1204 typo)
                phys_mem = int(proc_output_memory[0]) / 1024
                virtual_mem = int(proc_output_memory[1]) / 1024
        except (IOError, OSError):
            if self.report:
                logging.info(self.label + " Error: cannot read memory usage information.")
            return np.array([])
        # parse the /proc lines (linux) or use the ps values (other OSes)
        mem = {}
        if lines:
            for line in lines:
                words = line.strip().split()
                # get rid of the trailing ':' on the key, convert KB to MB
                key = words[0][:-1]
                mem[key] = float(words[1]) / scale['MB']
        elif ostype != 'linux':
            mem['VmSize'] = virtual_mem
            mem['VmRSS'] = phys_mem
        if 'VmSize' not in mem or 'VmRSS' not in mem:
            # previously a KeyError on OSes that matched neither branch
            return np.array([])
        return np.array([mem['VmSize'], mem['VmRSS']])
# Manual smoke test; flip the guard to True (or restore the commented-out
# __main__ check) to exercise the Dtime timer interactively.
if False:
    logging.basicConfig(level=logging.INFO)
    timer = Dtime("testingDtime")
    timer.tag('one')
    print("Hello Dtime World")
    timer.tag('two')
    timer.end()
| [
"logging.basicConfig",
"time.clock",
"numpy.append",
"numpy.array",
"os.getpid",
"time.time",
"logging.info",
"os.uname"
] | [((6345, 6384), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6364, 6384), False, 'import logging\n'), ((2293, 2305), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2301, 2305), True, 'import numpy as np\n'), ((6260, 6299), 'numpy.array', 'np.array', (["[mem['VmSize'], mem['VmRSS']]"], {}), "([mem['VmSize'], mem['VmRSS']])\n", (6268, 6299), True, 'import numpy as np\n'), ((3356, 3374), 'numpy.append', 'np.append', (['dt', 'mem'], {}), '(dt, mem)\n', (3365, 3374), True, 'import numpy as np\n'), ((4475, 4510), 'logging.info', 'logging.info', (["('OSTYPE: %s' % ostype)"], {}), "('OSTYPE: %s' % ostype)\n", (4487, 4510), False, 'import logging\n'), ((2013, 2025), 'time.clock', 'time.clock', ([], {}), '()\n', (2023, 2025), False, 'import time\n'), ((2027, 2038), 'time.time', 'time.time', ([], {}), '()\n', (2036, 2038), False, 'import time\n'), ((4178, 4190), 'time.clock', 'time.clock', ([], {}), '()\n', (4188, 4190), False, 'import time\n'), ((4192, 4203), 'time.time', 'time.time', ([], {}), '()\n', (4201, 4203), False, 'import time\n'), ((5696, 5708), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5704, 5708), True, 'import numpy as np\n'), ((4671, 4682), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4680, 4682), False, 'import os\n'), ((5601, 5675), 'logging.info', 'logging.info', (["(self.label + ' Error: cannot read memory usage information.')"], {}), "(self.label + ' Error: cannot read memory usage information.')\n", (5613, 5675), False, 'import logging\n'), ((4441, 4451), 'os.uname', 'os.uname', ([], {}), '()\n', (4449, 4451), False, 'import os\n'), ((5138, 5149), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5147, 5149), False, 'import os\n')] |
import os
from os import path
from glob import glob
import json
import re
import pickle
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from textwrap import wrap
from scipy import ndimage, misc
import config
from type import RecipeContainer, DataContainer
from utils import URL_to_filename
def get_train_val_test_keys(keys, val_pct=.1, test_pct=.1):
    """
    Split a list of keys into three groups: train, validation, and test

    :param keys: np.ndarray of keys to split; the caller's array is NOT
        modified (previously this function shuffled it in place)
    :param val_pct: fraction of keys for the validation split
    :param test_pct: fraction of keys for the test split
    :return: list of three np.ndarrays [train, validation, test]
    """
    n = keys.shape[0]
    # permutation() returns a shuffled copy, leaving the input untouched
    shuffled = np.random.permutation(keys)
    test_cutoff = 1 - test_pct
    val_cutoff = test_cutoff - val_pct
    return np.split(shuffled, [int(val_cutoff * n), int(test_cutoff * n)])
def files_to_containers(files, recipes, image_list):
    """Bundle the recipe components for the given files into a RecipeContainer
    """
    imgs, titles, ingredients, directions = [], [], [], []
    for key in files:
        imgs.append(image_list[key])
        recipe = recipes[key]
        titles.append(recipe['title'])
        ingredients.append(recipe['ingredients'])
        directions.append(recipe['instructions'])
    return RecipeContainer(files, np.array(titles), np.array(ingredients),
                           np.array(directions), np.array(imgs))
def get_plt_grid(df, labels, subplot_shape=(4, 6), fig_size=(12, 8)):
    """Return a matplotlib figure showing a subplot_shape grid of images
    sampled at random (with replacement) from df, captioned with labels
    """
    fig, axes = plt.subplots(*subplot_shape)
    n_images = df.shape[0]
    for axis in axes.ravel():
        idx = np.random.randint(0, n_images)
        axis.imshow(df[idx])
        # wrap long titles to 25 chars per line so they fit over the image
        caption = '\n'.join(wrap(labels[idx], 25))
        axis.set_title(
            caption, y=.95, va='top', size=8,
            bbox=dict(facecolor='white', pad=.1, alpha=0.6, edgecolor='none'))
        axis.axis('off')
    fig.tight_layout()
    fig.set_size_inches(*fig_size)
    fig.subplots_adjust(wspace=.0, hspace=.0)
    return fig
def load_recipe(filename):
    """Read one JSON recipe collection from disk and return it as a dict
    """
    with open(filename, 'r') as handle:
        recipes = json.load(handle)
    print('Loaded {:,} recipes from {}'.format(len(recipes), filename))
    return recipes
def clean_recipe_keys(recipes):
    """Return a copy of recipes whose keys (URLs) have been stripped of
    special characters via URL_to_filename
    """
    return {URL_to_filename(url): recipe for url, recipe in recipes.items()}
def load_recipes():
    """Load every recipe collection on disk and merge them into one dict
    with sanitized keys
    """
    recipes = {}
    pattern = path.join(config.path_recipe_box_data, 'recipes_raw*.json')
    for filename in glob(pattern):
        recipes.update(load_recipe(filename))
    print('Loaded {:,} recipes in total'.format(len(recipes)))
    return clean_recipe_keys(recipes)
def load_images(img_dims):
    """Load all images into a dictionary with
    key: filename (without extension)
    value: numpy array image, resized to img_dims and randomly flipped

    NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were removed
    in SciPy >= 1.2; this function requires an older SciPy (or porting to
    imageio/PIL) -- confirm the pinned dependency version.
    """
    image_list = {}
    # raw string avoids the invalid-escape DeprecationWarning for "\."
    img_pattern = re.compile(r"\.(jpg|jpeg|png|bmp|tiff)$")
    for root, dirnames, filenames in os.walk(config.path_img):
        for filename in filenames:
            if not img_pattern.search(filename):
                continue
            filepath = os.path.join(root, filename)
            try:
                image = ndimage.imread(filepath, mode="RGB")
            except OSError:
                print('Could not load image {}'.format(filepath))
                # BUG FIX: previously fell through and stored the previous
                # iteration's image under this filename (or raised NameError
                # if the very first read failed)
                continue
            image_resized = misc.imresize(image, img_dims)
            if np.random.random() > 0.5:
                # Flip horizontally with probability 50%
                image_resized = np.fliplr(image_resized)
            image_list[filename.split('.')[0]] = image_resized
    print('Loaded {:,} images from disk'.format(len(image_list)))
    return image_list
def _get_shape_str(shape):
"""Convert image shape to string for filename description
"""
return '{}_{}'.format(*shape[:2])
def _get_npy_filename(shape):
    """Absolute path of the .npy file caching processed images of this shape
    """
    basename = 'images_processed_{}.npy'.format(_get_shape_str(shape))
    return path.join(config.path_recipe_box_data, basename)
def _get_filename_filename(shape):
    """Absolute path of the pickle file storing the image keys for this shape
    """
    basename = 'images_processed_filenames_{}.pk'.format(_get_shape_str(shape))
    return path.join(config.path_recipe_box_data, basename)
def save_images(image_list):
    """Persist processed images (.npy) and their associated keys (pickle)
    to disk
    """
    filenames = list(image_list.keys())
    images = np.array(list(image_list.values()))
    # per-image shape determines the cache filenames
    shape = images.shape[1:]
    np.save(_get_npy_filename(shape), images)
    with open(_get_filename_filename(shape), 'wb') as handle:
        pickle.dump(filenames, handle)
    print('Saved {:,} images to disk'.format(images.shape[0]))
def load_images_disk(shape):
    """Load the cached image array and key list for this shape from disk,
    returned as a dict of key -> image
    """
    images = np.load(_get_npy_filename(shape))
    with open(_get_filename_filename(shape), 'rb') as handle:
        filenames = pickle.load(handle)
    print('Loaded {:,} preprocessed images from disk'.format(images.shape[0]))
    return dict(zip(filenames, images))
def smart_load_images(img_dims):
    """Return processed images keyed by filename, using the on-disk cache
    when present; otherwise load raw images, process them, and populate
    the cache
    """
    if path.exists(_get_npy_filename(img_dims)):
        return load_images_disk(img_dims)
    images = load_images(img_dims)
    save_images(images)
    return images
def plot_grids_by_segment(data):
    """Save a sample image grid for each data segment to the outputs dir
    """
    segments = (
        (data.train, 'sample-train-imgs.png'),
        (data.validation, 'sample-validation-imgs.png'),
        (data.test, 'sample-test-imgs.png'),
    )
    for segment, basename in segments:
        fig = get_plt_grid(segment.images, segment.titles)
        fig.savefig(path.join(config.path_outputs, basename))
def get_complete_recipes(recipes, image_list):
    """Return intersection of recipe keys and image keys

    :param recipes: dict keyed by recipe URL/filename
    :param image_list: dict keyed by image filename
    :return: np.ndarray of filenames present in both collections
    """
    # use a set for O(1) membership tests; the original list made the
    # comprehension below O(n * m)
    recipe_keys = {URL_to_filename(k) for k in recipes.keys()}
    files = np.array([filename for filename in image_list.keys()
                      if filename in recipe_keys])
    print('{:,} complete recipes found'.format(len(files)))
    return files
def save_data_container(data, filename_pickle):
    """Pickle the three segments of data to separate files (suffixed
    _train/_validation/_test) to keep each file under the 2GB pickle limit
    """
    segments = (('_train.pk', data.train),
                ('_validation.pk', data.validation),
                ('_test.pk', data.test))
    for suffix, segment in segments:
        with open(filename_pickle + suffix, 'wb') as handle:
            pickle.dump(segment, handle)
    print('Data container saved to {}_*.pk'.format(filename_pickle))
def load_recipe_container(filename_pickle):
    """Unpickle the three data segments from disk and rebuild the
    DataContainer (train, validation, test)
    """
    segments = []
    for suffix in ('_train.pk', '_validation.pk', '_test.pk'):
        with open(filename_pickle + suffix, 'rb') as handle:
            segments.append(pickle.load(handle))
    return DataContainer(*segments)
def save_recipes(filename_pickle, batch_size):
    """Build the full data container from raw recipes and images, pickle it
    to disk, and write sample image grids for each segment
    """
    # Load recipes and images
    recipes = load_recipes()
    image_list = smart_load_images(batch_size)
    # Get train, validation, test split
    files = get_complete_recipes(recipes, image_list)
    train_files, validation_files, test_files = get_train_val_test_keys(files)
    print('Data split into segments of size {:,} (train), {:,} (validation), and {:,} (test)'.format(
        train_files.shape[0], validation_files.shape[0], test_files.shape[0]))
    # Save data in container
    containers = [files_to_containers(segment, recipes, image_list)
                  for segment in (train_files, validation_files, test_files)]
    data = DataContainer(*containers)
    save_data_container(data, filename_pickle)
    # Plot image sample
    plot_grids_by_segment(data)
    return data
def pickled_data_container_exists(filename_pickle):
    """True iff all three pickled segment files exist at the expected paths
    """
    suffixes = ('_train.pk', '_validation.pk', '_test.pk')
    return all(path.exists(filename_pickle + s) for s in suffixes)
def main(img_size=64):
    """Return a single data container with all recipe components split into
    train, validation, and test sets, building and caching it on first use
    """
    filename_pickle = path.join(config.path_data, 'data_processed')
    if pickled_data_container_exists(filename_pickle):
        print('Loading pickled data; rm {} to refresh'.format(filename_pickle))
        return load_recipe_container(filename_pickle)
    return save_recipes(filename_pickle, (img_size, img_size))
if __name__ == '__main__':
    # CLI entry point: parse the batch size and build/load the dataset
    cli = argparse.ArgumentParser()
    cli.add_argument('--batch-size', type=int, default=64, help='input batch size')
    parsed = cli.parse_args()
    main(parsed.batch_size)
| [
"scipy.ndimage.imread",
"numpy.array",
"textwrap.wrap",
"scipy.misc.imresize",
"os.walk",
"re.search",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.random.random",
"type.DataContainer",
"matplotlib.use",
"numpy.fliplr",
"pickle.load",
"pickle.dump",
"os.path.join",
"utils.URL_to... | [((149, 163), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (156, 163), True, 'import matplotlib as mpl\n'), ((527, 550), 'numpy.random.shuffle', 'np.random.shuffle', (['keys'], {}), '(keys)\n', (544, 550), True, 'import numpy as np\n'), ((819, 859), 'numpy.array', 'np.array', (['[image_list[f] for f in files]'], {}), '([image_list[f] for f in files])\n', (827, 859), True, 'import numpy as np\n'), ((873, 919), 'numpy.array', 'np.array', (["[recipes[f]['title'] for f in files]"], {}), "([recipes[f]['title'] for f in files])\n", (881, 919), True, 'import numpy as np\n'), ((938, 990), 'numpy.array', 'np.array', (["[recipes[f]['ingredients'] for f in files]"], {}), "([recipes[f]['ingredients'] for f in files])\n", (946, 990), True, 'import numpy as np\n'), ((1008, 1061), 'numpy.array', 'np.array', (["[recipes[f]['instructions'] for f in files]"], {}), "([recipes[f]['instructions'] for f in files])\n", (1016, 1061), True, 'import numpy as np\n'), ((1073, 1136), 'type.RecipeContainer', 'RecipeContainer', (['files', 'titles', 'ingredients', 'directions', 'images'], {}), '(files, titles, ingredients, directions, images)\n', (1088, 1136), False, 'from type import RecipeContainer, DataContainer\n'), ((1338, 1366), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*subplot_shape'], {}), '(*subplot_shape)\n', (1350, 1366), True, 'import matplotlib.pyplot as plt\n'), ((2887, 2911), 'os.walk', 'os.walk', (['config.path_img'], {}), '(config.path_img)\n', (2894, 2911), False, 'import os\n'), ((5264, 5286), 'os.path.exists', 'path.exists', (['path_load'], {}), '(path_load)\n', (5275, 5286), False, 'from os import path\n'), ((7162, 7200), 'type.DataContainer', 'DataContainer', (['train', 'validation', 'test'], {}), '(train, validation, test)\n', (7175, 7200), False, 'from type import RecipeContainer, DataContainer\n'), ((8786, 8831), 'os.path.join', 'path.join', (['config.path_data', '"""data_processed"""'], {}), "(config.path_data, 
'data_processed')\n", (8795, 8831), False, 'from os import path\n'), ((9160, 9185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9183, 9185), False, 'import argparse\n'), ((1416, 1449), 'numpy.random.randint', 'np.random.randint', (['(0)', 'df.shape[0]'], {}), '(0, df.shape[0])\n', (1433, 1449), True, 'import numpy as np\n'), ((1984, 1996), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1993, 1996), False, 'import json\n'), ((2483, 2542), 'os.path.join', 'path.join', (['config.path_recipe_box_data', '"""recipes_raw*.json"""'], {}), "(config.path_recipe_box_data, 'recipes_raw*.json')\n", (2492, 2542), False, 'from os import path\n'), ((4564, 4589), 'pickle.dump', 'pickle.dump', (['filenames', 'f'], {}), '(filenames, f)\n', (4575, 4589), False, 'import pickle\n'), ((4878, 4892), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4889, 4892), False, 'import pickle\n'), ((5571, 5626), 'os.path.join', 'path.join', (['config.path_outputs', '"""sample-train-imgs.png"""'], {}), "(config.path_outputs, 'sample-train-imgs.png')\n", (5580, 5626), False, 'from os import path\n'), ((5710, 5770), 'os.path.join', 'path.join', (['config.path_outputs', '"""sample-validation-imgs.png"""'], {}), "(config.path_outputs, 'sample-validation-imgs.png')\n", (5719, 5770), False, 'from os import path\n'), ((5842, 5896), 'os.path.join', 'path.join', (['config.path_outputs', '"""sample-test-imgs.png"""'], {}), "(config.path_outputs, 'sample-test-imgs.png')\n", (5851, 5896), False, 'from os import path\n'), ((6030, 6048), 'utils.URL_to_filename', 'URL_to_filename', (['k'], {}), '(k)\n', (6045, 6048), False, 'from utils import URL_to_filename\n'), ((6472, 6498), 'pickle.dump', 'pickle.dump', (['data.train', 'f'], {}), '(data.train, f)\n', (6483, 6498), False, 'import pickle\n'), ((6569, 6600), 'pickle.dump', 'pickle.dump', (['data.validation', 'f'], {}), '(data.validation, f)\n', (6580, 6600), False, 'import pickle\n'), ((6665, 6690), 'pickle.dump', 'pickle.dump', 
(['data.test', 'f'], {}), '(data.test, f)\n', (6676, 6690), False, 'import pickle\n'), ((6952, 6966), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6963, 6966), False, 'import pickle\n'), ((7050, 7064), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7061, 7064), False, 'import pickle\n'), ((7136, 7150), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7147, 7150), False, 'import pickle\n'), ((8355, 8397), 'os.path.exists', 'path.exists', (["(filename_pickle + '_train.pk')"], {}), "(filename_pickle + '_train.pk')\n", (8366, 8397), False, 'from os import path\n'), ((2279, 2299), 'utils.URL_to_filename', 'URL_to_filename', (['key'], {}), '(key)\n', (2294, 2299), False, 'from utils import URL_to_filename\n'), ((2963, 3013), 're.search', 're.search', (['"""\\\\.(jpg|jpeg|png|bmp|tiff)$"""', 'filename'], {}), "('\\\\.(jpg|jpeg|png|bmp|tiff)$', filename)\n", (2972, 3013), False, 'import re\n'), ((8433, 8480), 'os.path.exists', 'path.exists', (["(filename_pickle + '_validation.pk')"], {}), "(filename_pickle + '_validation.pk')\n", (8444, 8480), False, 'from os import path\n'), ((1581, 1596), 'textwrap.wrap', 'wrap', (['label', '(25)'], {}), '(label, 25)\n', (1585, 1596), False, 'from textwrap import wrap\n'), ((3041, 3069), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (3053, 3069), False, 'import os\n'), ((3290, 3320), 'scipy.misc.imresize', 'misc.imresize', (['image', 'img_dims'], {}), '(image, img_dims)\n', (3303, 3320), False, 'from scipy import ndimage, misc\n'), ((8516, 8557), 'os.path.exists', 'path.exists', (["(filename_pickle + '_test.pk')"], {}), "(filename_pickle + '_test.pk')\n", (8527, 8557), False, 'from os import path\n'), ((3119, 3155), 'scipy.ndimage.imread', 'ndimage.imread', (['filepath'], {'mode': '"""RGB"""'}), "(filepath, mode='RGB')\n", (3133, 3155), False, 'from scipy import ndimage, misc\n'), ((3340, 3358), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3356, 3358), True, 'import 
numpy as np\n'), ((3463, 3487), 'numpy.fliplr', 'np.fliplr', (['image_resized'], {}), '(image_resized)\n', (3472, 3487), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""examples/tests for peakfit"""
# Fix Python 2.x
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os, sys
import numpy as np
try:
from PyMca5.PyMcaIO import specfilewrapper as specfile
except Exception:
try:
from PyMca import specfilewrapper as specfile
except Exception:
from PyMca import specfile
from sloth.fit.peakfit import fit_splitpvoigt, fit_results
_curDir = os.path.dirname(os.path.realpath(__file__))
def test_mock():
# create mock data
import numpy as np
try:
from PyMca5.PyMcaMath.fitting import SpecfitFuns
except Exception:
from PyMca import SpecfitFuns
x = np.linspace(0, 50, 200)
noise = np.random.normal(size=len(x), scale=10)
y = 80.0 - x * 0.25 + noise
y = y + 89 * SpecfitFuns.splitpvoigt([12.5, 30.75, 12.0, 5.0, 0.5], x)
fit, pw = fit_splitpvoigt(x, y, plot=True, show_res=True)
return x, y, fit, pw
def test_diffpat(fname=None):
# tests on 'diff_pat.dat'
if fname is None:
fname = os.path.join(_curDir, "peakfit_tests_diffpat.dat")
try:
sf = specfile.Specfile(fname)
except Exception:
print("{0} not found".format(fname))
return
sd = sf.select("1")
x = sd.datacol(1)
y = sd.datacol(7)
sf = 0 # close file
fit, pw = fit_splitpvoigt(x, y, plot=True, show_res=True)
return x, y, fit, pw
def test_real(scanno, fname=None, noreturn=False):
# tests on real data
if fname is None:
fname = os.path.join(_curDir, "peakfit_tests_real.dat")
try:
sf = specfile.Specfile(fname)
except Exception:
print("{0} not found".format(fname))
return
sd = sf.select(str(scanno))
x = sd.datacol(1) * 1000 # eV
csig = "apd"
cmon = "I02"
csec = "Seconds"
y = (
sd.datacol(csig)
/ sd.datacol(cmon)
* np.mean(sd.datacol(cmon))
/ sd.datacol(csec)
) # cps
fit, pw = fit_splitpvoigt(x, y, dy=True, bkg="Constant", plot=True, show_res=True)
if noreturn:
input("Press Enter to return (kills plot window)...")
return
else:
return x, y, fit, pw
if __name__ == "__main__":
pass
# uncomment at your convenience!
# x, y, fit, pw = test_mock()
# x, y, fit, pw = test_diffpat()
# x, y, fit, pw = test_real(45)
| [
"os.path.join",
"sloth.fit.peakfit.fit_splitpvoigt",
"os.path.realpath",
"PyMca.specfile.Specfile",
"numpy.linspace",
"PyMca.SpecfitFuns.splitpvoigt"
] | [((515, 541), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (531, 541), False, 'import os, sys\n'), ((743, 766), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(200)'], {}), '(0, 50, 200)\n', (754, 766), True, 'import numpy as np\n'), ((940, 987), 'sloth.fit.peakfit.fit_splitpvoigt', 'fit_splitpvoigt', (['x', 'y'], {'plot': '(True)', 'show_res': '(True)'}), '(x, y, plot=True, show_res=True)\n', (955, 987), False, 'from sloth.fit.peakfit import fit_splitpvoigt, fit_results\n'), ((1400, 1447), 'sloth.fit.peakfit.fit_splitpvoigt', 'fit_splitpvoigt', (['x', 'y'], {'plot': '(True)', 'show_res': '(True)'}), '(x, y, plot=True, show_res=True)\n', (1415, 1447), False, 'from sloth.fit.peakfit import fit_splitpvoigt, fit_results\n'), ((2040, 2112), 'sloth.fit.peakfit.fit_splitpvoigt', 'fit_splitpvoigt', (['x', 'y'], {'dy': '(True)', 'bkg': '"""Constant"""', 'plot': '(True)', 'show_res': '(True)'}), "(x, y, dy=True, bkg='Constant', plot=True, show_res=True)\n", (2055, 2112), False, 'from sloth.fit.peakfit import fit_splitpvoigt, fit_results\n'), ((1113, 1163), 'os.path.join', 'os.path.join', (['_curDir', '"""peakfit_tests_diffpat.dat"""'], {}), "(_curDir, 'peakfit_tests_diffpat.dat')\n", (1125, 1163), False, 'import os, sys\n'), ((1186, 1210), 'PyMca.specfile.Specfile', 'specfile.Specfile', (['fname'], {}), '(fname)\n', (1203, 1210), False, 'from PyMca import specfile\n'), ((1589, 1636), 'os.path.join', 'os.path.join', (['_curDir', '"""peakfit_tests_real.dat"""'], {}), "(_curDir, 'peakfit_tests_real.dat')\n", (1601, 1636), False, 'import os, sys\n'), ((1659, 1683), 'PyMca.specfile.Specfile', 'specfile.Specfile', (['fname'], {}), '(fname)\n', (1676, 1683), False, 'from PyMca import specfile\n'), ((868, 925), 'PyMca.SpecfitFuns.splitpvoigt', 'SpecfitFuns.splitpvoigt', (['[12.5, 30.75, 12.0, 5.0, 0.5]', 'x'], {}), '([12.5, 30.75, 12.0, 5.0, 0.5], x)\n', (891, 925), False, 'from PyMca import SpecfitFuns\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew, core, workspace
from functools import reduce
from hypothesis import given
from operator import mul
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python.model_helper import ModelHelper
class TestLayerNormOp(hu.HypothesisTestCase):
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_grad_op(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
op = core.CreateOperator(
"LayerNormGradient",
["gout", "out", "mean", "stdev", "in"],
["gin"],
axis=axis,
epsilon=epsilon,
)
def layer_norm_ref(X):
left = reduce(mul, X.shape[:axis], 1)
reshaped = np.reshape(X, [left, -1])
mean = np.mean(reshaped, axis=1).reshape([left, 1])
stdev = np.sqrt(
np.mean(np.power(reshaped, 2), axis=1).reshape([left, 1]) -
np.power(mean, 2) + epsilon
)
norm = (reshaped - mean) / (stdev)
norm = np.reshape(norm, X.shape)
mean = np.reshape(mean, X.shape[:axis] + (1,))
stdev = np.reshape(stdev, X.shape[:axis] + (1,))
return [norm, mean, stdev]
norm, mean, stdev = layer_norm_ref(X)
gout = norm
def layer_norm_grad_ref(gout_full, norm, mean_full, stdev_full, X_full):
left = reduce(mul, X_full.shape[:axis], 1)
right = reduce(mul, X_full.shape[axis:], 1)
X = np.reshape(X_full, [left, right])
stdev = np.reshape(stdev_full, [left, 1])
mean = np.reshape(mean_full, [left, 1])
gout = np.reshape(gout_full, [left, right])
dstdev_end = (-1.0) / np.power(stdev, 2.0) \
* np.sum((X - mean) * gout, axis=1).reshape([left, 1])
dmean_end = np.sum(-1.0 / stdev * gout, axis=1).reshape([left, 1])
dx_end = 1.0 / stdev * gout
# stdev block
dmean_stdev = -1.0 * mean / stdev * dstdev_end
dx_stdev = X / (right * stdev) * dstdev_end
# mean block
dmean = dmean_end + dmean_stdev
dxmean = (1.0 / right) * dmean
# final outputs
dx = dx_end + dx_stdev + dxmean
dx = dx.reshape(X_full.shape)
return [dx]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[gout, norm, mean, stdev, X],
reference=layer_norm_grad_ref
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[gout, norm, mean, stdev, X],
outputs_to_check=[0],
)
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_op(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
op = core.CreateOperator(
"LayerNorm",
["input"],
["output", "mean", "stdev"],
axis=axis,
epsilon=epsilon,
)
def layer_norm_ref(X):
left = reduce(mul, X.shape[:axis], 1)
reshaped = np.reshape(X, [left, -1])
mean = np.mean(reshaped, axis=1).reshape([left, 1])
stdev = np.sqrt(
np.mean(np.power(reshaped, 2), axis=1).reshape([left, 1]) -
np.power(mean, 2) + epsilon
)
norm = (reshaped - mean) / (stdev)
norm = np.reshape(norm, X.shape)
mean = np.reshape(mean, X.shape[:axis] + (1,))
stdev = np.reshape(stdev, X.shape[:axis] + (1,))
return [norm, mean, stdev]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=layer_norm_ref
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[X],
outputs_to_check=[0, 1, 2],
)
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_brew_wrapper(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
workspace.FeedBlob('X', X)
model = ModelHelper(name='test_layer_norm_brew_wrapper')
brew.layer_norm(
model,
'X',
'Y',
axis=axis,
epsilon=epsilon,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
| [
"numpy.mean",
"caffe2.python.workspace.FeedBlob",
"numpy.reshape",
"caffe2.python.model_helper.ModelHelper",
"numpy.power",
"functools.reduce",
"caffe2.python.hypothesis_test_util.tensors",
"caffe2.python.workspace.RunNetOnce",
"numpy.sum",
"numpy.expand_dims",
"caffe2.python.core.CreateOperator... | [((712, 833), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LayerNormGradient"""', "['gout', 'out', 'mean', 'stdev', 'in']", "['gin']"], {'axis': 'axis', 'epsilon': 'epsilon'}), "('LayerNormGradient', ['gout', 'out', 'mean', 'stdev',\n 'in'], ['gin'], axis=axis, epsilon=epsilon)\n", (731, 833), False, 'from caffe2.python import brew, core, workspace\n'), ((3252, 3356), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LayerNorm"""', "['input']", "['output', 'mean', 'stdev']"], {'axis': 'axis', 'epsilon': 'epsilon'}), "('LayerNorm', ['input'], ['output', 'mean', 'stdev'],\n axis=axis, epsilon=epsilon)\n", (3271, 3356), False, 'from caffe2.python import brew, core, workspace\n'), ((4614, 4640), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['"""X"""', 'X'], {}), "('X', X)\n", (4632, 4640), False, 'from caffe2.python import brew, core, workspace\n'), ((4658, 4706), 'caffe2.python.model_helper.ModelHelper', 'ModelHelper', ([], {'name': '"""test_layer_norm_brew_wrapper"""'}), "(name='test_layer_norm_brew_wrapper')\n", (4669, 4706), False, 'from caffe2.python.model_helper import ModelHelper\n'), ((4715, 4775), 'caffe2.python.brew.layer_norm', 'brew.layer_norm', (['model', '"""X"""', '"""Y"""'], {'axis': 'axis', 'epsilon': 'epsilon'}), "(model, 'X', 'Y', axis=axis, epsilon=epsilon)\n", (4730, 4775), False, 'from caffe2.python import brew, core, workspace\n'), ((4856, 4898), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.param_init_net'], {}), '(model.param_init_net)\n', (4876, 4898), False, 'from caffe2.python import brew, core, workspace\n'), ((4907, 4938), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.net'], {}), '(model.net)\n', (4927, 4938), False, 'from caffe2.python import brew, core, workspace\n'), ((600, 625), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (614, 625), True, 'import numpy as 
np\n'), ((952, 982), 'functools.reduce', 'reduce', (['mul', 'X.shape[:axis]', '(1)'], {}), '(mul, X.shape[:axis], 1)\n', (958, 982), False, 'from functools import reduce\n'), ((1006, 1031), 'numpy.reshape', 'np.reshape', (['X', '[left, -1]'], {}), '(X, [left, -1])\n', (1016, 1031), True, 'import numpy as np\n'), ((1325, 1350), 'numpy.reshape', 'np.reshape', (['norm', 'X.shape'], {}), '(norm, X.shape)\n', (1335, 1350), True, 'import numpy as np\n'), ((1370, 1409), 'numpy.reshape', 'np.reshape', (['mean', '(X.shape[:axis] + (1,))'], {}), '(mean, X.shape[:axis] + (1,))\n', (1380, 1409), True, 'import numpy as np\n'), ((1430, 1470), 'numpy.reshape', 'np.reshape', (['stdev', '(X.shape[:axis] + (1,))'], {}), '(stdev, X.shape[:axis] + (1,))\n', (1440, 1470), True, 'import numpy as np\n'), ((1678, 1713), 'functools.reduce', 'reduce', (['mul', 'X_full.shape[:axis]', '(1)'], {}), '(mul, X_full.shape[:axis], 1)\n', (1684, 1713), False, 'from functools import reduce\n'), ((1734, 1769), 'functools.reduce', 'reduce', (['mul', 'X_full.shape[axis:]', '(1)'], {}), '(mul, X_full.shape[axis:], 1)\n', (1740, 1769), False, 'from functools import reduce\n'), ((1786, 1819), 'numpy.reshape', 'np.reshape', (['X_full', '[left, right]'], {}), '(X_full, [left, right])\n', (1796, 1819), True, 'import numpy as np\n'), ((1840, 1873), 'numpy.reshape', 'np.reshape', (['stdev_full', '[left, 1]'], {}), '(stdev_full, [left, 1])\n', (1850, 1873), True, 'import numpy as np\n'), ((1893, 1925), 'numpy.reshape', 'np.reshape', (['mean_full', '[left, 1]'], {}), '(mean_full, [left, 1])\n', (1903, 1925), True, 'import numpy as np\n'), ((1945, 1981), 'numpy.reshape', 'np.reshape', (['gout_full', '[left, right]'], {}), '(gout_full, [left, right])\n', (1955, 1981), True, 'import numpy as np\n'), ((460, 475), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(1)'}), '(n=1)\n', (470, 475), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((3140, 3165), 'numpy.expand_dims', 
'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3154, 3165), True, 'import numpy as np\n'), ((3475, 3505), 'functools.reduce', 'reduce', (['mul', 'X.shape[:axis]', '(1)'], {}), '(mul, X.shape[:axis], 1)\n', (3481, 3505), False, 'from functools import reduce\n'), ((3529, 3554), 'numpy.reshape', 'np.reshape', (['X', '[left, -1]'], {}), '(X, [left, -1])\n', (3539, 3554), True, 'import numpy as np\n'), ((3848, 3873), 'numpy.reshape', 'np.reshape', (['norm', 'X.shape'], {}), '(norm, X.shape)\n', (3858, 3873), True, 'import numpy as np\n'), ((3893, 3932), 'numpy.reshape', 'np.reshape', (['mean', '(X.shape[:axis] + (1,))'], {}), '(mean, X.shape[:axis] + (1,))\n', (3903, 3932), True, 'import numpy as np\n'), ((3953, 3993), 'numpy.reshape', 'np.reshape', (['stdev', '(X.shape[:axis] + (1,))'], {}), '(stdev, X.shape[:axis] + (1,))\n', (3963, 3993), True, 'import numpy as np\n'), ((3005, 3020), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(1)'}), '(n=1)\n', (3015, 3020), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((4506, 4531), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4520, 4531), True, 'import numpy as np\n'), ((4361, 4376), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(1)'}), '(n=1)\n', (4371, 4376), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((1051, 1076), 'numpy.mean', 'np.mean', (['reshaped'], {'axis': '(1)'}), '(reshaped, axis=1)\n', (1058, 1076), True, 'import numpy as np\n'), ((2016, 2036), 'numpy.power', 'np.power', (['stdev', '(2.0)'], {}), '(stdev, 2.0)\n', (2024, 2036), True, 'import numpy as np\n'), ((2138, 2173), 'numpy.sum', 'np.sum', (['(-1.0 / stdev * gout)'], {'axis': '(1)'}), '(-1.0 / stdev * gout, axis=1)\n', (2144, 2173), True, 'import numpy as np\n'), ((3574, 3599), 'numpy.mean', 'np.mean', (['reshaped'], {'axis': '(1)'}), '(reshaped, axis=1)\n', (3581, 3599), True, 'import numpy as np\n'), ((1217, 1234), 
'numpy.power', 'np.power', (['mean', '(2)'], {}), '(mean, 2)\n', (1225, 1234), True, 'import numpy as np\n'), ((2061, 2094), 'numpy.sum', 'np.sum', (['((X - mean) * gout)'], {'axis': '(1)'}), '((X - mean) * gout, axis=1)\n', (2067, 2094), True, 'import numpy as np\n'), ((3740, 3757), 'numpy.power', 'np.power', (['mean', '(2)'], {}), '(mean, 2)\n', (3748, 3757), True, 'import numpy as np\n'), ((1149, 1170), 'numpy.power', 'np.power', (['reshaped', '(2)'], {}), '(reshaped, 2)\n', (1157, 1170), True, 'import numpy as np\n'), ((3672, 3693), 'numpy.power', 'np.power', (['reshaped', '(2)'], {}), '(reshaped, 2)\n', (3680, 3693), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle import enable_static
@unittest.skipIf(not core.supports_bfloat16(),
"place does not support BF16 evaluation")
class TestElementwiseAddBf16MklDNNOp(OpTest):
def setUp(self):
self.op_type = "elementwise_add"
self.use_mkldnn = True
self.mkldnn_data_type = "bfloat16"
self.axis = -1
self.generate_data()
self.x_bf16 = convert_float_to_uint16(self.x)
self.y_bf16 = convert_float_to_uint16(self.y)
self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': convert_float_to_uint16(self.out)}
def generate_data(self):
self.x = np.random.random(100, ).astype(np.float32)
self.y = np.random.random(100, ).astype(np.float32)
self.out = np.add(self.x, self.y)
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
# elementwise_add grad is just passing upper gradients to either X or Y or both
def test_check_grad_normal(self):
self.check_grad_with_place(
core.CPUPlace(), ["X", "Y"],
"Out",
check_dygraph=False,
user_defined_grads=[self.x_bf16, self.x_bf16],
user_defined_grad_outputs=[self.x_bf16])
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
core.CPUPlace(), ["Y"],
"Out",
check_dygraph=False,
user_defined_grads=[self.y_bf16],
user_defined_grad_outputs=[self.y_bf16])
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
core.CPUPlace(), ["X"],
"Out",
check_dygraph=False,
user_defined_grads=[self.x_bf16],
user_defined_grad_outputs=[self.x_bf16])
if __name__ == '__main__':
enable_static()
unittest.main()
| [
"paddle.fluid.core.supports_bfloat16",
"numpy.add",
"paddle.fluid.tests.unittests.op_test.convert_float_to_uint16",
"numpy.random.random",
"paddle.enable_static",
"unittest.main",
"paddle.fluid.core.CPUPlace"
] | [((2685, 2700), 'paddle.enable_static', 'enable_static', ([], {}), '()\n', (2698, 2700), False, 'from paddle import enable_static\n'), ((2705, 2720), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2718, 2720), False, 'import unittest\n'), ((1196, 1227), 'paddle.fluid.tests.unittests.op_test.convert_float_to_uint16', 'convert_float_to_uint16', (['self.x'], {}), '(self.x)\n', (1219, 1227), False, 'from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16\n'), ((1250, 1281), 'paddle.fluid.tests.unittests.op_test.convert_float_to_uint16', 'convert_float_to_uint16', (['self.y'], {}), '(self.y)\n', (1273, 1281), False, 'from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16\n'), ((1649, 1671), 'numpy.add', 'np.add', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (1655, 1671), True, 'import numpy as np\n'), ((854, 878), 'paddle.fluid.core.supports_bfloat16', 'core.supports_bfloat16', ([], {}), '()\n', (876, 878), True, 'import paddle.fluid.core as core\n'), ((1445, 1478), 'paddle.fluid.tests.unittests.op_test.convert_float_to_uint16', 'convert_float_to_uint16', (['self.out'], {}), '(self.out)\n', (1468, 1478), False, 'from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16\n'), ((1743, 1758), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (1756, 1758), True, 'import paddle.fluid.core as core\n'), ((1931, 1946), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (1944, 1946), True, 'import paddle.fluid.core as core\n'), ((2213, 2228), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (2226, 2228), True, 'import paddle.fluid.core as core\n'), ((2477, 2492), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (2490, 2492), True, 'import paddle.fluid.core as core\n'), ((1527, 1548), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (1543, 1548), True, 'import numpy as np\n'), ((1587, 1608), 
'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (1603, 1608), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.