code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
# from tsnecuda import TSNE
# from sklearn.manifold import TSNE
from data.IncrementalTSNE import IncrementalTSNE
import fastlapjv
from matplotlib import pyplot as plt
from scipy.spatial.distance import cdist
from fastlapjv import fastlapjv
import math
from time import time
class GridLayout(object):
    """Project high-dimensional samples to 2-D with (incremental) t-SNE and
    snap the embedding onto a square grid via a linear-assignment solve
    (fastlapjv)."""
    def __init__(self):
        super().__init__()
        # Default projector; tsne() rebuilds self.tsner on every call with the
        # requested init, so this instance is effectively a placeholder.
        self.tsner = IncrementalTSNE(n_components=2, init='pca', method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
    def fit(self, X: np.ndarray, labels: np.ndarray = None, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None):
        """main fit function
        Args:
            X (np.ndarray): n * d, n is the number of samples, d is the dimension of a sample
            labels (np.ndarray): label of each sample in X
            constraintX (np.ndarray): previously projected samples used as constraints (optional)
            constraintY (np.ndarray): 2-D positions of the constraint samples (optional)
            constraintLabels (np.ndarray): labels of the constraint samples (optional)
            init: initial 2-D layout handed to t-SNE; falls back to 'pca'
        Returns:
            (X_embedded, grid_assignment, grid_side_length)
        """
        X_embedded = self.tsne(X, constraintX = constraintX, constraintY = constraintY, labels = labels, constraintLabels = constraintLabels, init = init)
        grid_ass, grid_size = self.grid(X_embedded)
        return X_embedded, grid_ass, grid_size
    def tsne(self, X: np.ndarray, labels: np.ndarray = None, perplexity: int = 15, learning_rate: int = 3, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None) -> np.ndarray:
        """Embed X into 2-D with IncrementalTSNE, optionally constrained by a
        previous embedding (constraintX/constraintY/constraintLabels).

        NOTE(review): the `perplexity` and `learning_rate` parameters are
        accepted but never used — the IncrementalTSNE instances below
        hard-code perplexity 30 (unconstrained) / 5 (constrained); confirm
        whether they should be wired through.
        """
        # remove empty labels
        # Compress label ids to a dense 0..k-1 range, in first-seen order.
        labelcnt = 0
        removeEmptyTransform = np.zeros((np.max(labels)+1), dtype=int)-1
        for label in labels:
            if removeEmptyTransform[label]==-1:
                removeEmptyTransform[label]=labelcnt
                labelcnt += 1
        labels = removeEmptyTransform[labels]
        # NOTE(review): when constraintLabels is None this indexing does not
        # raise (numpy treats None as a new axis); result is only meaningful
        # when a real label array is supplied — confirm callers.
        constraintLabels = removeEmptyTransform[constraintLabels]
        self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
        if constraintX is None:
            # Unconstrained run: constraint args are passed through as None.
            X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, prev_n = 0 if constraintX is None else len(constraintX),
                                                  alpha = 0.5, labels=labels, label_alpha=0.9)
        else:
            # Constrained run: rebuild with a smaller perplexity (5).
            self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=5, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
            X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, constraint_labels = constraintLabels, prev_n = 0 if constraintX is None else len(constraintX),
                                                  alpha = 0.3, labels = labels, label_alpha=0.2)
        return X_embedded
    def grid(self, X_embedded: np.ndarray):
        """Assign each embedded point to a cell of a square grid.

        Normalises the embedding to [0, 1]^2 (in place), builds the smallest
        square grid holding all points, pads with dummy points at (0.5, 0.5),
        and solves the point-to-cell assignment with fastlapjv.

        Returns:
            (col_asses, square_len): grid cell index per sample and the grid
            side length.
        """
        X_embedded -= X_embedded.min(axis=0)
        X_embedded /= X_embedded.max(axis=0)
        num = X_embedded.shape[0]
        square_len = math.ceil(np.sqrt(num))
        N = square_len * square_len
        grids = np.dstack(np.meshgrid(np.linspace(0, 1 - 1.0 / square_len, square_len),
                                      np.linspace(0, 1 - 1.0 / square_len, square_len))) \
            .reshape(-1, 2)
        original_cost_matrix = cdist(grids, X_embedded, "euclidean")
        # knn process
        # Pad to a full square: N - n dummy columns so the cost matrix is square.
        dummy_points = np.ones((N - original_cost_matrix.shape[1], 2)) * 0.5
        # dummy at [0.5, 0.5]
        dummy_vertices = (1 - cdist(grids, dummy_points, "euclidean")) * 100
        cost_matrix = np.concatenate((original_cost_matrix, dummy_vertices), axis=1)
        row_asses, col_asses, info = fastlapjv(cost_matrix, k_value=50)
        col_asses = col_asses[:num]
        return col_asses, square_len
if __name__ == "__main__":
X = np.random.rand(500, 128)
labels = np.random.randint(10, size=500)
grid = GridLayout()
grid.fit(X, labels) | [
"numpy.sqrt",
"numpy.random.rand",
"numpy.ones",
"scipy.spatial.distance.cdist",
"data.IncrementalTSNE.IncrementalTSNE",
"numpy.max",
"numpy.random.randint",
"numpy.linspace",
"numpy.concatenate",
"fastlapjv.fastlapjv"
] | [((3732, 3756), 'numpy.random.rand', 'np.random.rand', (['(500)', '(128)'], {}), '(500, 128)\n', (3746, 3756), True, 'import numpy as np\n'), ((3770, 3801), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(500)'}), '(10, size=500)\n', (3787, 3801), True, 'import numpy as np\n'), ((392, 528), 'data.IncrementalTSNE.IncrementalTSNE', 'IncrementalTSNE', ([], {'n_components': '(2)', 'init': '"""pca"""', 'method': '"""barnes_hut"""', 'perplexity': '(30)', 'angle': '(0.3)', 'n_jobs': '(8)', 'n_iter': '(1000)', 'random_state': '(100)'}), "(n_components=2, init='pca', method='barnes_hut', perplexity\n =30, angle=0.3, n_jobs=8, n_iter=1000, random_state=100)\n", (407, 528), False, 'from data.IncrementalTSNE import IncrementalTSNE\n'), ((1835, 2000), 'data.IncrementalTSNE.IncrementalTSNE', 'IncrementalTSNE', ([], {'n_components': '(2)', 'init': "('pca' if init is None else init)", 'method': '"""barnes_hut"""', 'perplexity': '(30)', 'angle': '(0.3)', 'n_jobs': '(8)', 'n_iter': '(1000)', 'random_state': '(100)'}), "(n_components=2, init='pca' if init is None else init,\n method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000,\n random_state=100)\n", (1850, 2000), False, 'from data.IncrementalTSNE import IncrementalTSNE\n'), ((3214, 3251), 'scipy.spatial.distance.cdist', 'cdist', (['grids', 'X_embedded', '"""euclidean"""'], {}), "(grids, X_embedded, 'euclidean')\n", (3219, 3251), False, 'from scipy.spatial.distance import cdist\n'), ((3480, 3542), 'numpy.concatenate', 'np.concatenate', (['(original_cost_matrix, dummy_vertices)'], {'axis': '(1)'}), '((original_cost_matrix, dummy_vertices), axis=1)\n', (3494, 3542), True, 'import numpy as np\n'), ((3580, 3614), 'fastlapjv.fastlapjv', 'fastlapjv', (['cost_matrix'], {'k_value': '(50)'}), '(cost_matrix, k_value=50)\n', (3589, 3614), False, 'from fastlapjv import fastlapjv\n'), ((2290, 2454), 'data.IncrementalTSNE.IncrementalTSNE', 'IncrementalTSNE', ([], {'n_components': '(2)', 'init': "('pca' if 
init is None else init)", 'method': '"""barnes_hut"""', 'perplexity': '(5)', 'angle': '(0.3)', 'n_jobs': '(8)', 'n_iter': '(1000)', 'random_state': '(100)'}), "(n_components=2, init='pca' if init is None else init,\n method='barnes_hut', perplexity=5, angle=0.3, n_jobs=8, n_iter=1000,\n random_state=100)\n", (2305, 2454), False, 'from data.IncrementalTSNE import IncrementalTSNE\n'), ((2943, 2955), 'numpy.sqrt', 'np.sqrt', (['num'], {}), '(num)\n', (2950, 2955), True, 'import numpy as np\n'), ((3297, 3344), 'numpy.ones', 'np.ones', (['(N - original_cost_matrix.shape[1], 2)'], {}), '((N - original_cost_matrix.shape[1], 2))\n', (3304, 3344), True, 'import numpy as np\n'), ((3411, 3450), 'scipy.spatial.distance.cdist', 'cdist', (['grids', 'dummy_points', '"""euclidean"""'], {}), "(grids, dummy_points, 'euclidean')\n", (3416, 3450), False, 'from scipy.spatial.distance import cdist\n'), ((1510, 1524), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (1516, 1524), True, 'import numpy as np\n'), ((3031, 3079), 'numpy.linspace', 'np.linspace', (['(0)', '(1 - 1.0 / square_len)', 'square_len'], {}), '(0, 1 - 1.0 / square_len, square_len)\n', (3042, 3079), True, 'import numpy as np\n'), ((3097, 3145), 'numpy.linspace', 'np.linspace', (['(0)', '(1 - 1.0 / square_len)', 'square_len'], {}), '(0, 1 - 1.0 / square_len, square_len)\n', (3108, 3145), True, 'import numpy as np\n')] |
import os
import numpy as np
from skmultiflow.data.generators.regression_generator import RegressionGenerator
def test_regression_generator(test_path):
    """Smoke-test RegressionGenerator: metadata, sampling, restart, and
    agreement with the pre-computed reference stream stored in
    ``regression_stream.npz`` under ``test_path``."""
    stream = RegressionGenerator(n_samples=100, n_features=20, n_targets=4, n_informative=6, random_state=0)
    stream.prepare_for_use()
    assert stream.n_remaining_samples() == 100
    # All 20 features are numeric, hence the 'att_num_*' names.
    expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',
                      'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9',
                      'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14',
                      'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19']
    assert stream.feature_names == expected_names
    assert stream.target_values == [float] * stream.n_targets
    expected_names = ['target_0', 'target_1', 'target_2', 'target_3']
    assert stream.target_names == expected_names
    assert stream.n_features == 20
    assert stream.n_cat_features == 0
    assert stream.n_num_features == 20
    assert stream.n_targets == 4
    assert stream.get_data_info() == 'Regression Generator - 4 targets, 20 features'
    assert stream.has_more_samples() is True
    assert stream.is_restartable() is True
    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'regression_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']
    X, y = stream.next_sample()
    assert np.allclose(X[0], X_expected[0])
    assert np.allclose(y[0], y_expected[0])
    # last_sample() must return exactly the sample just drawn.
    X, y = stream.last_sample()
    assert np.allclose(X[0], X_expected[0])
    assert np.allclose(y[0], y_expected[0])
    # After restart, the first 10 samples must reproduce the reference data.
    stream.restart()
    X, y = stream.next_sample(10)
    assert np.allclose(X, X_expected)
    assert np.allclose(y, y_expected)
    assert stream.n_targets == y.shape[1]
    assert stream.n_features == X.shape[1]
| [
"os.path.join",
"numpy.load",
"skmultiflow.data.generators.regression_generator.RegressionGenerator",
"numpy.allclose"
] | [((167, 266), 'skmultiflow.data.generators.regression_generator.RegressionGenerator', 'RegressionGenerator', ([], {'n_samples': '(100)', 'n_features': '(20)', 'n_targets': '(4)', 'n_informative': '(6)', 'random_state': '(0)'}), '(n_samples=100, n_features=20, n_targets=4,\n n_informative=6, random_state=0)\n', (186, 266), False, 'from skmultiflow.data.generators.regression_generator import RegressionGenerator\n'), ((1334, 1382), 'os.path.join', 'os.path.join', (['test_path', '"""regression_stream.npz"""'], {}), "(test_path, 'regression_stream.npz')\n", (1346, 1382), False, 'import os\n'), ((1394, 1412), 'numpy.load', 'np.load', (['test_file'], {}), '(test_file)\n', (1401, 1412), True, 'import numpy as np\n'), ((1511, 1543), 'numpy.allclose', 'np.allclose', (['X[0]', 'X_expected[0]'], {}), '(X[0], X_expected[0])\n', (1522, 1543), True, 'import numpy as np\n'), ((1555, 1587), 'numpy.allclose', 'np.allclose', (['y[0]', 'y_expected[0]'], {}), '(y[0], y_expected[0])\n', (1566, 1587), True, 'import numpy as np\n'), ((1632, 1664), 'numpy.allclose', 'np.allclose', (['X[0]', 'X_expected[0]'], {}), '(X[0], X_expected[0])\n', (1643, 1664), True, 'import numpy as np\n'), ((1676, 1708), 'numpy.allclose', 'np.allclose', (['y[0]', 'y_expected[0]'], {}), '(y[0], y_expected[0])\n', (1687, 1708), True, 'import numpy as np\n'), ((1776, 1802), 'numpy.allclose', 'np.allclose', (['X', 'X_expected'], {}), '(X, X_expected)\n', (1787, 1802), True, 'import numpy as np\n'), ((1814, 1840), 'numpy.allclose', 'np.allclose', (['y', 'y_expected'], {}), '(y, y_expected)\n', (1825, 1840), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
def combine_flat_list(_structure, _flat_list, axis=1):
    """Merge several flat tensor lists element-wise and repack them.

    For each position i, the i-th tensors of all lists are joined along
    `axis`: scalars (rank 0) are stacked, everything else is concatenated.
    The merged flat list is packed back into the shape of `_structure`.
    """
    merged = []
    for i in range(len(_flat_list[0])):
        group = [flat[i] for flat in _flat_list]
        if len(group[0].get_shape()) == 0:
            merged.append(tf.stack(group, axis))
        else:
            merged.append(tf.concat(group, axis))
    return nest.pack_sequence_as(_structure, merged)
def to_bool(_t):
    """Cast tensor `_t` to tf.bool."""
    return tf.cast(_t, dtype=tf.bool)
def switch_time_and_batch_dimension(_tensor):
    """Swap the first two dimensions (time <-> batch) of `_tensor`.

    Boolean tensors are temporarily cast to int64 around the transpose and
    cast back to bool afterwards.

    BUG FIX: the original re-tested `_tensor.dtype == tf.bool` AFTER the
    int64 cast, so that check was always False and bool inputs were returned
    as int64. The input dtype is now remembered before the cast.
    """
    was_bool = _tensor.dtype == tf.bool
    rank = len(_tensor.get_shape())
    perm = np.arange(rank)
    perm[0], perm[1] = 1, 0
    if was_bool:
        _tensor = tf.cast(_tensor, tf.int64)
    res = tf.transpose(_tensor, perm, name='switch_time_and_batch_dimension')
    if was_bool:
        res = tf.cast(res, tf.bool)
    return res
def exp_convolve(tensor, decay, initializer=None):
    """Exponentially filter `tensor` along its leading axis.

    Each step computes acc = acc * decay + (1 - decay) * x via tf.scan,
    starting from `initializer` (zeros by default). Only float dtypes are
    accepted.
    """
    with tf.name_scope('ExpConvolve'):
        assert tensor.dtype in [tf.float16, tf.float32, tf.float64]
        init = tf.zeros_like(tensor) if initializer is None else initializer

        def _step(acc, x):
            return acc * decay + (1 - decay) * x

        result = tf.scan(_step, tensor, initializer=init)
    return result
| [
"tensorflow.transpose",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.stack",
"tensorflow.concat",
"tensorflow.name_scope",
"tensorflow.zeros_like",
"tensorflow.scan",
"tensorflow.cast",
"numpy.arange"
] | [((434, 478), 'tensorflow.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', (['_structure', '_combined'], {}), '(_structure, _combined)\n', (455, 478), False, 'from tensorflow.python.util import nest\n'), ((509, 529), 'tensorflow.cast', 'tf.cast', (['_t', 'tf.bool'], {}), '(_t, tf.bool)\n', (516, 529), True, 'import tensorflow as tf\n'), ((625, 640), 'numpy.arange', 'np.arange', (['rank'], {}), '(rank)\n', (634, 640), True, 'import numpy as np\n'), ((757, 824), 'tensorflow.transpose', 'tf.transpose', (['_tensor', 'perm'], {'name': '"""switch_time_and_batch_dimension"""'}), "(_tensor, perm, name='switch_time_and_batch_dimension')\n", (769, 824), True, 'import tensorflow as tf\n'), ((720, 746), 'tensorflow.cast', 'tf.cast', (['_tensor', 'tf.int64'], {}), '(_tensor, tf.int64)\n', (727, 746), True, 'import tensorflow as tf\n'), ((873, 894), 'tensorflow.cast', 'tf.cast', (['res', 'tf.bool'], {}), '(res, tf.bool)\n', (880, 894), True, 'import tensorflow as tf\n'), ((972, 1000), 'tensorflow.name_scope', 'tf.name_scope', (['"""ExpConvolve"""'], {}), "('ExpConvolve')\n", (985, 1000), True, 'import tensorflow as tf\n'), ((1178, 1265), 'tensorflow.scan', 'tf.scan', (['(lambda a, x: a * decay + (1 - decay) * x)', 'tensor'], {'initializer': 'initializer'}), '(lambda a, x: a * decay + (1 - decay) * x, tensor, initializer=\n initializer)\n', (1185, 1265), True, 'import tensorflow as tf\n'), ((326, 343), 'tensorflow.stack', 'tf.stack', (['t', 'axis'], {}), '(t, axis)\n', (334, 343), True, 'import tensorflow as tf\n'), ((375, 393), 'tensorflow.concat', 'tf.concat', (['t', 'axis'], {}), '(t, axis)\n', (384, 393), True, 'import tensorflow as tf\n'), ((1129, 1150), 'tensorflow.zeros_like', 'tf.zeros_like', (['tensor'], {}), '(tensor)\n', (1142, 1150), True, 'import tensorflow as tf\n')] |
import math
from typing import Union
import numpy as np
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians (Euler–Rodrigues formula).
    """
    unit = np.asarray(axis)
    unit = unit / math.sqrt(np.dot(unit, unit))  # normalise the axis
    half = theta / 2.0
    a = math.cos(half)
    b, c, d = -unit * math.sin(half)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    row0 = [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)]
    row1 = [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)]
    row2 = [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]
    return np.array([row0, row1, row2])
class Rectangle:
    """A rectangular panel defined by two opposite corners in a local frame,
    plus the rotation (axis + angle) and translation that map it into the
    global frame.

    Instances are serialised to '<Type=..., Geometry=..., ...>' command
    strings via get_tra_command()."""
    def __init__(self):
        self._name = None  # identifier used in the TRA command
        self._local_vertex_1 = None  # one corner, local frame (x, y, z)
        self._local_vertex_2 = None  # opposite corner, local frame
        self._temperature = None  # surface temperature
        self._emissivity = 1.0  # fixed; no public setter is exposed
        self._type = None  # e.g. 'Emitter' or 'Receiver'
        self._is_reverse = False  # TRA 'Reverse' flag
        self._local2global_xyz = None  # translation applied after rotation
        self._local2global_rotation_angle = None
        self._local2global_rotation_axis = None
        self._global_vertex_1 = None  # corners after the local->global transform
        self._global_vertex_2 = None
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name: str):
        self._name = name
    @property
    def local_vertex_1(self) -> np.ndarray:
        return self._local_vertex_1
    @local_vertex_1.setter
    def local_vertex_1(self, vertex: Union[tuple, list, np.ndarray]):
        self._local_vertex_1 = vertex
    @property
    def local_vertex_2(self) -> np.ndarray:
        return self._local_vertex_2
    @local_vertex_2.setter
    def local_vertex_2(self, vertex: Union[tuple, list, np.ndarray]):
        self._local_vertex_2 = vertex
    @property
    def temperature(self):
        return self._temperature
    @temperature.setter
    def temperature(self, temperature: float):
        self._temperature = temperature
    @property
    def local2global_rotation_angle(self):
        return self._local2global_rotation_angle
    @local2global_rotation_angle.setter
    def local2global_rotation_angle(self, angle):
        self._local2global_rotation_angle = angle
    @property
    def local2global_rotation_axis(self):
        return self._local2global_rotation_axis
    @local2global_rotation_axis.setter
    def local2global_rotation_axis(self, axis):
        self._local2global_rotation_axis = axis
    @property
    def local2global_xyz(self):
        return self._local2global_xyz
    @local2global_xyz.setter
    def local2global_xyz(self, xyz: Union[list, tuple, np.ndarray]):
        self._local2global_xyz = xyz
    @property
    def global_vertex_1(self):
        return self._global_vertex_1
    @global_vertex_1.setter
    def global_vertex_1(self, vertex):
        self._global_vertex_1 = vertex
    @property
    def global_vertex_2(self):
        return self._global_vertex_2
    @global_vertex_2.setter
    def global_vertex_2(self, vertex):
        self._global_vertex_2 = vertex
    @property
    def type(self):
        return self._type
    @type.setter
    def type(self, x: str):
        self._type = x
    @property
    def is_reverse(self):
        return self._is_reverse
    @is_reverse.setter
    def is_reverse(self, x: bool):
        self._is_reverse = x
    def local2global_vertices(
            self,
            v1=None,
            v2=None,
            xyz: Union[list, tuple, np.ndarray] = None,
            axis: Union[list, tuple, np.ndarray] = None,
            angle: float = None
    ):
        """Rotate then translate the local vertices into the global frame.

        Any argument supplied here overwrites the corresponding attribute
        before the transform; the results are stored on global_vertex_1/2 and
        also returned as a tuple.

        NOTE(review): the truthiness guards (`if v1:` etc.) skip falsy values
        such as 0 and raise on multi-element ndarrays, and
        `not (self.local_vertex_1 or self.local_vertex_2)` only raises when
        BOTH vertices are missing — presumably `and` was intended; confirm.
        """
        # assign parameters
        if v1:
            self.local_vertex_1 = v1
        if v2:
            self.local_vertex_2 = v2
        if xyz:
            self.local2global_xyz = xyz
        if axis:
            self.local2global_rotation_axis = axis
        if angle:
            self.local2global_rotation_angle = angle
        if not (self.local_vertex_1 or self.local_vertex_2):
            raise ValueError('Missing local vertex information.')
        # local2global rotation
        rot_mat = rotation_matrix(self.local2global_rotation_axis, self.local2global_rotation_angle)
        vertex_1 = np.dot(rot_mat, self.local_vertex_1)
        vertex_2 = np.dot(rot_mat, self.local_vertex_2)
        # local2global shift
        vertex_1 += self.local2global_xyz
        vertex_2 += self.local2global_xyz
        # assign global vertices to object
        self.global_vertex_1 = vertex_1
        self.global_vertex_2 = vertex_2
        return vertex_1, vertex_2
    @staticmethod
    def get_global_vertices(v1, v2):
        """Expand two opposite corners into the four rectangle vertices.

        NOTE(review): the swap inside max_a_min_b is commented out, so
        'max'/'min' are just (a, b) in the order passed — the vertex ordering
        therefore depends on the caller's corner order; confirm intended.
        """
        def max_a_min_b(a, b):
            # if a < b:
            #     a += b
            #     b = a - b
            #     a -= b
            return a, b
        xmax, xmin = max_a_min_b(v1[0], v2[0])
        ymax, ymin = max_a_min_b(v1[1], v2[1])
        zmax, zmin = max_a_min_b(v1[2], v2[2])
        vv1 = [xmin, ymin, zmin]
        vv2 = [xmax, ymax, zmin]
        vv3 = [xmax, ymax, zmax]
        vv4 = [xmin, ymin, zmax]
        return vv1, vv2, vv3, vv4
    def get_tra_command(self):
        """Serialise this rectangle as a TRA command string.

        Geometry is the four global vertices, each rendered as an 'x:y:z'
        triplet with 3 decimal places, joined by '*'."""
        type = self.type  # NOTE(review): shadows the builtin `type`
        geometry = self.get_global_vertices(self.global_vertex_1, self.global_vertex_2)
        geometry = [':'.join(['{:.3f}'.format(c) for c in v]) for v in geometry]
        geometry = '*'.join(geometry)
        name = self.name
        temperature = self.temperature
        reverse = self.is_reverse
        emissivity = self._emissivity
        return f'<Type={type}, Geometry={geometry}, Name={name}, Temperature={temperature}, Reverse={reverse}, Emissivity={emissivity}>'
def array_windows(
        x: list,
        z: list,
        h: list,
        w: list,
        temperature: list,
        angle: float,
        local2global_xyz: np.ndarray = None,
        name_prefix: str = 'w3_1'
) -> list:
    """Build one reversed 'Emitter' Rectangle per window centre.

    Args:
        x: window centre x-coordinates (local frame).
        z: window centre z-coordinates, one per centre.
        h: window heights, one per centre.
        w: window widths, one per centre.
        temperature: emitter temperature per window.
        angle: rotation about the z-axis used for the local->global mapping.
        local2global_xyz: translation applied after rotation; defaults to the
            origin ([0, 0, 0]).
        name_prefix: prefix for the generated Rectangle names (default keeps
            the original 'w3_1_{i}' naming).

    Returns:
        List of configured Rectangle objects with global vertices computed.
    """
    # Fix: original used a mutable list literal as the default argument;
    # use a None sentinel instead (same effective default).
    if local2global_xyz is None:
        local2global_xyz = [0, 0, 0]
    rects = []
    for i, cx in enumerate(x):
        rect = Rectangle()
        rect.name = f'{name_prefix}_{i}'
        rect.local_vertex_1 = [cx - w[i] / 2, 0, z[i] - h[i] / 2]
        rect.local_vertex_2 = [cx + w[i] / 2, 0, z[i] + h[i] / 2]
        rect.local2global_rotation_axis = [0, 0, 1]
        rect.local2global_rotation_angle = angle
        rect.local2global_xyz = local2global_xyz
        rect.local2global_vertices()
        rect.type = 'Emitter'
        rect.is_reverse = True
        rect.temperature = temperature[i]
        rects.append(rect)
    return rects
def w3_emitter():
    """Print TRA 'Emitter' commands for the W3 facade openings (timber facade
    panels, windows and soffits at levels 1 and 2). Disabled scenarios are
    kept as comments for reference. Example of the output format:

    Type=Emitter
    Geometry=5:0:0 * 10.2:0:0 * 10.2:-1.47:5.39 * 5:-1.47:5.39
    Name=Glazing
    Temperature=1105
    Reverse=FALSE
    Emissivity=1
    <Type=Emitter,Geometry=5:0:0 * 10.2:0:0 * 10.2:-1.47:5.39 * 5:-1.47:5.39,Name=Glazing,Temperature=1105,Reverse=FALSE,Emissivity=1>
    """
    angle = (180 + 90 + 9.5) / 180 * np.pi
    # w3 - level 0
    # x = [1.5, 1 * 6 + 1.5, 2 * 6 + 1.5, 4 * 6, 6 * 6 - 1.5, 7 * 6 - 1.5]
    # x = [1.5, 1 * 6 + 1.5, 2 * 6 + 1.5, 4 * 6, 6 * 6 - 1.5]
    # z = [1.75, 1.75, 1.75, 1.75, 1.75, 1.75]
    # w = [3, 3, 3, 6, 3, 3]
    # h = [3.5, 3.5, 3.5, 3.5, 3.5, 3.5]
    # t = np.full_like(x, 1105)
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    # [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 timber facade
    x = [0.75, 3.75, 6.75, 9.75, 12.75, 15.75, 32.25, 35.25, 38.25, 41.25, 44.25]
    z = np.full_like(x, 4.25+3.55/2)
    w = np.full_like(x, 1.5)
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 window
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 30.75, 33.75, 36.75, 39.75, 42.75, 46.5]
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 30.75, 33.75, 36.75, 39.75, 42.75,] # fire rate 1 windows
    z = np.full_like(x, 4.25+3.55/2)
    w = np.full_like(x, 1.5)
    w[-1] = 3
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 1105)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 soffit timber
    x = [9, 5*6+5*3/2]
    z = np.full_like(x, 4.25+3.55+1.45/2)
    w = [3*6, 5*3]
    h = np.full_like(x, 1.45)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 timber facade
    x = [0.75, 3.75, 6.75, 9.75, 12.75, 15.75, 32.25, 35.25, 38.25, 41.25, 44.25, 47.25]
    z = np.full_like(x, 8.5+3.55/2)
    w = np.full_like(x, 1.5)
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 window
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 24, 30.75, 33.75, 36.75, 39.75, 42.75, 45.75]
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 24, 30.75, 33.75, 36.75, 39.75, 42.75,] # fire rate the end three
    z = np.full_like(x, 8.5+3.55/2)
    w = np.full_like(x, 1.5)
    w[6] = 12 # to add the central windows
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 1105)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 soffit timber
    x = [9, 5*6+3*6/2]
    z = np.full_like(x, 8.5+3.55+1.45/2)
    w = [3*6, 3*6]
    h = np.full_like(x, 1.45)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w2 - recessed windows
    # x = [24]
    # z = np.full_like(x, 4.25+3.55/2)
    # w = np.full_like(x, 6)
    # h = np.full_like(x, 3.55)
    # t = np.full_like(x, 1105)
    # local2global_xyz = np.array([0, -45, 0])
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    # [print(p.get_tra_command()) for p in p_]
    # w3 - far end bit
    # angle = (180 + 90 + 75) / 180 * np.pi
    # x = [5.75/2, ]
    # z = [3.5/2, ]
    # w = [5.75, ]
    # h = [3.5, ]
    # t = np.full_like(x, 1105)
    # local2global_xyz = np.array([7.8, -45, 0])
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle, local2global_xyz=local2global_xyz)
    # [print(p.get_tra_command()) for p in p_]
def w3_receiver():
    """Print the TRA 'Receiver' command for the W3 measurement plane."""
    angle = (180 + 90 + 9.5) / 180 * np.pi
    # w3 - receiver
    centres_x = [13.5 / 2]
    centres_z = [15 / 2]
    widths = np.full_like(centres_x, 55)
    heights = np.full_like(centres_x, 13.5)
    temperatures = np.full_like(centres_x, 293.15)
    receivers = []
    for i, (cx, cz, width, height, temp) in enumerate(
            zip(centres_x, centres_z, widths, heights, temperatures)):
        rect = Rectangle()
        rect.name = f'w3_m_{i}'
        rect.local_vertex_1 = [cx - width / 2, 0, cz - height / 2]
        rect.local_vertex_2 = [cx + width / 2, 0, cz + height / 2]
        rect.local2global_rotation_axis = [0, 0, 1]
        rect.local2global_rotation_angle = angle
        rect.local2global_xyz = [0, 0, 0]
        rect.local2global_vertices()
        rect.type = 'Receiver'
        rect.is_reverse = True
        rect.temperature = temp
        receivers.append(rect)
    for rect in receivers:
        print(rect.get_tra_command())
def w2_receiver():
    """Print the TRA 'Receiver' command for the W2 measurement plane."""
    angle = (90 + 36.5) / 180 * np.pi
    # w2 - receiver
    centres_x = [54 / 2]
    centres_z = [13.5 / 2]
    widths = np.full_like(centres_x, 54)
    heights = np.full_like(centres_x, 13.5)
    temperatures = np.full_like(centres_x, 293.15)
    receivers = []
    for i, (cx, cz, width, height, temp) in enumerate(
            zip(centres_x, centres_z, widths, heights, temperatures)):
        rect = Rectangle()
        rect.name = f'w2_2_{i}'
        rect.local_vertex_1 = [cx - width / 2, 0, cz - height / 2]
        rect.local_vertex_2 = [cx + width / 2, 0, cz + height / 2]
        rect.local2global_rotation_axis = [0, 0, 1]
        rect.local2global_rotation_angle = angle
        # W2 plane is offset from the W3 origin.
        rect.local2global_xyz = np.asarray([-0.6, -50.8, 0])
        rect.local2global_vertices()
        rect.type = 'Receiver'
        rect.is_reverse = True
        rect.temperature = temp
        receivers.append(rect)
    for rect in receivers:
        print(rect.get_tra_command())
def w2_emitter():
    """Print the TRA 'Emitter' command for the W2 facade opening."""
    angle = (90 + 36.5) / 180 * np.pi
    # w2 - emitter panel
    centres_x = [13 / 2]
    centres_z = [5 / 2]
    widths = np.full_like(centres_x, 13)
    heights = np.full_like(centres_x, 5)
    temperatures = np.full_like(centres_x, 1313)
    emitters = []
    for i, (cx, cz, width, height, temp) in enumerate(
            zip(centres_x, centres_z, widths, heights, temperatures)):
        rect = Rectangle()
        rect.name = f'w2_2_{i}'
        rect.local_vertex_1 = [cx - width / 2, 0, cz - height / 2]
        rect.local_vertex_2 = [cx + width / 2, 0, cz + height / 2]
        rect.local2global_rotation_axis = [0, 0, 1]
        rect.local2global_rotation_angle = angle
        # W2 plane is offset from the W3 origin.
        rect.local2global_xyz = np.asarray([-0.6, -50.8, 0])
        rect.local2global_vertices()
        rect.type = 'Emitter'
        rect.is_reverse = True
        rect.temperature = temp
        emitters.append(rect)
    for rect in emitters:
        print(rect.get_tra_command())
if __name__ == '__main__':
    # Emit the W3 emitter and W2 receiver TRA commands; the other two
    # scenarios are kept available but disabled.
    w3_emitter()
    w2_receiver()
    # w3_receiver()
    # w2_emitter()
| [
"numpy.full_like",
"numpy.asarray",
"math.cos",
"numpy.array",
"numpy.dot",
"math.sin"
] | [((236, 252), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (246, 252), True, 'import numpy as np\n'), ((309, 330), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (317, 330), False, 'import math\n'), ((504, 675), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (512, 675), True, 'import numpy as np\n'), ((7334, 7366), 'numpy.full_like', 'np.full_like', (['x', '(4.25 + 3.55 / 2)'], {}), '(x, 4.25 + 3.55 / 2)\n', (7346, 7366), True, 'import numpy as np\n'), ((7371, 7391), 'numpy.full_like', 'np.full_like', (['x', '(1.5)'], {}), '(x, 1.5)\n', (7383, 7391), True, 'import numpy as np\n'), ((7400, 7421), 'numpy.full_like', 'np.full_like', (['x', '(3.55)'], {}), '(x, 3.55)\n', (7412, 7421), True, 'import numpy as np\n'), ((7430, 7450), 'numpy.full_like', 'np.full_like', (['x', '(931)'], {}), '(x, 931)\n', (7442, 7450), True, 'import numpy as np\n'), ((7798, 7830), 'numpy.full_like', 'np.full_like', (['x', '(4.25 + 3.55 / 2)'], {}), '(x, 4.25 + 3.55 / 2)\n', (7810, 7830), True, 'import numpy as np\n'), ((7835, 7855), 'numpy.full_like', 'np.full_like', (['x', '(1.5)'], {}), '(x, 1.5)\n', (7847, 7855), True, 'import numpy as np\n'), ((7878, 7899), 'numpy.full_like', 'np.full_like', (['x', '(3.55)'], {}), '(x, 3.55)\n', (7890, 7899), True, 'import numpy as np\n'), ((7908, 7929), 'numpy.full_like', 'np.full_like', (['x', '(1105)'], {}), '(x, 1105)\n', (7920, 7929), True, 'import numpy as np\n'), ((8111, 8150), 'numpy.full_like', 'np.full_like', (['x', '(4.25 + 3.55 + 1.45 / 2)'], {}), '(x, 4.25 + 3.55 + 1.45 / 2)\n', (8123, 8150), True, 'import numpy as np\n'), ((8172, 8193), 'numpy.full_like', 'np.full_like', (['x', '(1.45)'], {}), 
'(x, 1.45)\n', (8184, 8193), True, 'import numpy as np\n'), ((8202, 8222), 'numpy.full_like', 'np.full_like', (['x', '(931)'], {}), '(x, 931)\n', (8214, 8222), True, 'import numpy as np\n'), ((8470, 8501), 'numpy.full_like', 'np.full_like', (['x', '(8.5 + 3.55 / 2)'], {}), '(x, 8.5 + 3.55 / 2)\n', (8482, 8501), True, 'import numpy as np\n'), ((8506, 8526), 'numpy.full_like', 'np.full_like', (['x', '(1.5)'], {}), '(x, 1.5)\n', (8518, 8526), True, 'import numpy as np\n'), ((8535, 8556), 'numpy.full_like', 'np.full_like', (['x', '(3.55)'], {}), '(x, 3.55)\n', (8547, 8556), True, 'import numpy as np\n'), ((8565, 8585), 'numpy.full_like', 'np.full_like', (['x', '(931)'], {}), '(x, 931)\n', (8577, 8585), True, 'import numpy as np\n'), ((8946, 8977), 'numpy.full_like', 'np.full_like', (['x', '(8.5 + 3.55 / 2)'], {}), '(x, 8.5 + 3.55 / 2)\n', (8958, 8977), True, 'import numpy as np\n'), ((8982, 9002), 'numpy.full_like', 'np.full_like', (['x', '(1.5)'], {}), '(x, 1.5)\n', (8994, 9002), True, 'import numpy as np\n'), ((9055, 9076), 'numpy.full_like', 'np.full_like', (['x', '(3.55)'], {}), '(x, 3.55)\n', (9067, 9076), True, 'import numpy as np\n'), ((9085, 9106), 'numpy.full_like', 'np.full_like', (['x', '(1105)'], {}), '(x, 1105)\n', (9097, 9106), True, 'import numpy as np\n'), ((9288, 9326), 'numpy.full_like', 'np.full_like', (['x', '(8.5 + 3.55 + 1.45 / 2)'], {}), '(x, 8.5 + 3.55 + 1.45 / 2)\n', (9300, 9326), True, 'import numpy as np\n'), ((9348, 9369), 'numpy.full_like', 'np.full_like', (['x', '(1.45)'], {}), '(x, 1.45)\n', (9360, 9369), True, 'import numpy as np\n'), ((9378, 9398), 'numpy.full_like', 'np.full_like', (['x', '(931)'], {}), '(x, 931)\n', (9390, 9398), True, 'import numpy as np\n'), ((10399, 10421), 'numpy.full_like', 'np.full_like', (['cx__', '(55)'], {}), '(cx__, 55)\n', (10411, 10421), True, 'import numpy as np\n'), ((10437, 10461), 'numpy.full_like', 'np.full_like', (['cx__', '(13.5)'], {}), '(cx__, 13.5)\n', (10449, 10461), True, 'import numpy as 
np\n'), ((10482, 10508), 'numpy.full_like', 'np.full_like', (['cx__', '(293.15)'], {}), '(cx__, 293.15)\n', (10494, 10508), True, 'import numpy as np\n'), ((11368, 11390), 'numpy.full_like', 'np.full_like', (['cx__', '(54)'], {}), '(cx__, 54)\n', (11380, 11390), True, 'import numpy as np\n'), ((11406, 11430), 'numpy.full_like', 'np.full_like', (['cx__', '(13.5)'], {}), '(cx__, 13.5)\n', (11418, 11430), True, 'import numpy as np\n'), ((11451, 11477), 'numpy.full_like', 'np.full_like', (['cx__', '(293.15)'], {}), '(cx__, 293.15)\n', (11463, 11477), True, 'import numpy as np\n'), ((12395, 12417), 'numpy.full_like', 'np.full_like', (['cx__', '(13)'], {}), '(cx__, 13)\n', (12407, 12417), True, 'import numpy as np\n'), ((12433, 12454), 'numpy.full_like', 'np.full_like', (['cx__', '(5)'], {}), '(cx__, 5)\n', (12445, 12454), True, 'import numpy as np\n'), ((12475, 12499), 'numpy.full_like', 'np.full_like', (['cx__', '(1313)'], {}), '(cx__, 1313)\n', (12487, 12499), True, 'import numpy as np\n'), ((353, 374), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (361, 374), False, 'import math\n'), ((4212, 4248), 'numpy.dot', 'np.dot', (['rot_mat', 'self.local_vertex_1'], {}), '(rot_mat, self.local_vertex_1)\n', (4218, 4248), True, 'import numpy as np\n'), ((4268, 4304), 'numpy.dot', 'np.dot', (['rot_mat', 'self.local_vertex_2'], {}), '(rot_mat, self.local_vertex_2)\n', (4274, 4304), True, 'import numpy as np\n'), ((11889, 11917), 'numpy.asarray', 'np.asarray', (['[-0.6, -50.8, 0]'], {}), '([-0.6, -50.8, 0])\n', (11899, 11917), True, 'import numpy as np\n'), ((12911, 12939), 'numpy.asarray', 'np.asarray', (['[-0.6, -50.8, 0]'], {}), '([-0.6, -50.8, 0])\n', (12921, 12939), True, 'import numpy as np\n'), ((281, 299), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (287, 299), True, 'import numpy as np\n')] |
# Hamiltonian Monte Carlo by calling R function 'rtmg'
import numpy as np
import scipy.linalg
from rpy2 import robjects
from rpy2.robjects.packages import importr
if not robjects.packages.isinstalled('tmg'):
    # First-run setup: install the R 'tmg' package from CRAN
    # (requires network access; mirror index 1 is picked non-interactively).
    utils = importr('utils')
    utils.chooseCRANmirror(ind=1)
    utils.install_packages('tmg')
def np2r(x):
    """Convert a 2-D numpy array into an R matrix of the same shape."""
    n_rows, n_cols = x.shape
    # R matrices are filled column by column, so serialise the array in
    # column-major order (transpose + C-order flatten).
    flat = robjects.FloatVector(x.transpose().reshape(x.size))
    return robjects.r.matrix(flat, nrow=n_rows, ncol=n_cols)
def py_rtmg(n, mu, Sigma, initial, f=None, g=None, burn_in=30):
    """
    This function generates samples from a Markov chain whose equilibrium distribution is a d-dimensional
    multivariate Gaussian truncated by linear inequalities. The probability log density is
        log p(X) = - 0.5 X^T M X + r^T X + const
    in terms of a precision matrix M and a vector r. The constraints are imposed as explained below.
    The Markov chain is built using the Hamiltonian Monte Carlo technique.
    The input M and mu are covariance matrix and mean, so we transform them into precision matrix and
    linear coefficient first:
        M = Sigma^-1
        r = M*mu
    :param n: Number of samples.
    :param mu: (m,) vector for the mean of multivariate Gaussian density
    :param Sigma: (m,m) covariance matrix of the multivariate Gaussian density
    :param initial: (m,) vector with the initial value of the Markov chain. Must satisfy
                    the truncation inequalities strictly.
    :param f: (q,m) matrix, where q is the number of linear constraints. The constraints require each component
              of the q-dimensional vector fX+g to be non-negative
    :param g: (q,) vector with the constant terms in the above linear constraints.
    :param burn_in: The number of burn-in iterations. The Markov chain is sampled n + burn_in
                    times, and the last n samples are returned.
    :return: (n, m) array of samples
    :raises ValueError: if `initial` violates any finite constraint.
    """
    tmg = importr('tmg')
    # Convert the (mu, Sigma) parametrization into the canonical (r, M) form.
    M = scipy.linalg.inv(Sigma)
    r = M@mu
    # Wrap every argument in the R vector/matrix types rpy2 expects.
    n = robjects.IntVector([n])
    M = np2r(M)
    r = robjects.FloatVector(r)
    initial = robjects.FloatVector(initial)
    burn_in = robjects.IntVector([burn_in])
    if f is None and g is None:
        # Unconstrained case: plain truncated-Gaussian sampler call.
        res = np.array(tmg.rtmg(n, M, r, initial, burn_in=burn_in))
    else:
        # g may contain infinity; keep only the finite (effective) constraints
        valid = np.logical_and(g < np.inf, g > -np.inf)
        g = g[valid]
        f = f[valid]
        if not np.all(f@initial+g >= 0):
            raise ValueError("initial value does not satisfy the constraints.")
        f = np2r(f)
        g = robjects.FloatVector(g)
        res = np.array(tmg.rtmg(n, M, r, initial, f, g, burn_in=burn_in))
    return res
| [
"numpy.logical_and",
"rpy2.robjects.IntVector",
"rpy2.robjects.packages.isinstalled",
"rpy2.robjects.packages.importr",
"rpy2.robjects.r.matrix",
"rpy2.robjects.FloatVector",
"numpy.all"
] | [((172, 208), 'rpy2.robjects.packages.isinstalled', 'robjects.packages.isinstalled', (['"""tmg"""'], {}), "('tmg')\n", (201, 208), False, 'from rpy2 import robjects\n'), ((222, 238), 'rpy2.robjects.packages.importr', 'importr', (['"""utils"""'], {}), "('utils')\n", (229, 238), False, 'from rpy2.robjects.packages import importr\n'), ((415, 456), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['xvec'], {'nrow': 'nr', 'ncol': 'nc'}), '(xvec, nrow=nr, ncol=nc)\n', (432, 456), False, 'from rpy2 import robjects\n'), ((1947, 1961), 'rpy2.robjects.packages.importr', 'importr', (['"""tmg"""'], {}), "('tmg')\n", (1954, 1961), False, 'from rpy2.robjects.packages import importr\n'), ((2017, 2040), 'rpy2.robjects.IntVector', 'robjects.IntVector', (['[n]'], {}), '([n])\n', (2035, 2040), False, 'from rpy2 import robjects\n'), ((2065, 2088), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['r'], {}), '(r)\n', (2085, 2088), False, 'from rpy2 import robjects\n'), ((2104, 2133), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['initial'], {}), '(initial)\n', (2124, 2133), False, 'from rpy2 import robjects\n'), ((2148, 2177), 'rpy2.robjects.IntVector', 'robjects.IntVector', (['[burn_in]'], {}), '([burn_in])\n', (2166, 2177), False, 'from rpy2 import robjects\n'), ((2365, 2404), 'numpy.logical_and', 'np.logical_and', (['(g < np.inf)', '(g > -np.inf)'], {}), '(g < np.inf, g > -np.inf)\n', (2379, 2404), True, 'import numpy as np\n'), ((2600, 2623), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['g'], {}), '(g)\n', (2620, 2623), False, 'from rpy2 import robjects\n'), ((2462, 2490), 'numpy.all', 'np.all', (['(f @ initial + g >= 0)'], {}), '(f @ initial + g >= 0)\n', (2468, 2490), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.utils.data import Dataset
from mypath import Path
from tqdm import trange
import os
from torchvision import transforms
from dataloaders import custom_transforms as tr
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Comp5421Segmentation(Dataset):
    """Segmentation dataset for the COMP5421 data.

    Reads an id list from ``<base_dir>/<split>/<split>.txt`` and serves
    (image, label) pairs from the matching ``images``/``labels`` folders,
    applying the split-appropriate transform pipeline.
    """

    NUM_CLASSES = 7
    CAT_LIST = [0, 1, 2, 3, 4, 5, 6, 7]

    def __init__(self,
                 args,
                 base_dir=Path.db_root_dir('comp5421'),
                 split='train'):
        super().__init__()
        ids_file = os.path.join(base_dir, '{}/{}.txt'.format(split, split))
        with open(ids_file, 'r') as handle:
            # One sample id per line; keep only the first whitespace token.
            self.ids = [line.rsplit()[0] for line in handle]
        self.img_dir = os.path.join(base_dir, '{}/images'.format(split))
        self.label_dir = os.path.join(base_dir, '{}/labels'.format(split))
        self.split = split
        self.args = args

    def __getitem__(self, index):
        image, target, image_id = self._make_img_gt_point_pair(index)
        sample = {'image': image, 'label': target, 'imgId': image_id}
        if self.split == "train":
            return self.transform_tr(sample)
        elif self.split == 'val':
            return self.transform_val(sample)

    def _make_img_gt_point_pair(self, index):
        # Load the RGB image and its single-channel label mask by id.
        image_id = self.ids[index]
        rgb = Image.open(os.path.join(self.img_dir, "{}.png".format(image_id))).convert('RGB')
        mask = Image.open(os.path.join(self.label_dir, "{}.png".format(image_id))).convert('L')
        return rgb, mask, image_id

    def transform_tr(self, sample):
        # Training pipeline: augmentation + normalization + tensor conversion.
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return pipeline(sample)

    def transform_val(self, sample):
        # Validation pipeline: deterministic crop + normalization.
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        out = pipeline(sample)
        # Re-attach metadata the transform chain does not carry through.
        out['imgId'] = sample['imgId']
        out['resolution'] = sample['image'].size
        return out

    def __len__(self):
        return len(self.ids)
if __name__ == "__main__":
    # Smoke-test / visualization: iterate a couple of validation batches and
    # display each image next to its decoded segmentation map.
    from dataloaders import custom_transforms as tr
    from dataloaders.utils import decode_segmap
    from torch.utils.data import DataLoader
    from torchvision import transforms
    import matplotlib.pyplot as plt
    import argparse
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    args.base_size = 513
    args.crop_size = 513
    comp5421_val = Comp5421Segmentation(args, split='val')
    dataloader = DataLoader(comp5421_val, batch_size=4, shuffle=True, num_workers=0)
    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]):
            img = sample['image'].numpy()
            gt = sample['label'].numpy()
            # Color-code the label mask for display.
            tmp = np.array(gt[jj]).astype(np.uint8)
            segmap = decode_segmap(tmp, dataset='comp5421')
            # CHW -> HWC, then undo the Normalize transform for viewing.
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
            img_tmp *= (0.229, 0.224, 0.225)
            img_tmp += (0.485, 0.456, 0.406)
            img_tmp *= 255.0
            img_tmp = img_tmp.astype(np.uint8)
            plt.figure()
            plt.title('display')
            plt.subplot(211)
            plt.imshow(img_tmp)
            plt.subplot(212)
            plt.imshow(segmap)
        if ii == 1:
            break
plt.show(block=True) | [
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"dataloaders.custom_transforms.RandomHorizontalFlip",
"dataloaders.custom_transforms.RandomScaleCrop",
"dataloaders.custom_transforms.FixScaleCrop",
"mypath.Path.db_root_dir",
"numpy.array",
"matplotlib.pyplot.figure",
"dataloaders.custom_transf... | [((2807, 2832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2830, 2832), False, 'import argparse\n'), ((2992, 3059), 'torch.utils.data.DataLoader', 'DataLoader', (['comp5421_val'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(comp5421_val, batch_size=4, shuffle=True, num_workers=0)\n', (3002, 3059), False, 'from torch.utils.data import DataLoader\n'), ((3802, 3822), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (3810, 3822), True, 'import matplotlib.pyplot as plt\n'), ((454, 482), 'mypath.Path.db_root_dir', 'Path.db_root_dir', (['"""comp5421"""'], {}), "('comp5421')\n", (470, 482), False, 'from mypath import Path\n'), ((3314, 3352), 'dataloaders.utils.decode_segmap', 'decode_segmap', (['tmp'], {'dataset': '"""comp5421"""'}), "(tmp, dataset='comp5421')\n", (3327, 3352), False, 'from dataloaders.utils import decode_segmap\n'), ((3375, 3412), 'numpy.transpose', 'np.transpose', (['img[jj]'], {'axes': '[1, 2, 0]'}), '(img[jj], axes=[1, 2, 0])\n', (3387, 3412), True, 'import numpy as np\n'), ((3591, 3603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3601, 3603), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3636), 'matplotlib.pyplot.title', 'plt.title', (['"""display"""'], {}), "('display')\n", (3625, 3636), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3665), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (3660, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3678, 3697), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_tmp'], {}), '(img_tmp)\n', (3688, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3710, 3726), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (3721, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3757), 'matplotlib.pyplot.imshow', 'plt.imshow', (['segmap'], {}), '(segmap)\n', (3749, 3757), True, 'import matplotlib.pyplot as plt\n'), 
((1718, 1743), 'dataloaders.custom_transforms.RandomHorizontalFlip', 'tr.RandomHorizontalFlip', ([], {}), '()\n', (1741, 1743), True, 'from dataloaders import custom_transforms as tr\n'), ((1757, 1842), 'dataloaders.custom_transforms.RandomScaleCrop', 'tr.RandomScaleCrop', ([], {'base_size': 'self.args.base_size', 'crop_size': 'self.args.crop_size'}), '(base_size=self.args.base_size, crop_size=self.args.crop_size\n )\n', (1775, 1842), True, 'from dataloaders import custom_transforms as tr\n'), ((1851, 1874), 'dataloaders.custom_transforms.RandomGaussianBlur', 'tr.RandomGaussianBlur', ([], {}), '()\n', (1872, 1874), True, 'from dataloaders import custom_transforms as tr\n'), ((1888, 1955), 'dataloaders.custom_transforms.Normalize', 'tr.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (1900, 1955), True, 'from dataloaders import custom_transforms as tr\n'), ((1969, 1982), 'dataloaders.custom_transforms.ToTensor', 'tr.ToTensor', ([], {}), '()\n', (1980, 1982), True, 'from dataloaders import custom_transforms as tr\n'), ((2130, 2176), 'dataloaders.custom_transforms.FixScaleCrop', 'tr.FixScaleCrop', ([], {'crop_size': 'self.args.crop_size'}), '(crop_size=self.args.crop_size)\n', (2145, 2176), True, 'from dataloaders import custom_transforms as tr\n'), ((2190, 2257), 'dataloaders.custom_transforms.Normalize', 'tr.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (2202, 2257), True, 'from dataloaders import custom_transforms as tr\n'), ((2271, 2284), 'dataloaders.custom_transforms.ToTensor', 'tr.ToTensor', ([], {}), '()\n', (2282, 2284), True, 'from dataloaders import custom_transforms as tr\n'), ((3259, 3275), 'numpy.array', 'np.array', (['gt[jj]'], {}), '(gt[jj])\n', (3267, 3275), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__description__ = '''
Experiments with multiple D415, T265 cameras.
'''
import sys
import os
import io
import pyrealsense2 as rs
import cv2
import numpy as np
from pprint import pprint
try:
from PIL import ImageFont, ImageDraw, Image
except ModuleNotFoundError:
from willow import ImageFont, ImageDraw, Image
# fot nice text data interpretation
import tableprint
# for TTFontSource
import tempfile
import requests
# --- Realsence problem core -------------------------------------------------------------------------------------------
class RealsenseCamera:
    '''
    Abstraction over a single connected RealSense device (any model).

    Owns a pyrealsense2 pipeline for the device identified by its serial
    number, and provides class-level helpers for turning frames into
    displayable images or printable tables.
    '''
    # Shared colorizer that renders depth frames as displayable color images.
    __colorizer = rs.colorizer()
    def __init__(
            self,
            serial_number :str,
            name: str
    ):
        # Store identity and start streaming immediately.
        self.__serial_number = serial_number
        self.__name = name
        self.__pipeline = None
        self.__started = False
        self.__start_pipeline()
    def __del__(self):
        # Stop streaming on destruction, but only if the pipeline ever started.
        if self.__started and not self.__pipeline is None:
            self.__pipeline.stop()
    def get_full_name(self):
        '''Human-readable identifier in the form "name (serial_number)".'''
        return f'{self.__name} ({self.__serial_number})'
    def __start_pipeline(self):
        # Configure depth and color streams for this device only.
        self.__pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device(self.__serial_number)
        self.__pipeline.start(config)
        self.__started = True
        print(f'{self.get_full_name()} camera is ready.')
    def get_frames(self) -> [rs.frame]:
        '''
        Block until the next frameset arrives and return all its frames,
        regardless of type. Returns an empty list when no frameset arrives.
        '''
        frameset = self.__pipeline.wait_for_frames()
        if frameset:
            return [f for f in frameset]
        else:
            return []
    @classmethod
    def get_title(cls, frame: rs.frame, whole: bool) -> str:
        '''Derive a title from the frame's stream profile string.

        Profile strings look like:
        <pyrealsense2.video_stream_profile: Fisheye(2) 848x800 @ 30fps Y8>
        With ``whole`` False only the first token (e.g. "Fisheye(2)") is kept.
        '''
        profile_str = str(frame.profile)
        first_space_pos = profile_str.find(' ')
        whole_title = profile_str[first_space_pos + 1: -1]
        if whole:
            return whole_title
        return whole_title.split(' ')[0]
    @classmethod
    def get_images_from_video_frames(cls, frames: [rs.frame]) -> ([(np.ndarray, rs.frame)] , [rs.frame], int, int):
        '''
        From all the frames, it selects those that can be easily interpreted as pictures.
        Converts them to images and finds the maximum width and maximum height from all of them.
        Non-video frames are returned untouched in the second element.
        '''
        max_width = -1
        max_height = -1
        img_frame_tuples = []
        unused_frames = []
        for frame in frames:
            if frame.is_video_frame():
                if frame.is_depth_frame():
                    # Depth data is rendered through the shared colorizer.
                    img = np.asanyarray(RealsenseCamera.__colorizer.process(frame).get_data())
                else:
                    img = np.asanyarray(frame.get_data())
                    img = img[...,::-1].copy() # RGB<->BGR
                max_height = max(max_height, img.shape[0])
                max_width = max(max_width, img.shape[1])
                img_frame_tuples.append((img,frame))
            else:
                unused_frames.append(frame)
        return img_frame_tuples, unused_frames, max_width, max_height
    @classmethod
    def get_table_from_text_data_frame(cls, frame: rs.frame, round_ndigits: int = 2, int_len: int = 3) -> (list, rs.frame):
        '''
        Return a list of rows (each a list of column values) plus the frame.
        The first row is the header. Returns ([], None) for unsupported frames.
        @TODO add interpretation of cameras other than T265 and D415. (I do not have them)
        '''
        title = RealsenseCamera.get_title(frame, whole=False)
        if frame.is_motion_frame():
            motion_data = frame.as_motion_frame().get_motion_data()
            table = [
                ['name', 'x', 'y', 'z'],
                [title, round(motion_data.x, 2), round(motion_data.y, 2), round(motion_data.z, 2)]
            ]
        elif frame.is_pose_frame():
            data = frame.as_pose_frame().get_pose_data()
            table= [
                [title, 'x', 'y', 'z', 'w'],
                ['acceleration', data.acceleration.x, data.acceleration.y, data.acceleration.z, ''],
                ['angular_acceleration', data.angular_acceleration.x, data.angular_acceleration.y,
                 data.angular_acceleration.z, ''],
                ['angular_velocity', data.angular_velocity.x, data.angular_velocity.y, data.angular_velocity.z, ''],
                ['rotation', data.rotation.x, data.rotation.y, data.rotation.z, data.rotation.w],
                ['translation', data.translation.x, data.translation.y, data.translation.z, ''],
                ['velocity', data.velocity.x, data.velocity.y, data.velocity.z, ''],
                ['mapper_confidence', data.mapper_confidence, '', '', ''],
                ['tracker_confidence', data.tracker_confidence, '', '', ''],
            ]
        else:
            sys.stderr.write(f'No frame to date/image convertor for {frame}.\n')
            return [], None
        if not round_ndigits is None:
            # Render numbers as fixed-width strings so the table columns align.
            for i, row in enumerate(table):
                for j, cell in enumerate(row):
                    if isinstance(cell, float):
                        formated_str = f'{round(cell, round_ndigits):{round_ndigits + 3}.{round_ndigits}}'
                    elif isinstance(cell, int):
                        formated_str = f'{cell:{int_len}}'
                    else:
                        formated_str = str(cell)
                    table[i][j] = formated_str
        return table, frame
# --- GUI --------------------------------------------------------------------------------------------------------------
class TTFontSource:
    """Downloads TrueType fonts and hands out PIL font objects by size.

    Font files are cached on disk (tmp directory) and font objects are
    cached in memory per point size.
    """
    URLS = [
        'https://github.com/bluescan/proggyfonts/blob/master/ProggyCrossed/ProggyCrossed%20Regular.ttf?raw=true',
        'https://github.com/bluescan/proggyfonts/blob/master/ProggyVector/ProggyVector%20Regular%20Mac.ttf?raw=true'
    ]
    __size_cached_fonts = {}  # in-memory font cache, keyed by point size
    @classmethod
    def __get_font_from_url(cls, url: str):
        '''
        Return font data (io.BytesIO) fetched from url, or None when the
        download fails. Successful downloads are cached to a tmp file.
        '''
        cache_path = cls.__url_to_path(url)
        if os.path.isfile(cache_path):
            with open(cache_path, 'rb') as f:
                return io.BytesIO(f.read())
        try:
            response = requests.get(url)
            content = response.content
        except Exception as e:
            # Fix: use cls.__name__ (cls is already the class; cls.__class__ is `type`).
            sys.stderr.write(f'{cls.__name__}: {e}, url="{url}"\n')
            # Fix: the original fell through and called f.write(None) /
            # io.BytesIO(None), crashing on any network failure. Do not cache
            # a failed download; let the caller try the next URL.
            return None
        with open(cache_path, 'wb') as f:
            f.write(content)
        return io.BytesIO(content)
    @classmethod
    def __url_to_path(cls, url: str, expected_extension: str = 'ttf') -> str:
        '''Map a URL to a unique, filesystem-safe cache path under tmp.'''
        filename = url + '.' + expected_extension
        filename = filename.replace('/', '_').replace('&', '\&')
        return os.path.join(tempfile.gettempdir(), filename)
    @classmethod
    def get_font(cls, size: int = 15):
        '''Return a PIL truetype font of the given size, or None when no
        source URL yields a usable font. Results are cached per size.'''
        try:
            return cls.__size_cached_fonts[size]
        except KeyError:
            for url in cls.URLS:
                font = cls.__get_font_from_url(url)
                try:
                    font = ImageFont.truetype(font, size)
                    cls.__size_cached_fonts[size] = font
                    return font
                except Exception as e:
                    sys.stderr.write(f'{cls.__name__}.: {e}, url="{url}"\n')
            return None
class ImgWindow:
    '''
    OpenCV window for showing the result image [in a loop].
    '''
    def __init__(self, name: str = 'ImgWindow', type: int = cv2.WINDOW_NORMAL):
        self._name = name
        cv2.namedWindow(self._name, type)
    def swow(self, img_array: np.ndarray) -> bool:
        '''Display the image (no-op for None); always returns True.

        Kept under its original misspelled name for backward compatibility;
        prefer the `show` alias below.
        '''
        if img_array is None:
            return True
        cv2.imshow(self._name, img_array)
        return True
    # Correctly spelled alias for swow(); existing callers keep working.
    show = swow
    def is_stopped(self) -> bool:
        '''True when the user pressed 'q'/Esc or closed the window.'''
        key = cv2.waitKey(1)
        if key == ord('q') or key == 27:
            return True
        return cv2.getWindowProperty(self._name, cv2.WND_PROP_VISIBLE) < 1
class RealsenseFramesToImage:
    '''
    Take all frames in one moment and interpret them as one image.
    - Starts with the interpretation of each frame to a separate image.
    - Connects all images together into a single grid.
    '''
    def __init__(self):
        # Cache of fitted fonts keyed by table-row length
        # (see __get_font_with_good_size).
        self.__casched_fonts = {}
    def get_image_from_frames(self, frames: [rs.frame], add_tile: bool = True) -> np.array:
        '''Convert one moment's frames into a single composed image.

        Video frames become pictures (optionally with title bars); non-video
        frames are rendered as text tables; everything is tiled into a grid.
        '''
        # 'image' kind of frames
        img_frame_tuples, unsed_frames, max_width, max_height = RealsenseCamera.get_images_from_video_frames(frames)
        if add_tile:
            images, max_height = self.__add_titles(img_frame_tuples, max_height)
        else:
            images = [img_frame[0] for img_frame in img_frame_tuples]
        # 'data' or 'text' kind of frames, rendered as tables
        images_from_text_frames = self.__images_from_text_frames(unsed_frames, max_width, max_height)
        # together
        images += images_from_text_frames
        if len(images) > 0:
            # concat all to one image
            ret_img = self.__concat_images(images, max_width, max_height)
        else:
            # placeholder for no frames (no images)
            ret_img = np.zeros(shape=(800, 600, 3))
        return ret_img
    def __images_from_text_frames(self, frames: [rs.frame], width:int, height: int) -> [np.ndarray]:
        # Render each non-video frame as a table drawn into a width x height image.
        return [
            self.__from_lines_to_img(
                self.__from_tabled_data_to_str(
                    RealsenseCamera.get_table_from_text_data_frame(frame)
                ),
                width,
                height)
            for frame in frames
        ]
    def __from_tabled_data_to_str(self, table_frame_tuple: ([[str]], rs.frame)) -> str:
        '''Format tabled frame data as a titled ASCII table string.'''
        table, frame = table_frame_tuple
        # Compute per-column widths from the widest cell in each column.
        max_columns_len = []
        for r, row in enumerate(table):
            for c, cell in enumerate(row):
                if r==0: # first row
                    max_columns_len.append(len(cell))
                else:
                    max_columns_len[c] = max(max_columns_len[c], len(cell))
        str_io = io.StringIO('')
        tableprint.table(table[1:], table[0], width=max_columns_len, out=str_io, style='round')
        title = ' '*3 + RealsenseCamera.get_title(frame, whole=True)
        table_str = str_io.getvalue()
        return title + '\n' + table_str
    def __add_titles(
            self,
            img_frm_tuples: [(np.ndarray, rs.frame)],
            max_height:int,
            default_height: int = 40,
            default_font_size: int = 28,
            bacground_color = (255, 255, 255),
            color = (0, 0, 0),
            dx: int = 10,
            dy: int = 10
    ) -> ([np.ndarray], int):
        '''Prepend a rendered title bar to each image; returns the images and
        the new maximum height (original max plus the title bar height).'''
        ret_images = []
        font = TTFontSource.get_font(size=default_font_size)
        for img, frame in img_frm_tuples:
            title = RealsenseCamera.get_title(frame, whole=True)
            if len(img.shape) > 2:
                rgb = True
                height, width, _ = img.shape
                title_img = Image.new('RGB', (width, default_height), color=bacground_color)
            else:
                # Grayscale image: PIL wants the color packed as one integer.
                rgb = False
                height, width = img.shape
                r, g, b = bacground_color
                intcolor = (b << 16) | (g << 8) | r
                title_img = Image.new('RGB', (width, default_height), color=intcolor)
            draw = ImageDraw.Draw(title_img)
            draw.text((dx, dy), title, font=font, fill=color)
            title_img = np.array(title_img)
            if not rgb:
                # Collapse the title bar back to one channel to match the image.
                title_img = title_img[:,:,0]
            ret_images.append(np.vstack((title_img, img)))
        return ret_images, max_height + default_height
    def __from_lines_to_img(
            self,
            text: [str],
            width: int,
            height: int,
            bacground_color = (255,255,255),
            color=(0, 0, 0),
            dx : int = 10,
            dy : int = 10
    ) -> np.ndarray:
        '''
        Create an image of the given width and height where the multi-line
        text is drawn with a font sized to fit the width.
        '''
        rows = text.splitlines()
        # rows holds the title plus the table; rows[1] is the table's first
        # row and determines the font size needed to fit the width.
        font = self.__get_font_with_good_size(rows[1], width, dx)
        img = Image.new('RGB', (width, height), color=bacground_color)
        draw = ImageDraw.Draw(img)
        draw.text((dx, dy), text, font=font, fill=color)
        return np.array(img)
    def __get_font_with_good_size(self, first_row: str, width:int, dx: int):
        '''Find (and cache by row length) the largest font whose rendering of
        first_row still fits within width minus margins.'''
        l = len(first_row)
        try:
            return self.__casched_fonts[l]
        except KeyError:
            font_size = 10 # starting font size
            font = TTFontSource.get_font(size=font_size)
            width_dx = width - 2 * dx
            while font.getsize(first_row)[0] < width_dx:
                # iterate until the text size is just larger than the criteria
                font_size += 1
                font = TTFontSource.get_font(size=font_size)
            # de-increment to be sure it is less than criteria
            font_size -= 2
            font = TTFontSource.get_font(size=font_size)
            self.__casched_fonts[l] = font
            return font
    def __concat_images(
            self,
            images: [np.array],
            max_width: int,
            max_height: int,
            max_columns: int = 4,
            bacground_color=(255,255,255)
    ) -> np.array:
        '''Tile the images into a grid of at most max_columns per row, padding
        each to max_width x max_height and filling short rows with blanks.'''
        # different images in the set, transform all to RGB
        images = [cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) if len(img.shape) < 3 else img for img in images]
        # reshape to the same size max_width x max_height
        images = [self.__enlarge_img_by_add_background(img, max_width, max_height, bacground_color) for img in images]
        # divide images to rows and columns
        images = [images[i:i + max_columns] for i in range(0, len(images), max_columns)]
        for row_index, rows_images in enumerate(images):
            # concat images in one row
            for col_index, img in enumerate(rows_images):
                if col_index == 0:
                    # first one
                    col_img = img
                else:
                    col_img = np.hstack((col_img, img))
            # add placeholders to pad a short last row
            for i in range(max_columns - len(rows_images)):
                placeholder_img = np.zeros((max_height, max_width, 3), np.uint8)
                placeholder_img[:, :] = bacground_color
                col_img = np.hstack((col_img, placeholder_img))
            # concat rows to one image
            if row_index == 0:
                # first one
                ret_img = col_img
            else:
                ret_img = np.vstack((ret_img, col_img))
        return ret_img
    def __enlarge_img_by_add_background(
            self, img: np.ndarray, enlarge_width: int, enlarge_height: int,
            bacground_color = (255,255,255)
    ) -> np.ndarray:
        '''Center img on an enlarge_width x enlarge_height background; images
        already at least that large are returned unchanged.'''
        width, height = img.shape[1], img.shape[0]
        if width >= enlarge_width and height >= enlarge_height:
            # good enough
            return img
        new_img = np.zeros((enlarge_height,enlarge_width,3), np.uint8)
        new_img[:,:] = bacground_color
        x = int((enlarge_width - width) / 2)
        y = int((enlarge_height - height) / 2)
        new_img[y:y+height, x:x+width] = img
        return new_img
class AllCamerasLoop:
    '''
    Pulls frames from every connected camera inside a display loop.
    '''
    @classmethod
    def get_conected_cameras_info(cls, camera_name_suffix: str = 'T265') -> [(str, str)]:
        '''
        Return (serial number, name) pairs of connected devices, optionally
        restricted to names ending with the given suffix (T265, D415, ...).
        (based on https://github.com/IntelRealSense/librealsense/issues/2332)
        '''
        found = []
        context = rs.context()
        for device in context.devices:
            serial = device.get_info(rs.camera_info.serial_number)
            device_name = device.get_info(rs.camera_info.name)
            if camera_name_suffix and not device_name.endswith(camera_name_suffix):
                continue
            found.append((serial, device_name))
        return found
    @classmethod
    def get_all_conected_cameras(cls) -> [RealsenseCamera]:
        '''Wrap every connected device (any model) in a RealsenseCamera.'''
        return [
            RealsenseCamera(serial, device_name)
            for serial, device_name in cls.get_conected_cameras_info(camera_name_suffix=None)
        ]
    def __init__(self):
        self.__cameras = self.get_all_conected_cameras()
        self.__frames_interpreter = RealsenseFramesToImage()
    def get_frames(self) -> [rs.frame]:
        '''
        Collect frames from all cameras, preserving camera order.
        '''
        collected = []
        for camera in self.__cameras:
            camera_frames = camera.get_frames()
            if camera_frames:
                collected += camera_frames
        return collected
    def __get_window_name(self):
        # Comma-separated list of camera names for the window title.
        return ', '.join(camera.get_full_name() for camera in self.__cameras)
    def run_loop(self):
        '''Show the combined camera output until the window is closed or
        the user presses 'q'/Esc.'''
        window = ImgWindow(name=self.__get_window_name())
        stop = False
        while not stop:
            frames = self.get_frames()
            window.swow(self.__frames_interpreter.get_image_from_frames(frames))
            stop = window.is_stopped()
if __name__ == "__main__":
    # Entry point: open every connected camera and stream the combined view.
    viewer = AllCamerasLoop()
    viewer.run_loop()
| [
"numpy.hstack",
"io.BytesIO",
"cv2.imshow",
"numpy.array",
"willow.ImageFont.truetype",
"pyrealsense2.colorizer",
"numpy.vstack",
"willow.Image.new",
"pyrealsense2.config",
"io.StringIO",
"willow.ImageDraw.Draw",
"cv2.waitKey",
"requests.get",
"os.path.isfile",
"sys.stderr.write",
"cv2... | [((738, 752), 'pyrealsense2.colorizer', 'rs.colorizer', ([], {}), '()\n', (750, 752), True, 'import pyrealsense2 as rs\n'), ((1325, 1338), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (1336, 1338), True, 'import pyrealsense2 as rs\n'), ((1356, 1367), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (1365, 1367), True, 'import pyrealsense2 as rs\n'), ((6408, 6434), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (6422, 6434), False, 'import os\n'), ((6841, 6860), 'io.BytesIO', 'io.BytesIO', (['content'], {}), '(content)\n', (6851, 6860), False, 'import io\n'), ((7892, 7925), 'cv2.namedWindow', 'cv2.namedWindow', (['self._name', 'type'], {}), '(self._name, type)\n', (7907, 7925), False, 'import cv2\n'), ((8040, 8073), 'cv2.imshow', 'cv2.imshow', (['self._name', 'img_array'], {}), '(self._name, img_array)\n', (8050, 8073), False, 'import cv2\n'), ((8143, 8157), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8154, 8157), False, 'import cv2\n'), ((10338, 10353), 'io.StringIO', 'io.StringIO', (['""""""'], {}), "('')\n", (10349, 10353), False, 'import io\n'), ((10362, 10453), 'tableprint.table', 'tableprint.table', (['table[1:]', 'table[0]'], {'width': 'max_columns_len', 'out': 'str_io', 'style': '"""round"""'}), "(table[1:], table[0], width=max_columns_len, out=str_io,\n style='round')\n", (10378, 10453), False, 'import tableprint\n'), ((12493, 12549), 'willow.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {'color': 'bacground_color'}), "('RGB', (width, height), color=bacground_color)\n", (12502, 12549), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((12565, 12584), 'willow.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (12579, 12584), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((12764, 12777), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (12772, 12777), True, 'import numpy as np\n'), ((15454, 15508), 'numpy.zeros', 'np.zeros', 
(['(enlarge_height, enlarge_width, 3)', 'np.uint8'], {}), '((enlarge_height, enlarge_width, 3), np.uint8)\n', (15462, 15508), True, 'import numpy as np\n'), ((16172, 16184), 'pyrealsense2.context', 'rs.context', ([], {}), '()\n', (16182, 16184), True, 'import pyrealsense2 as rs\n'), ((6562, 6579), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6574, 6579), False, 'import requests\n'), ((7100, 7121), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (7119, 7121), False, 'import tempfile\n'), ((8238, 8293), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['self._name', 'cv2.WND_PROP_VISIBLE'], {}), '(self._name, cv2.WND_PROP_VISIBLE)\n', (8259, 8293), False, 'import cv2\n'), ((9449, 9478), 'numpy.zeros', 'np.zeros', ([], {'shape': '(800, 600, 3)'}), '(shape=(800, 600, 3))\n', (9457, 9478), True, 'import numpy as np\n'), ((11602, 11627), 'willow.ImageDraw.Draw', 'ImageDraw.Draw', (['title_img'], {}), '(title_img)\n', (11616, 11627), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((11714, 11733), 'numpy.array', 'np.array', (['title_img'], {}), '(title_img)\n', (11722, 11733), True, 'import numpy as np\n'), ((5027, 5095), 'sys.stderr.write', 'sys.stderr.write', (['f"""No frame to date/image convertor for {frame}.\n"""'], {}), "(f'No frame to date/image convertor for {frame}.\\n')\n", (5043, 5095), False, 'import sys\n'), ((6662, 6727), 'sys.stderr.write', 'sys.stderr.write', (['f"""{cls.__class__.__name__}: {e}, url="{url}\\"\n"""'], {}), '(f\'{cls.__class__.__name__}: {e}, url="{url}"\\n\')\n', (6678, 6727), False, 'import sys\n'), ((11249, 11313), 'willow.Image.new', 'Image.new', (['"""RGB"""', '(width, default_height)'], {'color': 'bacground_color'}), "('RGB', (width, default_height), color=bacground_color)\n", (11258, 11313), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((11524, 11581), 'willow.Image.new', 'Image.new', (['"""RGB"""', '(width, default_height)'], {'color': 'intcolor'}), "('RGB', (width, 
default_height), color=intcolor)\n", (11533, 11581), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((11833, 11860), 'numpy.vstack', 'np.vstack', (['(title_img, img)'], {}), '((title_img, img))\n', (11842, 11860), True, 'import numpy as np\n'), ((13831, 13868), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (13843, 13868), False, 'import cv2\n'), ((14701, 14747), 'numpy.zeros', 'np.zeros', (['(max_height, max_width, 3)', 'np.uint8'], {}), '((max_height, max_width, 3), np.uint8)\n', (14709, 14747), True, 'import numpy as np\n'), ((14830, 14867), 'numpy.hstack', 'np.hstack', (['(col_img, placeholder_img)'], {}), '((col_img, placeholder_img))\n', (14839, 14867), True, 'import numpy as np\n'), ((15044, 15073), 'numpy.vstack', 'np.vstack', (['(ret_img, col_img)'], {}), '((ret_img, col_img))\n', (15053, 15073), True, 'import numpy as np\n'), ((14536, 14561), 'numpy.hstack', 'np.hstack', (['(col_img, img)'], {}), '((col_img, img))\n', (14545, 14561), True, 'import numpy as np\n'), ((7411, 7441), 'willow.ImageFont.truetype', 'ImageFont.truetype', (['font', 'size'], {}), '(font, size)\n', (7429, 7441), False, 'from willow import ImageFont, ImageDraw, Image\n'), ((7591, 7657), 'sys.stderr.write', 'sys.stderr.write', (['f"""{cls.__class__.__name__}.: {e}, url="{url}\\"\n"""'], {}), '(f\'{cls.__class__.__name__}.: {e}, url="{url}"\\n\')\n', (7607, 7657), False, 'import sys\n')] |
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
import tools
import numpy as np
import uts
from gensim.models import Word2Vec
from nltk import tokenize
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
# Module-level Sastrawi (Indonesian) stemmer shared by stem() below.
factory = StemmerFactory()
stemmer = factory.create_stemmer()
from nltk.metrics import windowdiff, pk
STDEVIATION =0.02  # NOTE(review): constant appears unused in this chunk — confirm before removing
def c99(sent_tokenized, window=5, K=10):
    """Segment tokenized sentences with the C99 algorithm.

    Returns the boundary vector as a string of digits ('1' marks a
    segment start). The K parameter is unused, kept for compatibility.
    """
    segmenter = uts.C99(window=window)
    sentences = [" ".join(tokens) for tokens in sent_tokenized]
    return "".join(str(flag) for flag in segmenter.segment(sentences))
def text_tiling(sent_tokenized, window=5, K=10):
    """Segment tokenized sentences with TextTiling.

    Returns the boundary vector as a string of digits ('1' marks a
    segment start). The K parameter is unused, kept for compatibility.
    """
    segmenter = uts.TextTiling(window=window)
    sentences = [" ".join(tokens) for tokens in sent_tokenized]
    return "".join(str(flag) for flag in segmenter.segment(sentences))
def load_stop_words(stopword='stopword.txt'):
    """Read a stop-word file (one word per line) into a set.

    Returns None (implicitly) when `stopword` is falsy, matching the
    original behavior.
    """
    if stopword:
        # with-block guarantees the handle is closed (original leaked it)
        with open(stopword, 'r') as file:
            text = file.read()
        return set(text.split("\n"))
def load_file_txt(source):
    """Read a text file and return its lines (split on '\\n')."""
    # with-block guarantees the handle is closed (original leaked it)
    with open(source, 'r') as file:
        text = file.read()
    return text.split("\n")
def load_data(name='id.albaqarah.cut.txt'):
    """Load a labelled corpus file.

    Each line has the form '<boundary_flag>,<text>'. Returns (lines,
    expected) where `expected` concatenates the leading boundary flags.
    """
    data = load_file_txt(name)
    # split() already yields str, so the original str(...) wrapper was redundant
    expected = "".join(i.split(",")[0] for i in data)
    return data, expected
def stem(word):
    # Stem a word using the module-level Sastrawi (Indonesian) stemmer.
    return stemmer.stem(word)
def gensig_model(X, minlength=1, maxlength=None, lam=0.0):
    """Build a segment-scoring closure over the rows of X.

    :param X: (N, D) array of per-sentence feature vectors.
    :param minlength: segments shorter than this score as infinity.
    :param maxlength: segments longer than this score as infinity (None = no limit).
    :param lam: unused; kept for interface compatibility.
    :return: sigma(a, b) -> (score, mass), where
             score = -sum(|column sums of X[a:b]|) / sqrt(D) (lower is better)
             and mass is the plain sum of X[a:b].
    """
    N, D = X.shape
    over_sqrtD = 1. / np.sqrt(D)
    cs = np.cumsum(X, 0)  # prefix sums over rows: cs[i] = X[:i+1].sum(axis=0)
    def sigma(a, b):
        length = (b - a)
        # Out-of-range segment lengths score as infinity. Return a 2-tuple
        # here as well: the original returned a bare np.inf, which crashed
        # callers that unpack `score, mass = sigma(a, b)` (see greedysplit).
        if minlength:
            if length < minlength: return np.inf, np.inf
        if maxlength:
            if length > maxlength: return np.inf, np.inf
        # Column sums of X[a:b] via prefix-sum difference.
        tot = cs[b - 1].copy()
        if a > 0:
            tot -= cs[a - 1]
        signs = np.sign(tot)
        # (signs * tot) equals |tot| elementwise, so this is -L1 norm / sqrt(D).
        return -over_sqrtD * (signs * tot).sum(), tot.sum()
    return sigma
def greedysplit(n, k, sigma):
    """Greedily split ``n`` sentences into ``k`` segments.

    ``sigma(a, b)`` must return a ``(score, mass)`` pair for segment
    [a, b); a lower total score is better.  Returns the sorted list of
    boundary indices and the candidate list from the final iteration.

    Fixes over the original: ``range`` instead of Py2-only ``xrange``,
    ``new_arr`` initialized before the loop (the original raised
    ``NameError`` when ``k <= 1``), unused ``new_score`` accumulator
    and debug prints removed.
    """
    splits = [n]
    s = sigma(0, n)

    def score(splits, sigma):
        # Total score of the segmentation induced by ``splits``.
        seg_scores = [sigma(a, b)[0] for (a, b) in tools.seg_iter(sorted(splits))]
        return sum(seg_scores)

    new_arr = []
    k = k - 1
    while k > 0:
        used = set(splits)
        new_arr = []
        # Try every unused boundary position and keep the best one.
        for i in range(1, n):
            if i not in used:
                new_arr.append([score(splits + [i], sigma), splits + [i]])
        best = min(new_arr)
        splits = best[1]
        s = best[0]
        k -= 1
    if 1 not in splits:
        splits = splits + [1]
    return sorted(splits), new_arr
def load_model(model_name):
    """Load a gensim Word2Vec model and the set of its vocabulary words."""
    w2v = Word2Vec.load(model_name)
    vocab = set(w2v.wv.index2word)
    return w2v, vocab
def avg_feature_vector(words, model, num_features, index2word_set):
    """Average the embedding vectors of the in-vocabulary *words*.

    Words missing from *index2word_set* are skipped; when none of the
    words are known, the zero vector is returned unchanged.
    """
    matched = [model[token] for token in words if token in index2word_set]
    avg = np.zeros((num_features,), dtype='float32')
    for vec in matched:
        avg = np.add(avg, vec)
    if matched:
        avg = np.divide(avg, len(matched))
    return avg
def word2sent(model, sent_tokenized, index2word_set, num_features=400):
    """Embed each tokenized sentence as the average of its word vectors.

    ``num_features`` generalizes the previously hard-coded vector
    dimensionality (default preserved at 400, so existing callers are
    unaffected).  Returns an (n_sentences, num_features) array.
    """
    return np.array([
        avg_feature_vector(tokens, model, num_features, index2word_set)
        for tokens in sent_tokenized
    ])
def OriginalGreedy(sent_tokenized, window=5, K=10):
    """Run the plain greedy splitter over the whole document.

    Returns the boundary mask as a 0/1 string, one character per
    sentence.  Uses the module-level ``_model`` / ``index2word_set``.
    """
    X = word2sent(_model, sent_tokenized, index2word_set)
    sig = gensig_model(X)
    spl, e = greedysplit(X.shape[0], K, sig)
    n = len(sent_tokenized)
    flags = [1] + [1 if i in spl else 0 for i in range(1, n)]
    flags[1] = 0  # position 1 is never emitted as a boundary
    return "".join(str(flag) for flag in flags)
models = ["models/w2vec_wiki_id_non_case"]
def NewSegV1(sent_tokenized, window=5, K=10):
    """Sliding-window greedy segmenter with a deviation threshold.

    Scans the sentence embeddings in windows of ``window`` sentences;
    inside each window a 2-way greedy split is tried, and the boundary
    is accepted only when the spread (std-dev) of the candidate scores
    exceeds the module-level STDEVIATION.  Otherwise the window grows.
    Returns the boundary mask as a 0/1 string.
    """
    # _model, index2word_set = load_model(model)
    X = word2sent(_model, sent_tokenized, index2word_set)
    results = []
    i = 0
    X_ = len(X)
    cursor = window
    splits = [1]
    while cursor - window < X_ and X[i:cursor].shape[0] > 1:
        K = 2  # always a binary split inside the current window
        sig = gensig_model(X[i:cursor])
        # print("window:", window)
        print("trying segment from {} to {}".format(i, cursor))
        # if X[i:cursor].shape[0] == 1:
        #     import pdb
        #     pdb.set_trace()
        spl, e = greedysplit(X[i:cursor].shape[0], K, sig)
        # spread of the candidate scores decides whether to cut here
        stdeviation = np.std([a[0] for a in e])
        print("cut or no:", np.std([a[0] for a in e]), splits, spl)
        if stdeviation > STDEVIATION: #0.0206:
            i = spl[1]
            # boundary positions are relative to the window start,
            # so convert to an absolute sentence index
            if len(splits) == 1:
                new_seg1 = i
            else:
                new_seg1 = splits[-1] + i
            splits.append(new_seg1)
            i = new_seg1
            cursor = i + window
        else:
            cursor = cursor + window
        # elif stdeviation == 0:
        #     i = X_ + 1
        # else:
        #     cursor = i + (2 * window)
        # if cursor - window == X_ - 1:
        #     break
        # if cursor > X_:
        #     cursor = X_
        # if i + window > X_:
        #     break
    # splits, e = new_greedy_split(X.shape[0], K, sig, 10)
    # print(splits)
    # sim = 1 - spatial.distance.cosine(X[0], X[1])
    # exp = "".join([sent.split(",")[0] for sent in sents])
    # pdb.set_trace()
    # materialize the boundary indices as a 0/1 mask string
    segs = [1] * len(sent_tokenized)
    for i in range(1, len(sent_tokenized)):
        if i in splits:
            segs[i] = 1
        else:
            segs[i] = 0
    # print("expected ", expected)
    segs[1] = 0
    # print("experiment", "".join([str(i) for i in segs]))
    # print(windowdiff(expected, "".join([str(i) for i in segs]), K))
    # results.append(windowdiff(expected, "".join([str(i) for i in segs]), K))
    # print(results)
    return "".join([str(i) for i in segs])
def NewSeg(sent_tokenized, window=5, K=10):
    """Sliding-window greedy segmenter (no deviation threshold).

    Like NewSegV1 but every window's best 2-way split is accepted
    unconditionally.  Returns the boundary mask as a 0/1 string.
    """
    # _model, index2word_set = load_model(model)
    X = word2sent(_model, sent_tokenized, index2word_set)
    results = []
    i = 0
    X_ = len(X)
    cursor = window
    splits = [1]
    while cursor < X_:
        K = 2  # binary split inside the window
        sig = gensig_model(X[i:cursor])
        print("window:", window)
        print("trying segment from {} to {}".format(i, cursor))
        spl, e = greedysplit(X[i:cursor].shape[0], K, sig)
        i = spl[1]
        # convert the window-relative boundary to an absolute index
        if len(splits) == 1:
            new_seg1 = i
        else:
            new_seg1 = splits[-1] + i
        splits.append(new_seg1)
        i = new_seg1
        cursor = i + window
    # splits, e = new_greedy_split(X.shape[0], K, sig, 10)
    # print(splits)
    # sim = 1 - spatial.distance.cosine(X[0], X[1])
    # exp = "".join([sent.split(",")[0] for sent in sents])
    # pdb.set_trace()
    # materialize the boundary indices as a 0/1 mask string
    segs = [1] * len(sent_tokenized)
    for i in range(1, len(sent_tokenized)):
        if i in splits:
            segs[i] = 1
        else:
            segs[i] = 0
    # print("expected ", expected)
    segs[1] = 0
    # print("experiment", "".join([str(i) for i in segs]))
    # print(windowdiff(expected, "".join([str(i) for i in segs]), K))
    # results.append(windowdiff(expected, "".join([str(i) for i in segs]), K))
    # print(results)
    return "".join([str(i) for i in segs])
import argparse
if __name__ == "__main__":
    # Grid-search driver: evaluates every segmenter on every corpus for
    # all combinations of stop-word removal / stemming / window size and
    # dumps the WindowDiff scores to CSV files.
    K = 8
    isStopWord = [True, False]
    isStemmed = [True, False]
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="please input the model")
    args = parser.parse_args()
    model = "models/{}".format(args.model)
    _model, index2word_set = load_model(model)
    # [corpus file, reference segment count] pairs
    data = [
        ['data/id.albaqarah.original.txt', 55],
        ['data/juz_amma.txt', 60],
        ['data/al-imron.txt', 34],
        ['data/annisa.txt', 33],
        ['data/almaaidah.txt', 22],
        ['data/alanam.txt', 13],
        # ['data/fadhail-amal.txt', 6],
        # ['data/sintesis.detik.txt', 10],
        # ['data/id.albaqarah.cut.txt', 3],
        # ['data/sintesis.extreme1.txt', 5],
        # ['data/sintesis.extreme2.txt', 6],
        # ['data/sintesis.extreme3.txt', 8],
        # ['data/sintesis.extreme4.txt', 8],
        # ['data/sintesis.extreme5.txt', 4],
        # ['data/sintesis.extreme6.txt', 9],
        # ['data/sintesis.extreme7.txt', 9],
        # ['data/sintesis.extreme8.txt', 7],
        # ['data/sintesis.extreme9.txt', 9],
        # ['data/sintesis.extreme10.txt', 9],
        #
        # ['data/sintesis.extreme11.txt', 11],
        # ['data/sintesis.extreme12.txt', 9],
        # ['data/sintesis.extreme13.txt', 8],
        # ['data/sintesis.extreme14.txt', 9],
        # ['data/sintesis.extreme15.txt', 7],
        # ['data/sintesis.extreme16.txt', 6],
        # ['data/sintesis.extreme17.txt', 7],
        # ['data/sintesis.extreme18.txt', 8],
        # ['data/sintesis.extreme19.txt', 8],
        # ['data/sintesis.extreme20.txt', 7],
        # #
        # ['data/sintesis.extreme21.txt', 8],
        # ['data/sintesis.extreme22.txt', 8],
        # ['data/sintesis.extreme23.txt', 8],
        # ['data/sintesis.extreme24.txt', 9],
        # ['data/sintesis.extreme25.txt', 10],
        # ['data/sintesis.extreme26.txt', 10],
        # ['data/sintesis.extreme27.txt', 9],
        # ['data/sintesis.extreme28.txt', 8],
        # ['data/sintesis.extreme29.txt', 9],
        # ['data/sintesis.extreme30.txt', 8],
        #
        # ['data/sintesis.kompas.android.smooth.txt', 7],
        # ['data/sintesis.kompas.android.smooth2.txt', 3],
        # ['data/sintesis.kompas.politik.smooth.txt', 3],
        # ['data/sintesis.kompas.tekno.middle.txt', 7],
        # ['data/sintetis.konsyar.sholat.smooth.txt', 5]
    ]
    # sents, expected = get_albaqarah('id.albaqarah.original.v2.txt')
    stopword = load_stop_words()
    methods = [NewSegV1, OriginalGreedy, c99, text_tiling]
    # import pdb
    # pdb.set_trace()
    results = []
    for sw in isStopWord:
        for ist in isStemmed:
            # if sw and ist: #check all true. please remove after finished
            for window in [6, 10, 15, 20]:
                for expe in data:
                    sents, expected = load_data(expe[0])
                    # tokenize each sentence, optionally dropping
                    # stop-words (sw) and/or stemming (ist)
                    sent_tokenized = []
                    for sent in sents:
                        words = tokenize.word_tokenize(sent)
                        if sw:
                            if ist:
                                sent_tokenized.append(
                                    [stem(word.lower()) for word in words if
                                     word.lower() not in stopword and word.isalpha()])
                                # for word in words:
                                #     if word.lower() not in stopword and word.isalpha():
                                #         sent_tokenized.append(stem(word.lower()))
                            else:
                                sent_tokenized.append(
                                    [word.lower() for word in words if word.lower() not in stopword and word.isalpha()])
                        else:
                            if ist:
                                sent_tokenized.append(
                                    [stem(word.lower()) for word in words if word.isalpha()])
                            else:
                                sent_tokenized.append(
                                    [word.lower() for word in words if word.isalpha()])
                    # tt = TextTilingTokenizer(demo_mode=False, stopwords=sw, k=56, w=20)
                    # s, ss, d, b = tt.tokenize([" ".join(sent) for sent in sent_tokenized])
                    for method in methods:
                        # try:
                        result = method(sent_tokenized, window, expe[1])
                        diff = windowdiff(expected, result, expe[1])
                        pk_diff = pk(expected, result, expe[1])
                        # print(result, expe[1])
                        record = {
                            'File': expe[0],
                            'Method Name': method.__name__,
                            'window': window,
                            # 'hypt segment': result,
                            # 'expe segment': expected,
                            'window diff': diff,
                            'isStemmed': ist,
                            'isStopped': sw
                        }
                        results.append(record)
                        print(
                            "Method {} | File: {} | StopWord: {} | Stemmed: {} | result: {} | {} {}".format(
                                method.__name__, expe,
                                sw, ist, diff, expected, result))
                        # except:
                        #     import pdb
                        #     pdb.set_trace()
                    print("===")
    # aggregate the collected records and export summary tables
    df = pd.DataFrame(results)
    print(df.to_string())
    df.to_csv('df_quran.csv')
    dfpivot = df.pivot_table(index=['File','window', 'isStemmed', 'isStopped'], columns='Method Name', values='window diff', aggfunc=np.average)
    dfpivot.to_csv('dfpivot_quran.csv')
    print(dfpivot.to_string())
    print(df.groupby(['Method Name', 'window', 'isStemmed', 'isStopped']).mean())
    # import pdb
    # pdb.set_trace()
    df[df['window']==6].groupby(['Method Name', 'isStemmed', 'isStopped']).mean()
    df[df['window'] == 6].groupby(['Method Name', 'isStemmed', 'isStopped']).mean().to_csv('results/summary_window_6.csv')
    # dfgroup = df.groupby(['Method Name', 'window']).mean().
    dfpivot = df[df['window'] == 6].pivot_table(index=['Method Name'], columns=['isStemmed', 'isStopped'], values='window diff', aggfunc=np.average)
    dfpivot.to_csv('results/summary_window_6.csv')
    dfpivot = df[df['window'] == 10].pivot_table(index=['Method Name'], columns=['isStemmed', 'isStopped'], values='window diff', aggfunc=np.average)
    dfpivot.to_csv('results/summary_window_10.csv')
    dfpivot = df[df['window'] == 15].pivot_table(index=['Method Name'], columns=['isStemmed', 'isStopped'], values='window diff', aggfunc=np.average)
    dfpivot.to_csv('results/summary_window_15.csv')
    dfpivot = df[df['window'] == 20].pivot_table(index=['Method Name'],columns=['isStemmed', 'isStopped'],values='window diff', aggfunc=np.average)
    dfpivot.to_csv('results/summary_window_20.csv')
| [
"sys.setdefaultencoding",
"numpy.sqrt",
"numpy.array",
"tools.seg_iter",
"numpy.divide",
"Sastrawi.Stemmer.StemmerFactory.StemmerFactory",
"argparse.ArgumentParser",
"gensim.models.Word2Vec.load",
"uts.TextTiling",
"pandas.DataFrame",
"numpy.add",
"nltk.metrics.windowdiff",
"numpy.sign",
"... | [((24, 54), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (46, 54), False, 'import sys\n'), ((250, 266), 'Sastrawi.Stemmer.StemmerFactory.StemmerFactory', 'StemmerFactory', ([], {}), '()\n', (264, 266), False, 'from Sastrawi.Stemmer.StemmerFactory import StemmerFactory\n'), ((416, 438), 'uts.C99', 'uts.C99', ([], {'window': 'window'}), '(window=window)\n', (423, 438), False, 'import uts\n'), ((617, 646), 'uts.TextTiling', 'uts.TextTiling', ([], {'window': 'window'}), '(window=window)\n', (631, 646), False, 'import uts\n'), ((1385, 1400), 'numpy.cumsum', 'np.cumsum', (['X', '(0)'], {}), '(X, 0)\n', (1394, 1400), True, 'import numpy as np\n'), ((3417, 3442), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['model_name'], {}), '(model_name)\n', (3430, 3442), False, 'from gensim.models import Word2Vec\n'), ((3610, 3652), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float32"""'}), "((num_features,), dtype='float32')\n", (3618, 3652), True, 'import numpy as np\n'), ((4141, 4152), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4149, 4152), True, 'import numpy as np\n'), ((8202, 8227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8225, 8227), False, 'import argparse\n'), ((13661, 13682), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (13673, 13682), True, 'import pandas as pd\n'), ((1365, 1375), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (1372, 1375), True, 'import numpy as np\n'), ((1718, 1730), 'numpy.sign', 'np.sign', (['tot'], {}), '(tot)\n', (1725, 1730), True, 'import numpy as np\n'), ((2309, 2331), 'tools.seg_iter', 'tools.seg_iter', (['splits'], {}), '(splits)\n', (2323, 2331), False, 'import tools\n'), ((3855, 3886), 'numpy.divide', 'np.divide', (['feature_vec', 'n_words'], {}), '(feature_vec, n_words)\n', (3864, 3886), True, 'import numpy as np\n'), ((5246, 5271), 'numpy.std', 'np.std', (['[a[0] for a in e]'], {}), '([a[0] for a in e])\n', 
(5252, 5271), True, 'import numpy as np\n'), ((3778, 3810), 'numpy.add', 'np.add', (['feature_vec', 'model[word]'], {}), '(feature_vec, model[word])\n', (3784, 3810), True, 'import numpy as np\n'), ((5300, 5325), 'numpy.std', 'np.std', (['[a[0] for a in e]'], {}), '([a[0] for a in e])\n', (5306, 5325), True, 'import numpy as np\n'), ((11071, 11099), 'nltk.tokenize.word_tokenize', 'tokenize.word_tokenize', (['sent'], {}), '(sent)\n', (11093, 11099), False, 'from nltk import tokenize\n'), ((12579, 12616), 'nltk.metrics.windowdiff', 'windowdiff', (['expected', 'result', 'expe[1]'], {}), '(expected, result, expe[1])\n', (12589, 12616), False, 'from nltk.metrics import windowdiff, pk\n'), ((12651, 12680), 'nltk.metrics.pk', 'pk', (['expected', 'result', 'expe[1]'], {}), '(expected, result, expe[1])\n', (12653, 12680), False, 'from nltk.metrics import windowdiff, pk\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import copy
# Fix RNG seeds so the sampling statistics checked below are reproducible.
np.random.seed(10)
paddle.seed(10)
class TestNormalAPI(unittest.TestCase):
    """Statistical check of paddle.normal: draw ``repeat_num`` samples in
    both static-graph and dygraph modes and verify the empirical
    mean/std track the requested parameters."""

    def setUp(self):
        self.mean = 1.0
        self.std = 0.0
        self.shape = None
        self.repeat_num = 2000
        self.set_attrs()
        self.dtype = self.get_dtype()
        self.place=paddle.CUDAPlace(0) \
            if paddle.fluid.core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def set_attrs(self):
        # Overridden by subclasses to vary mean / std / shape.
        self.shape = [8, 12]

    def get_shape(self):
        # The output shape follows an ndarray argument when one is given.
        if isinstance(self.mean, np.ndarray):
            shape = self.mean.shape
        elif isinstance(self.std, np.ndarray):
            shape = self.std.shape
        else:
            shape = self.shape
        return list(shape)

    def get_dtype(self):
        # The output dtype follows an ndarray argument; float32 otherwise.
        if isinstance(self.mean, np.ndarray):
            return self.mean.dtype
        elif isinstance(self.std, np.ndarray):
            return self.std.dtype
        else:
            return 'float32'

    def static_api(self):
        """Sample repeat_num draws in static-graph mode.

        Four branches cover every combination of mean/std being an
        ndarray (fed as graph input) versus a Python scalar.
        """
        shape = self.get_shape()
        ret_all_shape = copy.deepcopy(shape)
        ret_all_shape.insert(0, self.repeat_num)
        ret_all = np.zeros(ret_all_shape, self.dtype)
        if isinstance(self.mean, np.ndarray) \
            and isinstance(self.std, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
                mean = paddle.fluid.data('Mean', self.mean.shape,
                                  self.mean.dtype)
                std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
                out = paddle.normal(mean, std, self.shape)
                exe = paddle.static.Executor(self.place)
                for i in range(self.repeat_num):
                    ret = exe.run(feed={
                        'Mean': self.mean,
                        'Std': self.std.reshape(shape)
                    },
                                  fetch_list=[out])
                    ret_all[i] = ret[0]
                return ret_all
        elif isinstance(self.mean, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
                mean = paddle.fluid.data('Mean', self.mean.shape,
                                  self.mean.dtype)
                out = paddle.normal(mean, self.std, self.shape)
                exe = paddle.static.Executor(self.place)
                for i in range(self.repeat_num):
                    ret = exe.run(feed={'Mean': self.mean}, fetch_list=[out])
                    ret_all[i] = ret[0]
                return ret_all
        elif isinstance(self.std, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
                std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
                out = paddle.normal(self.mean, std, self.shape)
                exe = paddle.static.Executor(self.place)
                for i in range(self.repeat_num):
                    ret = exe.run(feed={'Std': self.std}, fetch_list=[out])
                    ret_all[i] = ret[0]
                return ret_all
        else:
            with paddle.static.program_guard(paddle.static.Program()):
                out = paddle.normal(self.mean, self.std, self.shape)
                exe = paddle.static.Executor(self.place)
                for i in range(self.repeat_num):
                    ret = exe.run(fetch_list=[out])
                    ret_all[i] = ret[0]
                return ret_all

    def dygraph_api(self):
        """Sample repeat_num draws in imperative (dygraph) mode."""
        paddle.disable_static(self.place)
        shape = self.get_shape()
        ret_all_shape = copy.deepcopy(shape)
        ret_all_shape.insert(0, self.repeat_num)
        ret_all = np.zeros(ret_all_shape, self.dtype)
        mean = paddle.to_tensor(self.mean) \
            if isinstance(self.mean, np.ndarray) else self.mean
        std = paddle.to_tensor(self.std) \
            if isinstance(self.std, np.ndarray) else self.std
        for i in range(self.repeat_num):
            out = paddle.normal(mean, std, self.shape)
            ret_all[i] = out.numpy()
        paddle.enable_static()
        return ret_all

    def test_api(self):
        # Both execution modes must produce the right shape and match
        # the requested mean/std within a loose (0.2) tolerance.
        ret_static = self.static_api()
        ret_dygraph = self.dygraph_api()
        for ret in [ret_static, ret_dygraph]:
            shape_ref = self.get_shape()
            self.assertEqual(shape_ref, list(ret[0].shape))
            ret = ret.flatten().reshape([self.repeat_num, -1])
            mean = np.mean(ret, axis=0)
            std = np.std(ret, axis=0)
            mean_ref=self.mean.reshape([1, -1]) \
                if isinstance(self.mean, np.ndarray) else self.mean
            std_ref=self.std.reshape([1, -1]) \
                if isinstance(self.std, np.ndarray) else self.std
            self.assertTrue(np.allclose(mean_ref, mean, 0.2, 0.2))
            self.assertTrue(np.allclose(std_ref, std, 0.2, 0.2))
class TestNormalAPI_mean_is_tensor(TestNormalAPI):
    # mean given as an ndarray: output shape/dtype must follow it.
    def set_attrs(self):
        self.mean = np.random.uniform(-2, -1, [2, 3, 4, 5]).astype('float64')
class TestNormalAPI_std_is_tensor(TestNormalAPI):
    # std given as an ndarray: output shape/dtype must follow it.
    def set_attrs(self):
        self.std = np.random.uniform(0.7, 1, [2, 3, 17]).astype('float64')
class TestNormalAPI_mean_std_are_tensor(TestNormalAPI):
    # both parameters as same-shape ndarrays.
    def set_attrs(self):
        self.mean = np.random.uniform(1, 2, [1, 100]).astype('float64')
        self.std = np.random.uniform(0.5, 1, [1, 100]).astype('float64')
class TestNormalAPI_mean_std_are_tensor_with_different_dtype(TestNormalAPI):
    # mixed-dtype parameters (float64 mean, float32 std) must still work.
    def set_attrs(self):
        self.mean = np.random.uniform(1, 2, [100]).astype('float64')
        self.std = np.random.uniform(1, 2, [100]).astype('float32')
class TestNormalAlias(unittest.TestCase):
    def test_alias(self):
        # paddle.normal, paddle.tensor.normal and
        # paddle.tensor.random.normal are the same API exposed under
        # three import paths; each must be callable.
        paddle.disable_static()
        shape = [1, 2, 3]
        out1 = paddle.normal(shape=shape)
        out2 = paddle.tensor.normal(shape=shape)
        out3 = paddle.tensor.random.normal(shape=shape)
        paddle.enable_static()
class TestNormalErrors(unittest.TestCase):
    def test_errors(self):
        """Invalid argument types (lists, int32 tensors, float shapes)
        must raise TypeError."""
        with paddle.static.program_guard(paddle.static.Program()):
            mean = [1, 2, 3]
            self.assertRaises(TypeError, paddle.normal, mean)

            std = [1, 2, 3]
            self.assertRaises(TypeError, paddle.normal, std=std)

            mean = paddle.fluid.data('Mean', [100], 'int32')
            self.assertRaises(TypeError, paddle.normal, mean)

            std = paddle.fluid.data('Std', [100], 'int32')
            self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)

            self.assertRaises(TypeError, paddle.normal, shape=1)

            self.assertRaises(TypeError, paddle.normal, shape=[1.0])

            shape = paddle.fluid.data('Shape', [100], 'float32')
            self.assertRaises(TypeError, paddle.normal, shape=shape)
if __name__ == "__main__":
    # Run the whole suite when executed directly.
    unittest.main()
| [
"paddle.tensor.random.normal",
"paddle.tensor.normal",
"paddle.seed",
"paddle.disable_static",
"copy.deepcopy",
"unittest.main",
"paddle.normal",
"numpy.mean",
"paddle.CPUPlace",
"paddle.enable_static",
"paddle.to_tensor",
"numpy.random.seed",
"paddle.fluid.core.is_compiled_with_cuda",
"nu... | [((675, 693), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (689, 693), True, 'import numpy as np\n'), ((694, 709), 'paddle.seed', 'paddle.seed', (['(10)'], {}), '(10)\n', (705, 709), False, 'import paddle\n'), ((7487, 7502), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7500, 7502), False, 'import unittest\n'), ((1708, 1728), 'copy.deepcopy', 'copy.deepcopy', (['shape'], {}), '(shape)\n', (1721, 1728), False, 'import copy\n'), ((1796, 1831), 'numpy.zeros', 'np.zeros', (['ret_all_shape', 'self.dtype'], {}), '(ret_all_shape, self.dtype)\n', (1804, 1831), True, 'import numpy as np\n'), ((4136, 4169), 'paddle.disable_static', 'paddle.disable_static', (['self.place'], {}), '(self.place)\n', (4157, 4169), False, 'import paddle\n'), ((4227, 4247), 'copy.deepcopy', 'copy.deepcopy', (['shape'], {}), '(shape)\n', (4240, 4247), False, 'import copy\n'), ((4315, 4350), 'numpy.zeros', 'np.zeros', (['ret_all_shape', 'self.dtype'], {}), '(ret_all_shape, self.dtype)\n', (4323, 4350), True, 'import numpy as np\n'), ((4707, 4729), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (4727, 4729), False, 'import paddle\n'), ((6371, 6394), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (6392, 6394), False, 'import paddle\n'), ((6436, 6462), 'paddle.normal', 'paddle.normal', ([], {'shape': 'shape'}), '(shape=shape)\n', (6449, 6462), False, 'import paddle\n'), ((6478, 6511), 'paddle.tensor.normal', 'paddle.tensor.normal', ([], {'shape': 'shape'}), '(shape=shape)\n', (6498, 6511), False, 'import paddle\n'), ((6527, 6567), 'paddle.tensor.random.normal', 'paddle.tensor.random.normal', ([], {'shape': 'shape'}), '(shape=shape)\n', (6554, 6567), False, 'import paddle\n'), ((6576, 6598), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (6596, 6598), False, 'import paddle\n'), ((997, 1038), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (1036, 1038), 
False, 'import paddle\n'), ((960, 979), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (976, 979), False, 'import paddle\n'), ((1058, 1075), 'paddle.CPUPlace', 'paddle.CPUPlace', ([], {}), '()\n', (1073, 1075), False, 'import paddle\n'), ((4367, 4394), 'paddle.to_tensor', 'paddle.to_tensor', (['self.mean'], {}), '(self.mean)\n', (4383, 4394), False, 'import paddle\n'), ((4475, 4501), 'paddle.to_tensor', 'paddle.to_tensor', (['self.std'], {}), '(self.std)\n', (4491, 4501), False, 'import paddle\n'), ((4625, 4661), 'paddle.normal', 'paddle.normal', (['mean', 'std', 'self.shape'], {}), '(mean, std, self.shape)\n', (4638, 4661), False, 'import paddle\n'), ((5088, 5108), 'numpy.mean', 'np.mean', (['ret'], {'axis': '(0)'}), '(ret, axis=0)\n', (5095, 5108), True, 'import numpy as np\n'), ((5127, 5146), 'numpy.std', 'np.std', (['ret'], {'axis': '(0)'}), '(ret, axis=0)\n', (5133, 5146), True, 'import numpy as np\n'), ((6944, 6985), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Mean"""', '[100]', '"""int32"""'], {}), "('Mean', [100], 'int32')\n", (6961, 6985), False, 'import paddle\n'), ((7067, 7107), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Std"""', '[100]', '"""int32"""'], {}), "('Std', [100], 'int32')\n", (7084, 7107), False, 'import paddle\n'), ((7340, 7384), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Shape"""', '[100]', '"""float32"""'], {}), "('Shape', [100], 'float32')\n", (7357, 7384), False, 'import paddle\n'), ((2023, 2082), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Mean"""', 'self.mean.shape', 'self.mean.dtype'], {}), "('Mean', self.mean.shape, self.mean.dtype)\n", (2040, 2082), False, 'import paddle\n'), ((2146, 2202), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Std"""', 'self.std.shape', 'self.std.dtype'], {}), "('Std', self.std.shape, self.std.dtype)\n", (2163, 2202), False, 'import paddle\n'), ((2225, 2261), 'paddle.normal', 'paddle.normal', (['mean', 'std', 'self.shape'], {}), '(mean, std, self.shape)\n', (2238, 2261), 
False, 'import paddle\n'), ((2285, 2319), 'paddle.static.Executor', 'paddle.static.Executor', (['self.place'], {}), '(self.place)\n', (2307, 2319), False, 'import paddle\n'), ((5407, 5444), 'numpy.allclose', 'np.allclose', (['mean_ref', 'mean', '(0.2)', '(0.2)'], {}), '(mean_ref, mean, 0.2, 0.2)\n', (5418, 5444), True, 'import numpy as np\n'), ((5474, 5509), 'numpy.allclose', 'np.allclose', (['std_ref', 'std', '(0.2)', '(0.2)'], {}), '(std_ref, std, 0.2, 0.2)\n', (5485, 5509), True, 'import numpy as np\n'), ((5610, 5649), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(-1)', '[2, 3, 4, 5]'], {}), '(-2, -1, [2, 3, 4, 5])\n', (5627, 5649), True, 'import numpy as np\n'), ((5765, 5802), 'numpy.random.uniform', 'np.random.uniform', (['(0.7)', '(1)', '[2, 3, 17]'], {}), '(0.7, 1, [2, 3, 17])\n', (5782, 5802), True, 'import numpy as np\n'), ((5925, 5958), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)', '[1, 100]'], {}), '(1, 2, [1, 100])\n', (5942, 5958), True, 'import numpy as np\n'), ((5996, 6031), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)', '[1, 100]'], {}), '(0.5, 1, [1, 100])\n', (6013, 6031), True, 'import numpy as np\n'), ((6175, 6205), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)', '[100]'], {}), '(1, 2, [100])\n', (6192, 6205), True, 'import numpy as np\n'), ((6243, 6273), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)', '[100]'], {}), '(1, 2, [100])\n', (6260, 6273), True, 'import numpy as np\n'), ((6713, 6736), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (6734, 6736), False, 'import paddle\n'), ((1974, 1997), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (1995, 1997), False, 'import paddle\n'), ((2792, 2851), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Mean"""', 'self.mean.shape', 'self.mean.dtype'], {}), "('Mean', self.mean.shape, self.mean.dtype)\n", (2809, 2851), False, 'import paddle\n'), ((2915, 2956), 'paddle.normal', 'paddle.normal', 
(['mean', 'self.std', 'self.shape'], {}), '(mean, self.std, self.shape)\n', (2928, 2956), False, 'import paddle\n'), ((2980, 3014), 'paddle.static.Executor', 'paddle.static.Executor', (['self.place'], {}), '(self.place)\n', (3002, 3014), False, 'import paddle\n'), ((2743, 2766), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (2764, 2766), False, 'import paddle\n'), ((3349, 3405), 'paddle.fluid.data', 'paddle.fluid.data', (['"""Std"""', 'self.std.shape', 'self.std.dtype'], {}), "('Std', self.std.shape, self.std.dtype)\n", (3366, 3405), False, 'import paddle\n'), ((3428, 3469), 'paddle.normal', 'paddle.normal', (['self.mean', 'std', 'self.shape'], {}), '(self.mean, std, self.shape)\n', (3441, 3469), False, 'import paddle\n'), ((3493, 3527), 'paddle.static.Executor', 'paddle.static.Executor', (['self.place'], {}), '(self.place)\n', (3515, 3527), False, 'import paddle\n'), ((3827, 3873), 'paddle.normal', 'paddle.normal', (['self.mean', 'self.std', 'self.shape'], {}), '(self.mean, self.std, self.shape)\n', (3840, 3873), False, 'import paddle\n'), ((3897, 3931), 'paddle.static.Executor', 'paddle.static.Executor', (['self.place'], {}), '(self.place)\n', (3919, 3931), False, 'import paddle\n'), ((3301, 3324), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3322, 3324), False, 'import paddle\n'), ((3779, 3802), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3800, 3802), False, 'import paddle\n')] |
import numpy as np
# Build a 3x3 integer matrix and print it together with its determinant.
b = np.array([[1, 3, 2],
              [4, 1, 3],
              [2, 5, 2]])
print(b)
determinant = np.linalg.det(b)
print(determinant)
| [
"numpy.array",
"numpy.linalg.det"
] | [((21, 64), 'numpy.array', 'np.array', (['[[1, 3, 2], [4, 1, 3], [2, 5, 2]]'], {}), '([[1, 3, 2], [4, 1, 3], [2, 5, 2]])\n', (29, 64), True, 'import numpy as np\n'), ((72, 88), 'numpy.linalg.det', 'np.linalg.det', (['b'], {}), '(b)\n', (85, 88), True, 'import numpy as np\n')] |
import json
import numpy as np
from ..utils import *
from .. import logger
class Randomizer:
    """Samples parameter values from per-parameter distributions.

    Parameters listed in the randomization config are drawn from the
    distribution they declare ('int', 'uniform' or 'normal'); every
    other known parameter falls back to the default config's
    ``default`` value.
    """

    def __init__(self, randomization_config_fp='default_dr.json', default_config_fp='default.json'):
        try:
            with open(get_file_path('randomization/config', randomization_config_fp, 'json'), mode='r') as f:
                self.randomization_config = json.load(f)
        except (OSError, ValueError):
            # Narrowed from a bare ``except``: only a missing/unreadable
            # file or malformed JSON (json.JSONDecodeError is a
            # ValueError) should fall back to an empty config.
            logger.warning("Couldn't find {} in randomization/config subdirectory".format(randomization_config_fp))
            self.randomization_config = dict()
        with open(get_file_path('randomization/config', default_config_fp, 'json'), mode='r') as f:
            self.default_config = json.load(f)
        # Every parameter name seen in either config.
        self.keys = set(list(self.randomization_config.keys()) + list(self.default_config.keys()))

    @staticmethod
    def _sample(name, definition):
        """Draw one value according to *definition*.

        Raises IndexError (message preserved from the original code for
        compatibility) when a required field is missing, and
        NotImplementedError for an unknown distribution type.
        """
        dist = definition['type']
        size = definition.get('size', 1)
        try:
            if dist == 'int':
                return np.random.randint(low=definition['low'], high=definition['high'], size=size)
            if dist == 'uniform':
                return np.random.uniform(low=definition['low'], high=definition['high'], size=size)
            if dist == 'normal':
                return np.random.normal(loc=definition['loc'], scale=definition['scale'], size=size)
        except KeyError:
            raise IndexError("Please check your randomization definition for: {}".format(name))
        raise NotImplementedError("You've specified an unsupported distribution type")

    def randomize(self):
        """Returns a dictionary of randomized parameters, with key: parameter name and value: randomized
        value
        """
        randomization_settings = dict()
        for k in self.keys:
            if k in self.randomization_config:
                setting = self._sample(k, self.randomization_config[k])
            elif k in self.default_config:
                setting = self.default_config[k]['default']
            else:
                setting = None
            randomization_settings[k] = setting
        return randomization_settings
| [
"json.load",
"numpy.random.randint",
"numpy.random.normal",
"numpy.random.uniform"
] | [((691, 703), 'json.load', 'json.load', (['f'], {}), '(f)\n', (700, 703), False, 'import json\n'), ((364, 376), 'json.load', 'json.load', (['f'], {}), '(f)\n', (373, 376), False, 'import json\n'), ((1657, 1705), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': 'size'}), '(low=low, high=high, size=size)\n', (1674, 1705), True, 'import numpy as np\n'), ((2161, 2209), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': 'size'}), '(low=low, high=high, size=size)\n', (2178, 2209), True, 'import numpy as np\n'), ((2666, 2715), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'loc', 'scale': 'scale', 'size': 'size'}), '(loc=loc, scale=scale, size=size)\n', (2682, 2715), True, 'import numpy as np\n')] |
import numpy as np
from keras.models import load_model
from utils.img_process import process, yolo_img_process
import utils.yolo_util as yolo_util
import tensorflow as tf
import cv2
from PIL import Image
import pickle
from deepgtav.messages import frame2numpy
import gzip
# Let TensorFlow grow GPU memory on demand instead of grabbing it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Detector input size vs. the raw capture resolution of the recording.
IMAGE_H, IMAGE_W = 416, 416
CAP_IMG_W, CAP_IMG_H = 1914, 1051
# The dataset is a gzip-compressed pickle stream; the name is rebound
# to hold the open file object.
data_path = "video_drive_file/dataset.pz"
data_path = gzip.open(data_path)
def drive(model, image, speed, warning):
    """Predict a steering command from the current frame.

    Returns a human-readable "steering throttle brake" status string.
    ``warning`` (obstacle flag from the detector) forces full brake and
    no throttle.  Throttle/brake follow a simple speed governor.
    """
    throttle = 0
    breakk = 0  # "break" is a keyword, hence the spelling
    roi, radar = process(image)
    controls = model.predict([np.array([roi]), np.array([radar]), np.array([speed])], batch_size=1)
    # model outputs radians; rescale to the steering range used below
    controls = controls[0][0] * 5 / 3.14
    # FIX: the format string was "%lf.2" (prints the full float followed
    # by a literal ".2"); "%.2lf" is the intended 2-decimal formatting.
    if warning:
        return "--> %.2lf throttle:%lf brake:%lf" % (controls, False, True)
    # control speed
    if speed < 5:
        throttle = 1
    elif speed < 20:
        throttle = 0.5
    elif speed > 25:
        throttle = 0.0
        breakk = 0.4
    # arrow indicates steering direction (the original had a no-op
    # ``controls = controls`` branch here)
    arrow = "-->" if controls > 0 else "<--"
    info = "%s %.2lf throttle:%lf brake:%lf" % (arrow, controls, throttle, breakk)
    print(info)
    return info
def main():
    """Replay the recorded session: run YOLO v3 obstacle detection on each
    frame, predict driving controls with the main model, and display the
    annotated result (space = next frame, q = quit)."""
    # load yolo v3
    classes = yolo_util.read_coco_names('./files/coco/coco.names')
    num_classes = len(classes)
    input_tensor, output_tensors = yolo_util.read_pb_return_tensors(tf.get_default_graph(),
                     "./files/trained_models/yolov3.pb",
                     ["Placeholder:0", "concat_9:0", "mul_6:0"])
    print("load yolo v3 successfully!")
    with tf.Session() as sess:
        model = load_model("files/trained_models/main_model.h5")
        print("load main_model successfully!")
        while True:
            try:
                data_dict = pickle.load(data_path) # read the next recorded frame from the stream
                speed = data_dict['speed']
                frame = data_dict['frame']
                frame = frame2numpy(frame,(CAP_IMG_W,CAP_IMG_H))
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame)
            except EOFError:
                # end of the recorded stream
                print("===========end=============")
                exit(0)
            # Obstacle detection: YOLO forward pass + NMS, then draw the boxes.
            boxes, scores = sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(yolo_img_process(frame), axis=0)})
            boxes, scores, labels = yolo_util.cpu_nms(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.1)
            image, warning = yolo_util.draw_boxes(image, boxes, scores, labels, classes, (IMAGE_H, IMAGE_W), show=False)
            info = drive(model=model, image=frame, speed=speed, warning=warning)
            result = np.asarray(image)
            cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=1, color=(255, 0, 0), thickness=2)
            result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
            while True:
                cv2.imshow("result", result)
                if cv2.waitKey(0) & 0xFF == 32: # press space for the next frame
                    break
                elif cv2.waitKey(0) & 0xFF == ord('q'): # press q to quit
                    print("====================done===================")
                    exit(0)
if __name__ == '__main__':
    main()
| [
"utils.yolo_util.read_coco_names",
"gzip.open",
"utils.yolo_util.draw_boxes",
"cv2.imshow",
"numpy.array",
"utils.img_process.process",
"tensorflow.Session",
"numpy.asarray",
"utils.yolo_util.cpu_nms",
"deepgtav.messages.frame2numpy",
"tensorflow.ConfigProto",
"cv2.waitKey",
"utils.img_proce... | [((282, 298), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (296, 298), True, 'import tensorflow as tf\n'), ((345, 370), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (355, 370), True, 'import tensorflow as tf\n'), ((489, 509), 'gzip.open', 'gzip.open', (['data_path'], {}), '(data_path)\n', (498, 509), False, 'import gzip\n'), ((603, 617), 'utils.img_process.process', 'process', (['image'], {}), '(image)\n', (610, 617), False, 'from utils.img_process import process, yolo_img_process\n'), ((1329, 1381), 'utils.yolo_util.read_coco_names', 'yolo_util.read_coco_names', (['"""./files/coco/coco.names"""'], {}), "('./files/coco/coco.names')\n", (1354, 1381), True, 'import utils.yolo_util as yolo_util\n'), ((1481, 1503), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1501, 1503), True, 'import tensorflow as tf\n'), ((1771, 1783), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1781, 1783), True, 'import tensorflow as tf\n'), ((1809, 1857), 'keras.models.load_model', 'load_model', (['"""files/trained_models/main_model.h5"""'], {}), "('files/trained_models/main_model.h5')\n", (1819, 1857), False, 'from keras.models import load_model\n'), ((649, 664), 'numpy.array', 'np.array', (['[roi]'], {}), '([roi])\n', (657, 664), True, 'import numpy as np\n'), ((666, 683), 'numpy.array', 'np.array', (['[radar]'], {}), '([radar])\n', (674, 683), True, 'import numpy as np\n'), ((685, 702), 'numpy.array', 'np.array', (['[speed]'], {}), '([speed])\n', (693, 702), True, 'import numpy as np\n'), ((2541, 2620), 'utils.yolo_util.cpu_nms', 'yolo_util.cpu_nms', (['boxes', 'scores', 'num_classes'], {'score_thresh': '(0.4)', 'iou_thresh': '(0.1)'}), '(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.1)\n', (2558, 2620), True, 'import utils.yolo_util as yolo_util\n'), ((2650, 2745), 'utils.yolo_util.draw_boxes', 'yolo_util.draw_boxes', (['image', 'boxes', 'scores', 'labels', 
'classes', '(IMAGE_H, IMAGE_W)'], {'show': '(False)'}), '(image, boxes, scores, labels, classes, (IMAGE_H,\n IMAGE_W), show=False)\n', (2670, 2745), True, 'import utils.yolo_util as yolo_util\n'), ((2846, 2863), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2856, 2863), True, 'import numpy as np\n'), ((2876, 3005), 'cv2.putText', 'cv2.putText', (['result'], {'text': 'info', 'org': '(50, 70)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(1)', 'color': '(255, 0, 0)', 'thickness': '(2)'}), '(result, text=info, org=(50, 70), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0), thickness=2)\n', (2887, 3005), False, 'import cv2\n'), ((3046, 3085), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_RGB2BGR'], {}), '(result, cv2.COLOR_RGB2BGR)\n', (3058, 3085), False, 'import cv2\n'), ((1972, 1994), 'pickle.load', 'pickle.load', (['data_path'], {}), '(data_path)\n', (1983, 1994), False, 'import pickle\n'), ((2119, 2161), 'deepgtav.messages.frame2numpy', 'frame2numpy', (['frame', '(CAP_IMG_W, CAP_IMG_H)'], {}), '(frame, (CAP_IMG_W, CAP_IMG_H))\n', (2130, 2161), False, 'from deepgtav.messages import frame2numpy\n'), ((2184, 2222), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2196, 2222), False, 'import cv2\n'), ((2247, 2269), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (2262, 2269), False, 'from PIL import Image\n'), ((3127, 3155), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (3137, 3155), False, 'import cv2\n'), ((3175, 3189), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3186, 3189), False, 'import cv2\n'), ((2470, 2493), 'utils.img_process.yolo_img_process', 'yolo_img_process', (['frame'], {}), '(frame)\n', (2486, 2493), False, 'from utils.img_process import process, yolo_img_process\n'), ((3263, 3277), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3274, 3277), False, 'import 
cv2\n')] |
import traversalHelper as tr
from itertools import combinations
import helper as hr
import math, time
import multiprocessing
from multiprocessing import Pool, Manager
from functools import partial
import numpy as np
from statistics import mean
import json, sys, os
from collections import defaultdict
class helperFunc:
    """Stateless set-ratio primitives shared by the link-prediction scorers.

    Convention: ``PPIr`` maps each node to the set of its neighbours; the
    various ``*Nodes`` arguments are plain node sets.
    """
    @staticmethod
    def uvSpec_basic(node, classNodes, PPIr):
        # given a node, and a class of complementary node, the ratio of neigh node being complement over all neigh nodes
        # (Jaccard-style: |class & N(node)| / |class | N(node)|)
        if len(classNodes) == 0: return 0
        return len(classNodes&PPIr[node])/len(classNodes|PPIr[node])
    @staticmethod
    def uvSpec_noPad(node, classNodes, PPIr):
        # like uvSpec_basic but the union size is reduced by one ("no padding");
        # NOTE(review): raises ZeroDivisionError when the union has size 1 -- confirm inputs exclude that case
        if len(classNodes) == 0: return 0
        return len(classNodes&PPIr[node])/(len(classNodes|PPIr[node])-1)
    @staticmethod
    def uvSpec_linear(node, classNodes, PPIr):
        # linear variant: in-class neighbours minus out-of-class neighbours (may be negative)
        if len(classNodes) == 0: return 0
        return len(classNodes&PPIr[node])-len(PPIr[node]-classNodes)
    @staticmethod
    def xySpec_basic(node, classNodes, PPIr):
        # same as uvSpec
        if len(classNodes) == 0: return 0
        return len(classNodes&PPIr[node])/len(classNodes|PPIr[node])
    @staticmethod
    def xyContrib(node, classNodes, PPIr):
        # given nodeX (nodeY), check the ratio of degree being nodeU (nodeV)
        if len(PPIr[node]) == 0: return 0
        return len(PPIr[node]&classNodes)/len(PPIr[node])
    @staticmethod
    def uvContrib(node, parentCNodes, peerCNodes, PPIr):
        # have inherient normalization (different edge weight for node of different deg)
        if len(PPIr[node]) == 0: return 0
        return len(PPIr[node]&parentCNodes&peerCNodes)/len(PPIr[node])
    @staticmethod
    def uvContrib_padded(node, parentCNodes, peerCNodes, PPIr):
        # padded variant: drops the parent-class intersection
        if len(PPIr[node]) == 0: return 0
        return len(PPIr[node]&peerCNodes)/len(PPIr[node])
    @staticmethod
    def dualCN(parentNode, childNode, PPIr):
        # Jaccard similarity of the two nodes' neighbourhoods
        if len(PPIr[parentNode]) == 0 and len(PPIr[childNode]) == 0: return 0
        return len(PPIr[parentNode]&PPIr[childNode])/len(PPIr[parentNode]|PPIr[childNode])
    @staticmethod
    def uvEval(parentNode, classNodes, PPIr):
        # parentNode = u or v, classNodes = V or U
        if len(PPIr[parentNode]) == 0: return 0
        return len(PPIr[parentNode]&classNodes)/len(PPIr[parentNode])
    @staticmethod
    def logging(count, lastCount, total, avgTime, startTime, frequency=1000):
        # Progress reporter: keeps a running mean of per-item time and prints
        # an ETA line every `frequency` processed items; returns updated state.
        count += 1
        if count == 1: avgTime = time.time()-startTime
        else: avgTime = (avgTime*(count-1)+(time.time()-startTime))/count
        if count-lastCount > frequency:
            print("reference core's count: {}/{}. tick rate: {}. Expected sec to finish (hr): {} ({})".format(
                count, total, frequency, round(avgTime*(total-count), 2), round(avgTime*(total-count)/60/60, 2)), end="\r")
            lastCount = count
        return count, lastCount, avgTime
class ns:
    """Namespace of shared aliases and option registries for the predictors."""
    # Short aliases for the traversal helper conversions.
    BRToRelat = tr.Helper.binary_to_relation
    toDualBR = tr.Helper.to_dual_binary_relation
    BRToNode = tr.Helper.binary_relation_to_node
    arr_pStr = tr.Helper.list_to_pathStrs
    pStr_arr = tr.Helper.pathStrs_to_list
    br_str = tr.Helper.br_to_pathStr
    # Scoring-method families recognised by _PPILinkPred's dispatch.
    L3Scoring = ["L3Normalizing", "L3uvJoin", "L3Raw", "Sim"]
    L2Scoring = ["commonNeighbor"]
    CARBasedScoring = ["CRA", "CAR", "CH2_L3"]
    interStrScoring = ["interStr"]
    # Option-tag -> callable registries consumed by deserialize_args;
    # "null" always maps to a no-op variant.
    normFuncMapping = {"sqrt": math.sqrt, "log":math.log, "none": lambda x: x, "null": lambda x: 1}
    uvSpecMapping = {"basic": helperFunc.uvSpec_basic, "linear": helperFunc.uvSpec_linear, "noPad": helperFunc.uvSpec_noPad, "null": lambda node, classNodes, PPIr: 1}
    xyContribMapping = {"basic": helperFunc.xyContrib, "null": lambda node, classNodes, PPIr: 1}
    uvContribMapping = {"basic": helperFunc.uvContrib, "padded": helperFunc.uvContrib_padded, "null": lambda node, parentCNodes, peerCNodes, PPIr: 1}
    xySpecMapping = {"basic": helperFunc.xySpec_basic, "null": lambda node, classNodes, PPIr: 1}
    dualCNMapping = {"basic": helperFunc.dualCN, "null": lambda parentNode, childNode, PPIr: 1}
    uvJoinMapping = {"basic": True, "null": False}
def L3_normalization(PPIr, uvPair, normFunc):
    """Sum 1 / normFunc(deg(u) * deg(v)) over all candidate (u, v) pairs.

    Pairs whose normalised degree product is zero are skipped.
    """
    total = 0
    for u, v in uvPair:
        denom = normFunc(len(PPIr[u]) * len(PPIr[v]))
        if denom == 0:
            continue
        total += 1 / denom
    return total
def get_uv(x, y, PPIr, uvJoin=False):
    """Collect the (u, v) intermediate pairs of length-3 paths x-u-v-y.

    u ranges over neighbours of x and v over neighbours of y, keeping only
    pairs that are themselves connected (u in PPIr[v]).  Unless uvJoin is
    True, u and v are restricted to the exclusive neighbourhoods.

    Returns (uvPair, candidateUs, candidateVs).
    """
    neighX = PPIr[x]
    neighY = PPIr[y]
    if uvJoin:
        candidateUs, candidateVs = neighX, neighY
    else:
        # BUG FIX: the original subtracted sequentially, so candidateVs was
        # computed against the already-reduced candidateUs and therefore
        # kept the shared neighbours (V - (U - V) == V).  Both differences
        # must be taken against the original neighbour sets.
        candidateUs = neighX - neighY
        candidateVs = neighY - neighX
    uvPair = [[u, v] for u in candidateUs for v in candidateVs if u in PPIr[v]]
    return uvPair, candidateUs, candidateVs
def deserialize_args(scoreArgs):
    """Map a list of up to 7 option tags onto the scoring callables.

    Order consumed: normFunc, uvSpec, xySpec, xyContrib, uvContrib, dualCN,
    uvJoin.  Missing entries default to the "null" (no-op) variant.

    NOTE(review): callers build the list with uvContrib before xyContrib
    (see normOrder in multiCore_PPILinkPred) while indices 3/4 here are
    read as xyContrib then uvContrib -- confirm the intended ordering.
    """
    # BUG FIX: the list was padded to 6 entries although 7 options are
    # consumed, so a short scoreArgs left uvJoin as None instead of the
    # "null" mapping (False).
    scoreArgs = scoreArgs + ["null"] * (7 - len(scoreArgs))
    normFunc = ns.normFuncMapping[scoreArgs[0]]
    uvSpec = ns.uvSpecMapping[scoreArgs[1]]
    xySpec = ns.xySpecMapping[scoreArgs[2]]
    xyContrib = ns.xyContribMapping[scoreArgs[3]]
    uvContrib = ns.uvContribMapping[scoreArgs[4]]
    dualCN = ns.dualCNMapping[scoreArgs[5]]
    uvJoin = ns.uvJoinMapping[scoreArgs[6]]
    return normFunc, uvSpec, xySpec, xyContrib, uvContrib, dualCN, uvJoin
def interStr_Scoring(samplePPIr, nodeX, nodeY, scoringMethod, scoreArgs):
    """Interaction-strength style L3 score for the candidate edge (nodeX, nodeY).

    Combines per-pair (u, v) contribution/specificity/common-neighbour factors
    selected via scoreArgs (see deserialize_args) with the degree
    normalisation, then scales by the endpoint (x, y) factors.
    """
    normFunc, uvSpec, xySpec, xyContrib, uvContrib, dualCN, uvJoin = deserialize_args(scoreArgs)
    uvPair, candidateUs, candidateVs = get_uv(nodeX, nodeY, samplePPIr, uvJoin=uvJoin)
    # CONSISTENCY FIX: nodeVs was built as a plain list (a missing set());
    # harmless downstream, but now matches nodeUs and the Sim() helper.
    nodeUs, nodeVs = set(uv[0] for uv in uvPair), set(uv[1] for uv in uvPair)
    classU, classV = set(list(nodeUs)+[nodeY]), set(list(nodeVs)+[nodeX])
    score = 0
    for [u, v] in uvPair:
        term = uvContrib(u, set([nodeX]), classV, samplePPIr)*uvContrib(v, set([nodeY]), classU, samplePPIr)
        term *= uvSpec(v, classU, samplePPIr)*uvSpec(u, classV, samplePPIr)
        term *= dualCN(nodeX, v, samplePPIr)*dualCN(nodeY, u, samplePPIr)
        score += term*1/normFunc(len(samplePPIr[u])*len(samplePPIr[v]))
    score *= xyContrib(nodeX, classU, samplePPIr)*xyContrib(nodeY, classV, samplePPIr)
    score *= xySpec(nodeX, classU, samplePPIr)*xySpec(nodeY, classV, samplePPIr)
    return score
def Sim(samplePPIr, nodeX, nodeY, uvPair):
    """Endpoint-to-intermediate similarity: the sum of dual common-neighbour
    (Jaccard) overlaps between nodeX and every v, plus nodeY and every u."""
    headNodes = {pair[0] for pair in uvPair}
    tailNodes = {pair[1] for pair in uvPair}
    total = 0
    for tail in tailNodes:
        total += helperFunc.dualCN(nodeX, tail, samplePPIr)
    for head in headNodes:
        total += helperFunc.dualCN(nodeY, head, samplePPIr)
    return total
def L3_Scoring(samplePPIr, nodeX, nodeY, scoringMethod):
    """Dispatch the L3-family scores for the candidate edge (nodeX, nodeY).

    "L3Normalizing" uses exclusive u/v neighbourhoods; the other variants
    allow shared intermediates (uvJoin=True).
    """
    if scoringMethod == "L3Normalizing":
        pairs, _, _ = get_uv(nodeX, nodeY, samplePPIr)
        score = L3_normalization(samplePPIr, pairs, math.sqrt)
    elif scoringMethod == "L3uvJoin":
        pairs, _, _ = get_uv(nodeX, nodeY, samplePPIr, uvJoin=True)
        score = L3_normalization(samplePPIr, pairs, math.sqrt)
    elif scoringMethod == "L3Raw":
        pairs, _, _ = get_uv(nodeX, nodeY, samplePPIr, uvJoin=True)
        # degree normalisation disabled: every path contributes 1
        score = L3_normalization(samplePPIr, pairs, lambda _: 1)
    elif scoringMethod == "Sim":
        pairs, _, _ = get_uv(nodeX, nodeY, samplePPIr, uvJoin=True)
        score = Sim(samplePPIr, nodeX, nodeY, pairs)
    return score
def L2_Scoring(samplePPIr, nodeX, nodeY, scoringMethod):
    """Length-2 path score: the number of common neighbours of the endpoints."""
    if scoringMethod == "commonNeighbor":
        shared = samplePPIr[nodeX] & samplePPIr[nodeY]
        score = len(shared)
    return score
def CRA(samplePPIr, nodeX, nodeY):
    """Cannistraci resource-allocation score: for each common neighbour, the
    fraction of its degree that stays inside the common-neighbour set."""
    commonNeigh = samplePPIr[nodeX] & samplePPIr[nodeY]
    return sum(len(samplePPIr[z] & commonNeigh) / len(samplePPIr[z]) for z in commonNeigh)
def CAR(samplePPIr, nodeX, nodeY):
    """CAR score: |CN| times the number of links internal to the
    common-neighbour set (each internal link counted once via the /2)."""
    commonNeigh = samplePPIr[nodeX] & samplePPIr[nodeY]
    innerLinks = sum(len(samplePPIr[z] & commonNeigh) / 2 for z in commonNeigh)
    return len(commonNeigh) * innerLinks
def CH2_L3(samplePPIr, nodeX, nodeY):
    """CH2-L3 score: for every path-intermediate pair (u, v), the geometric
    mean of (1 + internal degree) divided by the geometric mean of
    (1 + external degree), summed over all pairs.

    `localCommunity` is the union of all intermediates; internal degree is
    counted inside it, external degree outside it (endpoints excluded).
    """
    uvPair, _, _ = get_uv(nodeX, nodeY, samplePPIr, uvJoin=True)
    U, V = set([uv[0] for uv in uvPair]), set([uv[1] for uv in uvPair])
    localCommunity = U|V
    score = 0
    for [u, v] in uvPair:
        numerator = math.sqrt((1+len(samplePPIr[u]&localCommunity))*(1+len(samplePPIr[v]&localCommunity)))
        denominator = math.sqrt((1+len(samplePPIr[u]-localCommunity-{nodeX, nodeY}))*(1+len(samplePPIr[v]-localCommunity-{nodeX, nodeY})))
        score += numerator/denominator
    return score
def CARBased_Scoring(samplePPIr, nodeX, nodeY, scoringMethod):
    """Dispatch the CAR-family scores (CRA, CAR, CH2_L3) for (nodeX, nodeY)."""
    if scoringMethod == 'CRA':
        score = CRA(samplePPIr, nodeX, nodeY)
    elif scoringMethod == 'CAR':
        score = CAR(samplePPIr, nodeX, nodeY)
    elif scoringMethod == "CH2_L3":
        score = CH2_L3(samplePPIr, nodeX, nodeY)
    return score
def _PPILinkPred(nodePairs, samplePPIr, scoringMethod, scoreArgs=[], logging=False):
    """Score every candidate (non-existing) pair in nodePairs with the
    requested method; returns (scores, predicted pairs) in matching order.

    NOTE(review): the mutable default `scoreArgs=[]` is shared across
    calls; it is only read here, so it is harmless as written.
    """
    scores, predictedPPIbrs = [], []
    count, lastCount, total, avgTime = 0, 0, len(nodePairs), 0
    for nodePair in nodePairs:
        startTime = time.time()
        # skip pairs that are already edges in the sampled network
        if nodePair[1] in samplePPIr[nodePair[0]]: continue
        nodeX, nodeY = nodePair[0], nodePair[1]
        if scoringMethod in ns.L3Scoring:
            score = L3_Scoring(samplePPIr, nodeX, nodeY, scoringMethod)
        elif scoringMethod in ns.L2Scoring:
            score = L2_Scoring(samplePPIr, nodeX, nodeY, scoringMethod)
        elif scoringMethod in ns.interStrScoring:
            score = interStr_Scoring(samplePPIr, nodeX, nodeY, scoringMethod, scoreArgs)
        elif scoringMethod in ns.CARBasedScoring:
            score = CARBased_Scoring(samplePPIr, nodeX, nodeY, scoringMethod)
        scores.append(score)
        if logging: count, lastCount, avgTime = helperFunc.logging(count, lastCount, total, avgTime, startTime)
        predictedPPIbrs.append(nodePair)
    return scores, predictedPPIbrs
def _multiCore_handler(args, iterable):
    """Worker entry: score the slice of node pairs assigned to worker
    `iterable` and push [predicted pairs, scores] onto the result queue."""
    nodePairs, splitStartIndex, splitEndIndex, samplePPIr, scoringMethod, scoreArgs, logging, PPIresQ = args
    workerSlice = nodePairs[splitStartIndex[iterable]:splitEndIndex[iterable]]
    scores, predictedPPIbrs = _PPILinkPred(workerSlice, samplePPIr, scoringMethod, scoreArgs, logging[iterable])
    PPIresQ.put([predictedPPIbrs, scores])
    return
def multiCore_PPILinkPred(samplePPIbr, scoringMethod, scoreArgsDict, coreNo, topNo=None, logging=False, nodePairs=None):
    """Fan node-pair scoring out over `coreNo` processes and return the top
    `topNo` (pairs, scores), sorted by score.

    NOTE(review): normOrder lists 'uvContrib' before 'xyContrib', but
    deserialize_args reads index 3 as xyContrib and index 4 as uvContrib --
    confirm the intended ordering before using non-"null" tags for either.
    """
    # @param scoreArgs: dict, assign the normalization functions (normFunc, uvSpec, xySpec, uvContrib, xyContrib, dualCN)
    normOrder = ['normFunc', 'uvSpec', 'xySpec', 'uvContrib', 'xyContrib', 'dualCN', 'uvJoin']
    scoreArgs = ['null' if normTag not in scoreArgsDict else scoreArgsDict[normTag] for normTag in normOrder]
    samplePPIr = ns.BRToRelat(ns.toDualBR(samplePPIbr), rSet=True)
    sampleNodes = ns.BRToNode(samplePPIbr)
    if nodePairs is None: nodePairs = list(combinations(sampleNodes, 2))
    # contiguous slices per worker; the last worker absorbs the remainder
    splitStartIndex = [i*math.floor(len(nodePairs)/coreNo) for i in range(0, coreNo)] # both splitting is correct
    splitEndIndex = [(i+1)*math.floor(len(nodePairs)/coreNo) if i != coreNo-1 else len(nodePairs) for i in range(0, coreNo)]
    mgr = Manager()
    PPIresQ = mgr.Queue()
    # only worker 0 logs progress
    if logging: logging = [True if i == 0 else False for i in range(coreNo)]
    else: logging = [False for i in range(coreNo)]
    args = (nodePairs, splitStartIndex, splitEndIndex, samplePPIr, scoringMethod, scoreArgs, logging, PPIresQ)
    func = partial(_multiCore_handler, args)
    with Pool(coreNo) as p:
        p.map(func, [i for i in range(coreNo)])
    if logging: print("\n")
    mergedScores, mergedPPIbrs = [], []
    PPIresL = [PPIresQ.get() for i in range(coreNo)]
    for [predictedPPIbr, scores] in PPIresL:
        mergedScores += scores
        mergedPPIbrs += predictedPPIbr
    sortedPPIbrs, sortedScores = hr.sort_key_val(mergedPPIbrs, mergedScores)
    if topNo is None: topNo = len(sortedPPIbrs)
    topPredPPIbrs = sortedPPIbrs[0:topNo]
    topScores = sortedScores[0:topNo]
    return topPredPPIbrs, topScores
def _multiCore_handler_shared(args, iterable):
    """Worker entry (shared-queue variant): pull one chunk of node pairs
    from the data queue, score it and push the result."""
    dataQ, samplePPIr, scoringMethod, scoreArgs, logFlags, resultQ = args
    chunk = dataQ.get()
    scores, predictedPPIbrs = _PPILinkPred(chunk, samplePPIr, scoringMethod, scoreArgs, logFlags[iterable])
    resultQ.put([predictedPPIbrs, scores])
    return
def multiCore_PPILinkPred_shared(samplePPIbr, scoringMethod, scoreArgsDict, coreNo, topNo=None, logging=False, nodePairs=None):
    """Same as multiCore_PPILinkPred, but the pre-split chunks are handed to
    workers through a shared Manager queue instead of index slicing.

    NOTE(review): normOrder lists 'uvContrib' before 'xyContrib', but
    deserialize_args reads index 3 as xyContrib and index 4 as uvContrib --
    confirm the intended ordering before using non-"null" tags for either.
    """
    # @param scoreArgs: dict, assign the normalization functions (normFunc, uvSpec, xySpec, uvContrib, xyContrib, dualCN)
    normOrder = ['normFunc', 'uvSpec', 'xySpec', 'uvContrib', 'xyContrib', 'dualCN', 'uvJoin']
    scoreArgs = ['null' if normTag not in scoreArgsDict else scoreArgsDict[normTag] for normTag in normOrder]
    samplePPIr = ns.BRToRelat(ns.toDualBR(samplePPIbr), rSet=True)
    sampleNodes = ns.BRToNode(samplePPIbr)
    if nodePairs is None: nodePairs = list(combinations(sampleNodes, 2))
    # contiguous slices per worker; the last worker absorbs the remainder
    splitStartIndex = [i*math.floor(len(nodePairs)/coreNo) for i in range(0, coreNo)] # both splitting is correct
    splitEndIndex = [(i+1)*math.floor(len(nodePairs)/coreNo) if i != coreNo-1 else len(nodePairs) for i in range(0, coreNo)]
    mgr, dataMgr = Manager(), Manager()
    PPIdataQ, PPIresQ = dataMgr.Queue(), mgr.Queue()
    # only worker 0 logs progress
    if logging: logging = [True if i == 0 else False for i in range(coreNo)]
    else: logging = [False for i in range(coreNo)]
    for i in range(len(splitStartIndex)):
        PPIdataQ.put(nodePairs[splitStartIndex[i]:splitEndIndex[i]])
    nodePairs = None  # release the merged list; chunks now live in the queue
    args = (PPIdataQ, samplePPIr, scoringMethod, scoreArgs, logging, PPIresQ)
    func = partial(_multiCore_handler_shared, args)
    with Pool(coreNo) as p:
        p.map(func, [i for i in range(coreNo)])
    if logging: print("\n")
    mergedScores, mergedPPIbrs = [], []
    PPIresL = [PPIresQ.get() for i in range(coreNo)]
    for [predictedPPIbr, scores] in PPIresL:
        mergedScores += scores
        mergedPPIbrs += predictedPPIbr
    sortedPPIbrs, sortedScores = hr.sort_key_val(mergedPPIbrs, mergedScores)
    if topNo is None: topNo = len(sortedPPIbrs)
    topPredPPIbrs = sortedPPIbrs[0:topNo]
    topScores = sortedScores[0:topNo]
    return topPredPPIbrs, topScores
def get_prec(fullPPIbr, topPredPPIbrs):
    """Overall precision: predicted pairs present in the (dualised) full PPI
    set, divided by the number of predictions."""
    # all input supposed to be monoBR
    prec = len(set(ns.arr_pStr(topPredPPIbrs))&set(ns.arr_pStr(ns.toDualBR(fullPPIbr))))/len(topPredPPIbrs)
    return prec
def get_sliding_prec(fullPPIbr, topPredPPIbrs, loopRange, logging):
    """Precision at every prediction cutoff in [loopRange[0], loopRange[1]),
    computed incrementally instead of re-intersecting per cutoff.

    NOTE(review): after processing index i the hit count includes i+1
    predictions but the denominator is i -- confirm the intended cutoff
    convention.
    """
    minI, maxI = loopRange[0], loopRange[1]
    fullPPIbr = set(ns.arr_pStr(ns.toDualBR(fullPPIbr)))
    precTop, precBot = len(set(ns.arr_pStr(topPredPPIbrs[0:minI]))&fullPPIbr), minI
    precTops = []
    precs = [precTop/precBot]
    count, lastCount, total, avgTime, startTime = minI+1, 0, maxI, 0, time.time()
    for i in range(minI+1, maxI):
        # a prediction counts as a hit in either orientation
        if ns.br_str(topPredPPIbrs[i]) in fullPPIbr or ns.br_str(topPredPPIbrs[i][::-1]) in fullPPIbr: precTop += 1
        precTops.append(precTop)
        if logging: count, lastCount, avgTime = helperFunc.logging(count, lastCount, total, avgTime, startTime)
    precTops = np.asarray(precTops)
    precBots = np.asarray([i for i in range(minI+1, maxI)])
    precs += list(np.divide(precTops, precBots))
    return precs
def get_rec(fullPPIbr, samplePPIbrs, topPredPPIbrs):
    """Overall recall: correctly predicted pairs over the relevant pairs
    (full PPI set minus the pairs already present in the sample)."""
    relevant = ns.pStr_arr(set(ns.arr_pStr(fullPPIbr))-set(ns.arr_pStr(ns.toDualBR(samplePPIbrs))))
    rec = len(set(ns.arr_pStr(ns.toDualBR(topPredPPIbrs)))&set(ns.arr_pStr(fullPPIbr)))/len(relevant)
    return rec
def get_sliding_rec(fullPPIbr, samplePPIbrs, topPredPPIbrs, loopRange, logging):
    """Recall at every prediction cutoff in [loopRange[0], loopRange[1]),
    computed incrementally; the denominator (relevant pairs missing from the
    sample) is fixed across cutoffs."""
    minI, maxI = loopRange[0], loopRange[1]
    relevant = set(ns.arr_pStr(fullPPIbr))-set(ns.arr_pStr(ns.toDualBR(samplePPIbrs)))
    fullPPIbr = set(ns.arr_pStr(ns.toDualBR(fullPPIbr)))
    recTop, recBot = len(set(ns.arr_pStr(topPredPPIbrs[0:minI]))&fullPPIbr), len(relevant)
    recTops = []
    recs = [recTop/recBot]
    count, lastCount, total, avgTime, startTime = minI+1, 0, maxI, 0, time.time()
    for i in range(minI+1, maxI):
        # a prediction counts as a hit in either orientation
        if ns.br_str(topPredPPIbrs[i]) in fullPPIbr or ns.br_str(topPredPPIbrs[i][::-1]) in fullPPIbr: recTop += 1
        recTops.append(recTop)
        if logging: count, lastCount, avgTime = helperFunc.logging(count, lastCount, total, avgTime, startTime)
    recTops = np.asarray(recTops)
    recBots = np.asarray([recBot for i in range(minI+1, maxI)])
    recs += list(np.divide(recTops, recBots))
    return recs
def precRecMap_handler(args, iterable):
    """Worker: build the sliding precision/recall curves for its tag share
    and push a partial {tag: {"prec": [...], "rec": [...]}} mapping."""
    (perTagNo, tags, predPPIbr, samplePPIbr, fullPPIbr, logging, resQ) = args
    verbose = logging[iterable]
    curves = {}
    for tagNo in perTagNo[iterable]:
        sweep = (1, len(predPPIbr[tagNo]))
        tag = tags[tagNo]
        curves[tag] = {}
        curves[tag]["prec"] = get_sliding_prec(fullPPIbr[tagNo], predPPIbr[tagNo], sweep, verbose)
        curves[tag]["rec"] = get_sliding_rec(fullPPIbr[tagNo], samplePPIbr[tagNo], predPPIbr[tagNo], sweep, verbose)
    resQ.put(curves)
    return
def precRecMap_multiCore(tags, predPPIbr, samplePPIbr, fullPPIbr, coreNo, logging=False):
    """Compute sliding precision/recall curves for every tag, fanned out
    round-robin over `coreNo` worker processes.

    Returns {tag: {"prec": [...], "rec": [...]}}.
    """
    # round-robin assignment of tag indices to workers
    tagNo, perTagNo = [i for i in range(len(tags))], [[] for i in range(coreNo)]
    for i in range(math.ceil(len(tags)/coreNo)):
        for j in range(coreNo*i, min(coreNo*(i+1), coreNo*i+(len(tags)-coreNo*i))):
            perTagNo[j-coreNo*i].append(tagNo[j])
    mgr = Manager()
    resQ = mgr.Queue()
    # only worker 0 logs progress
    if logging: logging = [True if i == 0 else False for i in range(coreNo)]
    else: logging = [False for i in range(coreNo)]
    args = (perTagNo, tags, predPPIbr, samplePPIbr, fullPPIbr, logging, resQ)
    func = partial(precRecMap_handler, args)
    with Pool(coreNo) as p:
        p.map(func, [i for i in range(coreNo)])
    precRecMap = {}
    for i in range(coreNo): precRecMap.update(resQ.get())
    return precRecMap
if __name__ == "__main__":
pass | [
"numpy.asarray",
"itertools.combinations",
"helper.sort_key_val",
"functools.partial",
"multiprocessing.Pool",
"multiprocessing.Manager",
"time.time",
"numpy.divide"
] | [((11364, 11373), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11371, 11373), False, 'from multiprocessing import Pool, Manager\n'), ((11650, 11683), 'functools.partial', 'partial', (['_multiCore_handler', 'args'], {}), '(_multiCore_handler, args)\n', (11657, 11683), False, 'from functools import partial\n'), ((12030, 12073), 'helper.sort_key_val', 'hr.sort_key_val', (['mergedPPIbrs', 'mergedScores'], {}), '(mergedPPIbrs, mergedScores)\n', (12045, 12073), True, 'import helper as hr\n'), ((13906, 13946), 'functools.partial', 'partial', (['_multiCore_handler_shared', 'args'], {}), '(_multiCore_handler_shared, args)\n', (13913, 13946), False, 'from functools import partial\n'), ((14293, 14336), 'helper.sort_key_val', 'hr.sort_key_val', (['mergedPPIbrs', 'mergedScores'], {}), '(mergedPPIbrs, mergedScores)\n', (14308, 14336), True, 'import helper as hr\n'), ((15402, 15422), 'numpy.asarray', 'np.asarray', (['precTops'], {}), '(precTops)\n', (15412, 15422), True, 'import numpy as np\n'), ((16613, 16632), 'numpy.asarray', 'np.asarray', (['recTops'], {}), '(recTops)\n', (16623, 16632), True, 'import numpy as np\n'), ((17735, 17744), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (17742, 17744), False, 'from multiprocessing import Pool, Manager\n'), ((17985, 18018), 'functools.partial', 'partial', (['precRecMap_handler', 'args'], {}), '(precRecMap_handler, args)\n', (17992, 18018), False, 'from functools import partial\n'), ((9231, 9242), 'time.time', 'time.time', ([], {}), '()\n', (9240, 9242), False, 'import math, time\n'), ((11693, 11705), 'multiprocessing.Pool', 'Pool', (['coreNo'], {}), '(coreNo)\n', (11697, 11705), False, 'from multiprocessing import Pool, Manager\n'), ((13481, 13490), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (13488, 13490), False, 'from multiprocessing import Pool, Manager\n'), ((13492, 13501), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (13499, 13501), False, 'from multiprocessing import Pool, 
Manager\n'), ((13956, 13968), 'multiprocessing.Pool', 'Pool', (['coreNo'], {}), '(coreNo)\n', (13960, 13968), False, 'from multiprocessing import Pool, Manager\n'), ((15080, 15091), 'time.time', 'time.time', ([], {}), '()\n', (15089, 15091), False, 'import math, time\n'), ((15501, 15530), 'numpy.divide', 'np.divide', (['precTops', 'precBots'], {}), '(precTops, precBots)\n', (15510, 15530), True, 'import numpy as np\n'), ((16295, 16306), 'time.time', 'time.time', ([], {}), '()\n', (16304, 16306), False, 'import math, time\n'), ((16714, 16741), 'numpy.divide', 'np.divide', (['recTops', 'recBots'], {}), '(recTops, recBots)\n', (16723, 16741), True, 'import numpy as np\n'), ((18028, 18040), 'multiprocessing.Pool', 'Pool', (['coreNo'], {}), '(coreNo)\n', (18032, 18040), False, 'from multiprocessing import Pool, Manager\n'), ((11084, 11112), 'itertools.combinations', 'combinations', (['sampleNodes', '(2)'], {}), '(sampleNodes, 2)\n', (11096, 11112), False, 'from itertools import combinations\n'), ((13192, 13220), 'itertools.combinations', 'combinations', (['sampleNodes', '(2)'], {}), '(sampleNodes, 2)\n', (13204, 13220), False, 'from itertools import combinations\n'), ((2490, 2501), 'time.time', 'time.time', ([], {}), '()\n', (2499, 2501), False, 'import math, time\n'), ((2556, 2567), 'time.time', 'time.time', ([], {}), '()\n', (2565, 2567), False, 'import math, time\n')] |
from matplotlib import animation
import math
import matplotlib.pyplot as plt
import numpy as np
def distance(point1: tuple, point2: tuple) -> float:
    """Return the Euclidean distance between two same-dimension points.

    (Fixed the return annotation: the function yields a float, not a bool.)
    """
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(point1, point2)))
class TwoPointsAverageDistanceEstimation:
    """Monte Carlo estimator of the mean distance between two uniformly
    random points inside a rectangle, rendered through a Display."""
    def __init__(self, display: "Display"):
        self.display = display
    @staticmethod
    def expected_value(bottom_left_corner, top_right_corner):
        # Closed-form mean distance for a width x height rectangle, computed
        # on the normalised 1 x (h/w) rectangle and scaled back by width.
        width, height = top_right_corner[ 0 ] - bottom_left_corner[ 0 ], top_right_corner[ 1 ] - bottom_left_corner[ 1 ]
        # the rectangle will have sides equal to 1 and h and solution will be scaled
        h = height / width
        d = math.sqrt(1 + h * h) # diameter of the rectangle
        return round(width * (2 + 2 * h ** 5 - 2 * d + 6 * h * h * d - 2 * h ** 4 * d + 5 * h * math.log(h + d) + 5 * h ** 4 * math.log((1 + d) / h)) / (30 * h * h), 4)
    @property
    def average_distance(self):
        # Running mean of all sampled pairwise distances so far.
        return self.distance_sum / self.cnt_points
    def init(self):
        # Reset the running accumulators before a new estimation run.
        self.distance_sum = 0
        self.cnt_points = 0
    def step(self):
        # Draw pairs_per_iteration pairs of 2-D points uniformly in the rectangle.
        return np.random.uniform(self.display.bottom_left_corner, self.display.top_right_corner, (self.display.pairs_per_iteration, 2, 2))
    def display_step(self, i: int):
        # Sample one batch, fold it into the running mean, hand it to the display.
        pair_of_points = self.step()
        self.distance_sum += sum(distance(point1, point2) for point1, point2 in pair_of_points)
        self.cnt_points += len(pair_of_points)
        self.display.display_step(self, i, pair_of_points)
    def estimate(self):
        self.init()
        self.display.estimate(self.display_step)
class Display:
    """Configuration base for an estimation run: iteration count, pairs
    drawn per iteration, and the corners of the sampling rectangle."""
    def __init__(self, iterations: int, pairs_per_iteration: int, bottom_left_corner: tuple, top_right_corner: tuple):
        self.iterations, self.pairs_per_iteration = iterations, pairs_per_iteration
        # opposite corners of the rectangle the points are drawn from
        self.bottom_left_corner, self.top_right_corner = bottom_left_corner, top_right_corner
class GraphicDisplay(Display):
    """Matplotlib front-end: animates the sampled pairs next to the running
    average-distance curve (with the analytic expectation as a dashed line)."""
    def __init__(self, iterations: int, pairs_per_iteration: int, bottom_left_corner: tuple, top_right_corner: tuple):
        super(GraphicDisplay, self).__init__(iterations, pairs_per_iteration, bottom_left_corner, top_right_corner)
    def init(self):
        # Left subplot: sampled pairs; right subplot: estimate vs. iteration.
        self.fig, (self.monte_carlo_graph, self.average_distance_estimation_graph) = plt.subplots(1, 2)
        self.fig.canvas.manager.set_window_title("Two Points Average Distance Estimation")
        self.clear()
        self.monte_carlo_graph.set_xlabel(f"I = {self.iterations} ; N = {self.pairs_per_iteration}")
        self.average_distance_estimation_graph.set_xlim(1, self.iterations)
        self.average_distance_estimation_graph.set_ylim(0, distance(self.bottom_left_corner, self.top_right_corner))
        self.average_distance_estimation_graph.set_title("Average Distance Estimation")
        self.average_distance_estimation_graph.set_xlabel(f"Iteration")
        expected_value = TwoPointsAverageDistanceEstimation.expected_value(self.bottom_left_corner, self.top_right_corner)
        self.average_distance_estimation_graph.set_ylabel(f"Average Distance Estimation\n(E = {expected_value})")
        # dashed reference line at the analytic expected value
        self.average_distance_estimation_graph.axhline(y = expected_value, color = "red", linestyle = "--")
        self.fig.tight_layout()
        self.estimations = [ [], [] ]  # x (iteration) and y (estimate) series
    def clear(self):
        # Wipe the scatter panel and restore its fixed axes and aspect ratio.
        self.monte_carlo_graph.cla()
        self.monte_carlo_graph.set_xlim(self.bottom_left_corner[ 0 ], self.top_right_corner[ 0 ])
        self.monte_carlo_graph.set_ylim(self.bottom_left_corner[ 1 ], self.top_right_corner[ 1 ])
        self.monte_carlo_graph.set_aspect("equal")
    def display_step(self, obj: TwoPointsAverageDistanceEstimation, i: int, pair_of_points: list):
        # Redraw the current batch and append the running estimate to the curve.
        self.clear()
        self.monte_carlo_graph.set_title(f"Iteration {i + 1}\nAverage distance estimation: {obj.average_distance:.4f}")
        for point1, point2 in pair_of_points:
            self.monte_carlo_graph.plot(*list(zip(point1, point2)), color = "blue", linestyle = '-', pickradius = 1, marker = '.')
        self.estimations[ 0 ].append(i + 1)
        self.estimations[ 1 ].append(obj.average_distance)
        self.average_distance_estimation_graph.plot(*self.estimations, color = "blue", linestyle = "solid", marker = '')
    def estimate(self, func: "Function"):
        self.init()
        anim = animation.FuncAnimation(self.fig, func, frames = self.iterations, init_func = lambda: None, repeat = False)
        # anim.save("demo.gif")
        plt.show()
class ConsoleDisplay(Display):
    """Headless display: optionally prints the running estimate each iteration."""
    def __init__(self, iterations: int, pairs_per_iteration: int, bottom_left_corner: tuple, top_right_corner: tuple, *, log: bool = True):
        super(ConsoleDisplay, self).__init__(iterations, pairs_per_iteration, bottom_left_corner, top_right_corner)
        self.log = log
    def display_step(self, obj: TwoPointsAverageDistanceEstimation, i: int, pair_of_points: list):
        # pair_of_points is ignored; only the running average is reported.
        if self.log:
            print(f"Iteration {i + 1} - Average distance estimation: {obj.average_distance:.4f}")
    def estimate(self, func: "Function"):
        # Plain loop; no animation machinery needed for console output.
        for i in range(self.iterations):
            func(i)
# Demo: animate 100 iterations of 1000 point pairs on the unit square.
if __name__ == "__main__":
    graph = TwoPointsAverageDistanceEstimation(
        display = GraphicDisplay(
            iterations = 100,
            pairs_per_iteration = 1000,
            bottom_left_corner = (0, 0),
            top_right_corner = (1, 1)
        )
    )
    graph.estimate()
    # Alternative headless run kept for reference:
    # average_distance_estimation = TwoPointsAverageDistanceEstimation(
    #     display = ConsoleDisplay(
    #         iterations = 1000,
    #         pairs_per_iteration = 1000,
    #         bottom_left_corner = (0, 0),
    #         top_right_corner = (1, 1),
    #         log = False
    #     )
    # )
    # average_distance_sum, trials = 0, 1
    # for i in range(trials):
    #     average_distance_estimation.estimate()
    #     print(f"Iteration {i + 1} - Average distance estimation: {average_distance_estimation.average_distance:.4f}")
    #     average_distance_sum += average_distance_estimation.average_distance
    # print(f"After {trials} iterations, by the Law of Large Numbers the average distance between two points in the rectangle" \
    #       f" [{average_distance_estimation.display.bottom_left_corner[ 0 ]},{average_distance_estimation.display.top_right_corner[ 0 ]}]" \
    #       f"x[{average_distance_estimation.display.bottom_left_corner[ 1 ]},{average_distance_estimation.display.top_right_corner[ 1 ]}]" \
    #       f" estimates to: {average_distance_sum / trials:.4f}")
"matplotlib.animation.FuncAnimation",
"math.sqrt",
"math.log",
"numpy.random.uniform",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((625, 645), 'math.sqrt', 'math.sqrt', (['(1 + h * h)'], {}), '(1 + h * h)\n', (634, 645), False, 'import math\n'), ((1014, 1142), 'numpy.random.uniform', 'np.random.uniform', (['self.display.bottom_left_corner', 'self.display.top_right_corner', '(self.display.pairs_per_iteration, 2, 2)'], {}), '(self.display.bottom_left_corner, self.display.\n top_right_corner, (self.display.pairs_per_iteration, 2, 2))\n', (1031, 1142), True, 'import numpy as np\n'), ((2123, 2141), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2135, 2141), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4098), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'func'], {'frames': 'self.iterations', 'init_func': '(lambda : None)', 'repeat': '(False)'}), '(self.fig, func, frames=self.iterations, init_func=\n lambda : None, repeat=False)\n', (4014, 4098), False, 'from matplotlib import animation\n'), ((4127, 4137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4135, 4137), True, 'import matplotlib.pyplot as plt\n'), ((795, 816), 'math.log', 'math.log', (['((1 + d) / h)'], {}), '((1 + d) / h)\n', (803, 816), False, 'import math\n'), ((764, 779), 'math.log', 'math.log', (['(h + d)'], {}), '(h + d)\n', (772, 779), False, 'import math\n')] |
import streamlit as st
import tensorflow
import numpy as np
# Fix the NumPy RNG seed so training-data shuffling/weight init are reproducible.
np.random.seed(4)
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
import datetime as dt
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from sklearn.preprocessing import StandardScaler
import streamlit.components.v1 as components
import time
#
# Helper functions
#
def graficar_predicciones(real, prediccion):
    """Plot the actual series against the forecast on one figure.

    Only matplotlib side effects; returns None. The actual series is
    truncated to the forecast length so both curves share the x-axis.
    """
    plt.figure(figsize=(20, 8))
    n = len(prediccion)
    plt.plot(real[:n], color='m', label='Valor real de la acción')
    plt.plot(prediccion, color='blue', label='Predicción de la acción')
    # y-limits derived from the forecast range, with some headroom.
    lower = 1.1 * np.min(prediccion) / 2
    upper = 1.1 * np.max(prediccion)
    plt.ylim(lower, upper)
    plt.xlabel('Tiempo')
    plt.ylabel('kWh')
    plt.legend()
    plt.grid(True)
    plt.show()
#
# Read the data
#
# Excel file with a datetime index; 'Fecha' (date) column is parsed as the index.
dataset = pd.read_excel('HLAD_ORLIST2020.xlsx',index_col='Fecha', parse_dates=['Fecha'])
dataset.head()
#dataset['2019-12-26 00:00:00':].plot(figsize=(10,5))
# Training window: 2019-09-01 .. 2019-12-25; validation: 2019-12-25 onward.
# Only the first column (the load series) is kept.
set_entrenamiento = dataset['2019-09-01 00:00:00':'2019-12-25 00:00:00'].iloc[:,0:1]
set_validacion = dataset['2019-12-25 00:00:00':].iloc[:,0:1]
set_entrenamiento['Carga (MW)'].plot(legend=True,figsize=(14, 5))
set_validacion['Carga (MW)'].plot(legend=True,figsize=(14, 5))
# Vertical markers delimiting the train/validation regions on the plot.
plt.axvline(x = '2019-12-23 00:00:00', color='c', linewidth=2, linestyle='--')
plt.axvline(x = '2020-01-01 00:00:00', color='r', linewidth=2, linestyle='--')
plt.grid(True)
plt.legend(['Datos de aprendizaje de 2019-10-30 23:00:00 a 2019-12-20 00:00:00', 'Comprobacion 2019-12-20 00:00:00 a 2020-01-8 00:00:00' ])
plt.show()
# Scale the training set to the [0, 1] range
sc = MinMaxScaler(feature_range=(0,1))
set_entrenamiento_escalado = sc.fit_transform(set_entrenamiento)
# The LSTM takes "time_step" consecutive samples as input and predicts the next
# one; build the supervised (X, Y) training set accordingly.
time_step = 30
X_train = []
Y_train = []
m = len(set_entrenamiento_escalado)
for i in range(time_step,m):
    # X: blocks of "time_step" samples: 0-time_step, 1-time_step+1, 2-time_step+2, etc.
    X_train.append(set_entrenamiento_escalado[i-time_step:i,0])
    # Y: the sample that follows each block
    Y_train.append(set_entrenamiento_escalado[i,0])
X_train, Y_train = np.array(X_train), np.array(Y_train)
# Reshape X_train to the (samples, time steps, features) layout Keras expects
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
#
# LSTM network
#
dim_entrada = (X_train.shape[1],1)
dim_salida = 1
na = 80
modelo = Sequential()
modelo.add(LSTM(units=na, input_shape=dim_entrada))
modelo.add(Dense(units=dim_salida))
modelo.compile(optimizer='rmsprop', loss='mse')
modelo.fit(X_train,Y_train,epochs=30,batch_size=32)
#
# Validation (forecasting the series)
#
x_test = set_validacion.values
x_test = sc.transform(x_test)
# Build sliding windows over the validation set, same layout as training.
X_test = []
for i in range(time_step,len(x_test)):
    X_test.append(x_test[i-time_step:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
prediccion = modelo.predict(X_test)
# Undo the MinMax scaling so predictions are back in the original units.
prediccion = sc.inverse_transform(prediccion)
# Plot results
plt.figure(figsize=(14, 5))
plt.plot(prediccion, color='b', label='Predicción de la acción',linewidth=1.5)
plt.plot(set_validacion.values, color='m', label='Predicción de la acción',linewidth=1.5)
plt.tick_params(labelsize = 10)
plt.grid(True)
plt.title('daily sale graph test_id=505 ',fontdict={'fontsize':30})
modelo.summary()
# NOTE(review): this second title call overwrites the one set just above —
# probably only one of the two is intended.
plt.title('Predicciones de Demanda Electrica [MWh]', family='Arial', fontsize=12)
plt.xlabel('Tiempo', family='Arial', fontsize=10)
plt.ylabel('Carga Electrica [MWh]', family='Arial', fontsize=10)
plt.xticks(rotation=45, fontsize=8)
plt.grid(True)
plt.show()
modelo.summary()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"tensorflow.keras.layers.Dense",
"pandas.read_excel",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"tensorflow.keras.models.Sequential",
"sklearn.prep... | [((64, 81), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (78, 81), True, 'import numpy as np\n'), ((1036, 1115), 'pandas.read_excel', 'pd.read_excel', (['"""HLAD_ORLIST2020.xlsx"""'], {'index_col': '"""Fecha"""', 'parse_dates': "['Fecha']"}), "('HLAD_ORLIST2020.xlsx', index_col='Fecha', parse_dates=['Fecha'])\n", (1049, 1115), True, 'import pandas as pd\n'), ((1470, 1546), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '"""2019-12-23 00:00:00"""', 'color': '"""c"""', 'linewidth': '(2)', 'linestyle': '"""--"""'}), "(x='2019-12-23 00:00:00', color='c', linewidth=2, linestyle='--')\n", (1481, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1626), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '"""2020-01-01 00:00:00"""', 'color': '"""r"""', 'linewidth': '(2)', 'linestyle': '"""--"""'}), "(x='2020-01-01 00:00:00', color='r', linewidth=2, linestyle='--')\n", (1561, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1644), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1638, 1644), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1793), 'matplotlib.pyplot.legend', 'plt.legend', (["['Datos de aprendizaje de 2019-10-30 23:00:00 a 2019-12-20 00:00:00',\n 'Comprobacion 2019-12-20 00:00:00 a 2020-01-8 00:00:00']"], {}), "([\n 'Datos de aprendizaje de 2019-10-30 23:00:00 a 2019-12-20 00:00:00',\n 'Comprobacion 2019-12-20 00:00:00 a 2020-01-8 00:00:00'])\n", (1656, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1795, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1883), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1861, 1883), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2634, 2694), 'numpy.reshape', 'np.reshape', (['X_train', '(X_train.shape[0], X_train.shape[1], 1)'], {}), '(X_train, (X_train.shape[0], 
X_train.shape[1], 1))\n', (2644, 2694), True, 'import numpy as np\n'), ((2790, 2802), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2800, 2802), False, 'from tensorflow.keras.models import Sequential\n'), ((3230, 3246), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (3238, 3246), True, 'import numpy as np\n'), ((3257, 3314), 'numpy.reshape', 'np.reshape', (['X_test', '(X_test.shape[0], X_test.shape[1], 1)'], {}), '(X_test, (X_test.shape[0], X_test.shape[1], 1))\n', (3267, 3314), True, 'import numpy as np\n'), ((3425, 3452), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (3435, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3454, 3533), 'matplotlib.pyplot.plot', 'plt.plot', (['prediccion'], {'color': '"""b"""', 'label': '"""Predicción de la acción"""', 'linewidth': '(1.5)'}), "(prediccion, color='b', label='Predicción de la acción', linewidth=1.5)\n", (3462, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3628), 'matplotlib.pyplot.plot', 'plt.plot', (['set_validacion.values'], {'color': '"""m"""', 'label': '"""Predicción de la acción"""', 'linewidth': '(1.5)'}), "(set_validacion.values, color='m', label='Predicción de la acción',\n linewidth=1.5)\n", (3542, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3654), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(10)'}), '(labelsize=10)\n', (3640, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3658, 3672), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3666, 3672), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3743), 'matplotlib.pyplot.title', 'plt.title', (['"""daily sale graph test_id=505 """'], {'fontdict': "{'fontsize': 30}"}), "('daily sale graph test_id=505 ', fontdict={'fontsize': 30})\n", (3683, 3743), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3848), 'matplotlib.pyplot.title', 'plt.title', (['"""Predicciones de Demanda Electrica 
[MWh]"""'], {'family': '"""Arial"""', 'fontsize': '(12)'}), "('Predicciones de Demanda Electrica [MWh]', family='Arial',\n fontsize=12)\n", (3772, 3848), True, 'import matplotlib.pyplot as plt\n'), ((3846, 3895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tiempo"""'], {'family': '"""Arial"""', 'fontsize': '(10)'}), "('Tiempo', family='Arial', fontsize=10)\n", (3856, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3961), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Carga Electrica [MWh]"""'], {'family': '"""Arial"""', 'fontsize': '(10)'}), "('Carga Electrica [MWh]', family='Arial', fontsize=10)\n", (3907, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3963, 3998), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'fontsize': '(8)'}), '(rotation=45, fontsize=8)\n', (3973, 3998), True, 'import matplotlib.pyplot as plt\n'), ((4000, 4014), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4008, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4024, 4026), True, 'import matplotlib.pyplot as plt\n'), ((634, 661), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (644, 661), True, 'import matplotlib.pyplot as plt\n'), ((748, 815), 'matplotlib.pyplot.plot', 'plt.plot', (['prediccion'], {'color': '"""blue"""', 'label': '"""Predicción de la acción"""'}), "(prediccion, color='blue', label='Predicción de la acción')\n", (756, 815), True, 'import matplotlib.pyplot as plt\n'), ((889, 909), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tiempo"""'], {}), "('Tiempo')\n", (899, 909), True, 'import matplotlib.pyplot as plt\n'), ((915, 932), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""kWh"""'], {}), "('kWh')\n", (925, 932), True, 'import matplotlib.pyplot as plt\n'), ((938, 950), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (948, 950), True, 'import matplotlib.pyplot as plt\n'), ((956, 
970), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (964, 970), True, 'import matplotlib.pyplot as plt\n'), ((976, 986), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (984, 986), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2544), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2535, 2544), True, 'import numpy as np\n'), ((2546, 2563), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (2554, 2563), True, 'import numpy as np\n'), ((2815, 2854), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'na', 'input_shape': 'dim_entrada'}), '(units=na, input_shape=dim_entrada)\n', (2819, 2854), False, 'from tensorflow.keras.layers import Dense, LSTM\n'), ((2868, 2891), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'dim_salida'}), '(units=dim_salida)\n', (2873, 2891), False, 'from tensorflow.keras.layers import Dense, LSTM\n'), ((864, 882), 'numpy.max', 'np.max', (['prediccion'], {}), '(prediccion)\n', (870, 882), True, 'import numpy as np\n'), ((836, 854), 'numpy.min', 'np.min', (['prediccion'], {}), '(prediccion)\n', (842, 854), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import loco
import tinymath as tm
import numpy as np
import gc
def test_null_visualizer_functionality():
    """Smoke-test the NullVisualizer: scenario setup, camera/light creation,
    lifecycle calls, and read-back of mutated camera/light state."""
    # Capsule shape shared by the visual and collision descriptions.
    visual = loco.sim.VisualData()
    visual.type = loco.sim.ShapeType.CAPSULE
    visual.size = [0.1, 0.2, 0.1]
    collision = loco.sim.CollisionData()
    collision.type = loco.sim.ShapeType.CAPSULE
    collision.size = [0.1, 0.2, 0.1]
    body_data = loco.sim.BodyData()
    body_data.dyntype = loco.sim.DynamicsType.DYNAMIC
    body_data.collision = collision
    body_data.visual = visual
    body = loco.sim.SingleBody('body_0', body_data, [1.0, 1.0, 1.0], np.identity(3))
    scenario = loco.sim.Scenario()
    scenario.AddSingleBody(body)
    visualizer = loco.sim.NullVisualizer(scenario)
    # Lookups must miss before the camera/light have been created.
    assert (visualizer.GetCameraByName('cam_orbit_0') == None)
    assert (visualizer.GetLightByName('light_point_0') == None)
    camera = visualizer.CreateCamera('cam_orbit_0',
                                    loco.sim.VizCameraType.ORBIT,
                                    [3.0, 3.0, 3.0],
                                    [0.0, 0.0, 0.0])
    light = visualizer.CreateLight('light_point_0',
                                  loco.sim.VizLightType.POINT,
                                  [0.4, 0.4, 0.4],
                                  [0.8, 0.8, 0.8],
                                  [0.8, 0.8, 0.8])
    # Lifecycle calls should not raise.
    visualizer.Initialize()
    visualizer.Update()
    visualizer.Reset()
    # Mutate state and confirm the changes are visible through by-name lookups.
    camera.position = [5.0, 5.0, 5.0]
    camera.target = [0.0, 0.0, 1.0]
    light.position = [0.0, 0.0, 7.0]
    light.intensity = 0.9
    assert (visualizer.HasCameraNamed('cam_orbit_0') == True)
    assert (visualizer.HasLightNamed('light_point_0') == True)
    assert (visualizer.GetCameraByName('cam_orbit_0') != None)
    assert (visualizer.GetLightByName('light_point_0') != None)
    assert np.allclose(visualizer.GetCameraByName('cam_orbit_0').position, camera.position)
    assert np.allclose(visualizer.GetLightByName('light_point_0').ambient, light.ambient)
    assert (camera.type == loco.sim.VizCameraType.ORBIT)
    assert np.allclose(camera.position, [5.0, 5.0, 5.0])
    assert np.allclose(camera.target, [0.0, 0.0, 1.0])
    assert (light.type == loco.sim.VizLightType.POINT)
    assert np.allclose(light.position, [0.0, 0.0, 7.0])
    assert np.allclose(light.ambient, [0.4, 0.4, 0.4])
    assert np.allclose(light.diffuse, [0.8, 0.8, 0.8])
    assert np.allclose(light.specular, [0.8, 0.8, 0.8])
    assert np.allclose(light.intensity, 0.9)
    # Drop references explicitly before the test returns (the module imports gc
    # for manual cleanup).
    del visualizer
    del scenario
if __name__ == '__main__' :
    # Interactive pause before running — NOTE(review): presumably to allow
    # attaching external tools/observing memory before the test; confirm.
    _ = input( 'Press ENTER to start test : test_null_visualizer_functionality' )
    test_null_visualizer_functionality()
_ = input( 'Press ENTER to continue ...' ) | [
"numpy.identity",
"loco.sim.BodyData",
"numpy.allclose",
"loco.sim.CollisionData",
"loco.sim.NullVisualizer",
"loco.sim.Scenario",
"loco.sim.VisualData"
] | [((145, 166), 'loco.sim.VisualData', 'loco.sim.VisualData', ([], {}), '()\n', (164, 166), False, 'import loco\n'), ((267, 291), 'loco.sim.CollisionData', 'loco.sim.CollisionData', ([], {}), '()\n', (289, 291), False, 'import loco\n'), ((394, 413), 'loco.sim.BodyData', 'loco.sim.BodyData', ([], {}), '()\n', (411, 413), False, 'import loco\n'), ((646, 665), 'loco.sim.Scenario', 'loco.sim.Scenario', ([], {}), '()\n', (663, 665), False, 'import loco\n'), ((723, 756), 'loco.sim.NullVisualizer', 'loco.sim.NullVisualizer', (['scenario'], {}), '(scenario)\n', (746, 756), False, 'import loco\n'), ((2165, 2210), 'numpy.allclose', 'np.allclose', (['camera.position', '[5.0, 5.0, 5.0]'], {}), '(camera.position, [5.0, 5.0, 5.0])\n', (2176, 2210), True, 'import numpy as np\n'), ((2226, 2269), 'numpy.allclose', 'np.allclose', (['camera.target', '[0.0, 0.0, 1.0]'], {}), '(camera.target, [0.0, 0.0, 1.0])\n', (2237, 2269), True, 'import numpy as np\n'), ((2343, 2387), 'numpy.allclose', 'np.allclose', (['light.position', '[0.0, 0.0, 7.0]'], {}), '(light.position, [0.0, 0.0, 7.0])\n', (2354, 2387), True, 'import numpy as np\n'), ((2403, 2446), 'numpy.allclose', 'np.allclose', (['light.ambient', '[0.4, 0.4, 0.4]'], {}), '(light.ambient, [0.4, 0.4, 0.4])\n', (2414, 2446), True, 'import numpy as np\n'), ((2462, 2505), 'numpy.allclose', 'np.allclose', (['light.diffuse', '[0.8, 0.8, 0.8]'], {}), '(light.diffuse, [0.8, 0.8, 0.8])\n', (2473, 2505), True, 'import numpy as np\n'), ((2521, 2565), 'numpy.allclose', 'np.allclose', (['light.specular', '[0.8, 0.8, 0.8]'], {}), '(light.specular, [0.8, 0.8, 0.8])\n', (2532, 2565), True, 'import numpy as np\n'), ((2581, 2614), 'numpy.allclose', 'np.allclose', (['light.intensity', '(0.9)'], {}), '(light.intensity, 0.9)\n', (2592, 2614), True, 'import numpy as np\n'), ((612, 626), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (623, 626), True, 'import numpy as np\n')] |
#This boring example just shows that using SigJoin and Sig
#you get the same signatures and derivatives.
import os
# Alternative Theano configurations, kept for reference:
#os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,optimizer=fast_compile"
#os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,mode=DebugMode"
# Must be set before `import theano` so the flags take effect.
os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu"
import theano, numpy, sys
import six.moves
#add the parent directory, so we find our iisignature build if it was built --inplace
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import iisignature
from iisignature_theano import Sig, SigJoin
import theano.tensor as T
#1: SETUP VARIABLES
dim=3          # path dimension
level=6        # signature truncation level
pathlength=4   # number of points in the path
# fixed: NaN would mean "no fixed last coordinate"; it is immediately
# overridden to 0.1, so the fixed-coordinate branches below are exercised.
fixed = float("nan")
fixed = 0.1
numpy.random.seed(51)
start = numpy.random.uniform(size=(pathlength,dim)).astype("float32")
#2: DEFINE THEANO STUFF
path = theano.shared(start, "path")
if not numpy.isnan(fixed):
    # Replace the last coordinate with a fixed, evenly spaced channel
    # scaled by the shared variable fixedT.
    fixedT = theano.shared(fixed, "fixedT")
    path=theano.tensor.horizontal_stack(path[:,:-1],fixedT*T.shape_padright(T.arange(pathlength)))
# cost1/grad1: signature computed in one shot with Sig.
cost1 = theano.tensor.mean(theano.tensor.sqr(Sig(path,level)))
grad1 = theano.grad(cost1,path)
# cost2/grad2: the same signature built incrementally, one path segment at a
# time, with SigJoin; the two builds should agree in values and derivatives.
signature = numpy.zeros((1,iisignature.siglength(dim,level))).astype("float32")
for i in six.moves.xrange(1,pathlength):
    if not numpy.isnan(fixed):
        displacement = path[i:(i+1),:-1]-path[(i-1):i,:-1]
        signature = SigJoin(signature,displacement,level,fixedT)
    else:
        displacement = path[i:(i+1),:]-path[(i-1):i,:]
        signature = SigJoin(signature,displacement,level)
cost2 = theano.tensor.mean(theano.tensor.sqr(signature))
grad2 = theano.grad(cost2,path)
#theano.printing.pydotprint(grad2,outfile="a.png")
ff = theano.function([],[grad1,grad2])
#theano.printing.pydotprint(ff,outfile="b.png")
#3: GO
numpy.set_printoptions(suppress=True)
f = ff()
# The two gradients should agree: print their difference and each of them.
if numpy.isnan(fixed):
    print (f[0]-f[1])
    print (f[0])
    print (f[1])
else:
    # With a fixed last coordinate, only the free coordinates are comparable.
    print ((f[0]-f[1])[:,:-1])
    print (f[0][:,:-1])
    print (f[1][:,:-1])
# Gradients of both costs w.r.t. the fixed scale should also agree.
ff2 = theano.function([],[theano.grad(cost1,fixedT),theano.grad(cost2,fixedT)])()
print(ff2)
| [
"iisignature_theano.SigJoin",
"theano.shared",
"theano.function",
"iisignature_theano.Sig",
"iisignature.siglength",
"theano.tensor.sqr",
"os.path.dirname",
"theano.tensor.arange",
"numpy.isnan",
"numpy.random.seed",
"numpy.random.uniform",
"theano.grad",
"numpy.set_printoptions"
] | [((702, 723), 'numpy.random.seed', 'numpy.random.seed', (['(51)'], {}), '(51)\n', (719, 723), False, 'import theano, numpy, sys\n'), ((827, 855), 'theano.shared', 'theano.shared', (['start', '"""path"""'], {}), "(start, 'path')\n", (840, 855), False, 'import theano, numpy, sys\n'), ((1097, 1121), 'theano.grad', 'theano.grad', (['cost1', 'path'], {}), '(cost1, path)\n', (1108, 1121), False, 'import theano, numpy, sys\n'), ((1587, 1611), 'theano.grad', 'theano.grad', (['cost2', 'path'], {}), '(cost2, path)\n', (1598, 1611), False, 'import theano, numpy, sys\n'), ((1669, 1704), 'theano.function', 'theano.function', (['[]', '[grad1, grad2]'], {}), '([], [grad1, grad2])\n', (1684, 1704), False, 'import theano, numpy, sys\n'), ((1760, 1797), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (1782, 1797), False, 'import theano, numpy, sys\n'), ((1810, 1828), 'numpy.isnan', 'numpy.isnan', (['fixed'], {}), '(fixed)\n', (1821, 1828), False, 'import theano, numpy, sys\n'), ((863, 881), 'numpy.isnan', 'numpy.isnan', (['fixed'], {}), '(fixed)\n', (874, 881), False, 'import theano, numpy, sys\n'), ((896, 926), 'theano.shared', 'theano.shared', (['fixed', '"""fixedT"""'], {}), "(fixed, 'fixedT')\n", (909, 926), False, 'import theano, numpy, sys\n'), ((1549, 1577), 'theano.tensor.sqr', 'theano.tensor.sqr', (['signature'], {}), '(signature)\n', (1566, 1577), False, 'import theano, numpy, sys\n'), ((732, 776), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(pathlength, dim)'}), '(size=(pathlength, dim))\n', (752, 776), False, 'import theano, numpy, sys\n'), ((1071, 1087), 'iisignature_theano.Sig', 'Sig', (['path', 'level'], {}), '(path, level)\n', (1074, 1087), False, 'from iisignature_theano import Sig, SigJoin\n'), ((1254, 1272), 'numpy.isnan', 'numpy.isnan', (['fixed'], {}), '(fixed)\n', (1265, 1272), False, 'import theano, numpy, sys\n'), ((1353, 1400), 'iisignature_theano.SigJoin', 'SigJoin', 
(['signature', 'displacement', 'level', 'fixedT'], {}), '(signature, displacement, level, fixedT)\n', (1360, 1400), False, 'from iisignature_theano import Sig, SigJoin\n'), ((1483, 1522), 'iisignature_theano.SigJoin', 'SigJoin', (['signature', 'displacement', 'level'], {}), '(signature, displacement, level)\n', (1490, 1522), False, 'from iisignature_theano import Sig, SigJoin\n'), ((500, 525), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (515, 525), False, 'import os\n'), ((1003, 1023), 'theano.tensor.arange', 'T.arange', (['pathlength'], {}), '(pathlength)\n', (1011, 1023), True, 'import theano.tensor as T\n'), ((1149, 1182), 'iisignature.siglength', 'iisignature.siglength', (['dim', 'level'], {}), '(dim, level)\n', (1170, 1182), False, 'import iisignature\n'), ((2001, 2027), 'theano.grad', 'theano.grad', (['cost1', 'fixedT'], {}), '(cost1, fixedT)\n', (2012, 2027), False, 'import theano, numpy, sys\n'), ((2027, 2053), 'theano.grad', 'theano.grad', (['cost2', 'fixedT'], {}), '(cost2, fixedT)\n', (2038, 2053), False, 'import theano, numpy, sys\n')] |
from pandas import read_csv
from datetime import datetime
from matplotlib import pyplot
from datetime import datetime
from statsmodels.tsa.arima.model import ARIMA
import statsmodels.api as sm
# from statsmodels.tsa.statespace.sarimax.SARIMAX import SARIMAX
from sklearn.metrics import mean_squared_error
from math import sqrt
import pandas as pd
import os
import enum
import numpy as np
import csv
import multiprocessing
class TrainignTimeType(enum.IntEnum):
    """Training-window lengths, expressed in minutes."""
    ONE_WEEK = 7 * 24 * 60    # 10080 minutes
    ONE_MONTH = 30 * 24 * 60  # 43200 minutes
class TestingTimeType(enum.IntEnum):
    """Testing-window lengths, expressed in minutes."""
    ONE_DAY = 24 * 60  # 1440 minutes
# Save the time series given as parameter
def save_series_to_csv(series, fileName, seriesName):
    """Persist *series* as CSV under results/ARIMA/ukdale_def3/<seriesName>/.

    The file name is prefixed with the number of training days derived from
    the module-level ``trainSize`` (minutes / 1440), e.g. ``7days_train.csv``.

    Args:
        series: pandas Series to save (written without a header).
        fileName: base file name, e.g. "train.csv".
        seriesName: appliance/series name; used as the sub-directory name.
    """
    path = "results/ARIMA/" + "ukdale_def3" + "/" + seriesName
    try:
        # One race-free call creates both directory levels, replacing the
        # original's duplicated isdir()/mkdir() blocks.
        os.makedirs(path, exist_ok=True)
    except OSError:
        print("Creation of the directory %s failed" % path)
    day = trainSize / 1440  # trainSize is expressed in minutes
    # The context manager guarantees the file handle is closed even on error.
    with open(path + "/" + str(int(day)) + "days_" + fileName, "w") as file:
        file.write(series.to_csv(header=False))
#
def save_accuracy_to_csv(values, fileName, seriesName):
    """Append one row of accuracy metrics to results/ARIMA/ukdale_def3/<fileName>.

    Row layout follows ``fieldnames``: mape, corr, rmse, minmax, seriesName,
    days. No header row is written (the original never wrote one either),
    preserving the historical file format.

    Args:
        values: dict with keys 'mape', 'corr', 'rmse', 'minmax'.
        fileName: CSV file name inside the output directory.
        seriesName: appliance name recorded in the row.
    """
    path = "results/ARIMA/" + "ukdale_def3"
    try:
        # Race-free directory creation, replacing isdir()+mkdir().
        os.makedirs(path, exist_ok=True)
    except OSError:
        print("Creation of the directory %s failed" % path)
    day = trainSize / 1440  # trainSize is expressed in minutes
    fieldnames = ['mape', 'corr', 'rmse', 'minmax', 'seriesName', 'days']
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(path + "/" + fileName, mode="a+", newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        # The original branched on file size but wrote the identical row in both
        # branches (and its readlines() on an append-mode handle was dead code);
        # collapse to a single writerow.
        writer.writerow({'mape': values.get("mape"), 'corr': values.get("corr"),
                         'rmse': values.get("rmse"), 'minmax': values.get("minmax"),
                         'seriesName': seriesName, 'days': str(int(day))})
# Save the plot from pyplot
def save_plot(seriesName):
    """Save the current matplotlib figure under results/ARIMA/ukdale_def3/<seriesName>/.

    The file name encodes the number of training days derived from the
    module-level ``trainSize`` (minutes / 1440), e.g. ``7days_plot.png``.

    Args:
        seriesName: appliance/series name; used as the sub-directory name.
    """
    path = "results/ARIMA/" + "ukdale_def3" + "/" + seriesName
    try:
        # One race-free call creates both directory levels, replacing the
        # original's duplicated isdir()/mkdir() blocks.
        os.makedirs(path, exist_ok=True)
    except OSError:
        print("Creation of the directory %s failed" % path)
    day = trainSize / 1440  # trainSize is expressed in minutes
    finalPath = path + "/" + str(int(day)) + "days_plot.png"
    pyplot.savefig(finalPath, dpi=100)
# Parser for the read_csv
def parser(x):
    """Parse a two-digit-year timestamp string (yy-mm-dd HH:MM:SS) into a datetime."""
    fmt = '%y-%m-%d %H:%M:%S'
    return datetime.strptime(x, fmt)
# Accuracy metrics
def forecast_accuracy(forecast, actual):
    """Compute forecast-accuracy metrics for two aligned 1-D numpy arrays.

    Returns a dict with:
      mape   - mean absolute percentage error
      corr   - Pearson correlation coefficient
      rmse   - root mean squared error
      minmax - 1 minus the mean element-wise min/max ratio
    """
    err = forecast - actual
    mape = np.mean(np.abs(err) / np.abs(actual))
    corr = np.corrcoef(forecast, actual)[0, 1]
    rmse = np.sqrt(np.mean(err ** 2))
    # Element-wise min/max ratio; equivalent to stacking the two series as
    # columns and reducing along axis 1 as the original implementation did.
    ratio = np.minimum(forecast, actual) / np.maximum(forecast, actual)
    minmax = 1 - np.mean(ratio)
    return {'mape': mape, 'corr': corr, 'rmse': rmse, 'minmax': minmax}
'''
PUT HERE THE CONFIGURATION VALUES
'''
trainSize = TrainignTimeType.ONE_WEEK  # training window, in minutes
testSize = TestingTimeType.ONE_DAY  # testing window, in minutes
shiftRow = 26423  # dataset rows skipped before the training window starts
originFileName = "ukdale_def3.csv"  # input CSV, looked up under Dataset/
seriesName = "Gas_Boiler"  # default series; not used by the __main__ block below
def main(seriesName):
    """Train an ARIMA(5,0,1) model on one appliance series and evaluate it.

    Reads the module-level ``series`` DataFrame (loaded in the __main__
    block), trains on one week of minute data, walk-forward forecasts one
    day, then saves train/test/prediction CSVs, accuracy metrics and a plot.

    Args:
        seriesName: column name of the appliance series in ``series``.
    """
    # Local window sizes (shadow the module-level constants of the same name).
    trainSize = TrainignTimeType.ONE_WEEK
    testSize = TestingTimeType.ONE_DAY
    # Split the series into training and testing windows (row counts = minutes).
    X = series[seriesName]
    train, test = X[0:trainSize], X[trainSize:trainSize + testSize]
    history = [x for x in train]
    predictions = list()
    maxLen = len(test)
    # Create the ARIMA model. Previously tried configurations:
    # (5,2,1) start_params=[0,0,0,0,0,0,1,5]
    # (5,1,1) start_params=[0,0,0,0,0,0,1,3]
    print("\nTraining the model...\n")
    model = ARIMA(history, order=(5, 0, 1))
    model_fit = model.fit(start_params=[0, 0, 0, 0, 0, 0, 0, 1])
    maxLen = len(test)
    print("Testing...")
    # Walk-forward validation: forecast one step, then fold the observed value
    # back into the fitted model before forecasting the next step.
    for t in range(len(test)):
        perc = (100 / maxLen) * t
        print("\nPerc: %.2f%%" % perc, end="\r")
        #print("\033[A \033[A")
        output = model_fit.forecast()
        yhat = output[0]
        predictions.append(yhat)
        obs = test[t]
        history.append(obs)
        model_fit = model_fit.append([test[t]])
        # print('predicted=%f, expected=%f' % (yhat, obs))
    """
    mod = sm.tsa.statespace.SARIMAX(df,
                                order=(1, 0, 1),
                                seasonal_order=(0, 0, 1, 12),
                                enforce_stationarity=False,
                                enforce_invertibility=False)
    """
    print("Testing...")
    fc_series = pd.Series(predictions, index=test.index)
    # Clamp negative forecasts to zero (the series is a consumption quantity).
    fc_series[fc_series < 0] = 0
    # Evaluate forecasts.
    values = forecast_accuracy(fc_series.values, test.values)
    print(values)
    pyplot.figure(figsize=(12, 5), dpi=100)
    pyplot.plot(train, color='blue')
    pyplot.plot(test, color='blue')
    pyplot.plot(fc_series, color='red')
    day = trainSize / 1440
    pyplot.title(seriesName + " " + str(int(day)) + " days trained")
    ax = pyplot.gca()
    ax.axes.xaxis.set_visible(False)
    # Persist data, metrics and the plot.
    save_series_to_csv(train, "train.csv", seriesName)
    save_series_to_csv(test, "test.csv", seriesName)
    save_series_to_csv(fc_series, "predictions.csv", seriesName)
    save_accuracy_to_csv(values, "accuracy.csv", seriesName)
    save_plot(seriesName)
    # pyplot.show()
    print("\nAll done!\n")
if __name__ == '__main__':
    # Rows needed: skip the first `shiftRow` rows, then read train + test windows.
    numbersOfRowToRead = int(trainSize) + int(testSize) + shiftRow
    # Read the series from the dataset file (timestamp index in column 0).
    series = read_csv("Dataset/" + originFileName, header=0, index_col=0, nrows=numbersOfRowToRead,
                      skiprows=range(1, shiftRow))
    # seriesNames = list(series.columns.values)
    # seriesNames = ['Speakers']
    appls = ["Kettle", "Electric_Heater", "Laptop", "Projector"]
    proc = []
    # One worker process per appliance. NOTE(review): worker `main` reads the
    # module-level `series`; this relies on fork-style process inheritance —
    # confirm behavior on platforms that spawn instead.
    for appliance in appls:
        p = multiprocessing.Process(target=main, args=[appliance])
        p.start()
        proc.append(p)
    for procces in proc:
        procces.join()
| [
"pandas.Series",
"csv.DictWriter",
"numpy.mean",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.corrcoef",
"datetime.datetime.strptime",
"matplotlib.pyplot.gca",
"numpy.hstack",
"matplotlib.pyplot.plot",
"multiprocessing.Process",
"matplotlib.pyplot.figure",
"os.path.isdir",
"os.mkdir",
... | [((2991, 3025), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['finalPath'], {'dpi': '(100)'}), '(finalPath, dpi=100)\n', (3005, 3025), False, 'from matplotlib import pyplot\n'), ((3080, 3121), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%y-%m-%d %H:%M:%S"""'], {}), "(x, '%y-%m-%d %H:%M:%S')\n", (3097, 3121), False, 'from datetime import datetime\n'), ((4448, 4479), 'statsmodels.tsa.arima.model.ARIMA', 'ARIMA', (['history'], {'order': '(5, 0, 1)'}), '(history, order=(5, 0, 1))\n', (4453, 4479), False, 'from statsmodels.tsa.arima.model import ARIMA\n'), ((5247, 5287), 'pandas.Series', 'pd.Series', (['predictions'], {'index': 'test.index'}), '(predictions, index=test.index)\n', (5256, 5287), True, 'import pandas as pd\n'), ((5432, 5471), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(12, 5)', 'dpi': '(100)'}), '(figsize=(12, 5), dpi=100)\n', (5445, 5471), False, 'from matplotlib import pyplot\n'), ((5476, 5508), 'matplotlib.pyplot.plot', 'pyplot.plot', (['train'], {'color': '"""blue"""'}), "(train, color='blue')\n", (5487, 5508), False, 'from matplotlib import pyplot\n'), ((5513, 5544), 'matplotlib.pyplot.plot', 'pyplot.plot', (['test'], {'color': '"""blue"""'}), "(test, color='blue')\n", (5524, 5544), False, 'from matplotlib import pyplot\n'), ((5549, 5584), 'matplotlib.pyplot.plot', 'pyplot.plot', (['fc_series'], {'color': '"""red"""'}), "(fc_series, color='red')\n", (5560, 5584), False, 'from matplotlib import pyplot\n'), ((5690, 5702), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (5700, 5702), False, 'from matplotlib import pyplot\n'), ((717, 736), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (730, 736), False, 'import os\n'), ((942, 961), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (955, 961), False, 'import os\n'), ((1366, 1385), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1379, 1385), False, 'import os\n'), ((1823, 1870), 'csv.DictWriter', 
'csv.DictWriter', (['csv_file'], {'fieldnames': 'fieldnames'}), '(csv_file, fieldnames=fieldnames)\n', (1837, 1870), False, 'import csv\n'), ((2525, 2544), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2538, 2544), False, 'import os\n'), ((2749, 2768), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2762, 2768), False, 'import os\n'), ((3266, 3295), 'numpy.corrcoef', 'np.corrcoef', (['forecast', 'actual'], {}), '(forecast, actual)\n', (3277, 3295), True, 'import numpy as np\n'), ((3321, 3354), 'numpy.mean', 'np.mean', (['((forecast - actual) ** 2)'], {}), '((forecast - actual) ** 2)\n', (3328, 3354), True, 'import numpy as np\n'), ((3388, 3435), 'numpy.hstack', 'np.hstack', (['[forecast[:, None], actual[:, None]]'], {}), '([forecast[:, None], actual[:, None]])\n', (3397, 3435), True, 'import numpy as np\n'), ((3494, 3541), 'numpy.hstack', 'np.hstack', (['[forecast[:, None], actual[:, None]]'], {}), '([forecast[:, None], actual[:, None]])\n', (3503, 3541), True, 'import numpy as np\n'), ((3598, 3618), 'numpy.mean', 'np.mean', (['(mins / maxs)'], {}), '(mins / maxs)\n', (3605, 3618), True, 'import numpy as np\n'), ((6564, 6618), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'main', 'args': '[appliance]'}), '(target=main, args=[appliance])\n', (6587, 6618), False, 'import multiprocessing\n'), ((763, 777), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (771, 777), False, 'import os\n'), ((988, 1002), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (996, 1002), False, 'import os\n'), ((1412, 1426), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1420, 1426), False, 'import os\n'), ((2571, 2585), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2579, 2585), False, 'import os\n'), ((2795, 2809), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2803, 2809), False, 'import os\n'), ((3203, 3228), 'numpy.abs', 'np.abs', (['(forecast - actual)'], {}), '(forecast - actual)\n', (3209, 3228), True, 
'import numpy as np\n'), ((3231, 3245), 'numpy.abs', 'np.abs', (['actual'], {}), '(actual)\n', (3237, 3245), True, 'import numpy as np\n'), ((1883, 1913), 'os.stat', 'os.stat', (["(path + '/' + fileName)"], {}), "(path + '/' + fileName)\n", (1890, 1913), False, 'import os\n')] |
#!/usr/bin/env python3
"""
Solution for ISTA 421 / INFO 521 Fall 2015, HW 2, Problem 1
Author: <NAME>, 12 September 2015
<NAME> (Sept 2018)
"""
import argparse
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# --------------------------------------------------
def get_args():
    """Build the command-line interface and parse sys.argv.

    :return: argparse.Namespace with file, model_order, title, xlabel,
        ylabel, outfile, scale and quiet attributes
    """
    parser = argparse.ArgumentParser(
        description='Find w-hat',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positional argument: the input data file.
    parser.add_argument('file', metavar='FILE', help='csv data file')
    # Value-taking options, registered from one spec table:
    # (short flag, long flag, help text, metavar, type, default)
    value_options = [
        ('-m', '--model_order', 'Model order', 'int', int, 1),
        ('-t', '--title', 'Plot title', 'str', str, None),
        ('-x', '--xlabel', 'X axis label', 'str', str, 'x'),
        ('-y', '--ylabel', 'Y axis label', 'str', str, 't'),
        ('-o', '--outfile', 'Save output to filename', 'str', str, None),
    ]
    for short_flag, long_flag, help_text, meta, arg_type, default in value_options:
        parser.add_argument(
            short_flag,
            long_flag,
            help=help_text,
            metavar=meta,
            type=arg_type,
            default=default)
    # Boolean switches.
    parser.add_argument(
        '-s', '--scale', help='Whether to scale the data', action='store_true')
    parser.add_argument(
        '-q',
        '--quiet',
        help='Do not show debug messages',
        action='store_true')
    return parser.parse_args()
# --------------------------------------------------
def die(msg='Something bad happened'):
    """Log *msg* as a critical error and terminate with exit status 1."""
    logging.critical(msg)
    # sys.exit(1) is equivalent to raising SystemExit(1) directly.
    raise SystemExit(1)
# --------------------------------------------------
def main():
    """Script entry point: parse options, then load, fit and plot the data."""
    args = get_args()
    # Quiet mode suppresses everything below CRITICAL.
    logging.basicConfig(
        level=logging.CRITICAL if args.quiet else logging.DEBUG)
    if args.title:
        title = args.title
    else:
        title = 'Fit to {} order'.format(args.model_order)
    read_data_fit_plot(
        args.file,
        model_order=args.model_order,
        scale_p=args.scale,
        plot_title=title,
        xlabel=args.xlabel,
        ylabel=args.ylabel,
        save_path=args.outfile,
        plot_p=True)
# --------------------------------------------------
def plot_data(x, t, title='Data', xlabel='x', ylabel='t'):
    """Scatter-plot 1-D inputs *x* against responses *t* in a fresh figure.

    :param x: sequence of 1-dimensional input data values (numbers)
    :param t: sequence of 1-dimensional responses
    :param title: title of plot (default 'Data')
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :return: None
    """
    plt.figure()  # new figure so we never draw over an earlier plot
    plt.scatter(x, t, edgecolor='b', color='w', marker='o')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Some backends only render the window after a short pause.
    plt.pause(.1)
# --------------------------------------------------
def plot_model(x, w):
    """Draw the polynomial model t = w0 + w1*x + ... + wn*x^n as a red curve.

    Evaluates the model on 100 evenly spaced points spanning a little
    beyond the range of *x* (0.25 either side) and plots the curve on
    the current figure (assumes a figure object already exists).

    :param x: 1-d input data values; only used to pick the plot range
    :param w: model parameter vector w0, w1, ..., wn
    :return: (plotx, plott) -- the x and t values of the plotted curve
    """
    plotx = np.linspace(min(x) - 0.25, max(x) + 0.25, 100)
    # Design matrix for the curve points: column k holds plotx**k.
    # np.vander with increasing=True builds exactly this Vandermonde
    # matrix, replacing the manual np.power loop.
    plotX = np.vander(plotx, w.size, increasing=True)
    # Model outputs are the matrix-vector product with the parameters.
    plott = plotX.dot(w)
    plt.plot(plotx, plott, color='r', linewidth=2)
    plt.pause(.1)  # some backends need this to render
    return plotx, plott
# --------------------------------------------------
def scale01(x):
    """Linearly rescale *x* into the inclusive range [0, 1].

    Helper for data whose raw values are large: taking high polynomial
    powers of large inputs (e.g. Olympics years in the thousands to the
    5th power) exceeds what floating point can represent accurately, so
    inputs are shifted/scaled so min(x) maps to 0 and max(x) to 1.

    :param x: sequence of 1-dimensional input data values (numbers)
    :return: x linearly mapped onto [0, 1]
    """
    lo = min(x)
    hi = max(x)
    return (x - lo) / (hi - lo)
# --------------------------------------------------
def fitpoly(x, t, model_order):
    """Least-squares fit of an order-*model_order* polynomial to (x, t).

    Determines the parameter vector w of the model
        t = w0*x^0 + w1*x^1 + ... + wn*x^n
    by solving the normal equations (X^T X) w = X^T t, where X is the
    design matrix whose column k holds x**k.

    :param x: sequence of 1-dimensional input data features
    :param t: sequence of target response values
    :param model_order: non-negative integer, highest polynomial exponent
    :return: parameter vector w of length model_order + 1
    """
    # Design matrix: one row per sample, model_order+1 columns
    # (column k is x raised to the power k).
    X = np.zeros((x.shape[0], model_order + 1))
    for k in range(model_order + 1):
        X[:, k] = np.power(x, k)
    logging.debug('model_order = {}'.format(model_order))
    logging.debug('x.shape = {}'.format(x.shape))
    logging.debug('X.shape = {}'.format(X.shape))
    logging.debug('t.shape = {}'.format(t.shape))
    # Solve (X^T X) w = X^T t directly.  np.linalg.solve is numerically
    # better conditioned than forming the explicit inverse
    # np.linalg.inv(X^T X) and multiplying, and gives the same w.
    w = np.linalg.solve(X.T.dot(X), X.T.dot(t))
    logging.debug('w.shape = {}'.format(w.shape))
    return w
# --------------------------------------------------
def read_data_fit_plot(data_path,
                       model_order=1,
                       scale_p=False,
                       save_path=None,
                       plot_p=False,
                       xlabel='x',
                       ylabel='y',
                       plot_title='Data'):
    """Load two-column csv data, fit a polynomial, and plot data + model.

    Pipeline: (1) load the csv at *data_path*; (2) optionally rescale
    the x column to [0, 1]; (3) scatter-plot the raw data; (4) fit the
    model parameters with fitpoly(); (5) overlay the model curve;
    (6) optionally save the figure as a pdf; (7) optionally call
    plt.show() to keep the window open.

    :param data_path: path to the csv data file
    :param model_order: non-negative integer polynomial order
    :param scale_p: when True, rescale x to [0, 1] before fitting
    :param save_path: optional filepath to save the figure (pdf)
    :param plot_p: when True, hold the plot window open via plt.show()
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param plot_title: title of the plot (default 'Data')
    :return: None
    """
    data = np.genfromtxt(data_path, delimiter=',', dtype=None)
    x = data[:, 0]        # first column: input feature
    if scale_p:
        # See scale01() for why rescaling helps with large raw inputs.
        x = scale01(x)
    t = data[:, 1]        # second column: target responses
    plot_data(x, t, title=plot_title, xlabel=xlabel, ylabel=ylabel)
    w = fitpoly(x, t, model_order)
    logging.debug(
        'Identified model parameters w (in scientific notation):\n{}'.format(
            w))
    # Also print without scientific notation, which is easier to read.
    logging.debug('w again (not in scientific notation):\n{}'.format(
        ['{0:f}'.format(i) for i in w]))
    plot_model(x, w)
    if save_path is not None:
        plt.savefig(save_path, fmt='pdf')
    if plot_p:
        plt.show()
# --------------------------------------------------
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.dot",
"logging.critical",
"sys.exit",
"matplotlib.pyplot.scatter",... | [((359, 469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find w-hat"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Find w-hat', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (382, 469), False, 'import argparse\n'), ((1725, 1746), 'logging.critical', 'logging.critical', (['msg'], {}), '(msg)\n', (1741, 1746), False, 'import logging\n'), ((1751, 1762), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1759, 1762), False, 'import sys\n'), ((1872, 1948), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '(logging.CRITICAL if args.quiet else logging.DEBUG)'}), '(level=logging.CRITICAL if args.quiet else logging.DEBUG)\n', (1891, 1948), False, 'import logging\n'), ((2715, 2727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2725, 2727), True, 'import matplotlib.pyplot as plt\n'), ((2775, 2830), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 't'], {'edgecolor': '"""b"""', 'color': '"""w"""', 'marker': '"""o"""'}), "(x, t, edgecolor='b', color='w', marker='o')\n", (2786, 2830), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2853), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2845, 2853), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2876), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (2868, 2876), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2897), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2890, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2916), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2911, 2916), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4627), 'numpy.zeros', 'np.zeros', (['(plotx.shape[0], w.size)'], {}), '((plotx.shape[0], w.size))\n', (4601, 4627), True, 'import numpy as np\n'), ((4845, 4861), 'numpy.dot', 'np.dot', (['plotX', 'w'], {}), '(plotX, w)\n', (4851, 
4861), True, 'import numpy as np\n'), ((4920, 4966), 'matplotlib.pyplot.plot', 'plt.plot', (['plotx', 'plott'], {'color': '"""r"""', 'linewidth': '(2)'}), "(plotx, plott, color='r', linewidth=2)\n", (4928, 4966), True, 'import matplotlib.pyplot as plt\n'), ((4972, 4986), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (4981, 4986), True, 'import matplotlib.pyplot as plt\n'), ((7826, 7865), 'numpy.zeros', 'np.zeros', (['(x.shape[0], model_order + 1)'], {}), '((x.shape[0], model_order + 1))\n', (7834, 7865), True, 'import numpy as np\n'), ((9603, 9654), 'numpy.genfromtxt', 'np.genfromtxt', (['data_path'], {'delimiter': '""","""', 'dtype': 'None'}), "(data_path, delimiter=',', dtype=None)\n", (9616, 9654), True, 'import numpy as np\n'), ((4718, 4736), 'numpy.power', 'np.power', (['plotx', 'k'], {}), '(plotx, k)\n', (4726, 4736), True, 'import numpy as np\n'), ((7999, 8013), 'numpy.power', 'np.power', (['x', 'k'], {}), '(x, k)\n', (8007, 8013), True, 'import numpy as np\n'), ((10810, 10843), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'fmt': '"""pdf"""'}), "(save_path, fmt='pdf')\n", (10821, 10843), True, 'import matplotlib.pyplot as plt\n'), ((10929, 10939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10937, 10939), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import awkward
from awkward import JaggedArray
#for later
#func = numbaize(formula,['p%i'%i for i in range(nParms)]+[varnames[i] for i in range(nEvalVars)])
def convert_jec_txt_file(jecFilePath):
    """Parse a CMS jet-energy-correction (JEC) text file.

    The first line (wrapped in '{}') describes the layout: the number
    of binned variables and their names, the number of evaluation
    variables and their names, and the correction formula.  The
    remaining rows hold the bin edges, the clamp ranges for each
    evaluation variable and the formula parameters.

    :param jecFilePath: path to the JEC .txt file
    :return: dict mapping (name, 'jet_energy_corrector') to
             (formula, (bins, bin_order),
              (clamp_mins, clamp_maxs, var_order),
              (parms, parm_order))
    """
    # Read only the header line; the context manager guarantees the
    # handle is closed (the original left it to manual close()).
    with open(jecFilePath, 'r') as jec_f:
        layoutstr = jec_f.readline().strip().strip('{}')
    # The corrector name is the file's base name without extension.
    name = jecFilePath.split('/')[-1].split('.')[0]

    layout = layoutstr.split()
    if not layout[0].isdigit():
        raise Exception('First column of JEC descriptor must be a digit!')
    # setup the file format
    nBinnedVars = int(layout[0])
    nEvalVars = int(layout[nBinnedVars+1])
    formula = layout[nBinnedVars+nEvalVars+2]
    # Count the formula parameters [0], [1], ... and rename them p0, p1, ...
    nParms = 0
    while( formula.count('[%i]'%nParms) ):
        formula = formula.replace('[%i]'%nParms,'p%i'%nParms)
        nParms += 1
    # protect function names with vars in them (e.g. 'x' inside 'max')
    funcs_to_cap = ['max','exp']
    for f in funcs_to_cap:
        formula = formula.replace(f,f.upper())
    templatevars = ['x','y','z','w','t','s']
    varnames = [layout[i+nBinnedVars+2] for i in range(nEvalVars)]
    for find,replace in zip(templatevars,varnames):
        formula = formula.replace(find,replace)
    # restore the protected function names
    for f in funcs_to_cap:
        formula = formula.replace(f.upper(),f)

    # Build the column layout of the data rows: a (Min, Max) pair per
    # binned variable plus a count column, then a (Min, Max) clamp pair
    # per evaluation variable, then one column per formula parameter.
    minMax = ['Min','Max']
    columns = []
    dtypes = []
    offset = 1
    for i in range(nBinnedVars):
        columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
        dtypes.extend(['<f8','<f8'])
    columns.append('NVars')
    dtypes.append('<i8')
    offset += nBinnedVars + 1
    for i in range(nEvalVars):
        columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
        dtypes.extend(['<f8','<f8'])
    for i in range(nParms):
        columns.append('p%i'%i)
        dtypes.append('<f8')

    pars = np.genfromtxt(jecFilePath,
                        dtype=tuple(dtypes),
                        names=tuple(columns),
                        skip_header=1,
                        unpack=True,
                        encoding='ascii'
                        )

    # the first bin is always usual for JECs
    # the next bins may vary in number, so they're jagged arrays... yay
    bins = {}
    offset_col = 0
    offset_name = 1
    bin_order = []
    for i in range(nBinnedVars):
        binMins = None
        binMaxs = None
        if i == 0:
            binMins = np.unique(pars[columns[0]])
            binMaxs = np.unique(pars[columns[1]])
            bins[layout[i+offset_name]] = np.union1d(binMins,binMaxs)
        else:
            # NOTE: np.int / np.double were deprecated aliases removed
            # in NumPy 1.24; use the explicit fixed-size dtypes.
            counts = np.zeros(0,dtype=np.int64)
            allBins = np.zeros(0,dtype=np.float64)
            for binMin in bins[bin_order[0]][:-1]:
                binMins = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col]])
                binMaxs = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col+1]])
                theBins = np.union1d(binMins,binMaxs)
                allBins = np.append(allBins,theBins)
                counts = np.append(counts,theBins.size)
            bins[layout[i+offset_name]] = JaggedArray.fromcounts(counts,allBins)
        bin_order.append(layout[i+offset_name])
        offset_col += 1

    # skip nvars to the variable columns
    # the columns here define clamps for the variables defined in columns[]
    # ----> clamps can be different from bins
    # ----> if there is more than one binning variable this array is jagged
    # ----> just make it jagged all the time
    clamp_mins = {}
    clamp_maxs = {}
    var_order = []
    offset_col = 2*nBinnedVars+1
    offset_name = nBinnedVars + 2
    jagged_counts = np.ones(bins[bin_order[0]].size-1,dtype=np.int64)
    if len(bin_order) > 1:
        # need counts-1 since we only care about the number of bins
        jagged_counts = np.maximum(bins[bin_order[1]].counts - 1,0)
    for i in range(nEvalVars):
        clamp_mins[layout[i+offset_name]] = JaggedArray.fromcounts(jagged_counts,np.atleast_1d(pars[columns[i+offset_col]]))
        clamp_maxs[layout[i+offset_name]] = JaggedArray.fromcounts(jagged_counts,np.atleast_1d(pars[columns[i+offset_col+1]]))
        var_order.append(layout[i+offset_name])
        offset_col += 1

    # now get the parameters, which we will look up with the clamps
    parms = []
    parm_order = []
    offset_col = 2*nBinnedVars+1 + 2*nEvalVars
    for i in range(nParms):
        parms.append(JaggedArray.fromcounts(jagged_counts,pars[columns[i+offset_col]]))
        parm_order.append('p%i'%(i))

    wrapped_up = {}
    wrapped_up[(name,'jet_energy_corrector')] = (formula,
                                                 (bins,bin_order),
                                                 (clamp_mins,clamp_maxs,var_order),
                                                 (parms,parm_order))
    return wrapped_up
| [
"awkward.JaggedArray.fromcounts",
"numpy.unique",
"numpy.ones",
"numpy.union1d",
"numpy.where",
"numpy.append",
"numpy.zeros",
"numpy.maximum",
"numpy.atleast_1d"
] | [((3855, 3905), 'numpy.ones', 'np.ones', (['(bins[bin_order[0]].size - 1)'], {'dtype': 'np.int'}), '(bins[bin_order[0]].size - 1, dtype=np.int)\n', (3862, 3905), True, 'import numpy as np\n'), ((3954, 3998), 'numpy.maximum', 'np.maximum', (['(bins[bin_order[1]].counts - 1)', '(0)'], {}), '(bins[bin_order[1]].counts - 1, 0)\n', (3964, 3998), True, 'import numpy as np\n'), ((2523, 2550), 'numpy.unique', 'np.unique', (['pars[columns[0]]'], {}), '(pars[columns[0]])\n', (2532, 2550), True, 'import numpy as np\n'), ((2573, 2600), 'numpy.unique', 'np.unique', (['pars[columns[1]]'], {}), '(pars[columns[1]])\n', (2582, 2600), True, 'import numpy as np\n'), ((2643, 2671), 'numpy.union1d', 'np.union1d', (['binMins', 'binMaxs'], {}), '(binMins, binMaxs)\n', (2653, 2671), True, 'import numpy as np\n'), ((2706, 2731), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (2714, 2731), True, 'import numpy as np\n'), ((2753, 2781), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.double'}), '(0, dtype=np.double)\n', (2761, 2781), True, 'import numpy as np\n'), ((3246, 3285), 'awkward.JaggedArray.fromcounts', 'JaggedArray.fromcounts', (['counts', 'allBins'], {}), '(counts, allBins)\n', (3268, 3285), False, 'from awkward import JaggedArray\n'), ((4156, 4200), 'numpy.atleast_1d', 'np.atleast_1d', (['pars[columns[i + offset_col]]'], {}), '(pars[columns[i + offset_col]])\n', (4169, 4200), True, 'import numpy as np\n'), ((4281, 4329), 'numpy.atleast_1d', 'np.atleast_1d', (['pars[columns[i + offset_col + 1]]'], {}), '(pars[columns[i + offset_col + 1]])\n', (4294, 4329), True, 'import numpy as np\n'), ((4598, 4666), 'awkward.JaggedArray.fromcounts', 'JaggedArray.fromcounts', (['jagged_counts', 'pars[columns[i + offset_col]]'], {}), '(jagged_counts, pars[columns[i + offset_col]])\n', (4620, 4666), False, 'from awkward import JaggedArray\n'), ((3066, 3094), 'numpy.union1d', 'np.union1d', (['binMins', 'binMaxs'], {}), '(binMins, binMaxs)\n', (3076, 3094), 
True, 'import numpy as np\n'), ((3120, 3147), 'numpy.append', 'np.append', (['allBins', 'theBins'], {}), '(allBins, theBins)\n', (3129, 3147), True, 'import numpy as np\n'), ((3173, 3204), 'numpy.append', 'np.append', (['counts', 'theBins.size'], {}), '(counts, theBins.size)\n', (3182, 3204), True, 'import numpy as np\n'), ((2873, 2909), 'numpy.where', 'np.where', (['(pars[columns[0]] == binMin)'], {}), '(pars[columns[0]] == binMin)\n', (2881, 2909), True, 'import numpy as np\n'), ((2976, 3012), 'numpy.where', 'np.where', (['(pars[columns[0]] == binMin)'], {}), '(pars[columns[0]] == binMin)\n', (2984, 3012), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spin_basis_general
from quspin.basis.transformations import square_lattice_trans
from quspin.operators import hamiltonian
import numpy as np
from itertools import product
import os
def test(S, Lx, Ly):
    """Check symmetry-reduced blocks against the full magnetization sector.

    For a spin-S square lattice of Lx x Ly sites this verifies, for
    every magnetization sector Nup, that (a) the dimensions of the
    symmetry-reduced blocks sum to the full sector dimension and
    (b) the union of the block eigenvalues of a Heisenberg-type
    Hamiltonian matches the eigenvalues of the unreduced sector.

    :param S: spin as a string, e.g. "1/2" or "1"
    :param Lx: lattice extent in x
    :param Ly: lattice extent in y
    """
    from fractions import Fraction

    N = Lx * Ly
    # Maximum number of spin flips per site is 2S.  S arrives as a
    # string such as "1/2"; parse it exactly with Fraction instead of
    # eval() on an arbitrary string.
    nmax = int(2 * Fraction(S))
    sps = nmax + 1
    tr = square_lattice_trans(Lx, Ly)

    # For each magnetization sector keep the full basis and its
    # symmetry-reduced blocks, checking the dimensions add up.
    basis_dict = {}
    Nups = range(nmax * N + 1)
    for Nup in Nups:
        basis_blocks = []
        pcon_basis = spin_basis_general(N, Nup=Nup, S=S)
        Ns_block = 0
        for blocks in tr.allowed_blocks_spin_inversion_iter(Nup, sps):
            basis = spin_basis_general(N, Nup=Nup, S=S, **blocks)
            Ns_block += basis.Ns
            basis_blocks.append(basis)
        try:
            assert Ns_block == pcon_basis.Ns
        except AssertionError:
            print(Nup, Ns_block, pcon_basis.Ns)
            raise AssertionError("reduced blocks don't sum to particle sector.")
        basis_dict[Nup] = (pcon_basis, basis_blocks)

    # Nearest-neighbour couplings along the two lattice translations.
    J = [[1.0, i, tr.T_x[i]] for i in range(N)]
    J.extend([[1.0, i, tr.T_y[i]] for i in range(N)])
    static = [["zz", J], ["+-", J], ["-+", J]]

    # Full-sector spectrum must equal the sorted union of block spectra.
    for Nb, (pcon_basis, basis_blocks) in basis_dict.items():
        H_pcon = hamiltonian(static, [], basis=pcon_basis, dtype=np.float64)
        if H_pcon.Ns > 0:
            E_pcon = np.linalg.eigvalsh(H_pcon.todense())
        else:
            E_pcon = np.array([])
        E_block = []
        for basis in basis_blocks:
            H = hamiltonian(static, [], basis=basis, dtype=np.complex128)
            if H.Ns > 0:
                E_block.append(np.linalg.eigvalsh(H.todense()))
        E_block = np.hstack(E_block)
        E_block.sort()
        np.testing.assert_allclose(E_pcon, E_block, atol=1e-13)
        print("passed Nb={} sector".format(Nb))
# Exercise a few spin values and lattice sizes.
test("1/2",3,3)
test("1",3,3)
test("1/2",3,2)
test("1",3,2)
| [
"sys.path.insert",
"quspin.basis.transformations.square_lattice_trans",
"numpy.hstack",
"numpy.testing.assert_allclose",
"quspin.operators.hamiltonian",
"os.getcwd",
"numpy.array",
"quspin.basis.spin_basis_general"
] | [((108, 138), 'sys.path.insert', 'sys.path.insert', (['(0)', 'qspin_path'], {}), '(0, qspin_path)\n', (123, 138), False, 'import sys, os\n'), ((89, 100), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (98, 100), False, 'import os\n'), ((425, 453), 'quspin.basis.transformations.square_lattice_trans', 'square_lattice_trans', (['Lx', 'Ly'], {}), '(Lx, Ly)\n', (445, 453), False, 'from quspin.basis.transformations import square_lattice_trans\n'), ((546, 581), 'quspin.basis.spin_basis_general', 'spin_basis_general', (['N'], {'Nup': 'Nup', 'S': 'S'}), '(N, Nup=Nup, S=S)\n', (564, 581), False, 'from quspin.basis import spin_basis_general\n'), ((1212, 1271), 'quspin.operators.hamiltonian', 'hamiltonian', (['static', '[]'], {'basis': 'pcon_basis', 'dtype': 'np.float64'}), '(static, [], basis=pcon_basis, dtype=np.float64)\n', (1223, 1271), False, 'from quspin.operators import hamiltonian\n'), ((1555, 1573), 'numpy.hstack', 'np.hstack', (['E_block'], {}), '(E_block)\n', (1564, 1573), True, 'import numpy as np\n'), ((1593, 1648), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['E_pcon', 'E_block'], {'atol': '(1e-13)'}), '(E_pcon, E_block, atol=1e-13)\n', (1619, 1648), True, 'import numpy as np\n'), ((671, 716), 'quspin.basis.spin_basis_general', 'spin_basis_general', (['N'], {'Nup': 'Nup', 'S': 'S'}), '(N, Nup=Nup, S=S, **blocks)\n', (689, 716), False, 'from quspin.basis import spin_basis_general\n'), ((1356, 1368), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1364, 1368), True, 'import numpy as np\n'), ((1421, 1478), 'quspin.operators.hamiltonian', 'hamiltonian', (['static', '[]'], {'basis': 'basis', 'dtype': 'np.complex128'}), '(static, [], basis=basis, dtype=np.complex128)\n', (1432, 1478), False, 'from quspin.operators import hamiltonian\n')] |
import pytest
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import nics_fix_pt as nfp
# When module_cfg's nf_fix_paramparam is set , it means scale=-1, bitwidth=2, method=FIX_AUTO, see the default config in conftest module_cfg fixture.
@pytest.mark.parametrize(
    "module_cfg, case",
    [
        (
            {"input_num": 3},
            {
                "inputs": [1, 1, 0],
                "data": [0.2513, -0.52, 0],
                "out_scale": 1,
                "result": 0,
                "output": [0.5, -0.5, 0],  # quantized parameters, step 0.5
            },
        ),
        (
            {"input_num": 3},
            {
                "inputs": [1, 1, 0],
                "data": [0.2513, -0.5, 0],
                "out_scale": 0.5,
                "result": -0.25,
                "output": [0.25, -0.5, 0],  # quantized parameters, step 0.25
            },
        ),
    ],
    indirect=["module_cfg"],
)
def test_fix_forward_auto(module_cfg, case):
    """Forward pass under FIX_AUTO quantization, single input vector.

    After forward(), module.param holds the quantized values
    (case["output"]), the forward result matches case["result"], and
    the auto-chosen scale is recorded in the config dict.
    NOTE(review): the quantization step sizes come from the
    module_cfg fixture defaults in conftest (scale=-1, bitwidth=2,
    FIX_AUTO) -- not visible in this file; confirm against conftest.
    """
    module, cfg, _ = module_cfg
    if "data" in case:
        module.param[0, :] = torch.tensor(case["data"])
    with torch.no_grad():
        res = module.forward(torch.tensor(case["inputs"]).float())
    assert np.isclose(res, case["result"])  # calc output
    assert np.isclose(module.param, case["output"]).all()  # quantized parameter
    assert cfg["param"]["scale"] == case["out_scale"]  # scale
@pytest.mark.parametrize(
    "module_cfg, case",
    [
        (
            {"input_num": 3},
            {
                "inputs": [[1, 1, 0], [1, 2, 0]],
                "data": [0.2513, -0.52, 0],
                "out_scale": 1,
                "result": [[0], [-0.5]],
                "output": [0.5, -0.5, 0],  # quantized parameters, step 0.5
            },
        ),
        (
            {"input_num": 3},
            {
                "inputs": [[1, 1, 0], [1, 1, 0]],
                "data": [0.2513, -0.52, 0],
                "out_scale": 1,
                "result": [[0], [0]],
                "output": [0.5, -0.5, 0],  # quantized parameters, step 0.5
            },
        ),
        (
            {"input_num": 3},
            {
                "inputs": [[1, 1, 0], [1, 1, 0]],
                "data": [0.2513, -0.5, 0],
                "out_scale": 0.5,
                "result": [[-0.25], [-0.25]],
                "output": [0.25, -0.5, 0],  # quantized parameters, step 0.25
            },
        ),
    ],
    indirect=["module_cfg"],
)
def test_fix_forward_parallel_gpu(module_cfg, case):
    """Forward pass under FIX_AUTO with nn.DataParallel on GPUs 0 and 1.

    Batched inputs are run through the data-parallel wrapper; the
    quantization scale and the outputs must match the expectations.
    Requires at least two CUDA devices.
    """
    module, cfg, _ = module_cfg
    if "data" in case:
        module.param.data[0, :] = torch.tensor(case["data"])
    model = nn.DataParallel(module.cuda(), [0, 1])
    with torch.no_grad():
        res = model(torch.tensor(case["inputs"]).float().cuda())
    assert cfg["param"]["scale"] == case["out_scale"]  # scale
    assert np.isclose(res.cpu(), case["result"]).all()  # calc output
    # assert np.isclose(module.param.cpu(), case["output"]).all() # quantized parameter
    # Under DataParallel the replicas are quantized, so module.param on
    # the master copy will not change,
    # but the gradient will still be accumulated in module_parameters[name].grad
@pytest.mark.parametrize(
    "module_cfg, case",
    [
        (
            {"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [0.52, -0.27, 0],
                "data": [0, 0, 0],
                "grad_scale": 1,
                "output": [0.5, -0.5, 0],
            },
        ),
        (
            {"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [0.5, -0.27, 0],
                "data": [0, 0, 0],
                "grad_scale": 0.5,
                "output": [0.5, -0.25, 0],  # quantized gradients
            },
        ),
    ],
    indirect=["module_cfg"],
)
def test_fix_backward_auto(module_cfg, case):
    """Backward pass with FIX_AUTO gradient quantization.

    With zeroed parameters, the gradient of the forward output w.r.t.
    param equals the inputs; after backward() the stored gradient must
    be the quantized inputs (case["output"]) and the auto-chosen
    gradient scale must be recorded in the config.
    """
    module, _, cfg = module_cfg
    if "data" in case:
        module.param.data[0, :] = torch.tensor(case["data"])
    res = module.forward(torch.tensor(case["inputs"]).float())
    res.backward()
    assert np.isclose(
        module._parameters["param"].grad, case["output"]
    ).all()  # quantized gradient
    assert cfg["param"]["scale"] == case["grad_scale"]  # scale
@pytest.mark.parametrize(
    "module_cfg, case",
    [
        (
            {"input_num": 3, "data_cfg": {"method": nfp.FIX_NONE},
             "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [[0.52, -0.27, 0], [0.52, -0.27, 0]],
                "data": [0, 0, 0],
                "grad_scale": 1,
                "output": [0.5, -0.5, 0],
            },
        ),
        (
            {"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [[0.5, -0.27, 0], [0.5, -0.27, 0]],
                "data": [0, 0, 0],
                "grad_scale": 0.5,
                "output": [0.5, -0.25, 0],  # quantized gradients
            },
        ),
    ],
    indirect=["module_cfg"],
)
def test_fix_backward_parallel_gpu(module_cfg, case):
    """Backward pass with FIX_AUTO gradients under nn.DataParallel.

    Two identical rows in the batch mean each quantized per-row
    gradient is accumulated twice on the master parameter, hence the
    factor 2 in the expectation.  Requires two CUDA devices.
    """
    module, _, cfg = module_cfg
    if "data" in case:
        module.param.data[0, :] = torch.tensor(case["data"])
    model = nn.DataParallel(module.cuda(), [0, 1])
    res = torch.sum(model(torch.tensor(case["inputs"]).float().cuda()))
    res.backward()
    assert np.isclose(
        module._parameters["param"].grad.cpu(), 2 * np.array(case["output"])
    ).all()  # quantized gradient, 2 batch, grad x 2
    assert cfg["param"]["scale"] == case["grad_scale"]  # scale
@pytest.mark.parametrize(
    "module_cfg, case",
    [
        (
            {"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [0.52, -0.27, 0],
                "data": [0, 0, 0],
                "grad_scale": 1,
                "output": [0.5, -0.5, 0],
            },
        ),
        (
            {"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
            {
                "inputs": [0.5, -0.27, 0],
                "data": [0, 0, 0],
                "grad_scale": 0.5,
                "output": [0.5, -0.25, 0],  # quantized gradients
            },
        ),
    ],
    indirect=["module_cfg"],
)
def test_fix_update_auto(module_cfg, case):
    """Optimizer step with FIX_AUTO-quantized gradients.

    Starting from zeroed parameters, one SGD step with lr=1.0 and no
    momentum should leave param equal to -1 * quantized gradient, so
    -param must equal case["output"].
    """
    module, _, cfg = module_cfg
    if "data" in case:
        module.param.data[0, :] = torch.tensor(case["data"])
    optimizer = optim.SGD(module.parameters(), lr=1.0, momentum=0)
    res = module.forward(torch.tensor(case["inputs"]).float())
    res.backward()
    optimizer.step()
    assert np.isclose(
        -module._parameters["param"].detach(), case["output"]
    ).all()  # updated parameter should be - lr * gradient
    assert cfg["param"]["scale"] == case["grad_scale"]  # scale
def test_ConvBN_fix():
    """ConvBN_fix's fused forward must match running conv then bn separately.

    Checks both training and eval mode on random input: the fused
    module's output must agree elementwise (absolute tolerance 1e-3)
    with feeding the same data through its conv and bn submodules in
    sequence.  Requires CUDA.

    Fixes two bugs in the original:
    * the eval-mode branch called module.eval() on the *old* module
      before constructing a fresh one, so the second comparison
      actually ran a module still in training mode;
    * the check `(float_out - comb_out < 1e-3).all()` also passes for
      arbitrarily large negative deviations -- the absolute difference
      must be compared.
    """
    from nics_fix_pt.nn_fix import ConvBN_fix

    for training in (True, False):
        module = ConvBN_fix(3, 32, nf_fix_params={}).cuda()
        # Set the mode on the module that is actually tested.
        module.train(training)
        data = torch.tensor(
            np.random.rand(128, 3, 32, 32).astype(np.float32)).cuda()
        comb_out = module(data)
        float_out = module.bn(module.conv(data))
        # Compare magnitudes of the deviation, not signed differences.
        assert ((float_out - comb_out).abs() < 1e-3).all()
| [
"numpy.isclose",
"numpy.random.rand",
"nics_fix_pt.nn_fix.ConvBN_fix",
"pytest.mark.parametrize",
"torch.tensor",
"numpy.array",
"torch.no_grad"
] | [((275, 619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module_cfg, case"""', "[({'input_num': 3}, {'inputs': [1, 1, 0], 'data': [0.2513, -0.52, 0],\n 'out_scale': 1, 'result': 0, 'output': [0.5, -0.5, 0]}), ({'input_num':\n 3}, {'inputs': [1, 1, 0], 'data': [0.2513, -0.5, 0], 'out_scale': 0.5,\n 'result': -0.25, 'output': [0.25, -0.5, 0]})]"], {'indirect': "['module_cfg']"}), "('module_cfg, case', [({'input_num': 3}, {'inputs':\n [1, 1, 0], 'data': [0.2513, -0.52, 0], 'out_scale': 1, 'result': 0,\n 'output': [0.5, -0.5, 0]}), ({'input_num': 3}, {'inputs': [1, 1, 0],\n 'data': [0.2513, -0.5, 0], 'out_scale': 0.5, 'result': -0.25, 'output':\n [0.25, -0.5, 0]})], indirect=['module_cfg'])\n", (298, 619), False, 'import pytest\n'), ((1436, 1994), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module_cfg, case"""', "[({'input_num': 3}, {'inputs': [[1, 1, 0], [1, 2, 0]], 'data': [0.2513, -\n 0.52, 0], 'out_scale': 1, 'result': [[0], [-0.5]], 'output': [0.5, -0.5,\n 0]}), ({'input_num': 3}, {'inputs': [[1, 1, 0], [1, 1, 0]], 'data': [\n 0.2513, -0.52, 0], 'out_scale': 1, 'result': [[0], [0]], 'output': [0.5,\n -0.5, 0]}), ({'input_num': 3}, {'inputs': [[1, 1, 0], [1, 1, 0]],\n 'data': [0.2513, -0.5, 0], 'out_scale': 0.5, 'result': [[-0.25], [-0.25\n ]], 'output': [0.25, -0.5, 0]})]"], {'indirect': "['module_cfg']"}), "('module_cfg, case', [({'input_num': 3}, {'inputs':\n [[1, 1, 0], [1, 2, 0]], 'data': [0.2513, -0.52, 0], 'out_scale': 1,\n 'result': [[0], [-0.5]], 'output': [0.5, -0.5, 0]}), ({'input_num': 3},\n {'inputs': [[1, 1, 0], [1, 1, 0]], 'data': [0.2513, -0.52, 0],\n 'out_scale': 1, 'result': [[0], [0]], 'output': [0.5, -0.5, 0]}), ({\n 'input_num': 3}, {'inputs': [[1, 1, 0], [1, 1, 0]], 'data': [0.2513, -\n 0.5, 0], 'out_scale': 0.5, 'result': [[-0.25], [-0.25]], 'output': [\n 0.25, -0.5, 0]})], indirect=['module_cfg'])\n", (1459, 1994), False, 'import pytest\n'), ((3167, 3560), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""module_cfg, case"""', "[({'input_num': 3, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': [0.52,\n -0.27, 0], 'data': [0, 0, 0], 'grad_scale': 1, 'output': [0.5, -0.5, 0]\n }), ({'input_num': 3, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs':\n [0.5, -0.27, 0], 'data': [0, 0, 0], 'grad_scale': 0.5, 'output': [0.5, \n -0.25, 0]})]"], {'indirect': "['module_cfg']"}), "('module_cfg, case', [({'input_num': 3, 'grad_cfg':\n {'method': nfp.FIX_AUTO}}, {'inputs': [0.52, -0.27, 0], 'data': [0, 0, \n 0], 'grad_scale': 1, 'output': [0.5, -0.5, 0]}), ({'input_num': 3,\n 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': [0.5, -0.27, 0],\n 'data': [0, 0, 0], 'grad_scale': 0.5, 'output': [0.5, -0.25, 0]})],\n indirect=['module_cfg'])\n", (3190, 3560), False, 'import pytest\n'), ((4253, 4729), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module_cfg, case"""', "[({'input_num': 3, 'data_cfg': {'method': nfp.FIX_NONE}, 'grad_cfg': {\n 'method': nfp.FIX_AUTO}}, {'inputs': [[0.52, -0.27, 0], [0.52, -0.27, 0\n ]], 'data': [0, 0, 0], 'grad_scale': 1, 'output': [0.5, -0.5, 0]}), ({\n 'input_num': 3, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': [[0.5,\n -0.27, 0], [0.5, -0.27, 0]], 'data': [0, 0, 0], 'grad_scale': 0.5,\n 'output': [0.5, -0.25, 0]})]"], {'indirect': "['module_cfg']"}), "('module_cfg, case', [({'input_num': 3, 'data_cfg':\n {'method': nfp.FIX_NONE}, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {\n 'inputs': [[0.52, -0.27, 0], [0.52, -0.27, 0]], 'data': [0, 0, 0],\n 'grad_scale': 1, 'output': [0.5, -0.5, 0]}), ({'input_num': 3,\n 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': [[0.5, -0.27, 0], [\n 0.5, -0.27, 0]], 'data': [0, 0, 0], 'grad_scale': 0.5, 'output': [0.5, \n -0.25, 0]})], indirect=['module_cfg'])\n", (4276, 4729), False, 'import pytest\n'), ((5536, 5929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module_cfg, case"""', "[({'input_num': 3, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': 
[0.52,\n -0.27, 0], 'data': [0, 0, 0], 'grad_scale': 1, 'output': [0.5, -0.5, 0]\n }), ({'input_num': 3, 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs':\n [0.5, -0.27, 0], 'data': [0, 0, 0], 'grad_scale': 0.5, 'output': [0.5, \n -0.25, 0]})]"], {'indirect': "['module_cfg']"}), "('module_cfg, case', [({'input_num': 3, 'grad_cfg':\n {'method': nfp.FIX_AUTO}}, {'inputs': [0.52, -0.27, 0], 'data': [0, 0, \n 0], 'grad_scale': 1, 'output': [0.5, -0.5, 0]}), ({'input_num': 3,\n 'grad_cfg': {'method': nfp.FIX_AUTO}}, {'inputs': [0.5, -0.27, 0],\n 'data': [0, 0, 0], 'grad_scale': 0.5, 'output': [0.5, -0.25, 0]})],\n indirect=['module_cfg'])\n", (5559, 5929), False, 'import pytest\n'), ((1100, 1126), 'torch.tensor', 'torch.tensor', (["case['data']"], {}), "(case['data'])\n", (1112, 1126), False, 'import torch\n'), ((1136, 1151), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1149, 1151), False, 'import torch\n'), ((1235, 1266), 'numpy.isclose', 'np.isclose', (['res', "case['result']"], {}), "(res, case['result'])\n", (1245, 1266), True, 'import numpy as np\n'), ((2645, 2671), 'torch.tensor', 'torch.tensor', (["case['data']"], {}), "(case['data'])\n", (2657, 2671), False, 'import torch\n'), ((2732, 2747), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2745, 2747), False, 'import torch\n'), ((3964, 3990), 'torch.tensor', 'torch.tensor', (["case['data']"], {}), "(case['data'])\n", (3976, 3990), False, 'import torch\n'), ((5148, 5174), 'torch.tensor', 'torch.tensor', (["case['data']"], {}), "(case['data'])\n", (5160, 5174), False, 'import torch\n'), ((6331, 6357), 'torch.tensor', 'torch.tensor', (["case['data']"], {}), "(case['data'])\n", (6343, 6357), False, 'import torch\n'), ((4084, 4144), 'numpy.isclose', 'np.isclose', (["module._parameters['param'].grad", "case['output']"], {}), "(module._parameters['param'].grad, case['output'])\n", (4094, 4144), True, 'import numpy as np\n'), ((6880, 6915), 'nics_fix_pt.nn_fix.ConvBN_fix', 'ConvBN_fix', (['(3)', '(32)'], 
{'nf_fix_params': '{}'}), '(3, 32, nf_fix_params={})\n', (6890, 6915), False, 'from nics_fix_pt.nn_fix import ConvBN_fix\n'), ((7176, 7211), 'nics_fix_pt.nn_fix.ConvBN_fix', 'ConvBN_fix', (['(3)', '(32)'], {'nf_fix_params': '{}'}), '(3, 32, nf_fix_params={})\n', (7186, 7211), False, 'from nics_fix_pt.nn_fix import ConvBN_fix\n'), ((1297, 1337), 'numpy.isclose', 'np.isclose', (['module.param', "case['output']"], {}), "(module.param, case['output'])\n", (1307, 1337), True, 'import numpy as np\n'), ((4016, 4044), 'torch.tensor', 'torch.tensor', (["case['inputs']"], {}), "(case['inputs'])\n", (4028, 4044), False, 'import torch\n'), ((6450, 6478), 'torch.tensor', 'torch.tensor', (["case['inputs']"], {}), "(case['inputs'])\n", (6462, 6478), False, 'import torch\n'), ((1182, 1210), 'torch.tensor', 'torch.tensor', (["case['inputs']"], {}), "(case['inputs'])\n", (1194, 1210), False, 'import torch\n'), ((5392, 5416), 'numpy.array', 'np.array', (["case['output']"], {}), "(case['output'])\n", (5400, 5416), True, 'import numpy as np\n'), ((6966, 6996), 'numpy.random.rand', 'np.random.rand', (['(128)', '(3)', '(32)', '(32)'], {}), '(128, 3, 32, 32)\n', (6980, 6996), True, 'import numpy as np\n'), ((7243, 7273), 'numpy.random.rand', 'np.random.rand', (['(128)', '(3)', '(32)', '(32)'], {}), '(128, 3, 32, 32)\n', (7257, 7273), True, 'import numpy as np\n'), ((2769, 2797), 'torch.tensor', 'torch.tensor', (["case['inputs']"], {}), "(case['inputs'])\n", (2781, 2797), False, 'import torch\n'), ((5252, 5280), 'torch.tensor', 'torch.tensor', (["case['inputs']"], {}), "(case['inputs'])\n", (5264, 5280), False, 'import torch\n')] |
import sys
import math
import numpy
import random
import pygame
from scipy.spatial import distance
from shapely.geometry import box, Polygon
# Colour palette (RGB) used for drawing.
BLUE = (0, 0, 230)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
# Window size in pixels and the robot icon's width/height.
SIZE = (1500, 800)
width = 50
height = 50
# Initialise pygame and create the main drawing surface.
pygame.init()
screen = pygame.display.set_mode(SIZE)
class robot():
    """Robot state: pose, velocity, goal position, trail and scattered targets."""
    def __init__(self, x, y, end_x, end_y, vel, num_targets):
        # Current pose and speed.
        self.x = x
        self.y = y
        self.vel = vel
        # Goal position (set by the second mouse click in the main script).
        self.end_x = end_x
        self.end_y = end_y
        self.angle = 0
        # path[0] accumulates visited centre points, path[1] the heading history.
        self.path = [[], []]
        self.detected = []
        # Scatter num_targets random pick-up points inside the arena.
        self.targets = [[random.randint(100, 1400), random.randint(100, 700)]
                        for _ in range(num_targets)]
    def adjust_angle(self):
        """Fold self.angle back into [0, 360] (an angle of exactly 360 is kept)."""
        while self.angle > 360:
            self.angle -= 360
        while self.angle < 0:
            self.angle += 360
def directions(action, rot=0.4):
    """Apply one control action to the global robot ``zed`` and log its pose.

    action: one of "ahead", "back", "turn_r", "turn_l"; anything else is a no-op.
    rot: turn increment in degrees (used only by the turn actions).
    """
    def _log_pose():
        # Record the icon centre and the current heading on the trail.
        zed.path[0].append([zed.x + 25, zed.y + 25])
        zed.path[1].append([zed.angle])
    if action in ("ahead", "back"):
        step = zed.vel if action == "ahead" else -zed.vel
        rad = zed.angle*3.14/180  # degrees -> (approximate) radians
        zed.x += step*math.cos(rad)
        zed.y -= step*math.sin(rad)  # screen y axis points down
        _log_pose()
    elif action == "turn_r":
        zed.angle -= rot
        _log_pose()
    elif action == "turn_l":
        zed.angle += rot
        _log_pose()
def move_robot(zed, mode='ctrl'):
    """Advance ``zed`` one tick: arrow keys always apply; in 'auto' mode the
    robot additionally turns a fraction of the way toward its first detected
    target and then drives forward one step."""
    keys = pygame.key.get_pressed()
    # Manual keyboard control (active in every mode).
    if keys[pygame.K_LEFT]:
        directions("turn_l")
    if keys[pygame.K_RIGHT]:
        directions("turn_r")
    if keys[pygame.K_UP]:
        directions("ahead")
    if keys[pygame.K_DOWN]:
        directions("back")
    if mode == 'auto':
        if zed.detected == []:
            # directions("turn_l")
            # Nothing detected yet: head for the final goal position.
            zed.detected = [[zed.end_x, zed.end_y]]
        else:
            # Steer toward the first point of the planned path.
            x_target = zed.detected[0][0]
            y_target = zed.detected[0][1]
            x_zed = (zed.x + 25)
            y_zed = (zed.y + 25)
            x_dif = x_target - x_zed
            y_dif = y_zed - y_target
            # Bearing to the target in degrees (screen y axis points down).
            # NOTE(review): raises ZeroDivisionError when the target lies
            # exactly on the robot's vertical or horizontal axis
            # (x_dif == 0 here, or y_dif == 0 in the sign computation below).
            angle = (180/math.pi) * numpy.arctan(y_dif/x_dif)
            angle = round(angle, 2)
            sig_x = x_dif/abs(x_dif)
            sig_y = y_dif/abs(y_dif)
            # Map the arctan result into the correct quadrant.
            if (sig_x == -1 and sig_y == -1) or (sig_x == -1 and sig_y == 1):
                angle += 180
            else:
                angle += sig_x*180 + sig_y*(-180)
            # Turn only 1/30 of the remaining angular error per tick, taking
            # the shorter way around when headings straddle the 0/360 wrap.
            rot = (angle - zed.angle)
            if zed.angle > 270 and angle < zed.angle - 180:
                rot = angle + (360 - zed.angle)
            if zed.angle < angle - 180 and angle > 270:
                rot = -zed.angle -(360 - angle)
            rot = rot/30
            directions("turn_l", rot)
            directions("ahead")
def draw_robot(zed, screen):
    """Draw the robot icon, its triangular vision field, the targets and the
    travelled trail.  Returns the three vertices of the vision triangle
    (used by detect_collision)."""
    x = zed.x
    y = zed.y
    angle = zed.angle
    icon_rect = pygame.draw.rect(screen, BLACK, (x, y, width, height))
    # Icon centre; all geometry below is relative to it.
    centro = (x+25, y+25)
    # Rotate the two fixed vision-triangle edge endpoints by the heading.
    theta = numpy.radians(angle)
    c, s = numpy.cos(theta), numpy.sin(theta)
    r_mat = [[c,-s], [s, c]]
    base1 = (200, 571)
    base2 = (200, -571)
    rotated1 = numpy.dot(base1, r_mat)
    rotated2 = numpy.dot(base2, r_mat)
    l1e = (centro[0] + rotated1[0], centro[1] + rotated1[1])
    l2e = (centro[0] + rotated2[0], centro[1] + rotated2[1])
    pygame.draw.line(screen, BLUE, centro, l1e)
    pygame.draw.line(screen, BLUE, centro, l2e)
    pygame.draw.line(screen, BLUE, l1e, l2e)
    # NOTE(review): the icon is reloaded from disk on every frame; caching it
    # at module level would avoid redundant I/O.
    icon = pygame.image.load("icon.jpg")
    icon = pygame.transform.scale(icon, (width, height))
    icon = pygame.transform.rotate(icon, angle)
    screen.blit(icon, icon_rect)
    # Detected targets are drawn red, undetected ones blue.
    for target in zed.targets:
        if [target[0], target[1]] in zed.detected:
            pygame.draw.rect(screen, RED, [target[0], target[1], 25, 25])
            continue
        pygame.draw.rect(screen, BLUE, [target[0], target[1], 25, 25])
    if len(zed.path[0]) > 2:
        pygame.draw.lines(screen, GREEN, False, zed.path[0])
    return [centro, l1e, l2e]
def detect_collision(zed, points):
    """Update zed.detected/zed.targets from the vision triangle.

    points: [centre, edge_end_1, edge_end_2] as returned by draw_robot.
    """
    # Any target whose 25x25 cell overlaps the vision triangle becomes "detected".
    vision_field = Polygon(points)
    for target in zed.targets:
        gridcell_shape = box(target[0], target[1]+25, target[0]+25, target[1])
        if vision_field.intersection(gridcell_shape).area != 0 :
            if not [target[0], target[1]] in zed.detected:
                zed.detected.append([target[0], target[1]])
    # A target within 30px of the robot centre is "collected": remove it from
    # both lists.  At most one target is removed per call (break right after
    # pop, so the index loop never runs past the shortened list).
    for i in range(len(zed.targets)):
        point = [zed.targets[i][0], zed.targets[i][1]]
        if distance.euclidean([zed.x + 25, zed.y + 25], point) < 30:
            if point in zed.detected:
                zed.detected.pop(zed.detected.index(point))
            zed.targets.pop(i)
            break
def detect_obstacles():
    """Planned obstacle-avoidance routine (not implemented yet)."""
    # Look at the object and identify whether it is trash or not (this will
    # return the object's rectangle).
    # Create a new path to go around the obstacle:
    # starting from the identified object,
    # two points will be created near the observed boundaries;
    # the new "next" point will be the extremity point closest to the next target.
    pass
def define_path(zed, screen):
    """Greedily reorder zed.detected into a nearest-neighbour tour starting at
    the robot's current centre, then draw it in red."""
    points = zed.detected
    ref = [zed.x + 25, zed.y + 25]
    path = []
    aux = 0
    # Keep the final goal out of the candidates while other targets remain,
    # so intermediate targets are visited first.
    if [zed.end_x, zed.end_y] in zed.detected and len(zed.detected) > 1:
        points.pop(points.index([zed.end_x, zed.end_y]))
    for k in range(len(points)):
        minimum = 10000
        # Pick the closest not-yet-chosen point to the current reference.
        for point in points:
            dist = distance.euclidean(ref, point)
            if ref != point:
                if dist < minimum:
                    if point not in path:
                        minimum = dist
                        aux = point
        # NOTE(review): if every remaining point is already in ``path`` (or all
        # distances exceed the 10000 sentinel), ``aux`` keeps its previous
        # value and is appended again -- duplicate waypoints are possible.
        ref = aux
        path.append(ref)
    zed.detected = path
    if len(zed.detected) > 1:
        pygame.draw.lines(screen, RED, False, zed.detected)
# Interactive setup: the first mouse click places the robot (bx, by), the
# second click sets its goal (ex, ey).  ``a`` is a tiny state machine:
# 2 = waiting for the first click, 0 = waiting for the second, 1 = done.
a = 2
while a != 1:
    for event in pygame.event.get():
        if a == 0 and event.type == pygame.MOUSEBUTTONDOWN:
            ex, ey = pygame.mouse.get_pos()
            a = 1
            break
        if event.type == pygame.MOUSEBUTTONDOWN:
            bx, by = pygame.mouse.get_pos()
            a = 0
# Build the robot with velocity 1 and 20 random targets.
zed = robot(bx, by, ex, ey, 1, 20)
# Main simulation loop: replan, steer, draw and detect until the window closes.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
    screen.fill(BLACK)
    if len(zed.detected) > 0:
        define_path(zed, screen)
    zed.adjust_angle()
    move_robot(zed, "auto")
    detect_collision(zed, draw_robot(zed, screen))
    pygame.display.update()
"numpy.radians",
"pygame.init",
"shapely.geometry.box",
"math.cos",
"shapely.geometry.Polygon",
"sys.exit",
"numpy.sin",
"pygame.transform.scale",
"pygame.display.set_mode",
"pygame.mouse.get_pos",
"numpy.dot",
"pygame.draw.rect",
"pygame.image.load",
"pygame.display.update",
"random.ran... | [((261, 274), 'pygame.init', 'pygame.init', ([], {}), '()\n', (272, 274), False, 'import pygame\n'), ((284, 313), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SIZE'], {}), '(SIZE)\n', (307, 313), False, 'import pygame\n'), ((1764, 1788), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (1786, 1788), False, 'import pygame\n'), ((3194, 3248), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', '(x, y, width, height)'], {}), '(screen, BLACK, (x, y, width, height))\n', (3210, 3248), False, 'import pygame\n'), ((3289, 3309), 'numpy.radians', 'numpy.radians', (['angle'], {}), '(angle)\n', (3302, 3309), False, 'import numpy\n'), ((3448, 3471), 'numpy.dot', 'numpy.dot', (['base1', 'r_mat'], {}), '(base1, r_mat)\n', (3457, 3471), False, 'import numpy\n'), ((3487, 3510), 'numpy.dot', 'numpy.dot', (['base2', 'r_mat'], {}), '(base2, r_mat)\n', (3496, 3510), False, 'import numpy\n'), ((3639, 3682), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLUE', 'centro', 'l1e'], {}), '(screen, BLUE, centro, l1e)\n', (3655, 3682), False, 'import pygame\n'), ((3687, 3730), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLUE', 'centro', 'l2e'], {}), '(screen, BLUE, centro, l2e)\n', (3703, 3730), False, 'import pygame\n'), ((3735, 3775), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLUE', 'l1e', 'l2e'], {}), '(screen, BLUE, l1e, l2e)\n', (3751, 3775), False, 'import pygame\n'), ((3788, 3817), 'pygame.image.load', 'pygame.image.load', (['"""icon.jpg"""'], {}), "('icon.jpg')\n", (3805, 3817), False, 'import pygame\n'), ((3829, 3874), 'pygame.transform.scale', 'pygame.transform.scale', (['icon', '(width, height)'], {}), '(icon, (width, height))\n', (3851, 3874), False, 'import pygame\n'), ((3886, 3922), 'pygame.transform.rotate', 'pygame.transform.rotate', (['icon', 'angle'], {}), '(icon, angle)\n', (3909, 3922), False, 'import pygame\n'), ((4383, 4398), 'shapely.geometry.Polygon', 'Polygon', (['points'], {}), 
'(points)\n', (4390, 4398), False, 'from shapely.geometry import box, Polygon\n'), ((6168, 6186), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6184, 6186), False, 'import pygame\n'), ((6506, 6524), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6522, 6524), False, 'import pygame\n'), ((6783, 6806), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6804, 6806), False, 'import pygame\n'), ((3321, 3337), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (3330, 3337), False, 'import numpy\n'), ((3339, 3355), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (3348, 3355), False, 'import numpy\n'), ((4143, 4205), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLUE', '[target[0], target[1], 25, 25]'], {}), '(screen, BLUE, [target[0], target[1], 25, 25])\n', (4159, 4205), False, 'import pygame\n'), ((4244, 4296), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'GREEN', '(False)', 'zed.path[0]'], {}), '(screen, GREEN, False, zed.path[0])\n', (4261, 4296), False, 'import pygame\n'), ((4456, 4513), 'shapely.geometry.box', 'box', (['target[0]', '(target[1] + 25)', '(target[0] + 25)', 'target[1]'], {}), '(target[0], target[1] + 25, target[0] + 25, target[1])\n', (4459, 4513), False, 'from shapely.geometry import box, Polygon\n'), ((6074, 6125), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'RED', '(False)', 'zed.detected'], {}), '(screen, RED, False, zed.detected)\n', (6091, 6125), False, 'import pygame\n'), ((674, 699), 'random.randint', 'random.randint', (['(100)', '(1400)'], {}), '(100, 1400)\n', (688, 699), False, 'import random\n'), ((716, 740), 'random.randint', 'random.randint', (['(100)', '(700)'], {}), '(100, 700)\n', (730, 740), False, 'import random\n'), ((1019, 1051), 'math.cos', 'math.cos', (['(zed.angle * 3.14 / 180)'], {}), '(zed.angle * 3.14 / 180)\n', (1027, 1051), False, 'import math\n'), ((1073, 1105), 'math.sin', 'math.sin', (['(zed.angle * 3.14 / 180)'], {}), '(zed.angle * 3.14 / 
180)\n', (1081, 1105), False, 'import math\n'), ((1246, 1278), 'math.cos', 'math.cos', (['(zed.angle * 3.14 / 180)'], {}), '(zed.angle * 3.14 / 180)\n', (1254, 1278), False, 'import math\n'), ((1300, 1332), 'math.sin', 'math.sin', (['(zed.angle * 3.14 / 180)'], {}), '(zed.angle * 3.14 / 180)\n', (1308, 1332), False, 'import math\n'), ((4052, 4113), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'RED', '[target[0], target[1], 25, 25]'], {}), '(screen, RED, [target[0], target[1], 25, 25])\n', (4068, 4113), False, 'import pygame\n'), ((4808, 4859), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['[zed.x + 25, zed.y + 25]', 'point'], {}), '([zed.x + 25, zed.y + 25], point)\n', (4826, 4859), False, 'from scipy.spatial import distance\n'), ((5746, 5776), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['ref', 'point'], {}), '(ref, point)\n', (5764, 5776), False, 'from scipy.spatial import distance\n'), ((6269, 6291), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (6289, 6291), False, 'import pygame\n'), ((6399, 6421), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (6419, 6421), False, 'import pygame\n'), ((6576, 6586), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6584, 6586), False, 'import sys\n'), ((2435, 2462), 'numpy.arctan', 'numpy.arctan', (['(y_dif / x_dif)'], {}), '(y_dif / x_dif)\n', (2447, 2462), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""Implementations of covariance functions for use with :mod:`moe.optimal_learning.python.python_version.log_likelihood` and :mod:`moe.optimal_learning.python.python_version.gaussian_process`.
This file contains implementations of CovarianceInterface. Currently, we have
SquareExponential, supporting:
* covariance
* grad_covariance
* hyperparameter_grad_covariance
It also contains a few utilities for computing common mathematical quantities and
initialization. Note that the hessian is not yet implemented (use C++ for that feature).
Gradient (spatial and hyperparameter) functions return all derivatives at once
because there is substantial shared computation. The shared results are by far the
most expensive part of gradient computations; they typically involve exponentiation
and are further at least partially shared with the base covariance computation. In
fact, we could improve performance further by caching [certain] components of the
covariance computation for use with the derivative computations.
"""
import numpy
from moe.optimal_learning.python.constant import SQUARE_EXPONENTIAL_COVARIANCE_TYPE
from moe.optimal_learning.python.interfaces.covariance_interface import CovarianceInterface
class SquareExponential(CovarianceInterface):
    r"""Implement the square exponential covariance function.
    .. Note:: comments are copied from :class:`moe.optimal_learning.python.cpp_wrappers.covariance.SquareExponential`.
    The function:
    ``cov(x_1, x_2) = \alpha * \exp(-1/2 * ((x_1 - x_2)^T * L * (x_1 - x_2)) )``
    where L is the diagonal matrix with i-th diagonal entry ``1/lengths[i]/lengths[i]``
    This covariance object has ``dim+1`` hyperparameters: ``\alpha, lengths_i``
    """
    covariance_type = SQUARE_EXPONENTIAL_COVARIANCE_TYPE
    def __init__(self, hyperparameters):
        r"""Construct a square exponential covariance object with the specified hyperparameters.
        :param hyperparameters: hyperparameters of the covariance function; index 0 is \alpha (signal variance, \sigma_f^2)
            and index 1..dim are the per-dimension length scales.
        :type hyperparameters: array-like of size dim+1
        """
        # Assignment goes through the ``hyperparameters`` property setter below,
        # which also caches the squared length scales.
        self.hyperparameters = hyperparameters
    @property
    def num_hyperparameters(self):
        """Return the number of hyperparameters of this covariance function."""
        return self._hyperparameters.size
    def get_hyperparameters(self):
        """Get the hyperparameters (array of float64 with shape (num_hyperparameters)) of this covariance."""
        # Copy so callers cannot mutate internal state behind the setter's back
        # (the cached squared lengths would otherwise go stale).
        return numpy.copy(self._hyperparameters)
    def set_hyperparameters(self, hyperparameters):
        """Set hyperparameters to the specified hyperparameters; ordering must match."""
        self._hyperparameters = numpy.copy(hyperparameters)
        # Cache lengths[i]^2; covariance() divides by these on every evaluation.
        self._lengths_sq = numpy.copy(self._hyperparameters[1:])
        self._lengths_sq *= self._lengths_sq
    hyperparameters = property(get_hyperparameters, set_hyperparameters)
    def get_json_serializable_info(self):
        """Create and return a covariance_info dictionary of this covariance object."""
        return {
            'covariance_type': self.covariance_type,
            'hyperparameters': self.hyperparameters.tolist(),
        }
    def covariance(self, point_one, point_two):
        r"""Compute the square exponential covariance function of two points, cov(``point_one``, ``point_two``).
        Square Exponential: ``cov(x_1, x_2) = \alpha * \exp(-1/2 * ((x_1 - x_2)^T * L * (x_1 - x_2)) )``
        .. Note:: comments are copied from the matching method comments of
          :class:`moe.optimal_learning.python.interfaces.covariance_interface.CovarianceInterface`.
        The covariance function is guaranteed to be symmetric by definition: ``covariance(x, y) = covariance(y, x)``.
        This function is also positive definite by definition.
        :param point_one: first input, the point ``x``
        :type point_one: array of float64 with shape (dim)
        :param point_two: second input, the point ``y``
        :type point_two: array of float64 with shape (dim)
        :return: value of covariance between the input points
        :rtype: float64
        """
        # temp = ((x2 - x1) / lengths)^2 componentwise; in-place ops avoid
        # intermediate allocations.
        temp = point_two - point_one
        temp *= temp
        temp /= self._lengths_sq
        return self._hyperparameters[0] * numpy.exp(-0.5 * temp.sum())
    def grad_covariance(self, point_one, point_two):
        r"""Compute the gradient of self.covariance(point_one, point_two) with respect to the FIRST argument, point_one.
        Gradient of Square Exponential (wrt ``x_1``):
        ``\pderiv{cov(x_1, x_2)}{x_{1,i}} = (x_{2,i} - x_{1,i}) / L_{i}^2 * cov(x_1, x_2)``
        .. Note:: comments are copied from the matching method comments of
          :class:`moe.optimal_learning.python.interfaces.covariance_interface.CovarianceInterface`.
        This distinction is important for maintaining the desired symmetry.  ``Cov(x, y) = Cov(y, x)``.
        Additionally, ``\pderiv{Cov(x, y)}{x} = \pderiv{Cov(y, x)}{x}``.
        However, in general, ``\pderiv{Cov(x, y)}{x} != \pderiv{Cov(y, x)}{y}`` (NOT equal!  These may differ by a negative sign)
        Hence to avoid separate implementations for differentiating against first vs second argument, this function only handles
        differentiation against the first argument.  If you need ``\pderiv{Cov(y, x)}{x}``, just swap points x and y.
        :param point_one: first input, the point ``x``
        :type point_one: array of float64 with shape (dim)
        :param point_two: second input, the point ``y``
        :type point_two: array of float64 with shape (dim)
        :return: grad_cov: i-th entry is ``\pderiv{cov(x_1, x_2)}{x_i}``
        :rtype: array of float64 with shape (dim)
        """
        grad_cov = point_two - point_one
        grad_cov /= self._lengths_sq
        grad_cov *= self.covariance(point_one, point_two)
        return grad_cov
    def hyperparameter_grad_covariance(self, point_one, point_two):
        r"""Compute the gradient of self.covariance(point_one, point_two) with respect to its hyperparameters.
        Gradient of Square Exponential (wrt hyperparameters (``alpha, L``)):
        ``\pderiv{cov(x_1, x_2)}{\theta_0} = cov(x_1, x_2) / \theta_0``
        ``\pderiv{cov(x_1, x_2)}{\theta_i} = [(x_{1,i} - x_{2,i}) / L_i]^2 / L_i * cov(x_1, x_2)`` (for ``i >= 1``)
        Note: ``\theta_0 = \alpha`` and ``\theta_{1:d} = L_{0:d-1}``
        .. Note:: comments are copied from the matching method comments of
          :class:`moe.optimal_learning.python.interfaces.covariance_interface.CovarianceInterface`.
        Unlike GradCovariance(), the order of point_one and point_two is irrelevant here (since we are not differentiating against
        either of them).  Thus the matrix of grad covariances (wrt hyperparameters) is symmetric.
        :param point_one: first input, the point ``x``
        :type point_one: array of float64 with shape (dim)
        :param point_two: second input, the point ``y``
        :type point_two: array of float64 with shape (dim)
        :return: grad_hyperparameter_cov: i-th entry is ``\pderiv{cov(x_1, x_2)}{\theta_i}``
        :rtype: array of float64 with shape (num_hyperparameters)
        """
        cov = self.covariance(point_one, point_two)
        grad_cov = numpy.empty(self.num_hyperparameters)
        # d cov / d alpha = cov / alpha.
        grad_cov[0] = cov / self._hyperparameters[0]
        lengths = self._hyperparameters[1:]
        # grad_cov_lengths is a view into grad_cov[1:]; the in-place operations
        # below fill it with ((x2 - x1) / L)^2 / L * cov without extra arrays.
        grad_cov_lengths = grad_cov[1:]
        numpy.subtract(point_two, point_one, out=grad_cov_lengths)
        grad_cov_lengths /= lengths
        grad_cov_lengths *= grad_cov_lengths
        grad_cov_lengths /= lengths
        grad_cov_lengths *= cov
        return grad_cov
    def hyperparameter_hessian_covariance(self, point_one, point_two):
        r"""Compute the hessian of self.covariance(point_one, point_two) with respect to its hyperparameters.
        TODO(GH-57): Implement Hessians in Python.
        """
        raise NotImplementedError("Python implementation does not support computing the hessian covariance wrt hyperparameters.")
| [
"numpy.copy",
"numpy.empty",
"numpy.subtract"
] | [((2580, 2613), 'numpy.copy', 'numpy.copy', (['self._hyperparameters'], {}), '(self._hyperparameters)\n', (2590, 2613), False, 'import numpy\n'), ((2788, 2815), 'numpy.copy', 'numpy.copy', (['hyperparameters'], {}), '(hyperparameters)\n', (2798, 2815), False, 'import numpy\n'), ((2843, 2880), 'numpy.copy', 'numpy.copy', (['self._hyperparameters[1:]'], {}), '(self._hyperparameters[1:])\n', (2853, 2880), False, 'import numpy\n'), ((7363, 7400), 'numpy.empty', 'numpy.empty', (['self.num_hyperparameters'], {}), '(self.num_hyperparameters)\n', (7374, 7400), False, 'import numpy\n'), ((7546, 7604), 'numpy.subtract', 'numpy.subtract', (['point_two', 'point_one'], {'out': 'grad_cov_lengths'}), '(point_two, point_one, out=grad_cov_lengths)\n', (7560, 7604), False, 'import numpy\n')] |
import argparse
import os
import tarfile
import urllib
import numpy as np
import pyprind
import utils
"""
次の理由から、本家の```create_ubuntu_dataset.py```の代わりにこちらを使う。
- ubuntu_dialogs.tgzはサイズが莫大で、また今回に関してはそのすべては必要ではないため、解凍せずに条件にあうものだけを抽出する
- 今回に関しては (context, response, label) のtripletsの生成までは不要であり、条件にあうdialogだけが必要
"""
# Location of the official archived Ubuntu dialogue corpus and the local
# filename it is stored under.
URL = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
ARCHIVE_NAME = 'ubuntu_dialogs.tgz'
def main(args):
    """Download the Ubuntu dialogue archive if needed, filter dialogues by the
    CLI constraints, and write the survivors to the output directory."""
    # Make sure the raw archive is present locally.
    prepare_data_maybe_download(args.input)
    # Pull out only the dialogues satisfying every length/speaker constraint.
    archive = os.path.join(args.input, ARCHIVE_NAME)
    dialogues = extract_dialogues(
        archive_path=archive,
        n_dialogues=args.n_dialogues,
        min_dialogue_length=args.min_dialogue_length,
        max_dialogue_length=args.max_dialogue_length,
        max_utterance_length=args.max_utterance_length,
        max_speakers=args.max_speakers,
    )
    assert len(dialogues) <= args.n_dialogues
    # Persist what we extracted, one TSV file per dialogue.
    save_dialogues(output_dir=args.output, dialogues=dialogues)
    utils.writelog("Done.")
def prepare_data_maybe_download(archive_dir):
    """
    Download archived dialogues if necessary.
    This functions is mainly copied from the following original repository:
    https://github.com/rkadlec/ubuntu-ranking-dataset-creator

    :param archive_dir: directory that must contain the original creator
        scripts and into which ``ubuntu_dialogs.tgz`` is downloaded.
    """
    # BUGFIX: the module only does ``import urllib``, which does not make the
    # ``urllib.request`` submodule available as an attribute; accessing
    # ``urllib.request.urlretrieve`` could then raise AttributeError depending
    # on what other modules happened to import first.  Import it explicitly.
    import urllib.request
    # Check that archive_dir looks like the expected creator-script directory.
    filenames = os.listdir(archive_dir)
    assert "generate.sh" in filenames
    assert "create_ubuntu_dataset.py" in filenames
    assert "download_punkt.py" in filenames
    assert "meta" in filenames
    # dialogs are missing
    archive_path = os.path.join(archive_dir, ARCHIVE_NAME)
    if not os.path.exists(archive_path):
        # archive missing, download it
        utils.writelog("Downloading %s to %s" % (URL, archive_path))
        filepath, _ = urllib.request.urlretrieve(URL, archive_path)
        utils.writelog("Successfully downloaded " + filepath)
    else:
        utils.writelog("Found archive: %s" % archive_path)
def extract_dialogues(
        archive_path,
        n_dialogues,
        min_dialogue_length,
        max_dialogue_length,
        max_utterance_length,
        max_speakers):
    """Scan the archived TSV dialogues and return up to ``n_dialogues`` that
    satisfy every constraint, without extracting the archive to disk.

    Each returned dialogue is a list of 4-item rows; column 3 is the
    (stripped) utterance text.  Rows are assumed to be
    [timestamp, speaker, listener, utterance] -- TODO confirm against the
    original ubuntu-ranking-dataset-creator format.
    """
    utils.writelog("Number of dialogues: %d" % n_dialogues)
    utils.writelog("Min. dialogue length: %d" % min_dialogue_length)
    utils.writelog("Max. dialogue length: %d" % max_dialogue_length)
    utils.writelog("Max. utterance length: %d" % max_utterance_length)
    utils.writelog("Max. speakers: %d" % max_speakers)
    utils.writelog("Extracting dialogues from %s ..." % archive_path)
    dialogues = []
    with tarfile.open(name=archive_path, mode="r") as tar:
        # Get archived files (including directories)
        utils.writelog("Extracting archived information ...")
        members = tar.getmembers() # May take several minutes
        utils.writelog("Number of archived entries (files + directories): %d" % len(members))
        members = [m for m in members if m.name.endswith(".tsv")]
        utils.writelog("Number of archived TSV files: %d" % len(members))
        count = 0
        avg_dialogue_length = []
        avg_utterance_length = []
        avg_speakers = []
        for member_i, member in enumerate(members):
            # Content
            with tar.extractfile(member) as f:
                binary = f.read()
                text = binary.decode("utf-8")
            lines = text.split("\n")
            lines = [line.split("\t") for line in lines]
            # Clean lines: keep only complete 4-column rows (1- or 0-item rows
            # come from trailing newlines).
            new_lines = []
            for items in lines:
                assert len(items) == 4 or len(items) == 1 or len(items) == 0
                if len(items) == 4:
                    new_lines.append(items)
            lines = new_lines
            # Clean utterance: strip surrounding whitespace in column 3.
            lines = [items for items in lines if len(items) == 4]
            for i in range(len(lines)):
                assert len(lines[i]) == 4
                utterance = lines[i][3]
                utterance = utterance.strip()
                lines[i][3] = utterance
            # If conditions are met, record this dialogue
            avg_dialogue_length.append(len(lines))
            if min_dialogue_length <= len(lines) <= max_dialogue_length:
                # Dialogue length is OK
                # Every utterance from the 3rd onward must carry a listener
                # (response-to) marker.
                all_with_response = True
                for items in lines[2:]:
                    _, _, listener, _ = items
                    if listener == "":
                        all_with_response = False
                all_with_utterance = True
                for items in lines:
                    _, _, _, utterance = items
                    if utterance == "":
                        all_with_utterance = False
                if all_with_response and all_with_utterance:
                    # All utterances (except for the first one) are with response-to markers
                    temp_max_utterance_length = -1
                    speakers = []
                    for items in lines:
                        _, speaker, listener, utterance = items
                        n_tokens = len(utterance.split(" ")) # rough whitespace-based tokenization
                        temp_max_utterance_length = max(temp_max_utterance_length, n_tokens)
                        speakers.append(speaker)
                        speakers.append(listener)
                    speakers = set(speakers)
                    avg_utterance_length.append(temp_max_utterance_length)
                    avg_speakers.append(len(speakers))
                    if temp_max_utterance_length <= max_utterance_length and len(speakers) <= max_speakers:
                        # Utterance length and the number of speakers are OK
                        dialogues.append(lines)
                        count += 1
                        # Progress
                        if count % 1000 == 0:
                            utils.writelog("##### Extracted %d dialogues #####" % count)
                        if count == n_dialogues:
                            break
            # Progress: running averages over the last 5000 processed files.
            if (member_i + 1) % 5000 == 0:
                utils.writelog("Processed %d dialogues" % (member_i + 1))
                utils.writelog("Avg. dialogue length: %f" % np.mean(avg_dialogue_length))
                utils.writelog("Avg. max utterange length: %f" % np.mean(avg_utterance_length))
                utils.writelog("Avg. number of speakers: %f" % np.mean(avg_speakers))
                avg_dialogue_length = []
                avg_utterance_length = []
                avg_speakers = []
    ratio = float(count) / len(members) * 100.0
    utils.writelog("Extracted %d dialogues (utility: %d/%d=%.2f%%)" % (count, count, len(members), ratio))
    return dialogues
def save_dialogues(output_dir, dialogues):
    """Write each dialogue as one zero-padded TSV file under ``output_dir``."""
    utils.writelog("Saving dialogues to %s ..." % output_dir)
    utils.mkdir(output_dir)
    for idx, dlg in enumerate(pyprind.prog_bar(dialogues)):
        out_path = os.path.join(output_dir, "%06d.tsv" % idx)
        with open(out_path, "w") as f:
            for row in dlg:
                # Every row must be a complete 4-column record.
                assert len(row) == 4
                f.write("%s\n" % "\t".join(row))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--output", type=str, required=True)
parser.add_argument("--n_dialogues", type=int, required=True)
parser.add_argument("--min_dialogue_length", type=int, default=7)
parser.add_argument("--max_dialogue_length", type=int, default=16)
parser.add_argument("--max_utterance_length", type=int, default=20)
parser.add_argument("--max_speakers", type=int, default=9)
args = parser.parse_args()
main(args)
| [
"os.path.exists",
"numpy.mean",
"os.listdir",
"tarfile.open",
"argparse.ArgumentParser",
"urllib.request.urlretrieve",
"os.path.join",
"utils.mkdir",
"pyprind.prog_bar",
"utils.writelog"
] | [((1326, 1349), 'utils.writelog', 'utils.writelog', (['"""Done."""'], {}), "('Done.')\n", (1340, 1349), False, 'import utils\n'), ((1630, 1653), 'os.listdir', 'os.listdir', (['archive_dir'], {}), '(archive_dir)\n', (1640, 1653), False, 'import os\n'), ((1864, 1903), 'os.path.join', 'os.path.join', (['archive_dir', 'ARCHIVE_NAME'], {}), '(archive_dir, ARCHIVE_NAME)\n', (1876, 1903), False, 'import os\n'), ((2435, 2490), 'utils.writelog', 'utils.writelog', (["('Number of dialogues: %d' % n_dialogues)"], {}), "('Number of dialogues: %d' % n_dialogues)\n", (2449, 2490), False, 'import utils\n'), ((2495, 2559), 'utils.writelog', 'utils.writelog', (["('Min. dialogue length: %d' % min_dialogue_length)"], {}), "('Min. dialogue length: %d' % min_dialogue_length)\n", (2509, 2559), False, 'import utils\n'), ((2564, 2628), 'utils.writelog', 'utils.writelog', (["('Max. dialogue length: %d' % max_dialogue_length)"], {}), "('Max. dialogue length: %d' % max_dialogue_length)\n", (2578, 2628), False, 'import utils\n'), ((2633, 2699), 'utils.writelog', 'utils.writelog', (["('Max. utterance length: %d' % max_utterance_length)"], {}), "('Max. utterance length: %d' % max_utterance_length)\n", (2647, 2699), False, 'import utils\n'), ((2704, 2754), 'utils.writelog', 'utils.writelog', (["('Max. speakers: %d' % max_speakers)"], {}), "('Max. speakers: %d' % max_speakers)\n", (2718, 2754), False, 'import utils\n'), ((2760, 2825), 'utils.writelog', 'utils.writelog', (["('Extracting dialogues from %s ...' % archive_path)"], {}), "('Extracting dialogues from %s ...' % archive_path)\n", (2774, 2825), False, 'import utils\n'), ((7044, 7101), 'utils.writelog', 'utils.writelog', (["('Saving dialogues to %s ...' % output_dir)"], {}), "('Saving dialogues to %s ...' 
% output_dir)\n", (7058, 7101), False, 'import utils\n'), ((7106, 7129), 'utils.mkdir', 'utils.mkdir', (['output_dir'], {}), '(output_dir)\n', (7117, 7129), False, 'import utils\n'), ((7477, 7502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7500, 7502), False, 'import argparse\n'), ((1915, 1943), 'os.path.exists', 'os.path.exists', (['archive_path'], {}), '(archive_path)\n', (1929, 1943), False, 'import os\n'), ((1992, 2052), 'utils.writelog', 'utils.writelog', (["('Downloading %s to %s' % (URL, archive_path))"], {}), "('Downloading %s to %s' % (URL, archive_path))\n", (2006, 2052), False, 'import utils\n'), ((2075, 2120), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['URL', 'archive_path'], {}), '(URL, archive_path)\n', (2101, 2120), False, 'import urllib\n'), ((2129, 2182), 'utils.writelog', 'utils.writelog', (["('Successfully downloaded ' + filepath)"], {}), "('Successfully downloaded ' + filepath)\n", (2143, 2182), False, 'import utils\n'), ((2201, 2251), 'utils.writelog', 'utils.writelog', (["('Found archive: %s' % archive_path)"], {}), "('Found archive: %s' % archive_path)\n", (2215, 2251), False, 'import utils\n'), ((2855, 2896), 'tarfile.open', 'tarfile.open', ([], {'name': 'archive_path', 'mode': '"""r"""'}), "(name=archive_path, mode='r')\n", (2867, 2896), False, 'import tarfile\n'), ((2966, 3019), 'utils.writelog', 'utils.writelog', (['"""Extracting archived information ..."""'], {}), "('Extracting archived information ...')\n", (2980, 3019), False, 'import utils\n'), ((7172, 7199), 'pyprind.prog_bar', 'pyprind.prog_bar', (['dialogues'], {}), '(dialogues)\n', (7188, 7199), False, 'import pyprind\n'), ((743, 782), 'os.path.join', 'os.path.join', (['archive_dir', 'ARCHIVE_NAME'], {}), '(archive_dir, ARCHIVE_NAME)\n', (755, 782), False, 'import os\n'), ((6371, 6428), 'utils.writelog', 'utils.writelog', (["('Processed %d dialogues' % (member_i + 1))"], {}), "('Processed %d dialogues' % (member_i + 1))\n", (6385, 
6428), False, 'import utils\n'), ((7220, 7269), 'os.path.join', 'os.path.join', (['output_dir', "('%06d.tsv' % dialogue_i)"], {}), "(output_dir, '%06d.tsv' % dialogue_i)\n", (7232, 7269), False, 'import os\n'), ((6489, 6517), 'numpy.mean', 'np.mean', (['avg_dialogue_length'], {}), '(avg_dialogue_length)\n', (6496, 6517), True, 'import numpy as np\n'), ((6584, 6613), 'numpy.mean', 'np.mean', (['avg_utterance_length'], {}), '(avg_utterance_length)\n', (6591, 6613), True, 'import numpy as np\n'), ((6678, 6699), 'numpy.mean', 'np.mean', (['avg_speakers'], {}), '(avg_speakers)\n', (6685, 6699), True, 'import numpy as np\n'), ((6144, 6204), 'utils.writelog', 'utils.writelog', (["('##### Extracted %d dialogues #####' % count)"], {}), "('##### Extracted %d dialogues #####' % count)\n", (6158, 6204), False, 'import utils\n')] |
import torch, os, numpy as np, copy
import cv2
import glob
from .map import GeometricMap
class preprocess(object):
def __init__(self, data_root, seq_name, parser, log, split='train', phase='training'):
self.parser = parser
self.dataset = parser.dataset
self.data_root = data_root
self.past_frames = parser.past_frames
self.future_frames = parser.future_frames
self.frame_skip = parser.get('frame_skip', 1)
self.min_past_frames = parser.get('min_past_frames', self.past_frames)
self.min_future_frames = parser.get('min_future_frames', self.future_frames)
self.traj_scale = parser.traj_scale
self.past_traj_scale = parser.traj_scale
self.load_map = parser.get('load_map', False)
self.map_version = parser.get('map_version', '0.1')
self.seq_name = seq_name
self.split = split
self.phase = phase
self.log = log
if parser.dataset == 'nuscenes_pred':
label_path = os.path.join(data_root, 'label/{}/{}.txt'.format(split, seq_name))
delimiter = ' '
elif parser.dataset in {'eth', 'hotel', 'univ', 'zara1', 'zara2'}:
label_path = f'{data_root}/{parser.dataset}/{seq_name}.txt'
delimiter = ' '
else:
assert False, 'error'
self.gt = np.genfromtxt(label_path, delimiter=delimiter, dtype=str)
frames = self.gt[:, 0].astype(np.float32).astype(np.int)
fr_start, fr_end = frames.min(), frames.max()
self.init_frame = fr_start
self.num_fr = fr_end + 1 - fr_start
if self.load_map:
self.load_scene_map()
else:
self.geom_scene_map = None
self.class_names = class_names = {'Pedestrian': 1, 'Car': 2, 'Cyclist': 3, 'Truck': 4, 'Van': 5, 'Tram': 6, 'Person': 7, \
'Misc': 8, 'DontCare': 9, 'Traffic_cone': 10, 'Construction_vehicle': 11, 'Barrier': 12, 'Motorcycle': 13, \
'Bicycle': 14, 'Bus': 15, 'Trailer': 16, 'Emergency': 17, 'Construction': 18}
for row_index in range(len(self.gt)):
self.gt[row_index][2] = class_names[self.gt[row_index][2]]
self.gt = self.gt.astype('float32')
self.xind, self.zind = 13, 15
def GetID(self, data):
id = []
for i in range(data.shape[0]):
id.append(data[i, 1].copy())
return id
def TotalFrame(self):
return self.num_fr
def PreData(self, frame):
DataList = []
for i in range(self.past_frames):
if frame - i < self.init_frame:
data = []
data = self.gt[self.gt[:, 0] == (frame - i * self.frame_skip)]
DataList.append(data)
return DataList
def FutureData(self, frame):
DataList = []
for i in range(1, self.future_frames + 1):
data = self.gt[self.gt[:, 0] == (frame + i * self.frame_skip)]
DataList.append(data)
return DataList
def get_valid_id(self, pre_data, fut_data):
cur_id = self.GetID(pre_data[0])
valid_id = []
for idx in cur_id:
exist_pre = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in pre_data[:self.min_past_frames]]
exist_fut = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in fut_data[:self.min_future_frames]]
if np.all(exist_pre) and np.all(exist_fut):
valid_id.append(idx)
return valid_id
def get_pred_mask(self, cur_data, valid_id):
pred_mask = np.zeros(len(valid_id), dtype=np.int)
for i, idx in enumerate(valid_id):
pred_mask[i] = cur_data[cur_data[:, 1] == idx].squeeze()[-1]
return pred_mask
def get_heading(self, cur_data, valid_id):
heading = np.zeros(len(valid_id))
for i, idx in enumerate(valid_id):
heading[i] = cur_data[cur_data[:, 1] == idx].squeeze()[16]
return heading
def load_scene_map(self):
map_file = f'{self.data_root}/map_{self.map_version}/{self.seq_name}.png'
map_vis_file = f'{self.data_root}/map_{self.map_version}/vis_{self.seq_name}.png'
map_meta_file = f'{self.data_root}/map_{self.map_version}/meta_{self.seq_name}.txt'
self.scene_map = np.transpose(cv2.imread(map_file), (2, 0, 1))
self.scene_vis_map = np.transpose(cv2.cvtColor(cv2.imread(map_vis_file), cv2.COLOR_BGR2RGB), (2, 0, 1))
self.meta = np.loadtxt(map_meta_file)
self.map_origin = self.meta[:2]
self.map_scale = scale = self.meta[2]
homography = np.array([[scale, 0., 0.], [0., scale, 0.], [0., 0., scale]])
self.geom_scene_map = GeometricMap(self.scene_map, homography, self.map_origin)
self.scene_vis_map = GeometricMap(self.scene_vis_map, homography, self.map_origin)
def PreMotion(self, DataTuple, valid_id):
motion = []
mask = []
for identity in valid_id:
mask_i = torch.zeros(self.past_frames)
box_3d = torch.zeros([self.past_frames, 2])
for j in range(self.past_frames):
past_data = DataTuple[j] # past_data
if len(past_data) > 0 and identity in past_data[:, 1]:
found_data = past_data[past_data[:, 1] == identity].squeeze()[[self.xind, self.zind]] / self.past_traj_scale
box_3d[self.past_frames-1 - j, :] = torch.from_numpy(found_data).float()
mask_i[self.past_frames-1 - j] = 1.0
elif j > 0:
box_3d[self.past_frames-1 - j, :] = box_3d[self.past_frames - j, :] # if none, copy from previous
else:
raise ValueError('current id missing in the first frame!')
motion.append(box_3d)
mask.append(mask_i)
return motion, mask
def FutureMotion(self, DataTuple, valid_id):
motion = []
mask = []
for identity in valid_id:
mask_i = torch.zeros(self.future_frames)
pos_3d = torch.zeros([self.future_frames, 2])
for j in range(self.future_frames):
fut_data = DataTuple[j] # cur_data
if len(fut_data) > 0 and identity in fut_data[:, 1]:
found_data = fut_data[fut_data[:, 1] == identity].squeeze()[[self.xind, self.zind]] / self.traj_scale
pos_3d[j, :] = torch.from_numpy(found_data).float()
mask_i[j] = 1.0
elif j > 0:
pos_3d[j, :] = pos_3d[j - 1, :] # if none, copy from previous
else:
raise ValueError('current id missing in the first frame!')
motion.append(pos_3d)
mask.append(mask_i)
return motion, mask
def __call__(self, frame):
assert frame - self.init_frame >= 0 and frame - self.init_frame <= self.TotalFrame() - 1, 'frame is %d, total is %d' % (frame, self.TotalFrame())
pre_data = self.PreData(frame)
fut_data = self.FutureData(frame)
valid_id = self.get_valid_id(pre_data, fut_data)
if len(pre_data[0]) == 0 or len(fut_data[0]) == 0 or len(valid_id) == 0:
return None
if self.dataset == 'nuscenes_pred':
pred_mask = self.get_pred_mask(pre_data[0], valid_id)
heading = self.get_heading(pre_data[0], valid_id)
else:
pred_mask = None
heading = None
pre_motion_3D, pre_motion_mask = self.PreMotion(pre_data, valid_id)
fut_motion_3D, fut_motion_mask = self.FutureMotion(fut_data, valid_id)
data = {
'pre_motion_3D': pre_motion_3D,
'fut_motion_3D': fut_motion_3D,
'fut_motion_mask': fut_motion_mask,
'pre_motion_mask': pre_motion_mask,
'pre_data': pre_data,
'fut_data': fut_data,
'heading': heading,
'valid_id': valid_id,
'traj_scale': self.traj_scale,
'pred_mask': pred_mask,
'scene_map': self.geom_scene_map,
'seq': self.seq_name,
'frame': frame
}
return data
| [
"numpy.all",
"torch.from_numpy",
"numpy.array",
"numpy.loadtxt",
"numpy.genfromtxt",
"torch.zeros",
"cv2.imread"
] | [((1355, 1412), 'numpy.genfromtxt', 'np.genfromtxt', (['label_path'], {'delimiter': 'delimiter', 'dtype': 'str'}), '(label_path, delimiter=delimiter, dtype=str)\n', (1368, 1412), True, 'import torch, os, numpy as np, copy\n'), ((4513, 4538), 'numpy.loadtxt', 'np.loadtxt', (['map_meta_file'], {}), '(map_meta_file)\n', (4523, 4538), True, 'import torch, os, numpy as np, copy\n'), ((4646, 4713), 'numpy.array', 'np.array', (['[[scale, 0.0, 0.0], [0.0, scale, 0.0], [0.0, 0.0, scale]]'], {}), '([[scale, 0.0, 0.0], [0.0, scale, 0.0], [0.0, 0.0, scale]])\n', (4654, 4713), True, 'import torch, os, numpy as np, copy\n'), ((4348, 4368), 'cv2.imread', 'cv2.imread', (['map_file'], {}), '(map_file)\n', (4358, 4368), False, 'import cv2\n'), ((5027, 5056), 'torch.zeros', 'torch.zeros', (['self.past_frames'], {}), '(self.past_frames)\n', (5038, 5056), False, 'import torch, os, numpy as np, copy\n'), ((5078, 5112), 'torch.zeros', 'torch.zeros', (['[self.past_frames, 2]'], {}), '([self.past_frames, 2])\n', (5089, 5112), False, 'import torch, os, numpy as np, copy\n'), ((6062, 6093), 'torch.zeros', 'torch.zeros', (['self.future_frames'], {}), '(self.future_frames)\n', (6073, 6093), False, 'import torch, os, numpy as np, copy\n'), ((6115, 6151), 'torch.zeros', 'torch.zeros', (['[self.future_frames, 2]'], {}), '([self.future_frames, 2])\n', (6126, 6151), False, 'import torch, os, numpy as np, copy\n'), ((3437, 3454), 'numpy.all', 'np.all', (['exist_pre'], {}), '(exist_pre)\n', (3443, 3454), True, 'import torch, os, numpy as np, copy\n'), ((3459, 3476), 'numpy.all', 'np.all', (['exist_fut'], {}), '(exist_fut)\n', (3465, 3476), True, 'import torch, os, numpy as np, copy\n'), ((4436, 4460), 'cv2.imread', 'cv2.imread', (['map_vis_file'], {}), '(map_vis_file)\n', (4446, 4460), False, 'import cv2\n'), ((5481, 5509), 'torch.from_numpy', 'torch.from_numpy', (['found_data'], {}), '(found_data)\n', (5497, 5509), False, 'import torch, os, numpy as np, copy\n'), ((6490, 6518), 
'torch.from_numpy', 'torch.from_numpy', (['found_data'], {}), '(found_data)\n', (6506, 6518), False, 'import torch, os, numpy as np, copy\n')] |
########## Creating Test Set ##########
import torch
import numpy as np
import pandas as pd
import os
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'allsides_data')
os.chdir(data_path)
### Select name of dataset to name files
####################################################
affix = 'allsides' # 'allsides_duplicates_removed' #
####################################################
# (for allsides_duplicates_removed only texts and masks
# need to be handled, since rest is the same)
### choosing split ratio ###
split_ratio = (80,10,10) ###
############################
##### loading tensors
### contents
contents_text_tensor = torch.load(f'{affix}_contents_text_tensor.pt')
contents_mask_tensor = torch.load(f'{affix}_contents_mask_tensor.pt')
### titles
# titles_text_tensor = torch.load(f'{affix}_titles_text_tensor.pt')
# titles_mask_tensor = torch.load(f'{affix}_titles_mask_tensor.pt')
bias_tensor = torch.load(f'{affix}_bias_tensor.pt')
### loading date and source
data = pd.read_csv('allsides_data_short.csv')
data.drop(columns=['name', 'content', 'bias'],inplace=True)
source_array = np.array(data['source']).reshape((-1,1))
# list of tensors that need to be: modified, devided into sets, and saved
if affix == 'allsides':
tensor_list = [contents_text_tensor, contents_mask_tensor, bias_tensor, source_array]
elif affix == 'allsides_duplicates_removed':
tensor_list = [contents_text_tensor, contents_mask_tensor]
else:
raise AssertionError('affix should be \'allsides\' or \'allsides_duplicates_removed\'')
# titles_text_tensor, titles_mask_tensor, # (titles not used)
### creating id vectors
np.random.seed(123)
data_length = len(contents_text_tensor)
ids = np.arange(data_length)
np.random.shuffle(ids)
# cut offs for train- validation- and test-set according to split ratio
train_val_cut = int(data_length*(split_ratio[0]/100))
val_test_cut = int(data_length*(split_ratio[0]+split_ratio[1])/100)
# ids for each set
train_ids = ids[:train_val_cut]
val_ids = ids[train_val_cut:val_test_cut]
test_ids = ids[val_test_cut:]
### creating train- val- test-sets
if affix == 'allsides':
tensor_file_names = ['contents_text', 'contents_mask', 'bias', 'source']
elif affix == 'allsides_duplicates_removed':
tensor_file_names = ['contents_text', 'contents_mask']
else:
raise AssertionError('affix should be \'allsides\' or \'allsides_duplicates_removed\'')
# 'titles_text', 'titles_mask'
train_tensors = []
for tensor in tensor_list:
train_tensors.append(tensor[train_ids])
val_tensors = []
for tensor in tensor_list:
val_tensors.append(tensor[val_ids])
test_tensors = []
for tensor in tensor_list:
test_tensors.append(tensor[test_ids])
### saving tensors
for tensor,name in zip(train_tensors,tensor_file_names):
if name == 'source':
np.save(f'{affix}_{name}_train.npy', tensor)
else:
torch.save(tensor,f'{affix}_{name}_train.pt')
for tensor,name in zip(val_tensors,tensor_file_names):
if name == 'source':
np.save(f'{affix}_{name}_val.npy', tensor)
else:
torch.save(tensor, f'{affix}_{name}_val.pt')
for tensor,name in zip(test_tensors,tensor_file_names):
if name == 'source':
np.save(f'{affix}_{name}_test.npy', tensor)
else:
torch.save(tensor, f'{affix}_{name}_test.pt')
| [
"pandas.read_csv",
"torch.load",
"os.chdir",
"numpy.array",
"numpy.random.seed",
"torch.save",
"os.path.abspath",
"numpy.save",
"numpy.arange",
"numpy.random.shuffle"
] | [((188, 207), 'os.chdir', 'os.chdir', (['data_path'], {}), '(data_path)\n', (196, 207), False, 'import os\n'), ((660, 706), 'torch.load', 'torch.load', (['f"""{affix}_contents_text_tensor.pt"""'], {}), "(f'{affix}_contents_text_tensor.pt')\n", (670, 706), False, 'import torch\n'), ((730, 776), 'torch.load', 'torch.load', (['f"""{affix}_contents_mask_tensor.pt"""'], {}), "(f'{affix}_contents_mask_tensor.pt')\n", (740, 776), False, 'import torch\n'), ((940, 977), 'torch.load', 'torch.load', (['f"""{affix}_bias_tensor.pt"""'], {}), "(f'{affix}_bias_tensor.pt')\n", (950, 977), False, 'import torch\n'), ((1014, 1052), 'pandas.read_csv', 'pd.read_csv', (['"""allsides_data_short.csv"""'], {}), "('allsides_data_short.csv')\n", (1025, 1052), True, 'import pandas as pd\n'), ((1652, 1671), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1666, 1671), True, 'import numpy as np\n'), ((1720, 1742), 'numpy.arange', 'np.arange', (['data_length'], {}), '(data_length)\n', (1729, 1742), True, 'import numpy as np\n'), ((1743, 1765), 'numpy.random.shuffle', 'np.random.shuffle', (['ids'], {}), '(ids)\n', (1760, 1765), True, 'import numpy as np\n'), ((144, 169), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'import os\n'), ((1129, 1153), 'numpy.array', 'np.array', (["data['source']"], {}), "(data['source'])\n", (1137, 1153), True, 'import numpy as np\n'), ((2828, 2872), 'numpy.save', 'np.save', (['f"""{affix}_{name}_train.npy"""', 'tensor'], {}), "(f'{affix}_{name}_train.npy', tensor)\n", (2835, 2872), True, 'import numpy as np\n'), ((2891, 2937), 'torch.save', 'torch.save', (['tensor', 'f"""{affix}_{name}_train.pt"""'], {}), "(tensor, f'{affix}_{name}_train.pt')\n", (2901, 2937), False, 'import torch\n'), ((3026, 3068), 'numpy.save', 'np.save', (['f"""{affix}_{name}_val.npy"""', 'tensor'], {}), "(f'{affix}_{name}_val.npy', tensor)\n", (3033, 3068), True, 'import numpy as np\n'), ((3087, 3131), 'torch.save', 
'torch.save', (['tensor', 'f"""{affix}_{name}_val.pt"""'], {}), "(tensor, f'{affix}_{name}_val.pt')\n", (3097, 3131), False, 'import torch\n'), ((3222, 3265), 'numpy.save', 'np.save', (['f"""{affix}_{name}_test.npy"""', 'tensor'], {}), "(f'{affix}_{name}_test.npy', tensor)\n", (3229, 3265), True, 'import numpy as np\n'), ((3284, 3329), 'torch.save', 'torch.save', (['tensor', 'f"""{affix}_{name}_test.pt"""'], {}), "(tensor, f'{affix}_{name}_test.pt')\n", (3294, 3329), False, 'import torch\n')] |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def grad_grad_lapse(lapse, christoffel_second_kind, field_a, d_field_a):
return (lapse * np.einsum("i,j", field_a, field_a) -
lapse * np.einsum("kij,k", christoffel_second_kind, field_a) +
0.5 * lapse *
(np.einsum("ij", d_field_a) + np.einsum("ij->ji", d_field_a)))
def divergence_lapse(conformal_factor_squared, inverse_conformal_metric,
grad_grad_lapse):
return (conformal_factor_squared *
np.einsum("ij,ij", inverse_conformal_metric, grad_grad_lapse))
| [
"numpy.einsum"
] | [((561, 622), 'numpy.einsum', 'np.einsum', (['"""ij,ij"""', 'inverse_conformal_metric', 'grad_grad_lapse'], {}), "('ij,ij', inverse_conformal_metric, grad_grad_lapse)\n", (570, 622), True, 'import numpy as np\n'), ((183, 217), 'numpy.einsum', 'np.einsum', (['"""i,j"""', 'field_a', 'field_a'], {}), "('i,j', field_a, field_a)\n", (192, 217), True, 'import numpy as np\n'), ((240, 292), 'numpy.einsum', 'np.einsum', (['"""kij,k"""', 'christoffel_second_kind', 'field_a'], {}), "('kij,k', christoffel_second_kind, field_a)\n", (249, 292), True, 'import numpy as np\n'), ((334, 360), 'numpy.einsum', 'np.einsum', (['"""ij"""', 'd_field_a'], {}), "('ij', d_field_a)\n", (343, 360), True, 'import numpy as np\n'), ((363, 393), 'numpy.einsum', 'np.einsum', (['"""ij->ji"""', 'd_field_a'], {}), "('ij->ji', d_field_a)\n", (372, 393), True, 'import numpy as np\n')] |
"""Integration test: run experiments with some small & fast configs.
Only cursory 'smoke' checks -- there are plenty of errors this won't catch."""
import os
import shutil
import tempfile
import numpy as np
import pytest
import ray
from ray import tune
from aprl.activations.density.pipeline import density_ex
from aprl.activations.tsne.pipeline import tsne_ex
from aprl.multi.score import multi_score_ex
from aprl.multi.train import multi_train_ex
from aprl.policies.loader import AGENT_LOADERS
from aprl.score_agent import score_ex
from aprl.train import NO_VECENV, RL_ALGOS, train_ex
EXPERIMENTS = [score_ex, train_ex]
@pytest.mark.parametrize("experiment", EXPERIMENTS)
def test_experiment(experiment):
"""Smoke test to check the experiments runs with default config."""
run = experiment.run()
assert run.status == "COMPLETED"
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
SCORE_AGENT_CONFIGS = [
{"agent_b_type": "zoo", "agent_b_path": "2", "videos": True, "episodes": 2},
{"env_name": "multicomp/KickAndDefend-v0", "episodes": 1},
{"record_traj": True, "record_traj_params": {"save_dir": "test_dir"}},
{"noisy_agent_index": 0},
{"mask_agent_index": 0},
{"mask_agent_index": 0, "mask_agent_masking_type": "additive_noise", "mask_agent_noise": 1.0},
]
SCORE_AGENT_CONFIGS += [
{
"agent_b_type": rl_algo,
"agent_b_path": os.path.join(BASE_DIR, "dummy_sumo_ants", rl_algo),
"episodes": 1,
}
for rl_algo in AGENT_LOADERS.keys()
if rl_algo != "zoo"
]
@pytest.mark.parametrize("config", SCORE_AGENT_CONFIGS)
def test_score_agent(config):
"""Smoke test for score agent to check it runs with some different configs."""
config = dict(config)
if "episodes" not in config:
config["episodes"] = 1 # speed up tests
config["render"] = False # faster without, test_experiment already tests with render
run = score_ex.run(config_updates=config)
assert run.status == "COMPLETED"
outcomes = [run.result[k] for k in ["ties", "win0", "win1"]]
assert sum(outcomes) == run.config["episodes"]
if config.get("record_traj", False):
try:
for i in range(2):
traj_file_path = os.path.join(
config["record_traj_params"]["save_dir"], f"agent_{i}.npz"
)
traj_data = np.load(traj_file_path)
assert set(traj_data.keys()).issuperset(["observations", "actions", "rewards"])
for k, ep_data in traj_data.items():
assert len(ep_data) == config["episodes"], f"unexpected array length at '{k}'"
os.remove(traj_file_path)
finally:
os.rmdir(config["record_traj_params"]["save_dir"])
SCORE_AGENT_VIDEO_CONFIGS = {
"none_dir": {
"videos": True,
"video_params": {"save_dir": None},
"episodes": 1,
"render": False,
},
"specified_dir": {
"videos": True,
"video_params": {"save_dir": "specific_video_dir"},
"episodes": 1,
"render": False,
},
}
def test_score_agent_video():
# Confirm that experiment runs properly saving videos to a temp dir
none_dir_run = score_ex.run(config_updates=SCORE_AGENT_VIDEO_CONFIGS["none_dir"])
assert none_dir_run.status == "COMPLETED"
try:
# Confirm that the first time you try to save videos to a specified dir, it works properly
specified_dir_run = score_ex.run(config_updates=SCORE_AGENT_VIDEO_CONFIGS["specified_dir"])
assert specified_dir_run.status == "COMPLETED"
# Confirm that the second time you try to save videos to the same specified dir, it fails
with pytest.raises(AssertionError):
_ = score_ex.run(config_updates=SCORE_AGENT_VIDEO_CONFIGS["specified_dir"])
finally:
shutil.rmtree(SCORE_AGENT_VIDEO_CONFIGS["specified_dir"]["video_params"]["save_dir"])
TRAIN_CONFIGS = [
{"num_env": 1},
{"env_name": "multicomp/YouShallNotPassHumans-v0"},
{"normalize": False},
{"embed_type": "ppo2", "embed_path": os.path.join(BASE_DIR, "dummy_sumo_ants", "ppo2")},
{
"env_name": "multicomp/SumoHumans-v0",
"rew_shape": True,
"rew_shape_params": {"anneal_frac": 0.1},
},
{"env_name": "multicomp/SumoHumans-v0", "embed_noise": True},
{"env_name": "Humanoid-v3", "embed_types": [], "embed_paths": []},
{
"env_name": "multicomp/SumoHumansAutoContact-v0",
"rew_shape": True,
"rew_shape_params": {"metric": "length", "min_wait": 100, "window_size": 100},
},
{
"env_name": "multicomp/SumoHumans-v0",
"rew_shape": True,
"embed_noise": True,
"embed_noise_params": {"metric": "sparse", "min_wait": 100, "window_size": 100},
},
{"env_name": "multicomp/SumoHumansAutoContact-v0", "adv_noise_params": {"noise_val": 0.1}},
{
# test TransparentLSTMPolicy
"transparent_params": ["ff_policy", "hid"],
},
{
# test TransparentMLPPolicyValue
"env_name": "multicomp/YouShallNotPassHumans-v0",
"transparent_params": ["ff_policy"],
"batch_size": 32,
},
{
"env_name": "multicomp/SumoHumans-v0",
"lookback_params": {"lb_num": 2, "lb_path": 1, "lb_type": "zoo"},
"adv_noise_params": {"noise_val": 0.1},
"transparent_params": ["ff_policy"],
},
]
try:
from stable_baselines import GAIL
del GAIL
TRAIN_CONFIGS.append(
{
"rl_algo": "gail",
"num_env": 1,
"expert_dataset_path": os.path.join(BASE_DIR, "SumoAnts_traj/agent_0.npz"),
}
)
except ImportError: # pragma: no cover
# skip GAIL test if algorithm not available
pass
TRAIN_CONFIGS += [
{"rl_algo": algo, "num_env": 1 if algo in NO_VECENV else 2}
for algo in RL_ALGOS.keys()
if algo != "gail"
]
# Choose hyperparameters to minimize resource consumption in tests
TRAIN_SMALL_RESOURCES = {
"batch_size": 64,
"total_timesteps": 128,
"num_env": 2,
}
@pytest.mark.parametrize("config", TRAIN_CONFIGS)
def test_train(config):
config = dict(config)
for k, v in TRAIN_SMALL_RESOURCES.items():
config.setdefault(k, v)
run = train_ex.run(config_updates=config)
assert run.status == "COMPLETED"
final_dir = run.result
assert os.path.isdir(final_dir), "final result not saved"
assert os.path.isfile(os.path.join(final_dir, "model.pkl")), "model weights not saved"
def _test_multi(ex, config_updates=None):
multi_config = {
"spec": {
"run_kwargs": {
"resources_per_trial": {"cpu": 2}, # CI build only has 2 cores
"upload_dir": None, # do not upload test results anywhere
"sync_to_cloud": None, # as above
},
},
"init_kwargs": {"num_cpus": 2}, # CI build only has 2 cores
}
if config_updates:
multi_config.update(config_updates)
run = ex.run(config_updates=multi_config, named_configs=("debug_config",))
assert run.status == "COMPLETED"
assert ray.state.state.redis_client is None, "ray has not been shutdown"
return run
def test_multi_score():
run = _test_multi(multi_score_ex)
assert "scores" in run.result
assert "exp_id" in run.result
assert isinstance(run.result["scores"], dict)
def test_multi_train():
config_updates = {
"train": TRAIN_SMALL_RESOURCES,
}
run = _test_multi(multi_train_ex, config_updates=config_updates)
analysis, exp_id = run.result
assert isinstance(analysis, tune.analysis.ExperimentAnalysis)
assert isinstance(exp_id, str)
ACTIVATION_EXPERIMENTS = [
(density_ex, "fit_density_model"),
(tsne_ex, "tsne_fit_model"),
]
@pytest.mark.parametrize("test_cfg", ACTIVATION_EXPERIMENTS)
def test_activation_pipeline(test_cfg):
ex, inner_exp_name = test_cfg
with tempfile.TemporaryDirectory(prefix="test_activation_pipeline") as tmpdir:
config_updates = {
"generate_activations": {
"score_update": {
"spec": {
"run_kwargs": {
"resources_per_trial": {"cpu": 2}, # CI build only has 2 cores
"upload_dir": os.path.join(tmpdir, "ray"),
"sync_to_cloud": (
"mkdir -p {target} && " "rsync -rlptv {source}/ {target}"
),
},
},
"init_kwargs": {"num_cpus": 2}, # CI build only has 2 cores
},
"ray_upload_dir": os.path.join(tmpdir, "ray"),
},
inner_exp_name: {"init_kwargs": {"num_cpus": 2}}, # CI build only has 2 cores
"output_root": os.path.join(tmpdir, "main"),
}
run = ex.run(config_updates=config_updates, named_configs=("debug_config",))
assert run.status == "COMPLETED"
os.stat(run.result) # check output path exists
| [
"aprl.score_agent.score_ex.run",
"tempfile.TemporaryDirectory",
"os.path.join",
"shutil.rmtree",
"os.path.realpath",
"pytest.mark.parametrize",
"os.rmdir",
"os.path.isdir",
"pytest.raises",
"os.remove",
"aprl.train.train_ex.run",
"os.stat",
"aprl.train.RL_ALGOS.keys",
"aprl.policies.loader... | [((630, 680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""experiment"""', 'EXPERIMENTS'], {}), "('experiment', EXPERIMENTS)\n", (653, 680), False, 'import pytest\n'), ((1548, 1602), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""config"""', 'SCORE_AGENT_CONFIGS'], {}), "('config', SCORE_AGENT_CONFIGS)\n", (1571, 1602), False, 'import pytest\n'), ((6089, 6137), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""config"""', 'TRAIN_CONFIGS'], {}), "('config', TRAIN_CONFIGS)\n", (6112, 6137), False, 'import pytest\n'), ((7815, 7874), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_cfg"""', 'ACTIVATION_EXPERIMENTS'], {}), "('test_cfg', ACTIVATION_EXPERIMENTS)\n", (7838, 7874), False, 'import pytest\n'), ((879, 905), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (895, 905), False, 'import os\n'), ((1925, 1960), 'aprl.score_agent.score_ex.run', 'score_ex.run', ([], {'config_updates': 'config'}), '(config_updates=config)\n', (1937, 1960), False, 'from aprl.score_agent import score_ex\n'), ((3227, 3293), 'aprl.score_agent.score_ex.run', 'score_ex.run', ([], {'config_updates': "SCORE_AGENT_VIDEO_CONFIGS['none_dir']"}), "(config_updates=SCORE_AGENT_VIDEO_CONFIGS['none_dir'])\n", (3239, 3293), False, 'from aprl.score_agent import score_ex\n'), ((6278, 6313), 'aprl.train.train_ex.run', 'train_ex.run', ([], {'config_updates': 'config'}), '(config_updates=config)\n', (6290, 6313), False, 'from aprl.train import NO_VECENV, RL_ALGOS, train_ex\n'), ((6390, 6414), 'os.path.isdir', 'os.path.isdir', (['final_dir'], {}), '(final_dir)\n', (6403, 6414), False, 'import os\n'), ((1398, 1448), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""dummy_sumo_ants"""', 'rl_algo'], {}), "(BASE_DIR, 'dummy_sumo_ants', rl_algo)\n", (1410, 1448), False, 'import os\n'), ((1498, 1518), 'aprl.policies.loader.AGENT_LOADERS.keys', 'AGENT_LOADERS.keys', ([], {}), '()\n', (1516, 1518), False, 'from 
aprl.policies.loader import AGENT_LOADERS\n'), ((3477, 3548), 'aprl.score_agent.score_ex.run', 'score_ex.run', ([], {'config_updates': "SCORE_AGENT_VIDEO_CONFIGS['specified_dir']"}), "(config_updates=SCORE_AGENT_VIDEO_CONFIGS['specified_dir'])\n", (3489, 3548), False, 'from aprl.score_agent import score_ex\n'), ((3856, 3946), 'shutil.rmtree', 'shutil.rmtree', (["SCORE_AGENT_VIDEO_CONFIGS['specified_dir']['video_params']['save_dir']"], {}), "(SCORE_AGENT_VIDEO_CONFIGS['specified_dir']['video_params'][\n 'save_dir'])\n", (3869, 3946), False, 'import shutil\n'), ((4105, 4154), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""dummy_sumo_ants"""', '"""ppo2"""'], {}), "(BASE_DIR, 'dummy_sumo_ants', 'ppo2')\n", (4117, 4154), False, 'import os\n'), ((5881, 5896), 'aprl.train.RL_ALGOS.keys', 'RL_ALGOS.keys', ([], {}), '()\n', (5894, 5896), False, 'from aprl.train import NO_VECENV, RL_ALGOS, train_ex\n'), ((6467, 6503), 'os.path.join', 'os.path.join', (['final_dir', '"""model.pkl"""'], {}), "(final_dir, 'model.pkl')\n", (6479, 6503), False, 'import os\n'), ((7958, 8020), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""test_activation_pipeline"""'}), "(prefix='test_activation_pipeline')\n", (7985, 8020), False, 'import tempfile\n'), ((9053, 9072), 'os.stat', 'os.stat', (['run.result'], {}), '(run.result)\n', (9060, 9072), False, 'import os\n'), ((2716, 2766), 'os.rmdir', 'os.rmdir', (["config['record_traj_params']['save_dir']"], {}), "(config['record_traj_params']['save_dir'])\n", (2724, 2766), False, 'import os\n'), ((3716, 3745), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3729, 3745), False, 'import pytest\n'), ((3763, 3834), 'aprl.score_agent.score_ex.run', 'score_ex.run', ([], {'config_updates': "SCORE_AGENT_VIDEO_CONFIGS['specified_dir']"}), "(config_updates=SCORE_AGENT_VIDEO_CONFIGS['specified_dir'])\n", (3775, 3834), False, 'from aprl.score_agent import score_ex\n'), ((5616, 5667), 
'os.path.join', 'os.path.join', (['BASE_DIR', '"""SumoAnts_traj/agent_0.npz"""'], {}), "(BASE_DIR, 'SumoAnts_traj/agent_0.npz')\n", (5628, 5667), False, 'import os\n'), ((8878, 8906), 'os.path.join', 'os.path.join', (['tmpdir', '"""main"""'], {}), "(tmpdir, 'main')\n", (8890, 8906), False, 'import os\n'), ((2234, 2306), 'os.path.join', 'os.path.join', (["config['record_traj_params']['save_dir']", 'f"""agent_{i}.npz"""'], {}), "(config['record_traj_params']['save_dir'], f'agent_{i}.npz')\n", (2246, 2306), False, 'import os\n'), ((2373, 2396), 'numpy.load', 'np.load', (['traj_file_path'], {}), '(traj_file_path)\n', (2380, 2396), True, 'import numpy as np\n'), ((2661, 2686), 'os.remove', 'os.remove', (['traj_file_path'], {}), '(traj_file_path)\n', (2670, 2686), False, 'import os\n'), ((8716, 8743), 'os.path.join', 'os.path.join', (['tmpdir', '"""ray"""'], {}), "(tmpdir, 'ray')\n", (8728, 8743), False, 'import os\n'), ((8335, 8362), 'os.path.join', 'os.path.join', (['tmpdir', '"""ray"""'], {}), "(tmpdir, 'ray')\n", (8347, 8362), False, 'import os\n')] |
"""
Compute the interaction matrix of a cascade GEQ
Parameters
----------
G : ndarray
linear gain at which the leakage is determined
c : float
gain factor at bandwidth (0.5 refers to db(G)/2)
wg : ndarray
command frequencies i.e. filter center frequencies (in rad/sample)
wc : ndarray
design frequencies (rad/sample) at which leakage is computed
bw : ndarray
bandwidth of filters in radians
Returns
-------
leak : ndarray
N by M matrix showing how the magnitude responses of the band filters leak to the design frequencies.
N is determined from the length of the array wc (number of design frequencies) whereas M is
determined from the length of wg (number of filters)
Notes
-----
Python reference to <NAME>.; <NAME>. The quest for the best graphic equalizer. In Proceedings of the International Conference
on Digital Audio Effects (DAFx-17), Edinburgh, UK, 5–9 September 2017; pp. 95–102
"""
import numpy as np
from scipy import signal
from functions.pareq import pareq
def interactionMatrix(G,c,wg,wc,bw):
M = len(wg)
N = len(wc)
leak = np.zeros([M,N])
Gdb = 20 * np.log10(G)
Gdbw = c * Gdb
Gw = 10 ** (Gdbw/20)
for m in range(M):
[num, den] = pareq(G[m],Gw[m],wg[m],bw[m])
w,h = signal.freqz(num, den, wc)
Gain = 20*np.log10(np.abs(h))/Gdb[m]
leak[m,:] = np.abs(Gain)
return leak | [
"numpy.abs",
"numpy.log10",
"functions.pareq.pareq",
"numpy.zeros",
"scipy.signal.freqz"
] | [((1201, 1217), 'numpy.zeros', 'np.zeros', (['[M, N]'], {}), '([M, N])\n', (1209, 1217), True, 'import numpy as np\n'), ((1233, 1244), 'numpy.log10', 'np.log10', (['G'], {}), '(G)\n', (1241, 1244), True, 'import numpy as np\n'), ((1338, 1370), 'functions.pareq.pareq', 'pareq', (['G[m]', 'Gw[m]', 'wg[m]', 'bw[m]'], {}), '(G[m], Gw[m], wg[m], bw[m])\n', (1343, 1370), False, 'from functions.pareq import pareq\n'), ((1382, 1408), 'scipy.signal.freqz', 'signal.freqz', (['num', 'den', 'wc'], {}), '(num, den, wc)\n', (1394, 1408), False, 'from scipy import signal\n'), ((1474, 1486), 'numpy.abs', 'np.abs', (['Gain'], {}), '(Gain)\n', (1480, 1486), True, 'import numpy as np\n'), ((1436, 1445), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (1442, 1445), True, 'import numpy as np\n')] |
from base.base_train import BaseTrain
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from time import sleep
import time
class GANTrainer(BaseTrain):
    """Session-based GAN trainer: each step runs one discriminator update and
    one generator update, with optional label smoothing/flipping and decaying
    instance noise; losses and sample reconstructions go to TensorBoard."""
    def __init__(self, sess, model, data, config, summarizer):
        # All wiring (session, model, data pipeline, config, summary writer)
        # is delegated to BaseTrain; no extra state is kept here.
        super(GANTrainer, self).__init__(sess, model, data, config, summarizer)
    def train_epoch(self):
        """
        implement the logic of epoch:
        -loop on the number of iterations in the config and call the train step
        -add any summaries you want using the summary
        """
        # Attach the epoch loop to a variable
        loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))
        # Define the lists for summaries and losses
        gen_losses = []
        disc_losses = []
        summaries = []
        # Get the current epoch counter
        cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
        # Restart the input pipeline at the top of every epoch.
        self.sess.run(self.data.iterator.initializer)
        image = self.data.image
        for _ in loop:
            loop.set_description("Epoch:{}".format(cur_epoch + 1))
            loop.refresh() # to show immediately the update
            sleep(0.01)
            gen_loss, disc_loss, summary = self.train_step(image, cur_epoch=cur_epoch)
            gen_losses.append(gen_loss)
            disc_losses.append(disc_loss)
            summaries.append(summary)
        # write the summaries
        self.summarizer.add_tensorboard(cur_epoch, summaries=summaries)
        # Compute the means of the losses
        gen_loss_m = np.mean(gen_losses)
        disc_loss_m = np.mean(disc_losses)
        # Generate images between epochs to evaluate
        if cur_epoch % self.config.log.frequency_test == 0:
            noise = np.random.normal(
                loc=0.0,
                scale=1.0,
                size=[self.config.data_loader.test_batch, self.config.trainer.noise_dim],
            )
            image_eval = self.sess.run(image)
            feed_dict = {
                self.model.image_input: image_eval,
                self.model.sample_tensor: noise,
                self.model.is_training: False,
            }
            reconstruction = self.sess.run(self.model.summary_image, feed_dict=feed_dict)
            self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])
        if cur_epoch % self.config.log.show_steps == 0 or cur_epoch == 1:
            self.logger.info(
                "Epoch {}, Generator Loss: {}, Discriminator Loss: {}".format(
                    cur_epoch + 1, gen_loss_m, disc_loss_m
                )
            )
        # Checkpoint the model at the end of every epoch.
        self.model.save(self.sess)
    def train_step(self, image, cur_epoch):
        """Run one discriminator update then one generator update.

        Returns (gen_loss, disc_loss, summary) where summary is None when
        config.log.enable_summary is off.
        """
        # Generate noise from uniform distribution between -1 and 1
        # New Noise Generation
        # noise = np.random.uniform(-1., 1.,size=[self.config.batch_size, self.config.noise_dim])
        noise = np.random.normal(
            loc=0.0,
            scale=1.0,
            size=[self.config.data_loader.batch_size, self.config.trainer.noise_dim],
        )
        true_labels, generated_labels = self.generate_labels(
            self.config.trainer.soft_labels, self.config.trainer.flip_labels
        )
        # Instance noise additions
        real_noise, fake_noise = self.generate_noise(self.config.trainer.include_noise, cur_epoch)
        # Evaluation of the image
        image_eval = self.sess.run(image)
        # Construct the Feed Dictionary
        # Train the Discriminator on both real and fake images
        feed_dict = {
            self.model.noise_tensor: noise,
            self.model.image_input: image_eval,
            self.model.true_labels: true_labels,
            self.model.generated_labels: generated_labels,
            self.model.real_noise: real_noise,
            self.model.fake_noise: fake_noise,
            self.model.is_training: True,
        }
        _, disc_loss = self.sess.run(
            [self.model.train_disc, self.model.total_disc_loss], feed_dict=feed_dict
        )
        # Train the Generator and get the summaries
        # Re create the noise for the generator
        noise = np.random.normal(
            loc=0.0,
            scale=1.0,
            size=[self.config.data_loader.batch_size, self.config.trainer.noise_dim],
        )
        real_noise, fake_noise = self.generate_noise(self.config.trainer.include_noise, cur_epoch)
        true_labels, generated_labels = self.generate_labels(
            self.config.trainer.soft_labels, self.config.trainer.flip_labels
        )
        # Same real batch (image_eval) is reused for the generator pass.
        feed_dict = {
            self.model.noise_tensor: noise,
            self.model.image_input: image_eval,
            self.model.true_labels: true_labels,
            self.model.generated_labels: generated_labels,
            self.model.real_noise: real_noise,
            self.model.fake_noise: fake_noise,
            self.model.is_training: True,
        }
        _, gen_loss = self.sess.run(
            [self.model.train_gen, self.model.total_gen_loss], feed_dict=feed_dict
        )
        if self.config.log.enable_summary:
            sm = self.sess.run(self.model.summary_all, feed_dict=feed_dict)
        else:
            sm = None
        return gen_loss, disc_loss, sm
    def generate_labels(self, soft_labels, flip_labels):
        """Build discriminator targets, optionally smoothed and noisy.

        With soft_labels: real targets in [0.9, 1.0], fake targets in
        [0.0, 0.1], and a noise_probability fraction of each is flipped.
        With flip_labels the (real, fake) roles are swapped on return.
        """
        if not soft_labels:
            true_labels = np.ones((self.config.data_loader.batch_size, 1))
            generated_labels = np.zeros((self.config.data_loader.batch_size, 1))
        else:
            generated_labels = np.zeros(
                (self.config.data_loader.batch_size, 1)
            ) + np.random.uniform(low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1])
            flipped_idx = np.random.choice(
                np.arange(len(generated_labels)),
                size=int(self.config.trainer.noise_probability * len(generated_labels)),
            )
            generated_labels[flipped_idx] = 1 - generated_labels[flipped_idx]
            true_labels = np.ones((self.config.data_loader.batch_size, 1)) - np.random.uniform(
                low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1]
            )
            flipped_idx = np.random.choice(
                np.arange(len(true_labels)),
                size=int(self.config.trainer.noise_probability * len(true_labels)),
            )
            true_labels[flipped_idx] = 1 - true_labels[flipped_idx]
        if flip_labels:
            return generated_labels, true_labels
        else:
            return true_labels, generated_labels
    def generate_noise(self, include_noise, cur_epoch):
        """Instance noise for real/fake images.

        sigma decays linearly from 0.75 at epoch 0 to a floor of 0.05 after
        ten epochs; when include_noise is off, zeros of the same shape are
        returned so downstream additions are no-ops.
        """
        sigma = max(0.75 * (10.0 - cur_epoch) / (10), 0.05)
        if include_noise:
            # If we want to add this is will add the noises
            real_noise = np.random.normal(
                scale=sigma,
                size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,
            )
            fake_noise = np.random.normal(
                scale=sigma,
                size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,
            )
        else:
            # Otherwise we are just going to add zeros which will not break anything
            real_noise = np.zeros(
                ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)
            )
            fake_noise = np.zeros(
                ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)
            )
        return real_noise, fake_noise
| [
"numpy.random.normal",
"numpy.mean",
"numpy.ones",
"time.sleep",
"numpy.zeros",
"numpy.random.uniform"
] | [((1512, 1531), 'numpy.mean', 'np.mean', (['gen_losses'], {}), '(gen_losses)\n', (1519, 1531), True, 'import numpy as np\n'), ((1554, 1574), 'numpy.mean', 'np.mean', (['disc_losses'], {}), '(disc_losses)\n', (1561, 1574), True, 'import numpy as np\n'), ((2864, 2979), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '[self.config.data_loader.batch_size, self.config.trainer.noise_dim]'}), '(loc=0.0, scale=1.0, size=[self.config.data_loader.\n batch_size, self.config.trainer.noise_dim])\n', (2880, 2979), True, 'import numpy as np\n'), ((4101, 4216), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '[self.config.data_loader.batch_size, self.config.trainer.noise_dim]'}), '(loc=0.0, scale=1.0, size=[self.config.data_loader.\n batch_size, self.config.trainer.noise_dim])\n', (4117, 4216), True, 'import numpy as np\n'), ((1128, 1139), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (1133, 1139), False, 'from time import sleep\n'), ((1708, 1823), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '[self.config.data_loader.test_batch, self.config.trainer.noise_dim]'}), '(loc=0.0, scale=1.0, size=[self.config.data_loader.\n test_batch, self.config.trainer.noise_dim])\n', (1724, 1823), True, 'import numpy as np\n'), ((5313, 5361), 'numpy.ones', 'np.ones', (['(self.config.data_loader.batch_size, 1)'], {}), '((self.config.data_loader.batch_size, 1))\n', (5320, 5361), True, 'import numpy as np\n'), ((5393, 5442), 'numpy.zeros', 'np.zeros', (['(self.config.data_loader.batch_size, 1)'], {}), '((self.config.data_loader.batch_size, 1))\n', (5401, 5442), True, 'import numpy as np\n'), ((6738, 6847), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': '([self.config.data_loader.batch_size] + self.config.trainer.image_dims)'}), '(scale=sigma, size=[self.config.data_loader.batch_size] +\n self.config.trainer.image_dims)\n', (6754, 
6847), True, 'import numpy as np\n'), ((6916, 7025), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': '([self.config.data_loader.batch_size] + self.config.trainer.image_dims)'}), '(scale=sigma, size=[self.config.data_loader.batch_size] +\n self.config.trainer.image_dims)\n', (6932, 7025), True, 'import numpy as np\n'), ((7193, 7272), 'numpy.zeros', 'np.zeros', (['([self.config.data_loader.batch_size] + self.config.trainer.image_dims)'], {}), '([self.config.data_loader.batch_size] + self.config.trainer.image_dims)\n', (7201, 7272), True, 'import numpy as np\n'), ((7330, 7409), 'numpy.zeros', 'np.zeros', (['([self.config.data_loader.batch_size] + self.config.trainer.image_dims)'], {}), '([self.config.data_loader.batch_size] + self.config.trainer.image_dims)\n', (7338, 7409), True, 'import numpy as np\n'), ((5488, 5537), 'numpy.zeros', 'np.zeros', (['(self.config.data_loader.batch_size, 1)'], {}), '((self.config.data_loader.batch_size, 1))\n', (5496, 5537), True, 'import numpy as np\n'), ((5570, 5657), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(0.1)', 'size': '[self.config.data_loader.batch_size, 1]'}), '(low=0.0, high=0.1, size=[self.config.data_loader.\n batch_size, 1])\n', (5587, 5657), True, 'import numpy as np\n'), ((5954, 6002), 'numpy.ones', 'np.ones', (['(self.config.data_loader.batch_size, 1)'], {}), '((self.config.data_loader.batch_size, 1))\n', (5961, 6002), True, 'import numpy as np\n'), ((6005, 6092), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(0.1)', 'size': '[self.config.data_loader.batch_size, 1]'}), '(low=0.0, high=0.1, size=[self.config.data_loader.\n batch_size, 1])\n', (6022, 6092), True, 'import numpy as np\n')] |
"""Integration test for cache infra"""
from unittest import TestCase
import numpy as np
import shutil
import pathlib
import time
from cmlkit.engine import Component
from cmlkit.engine.data import Data
# Shared scratch directory for these tests, created next to this file.
# NOTE(review): the mkdir runs at import time; TestEngineCache.tearDown
# removes and setUp recreates it per test.
tmpdir = pathlib.Path(__file__).parent / "tmp_test_engine_cache"
tmpdir.mkdir(exist_ok=True)
class DummyComponent1(Component):
    """Minimal cache-aware component: multiplies the input payload by ``a``.

    ``compute`` sleeps 0.1 s so that tests can tell cache hits (fast) from
    recomputation (slow).
    """

    kind = "dummy123"

    def __init__(self, a=1.0, context={}):
        super().__init__(context=context)
        self.a = a

    def _get_config(self):
        # The scaling factor is the only configurable piece of state.
        return {"a": self.a}

    def __call__(self, x):
        # Fast path: serve a previously submitted result for this input id.
        cached = self.cache.get_if_cached(x.id)
        if cached is not None:
            return cached
        # Slow path: compute, wrap in a Data result, and record it.
        fresh = Data.result(self, x, data={"y": self.compute(x.data["x"])})
        self.cache.submit(x.id, fresh)
        return fresh

    def compute(self, x):
        # Deliberate delay so cache hits are measurably faster.
        time.sleep(0.1)
        return x * self.a
class TestEngineCache(TestCase):
    """Integration tests: the component caches nothing by default, and gets
    measurably faster once a disk cache is configured."""
    def setUp(self):
        # Fresh scratch dir plus a component and matching input/output pair.
        self.tmpdir = tmpdir
        self.tmpdir.mkdir(exist_ok=True)
        self.component = DummyComponent1(a=2.0)
        self.input = Data.create(data={"x": np.ones(3)})
        # Expected result: payload doubled, history extended with this
        # component's hash id.
        self.output = Data.create(
            data={"y": np.ones(3) * 2},
            history=[self.input.history[0], self.component.get_hid()],
        )
    def tearDown(self):
        # Remove the scratch dir so disk-cache state never leaks across tests.
        shutil.rmtree(self.tmpdir)
    def test_nop_by_default(self):
        # does it return the correct result, and does it not go faster?
        # (compute() sleeps 0.1 s, so an uncached call must take >= 0.1 s)
        start = time.monotonic()
        result = self.component(self.input)
        self.assertGreater(time.monotonic() - start, 0.1)
        print(result.history)
        print(self.output.history)
        self.assertEqual(
            result.get_config_hash(), self.output.get_config_hash()
        )
        # Second call: still slow, since no cache backend is configured.
        start = time.monotonic()
        result = self.component(self.input)
        self.assertGreater(time.monotonic() - start, 0.1)
        self.assertEqual(
            result.get_config_hash(), self.output.get_config_hash()
        )
    def test_faster_with_diskcache(self):
        # does it return correct results,
        # and get faster?!
        component = DummyComponent1(
            a=2.0, context={"cache": {"disk": {"location": self.tmpdir}}}
        )
        # First call populates the disk cache and pays the 0.1 s compute.
        start = time.monotonic()
        result = component(self.input)
        duration = time.monotonic() - start
        self.assertGreater(duration, 0.1)
        self.assertEqual(
            result.get_config_hash(), self.output.get_config_hash()
        )
        # Second call must be a cache hit, i.e. faster than the first.
        start = time.monotonic()
        result = component(self.input)
        self.assertGreater(duration, time.monotonic() - start)
        self.assertEqual(
            result.get_config_hash(), self.output.get_config_hash()
        )
| [
"numpy.ones",
"pathlib.Path",
"time.monotonic",
"time.sleep",
"shutil.rmtree"
] | [((214, 236), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import pathlib\n'), ((806, 821), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (816, 821), False, 'import time\n'), ((1269, 1295), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (1282, 1295), False, 'import shutil\n'), ((1421, 1437), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1435, 1437), False, 'import time\n'), ((1726, 1742), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1740, 1742), False, 'import time\n'), ((2200, 2216), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2214, 2216), False, 'import time\n'), ((2463, 2479), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2477, 2479), False, 'import time\n'), ((2275, 2291), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2289, 2291), False, 'import time\n'), ((1509, 1525), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1523, 1525), False, 'import time\n'), ((1814, 1830), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1828, 1830), False, 'import time\n'), ((2556, 2572), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2570, 2572), False, 'import time\n'), ((1067, 1077), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1074, 1077), True, 'import numpy as np\n'), ((1138, 1148), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1145, 1148), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2
from paddle.fluid.io import Dataset
class LaneDataSet(Dataset):
    """Lane-segmentation dataset over a directory of jpg images.

    Training labels are expected as png files in a sibling 'train_label'
    directory with the same basenames. Non-val training samples have one
    third of their height cropped away before the transform pipeline runs.
    """
    def __init__(self, dataset_path, data_list='train', transform=None, is_val=False):
        # data_list is both the subdirectory name and the split selector:
        # any name containing 'test' switches to label-free test mode.
        self.img = os.listdir(os.path.join(dataset_path, data_list))
        self.is_val = is_val
        self.is_testing = 'test' in data_list
        if not self.is_testing:
            # Image-id prefixes (first 8 filename chars) that get cropped
            # from the bottom instead of the top — presumably frames whose
            # top region carries useful content (sky?); TODO confirm intent.
            self.sky = ['10011130', '10010014', '10024306', '10010116', '10008480', '10016709', '10016688', '10012704',
                        '10016634', '10010679', '10024403', '10013078', '10010443', '10016355', '10014527', '10020544']
        print("{} images loaded.".format(len(self.img)))
        self.img_list = [os.path.join(dataset_path, data_list, x) for x in self.img]
        if 'train' in data_list:
            self.label_list = [x.replace(data_list, 'train_label').replace('jpg', 'png') for x in self.img_list]
        # NOTE(review): label_list only exists when 'train' is in data_list,
        # yet __getitem__ reads it whenever is_testing is False — a val-only
        # list name would raise AttributeError; verify against callers.
        self.transform = transform
    def __len__(self):
        # Number of image files discovered in the split directory.
        return len(self.img_list)
    def __getitem__(self, idx):
        # Returns (CHW float32 image, filename, original shape) in test mode,
        # or (CHW float32 image, int64 label map) otherwise.
        image = cv2.imread(self.img_list[idx])
        # im_copy = np.copy(image)
        size = image.shape
        if not self.is_testing:
            label = cv2.imread(self.label_list[idx], cv2.IMREAD_UNCHANGED)
            if not self.is_val:
                # Drop a third of the image height before augmentation:
                # bottom third for 'sky' ids, top third for everything else.
                crop_height = int(size[0] * 1 / 3)
                if self.img[idx][:8] in self.sky:
                    # h = np.random.randint(crop_height + 1)
                    # image = image[h:h + size[0] - crop_height]
                    # label = label[h:h + size[0] - crop_height]
                    image = image[:(size[0] - crop_height)]
                    label = label[:(size[0] - crop_height)]
                else:
                    image = image[crop_height:]
                    label = label[crop_height:]
        if self.transform:
            if self.is_testing:
                # Test transforms act on the image alone.
                for transform in self.transform:
                    image = transform(image)
                # import matplotlib.pyplot as plt
                # image += np.array([103.939, 116.779, 123.68])
                # image = image[:, :, ::-1].astype(np.uint8)
                # plt.imshow(image)
                # plt.show()
                return np.transpose(image, (2, 0, 1)).astype('float32'), self.img[idx], size
            else:
                # Train/val transforms act jointly on (image, label) pairs.
                for transform in self.transform:
                    image, label = transform((image, label))
                # if (label == 17).any() or (label == 16).any() or (label == 9).any() or (label == 10).any():
                # import matplotlib.pyplot as plt
                # image += np.array([103.939, 116.779, 123.68])
                # image = image[:, :, ::-1].astype(np.uint8)
                # plt.imshow(im_copy[:, :, ::-1].astype(np.uint8))
                # plt.show()
                # plt.imshow(image)
                # plt.show()
                # plt.imshow((label * 10).astype(np.uint8))
                # plt.show()
                return np.transpose(image, (2, 0, 1)).astype('float32'), label.astype('int64')
return np.transpose(image, (2, 0, 1)).astype('float32'), label.astype('int64')
def collate_fn(batch):
    """Collate (image, filename, shape) samples into batched arrays.

    Returns [images, ids, sizes]: images is the stacked image tensor,
    ids holds the numeric part of each '<id>.jpg' filename, and sizes
    holds the original image shapes.
    """
    images = np.stack([sample[0] for sample in batch], axis=0)
    ids = np.array([int(sample[1].replace('.jpg', '')) for sample in batch])
    sizes = np.array([sample[2] for sample in batch])
    return [images, ids, sizes]
| [
"os.path.join",
"numpy.array",
"numpy.stack",
"numpy.transpose",
"cv2.imread"
] | [((3136, 3167), 'numpy.array', 'np.array', (['[x[2] for x in batch]'], {}), '([x[2] for x in batch])\n', (3144, 3167), True, 'import numpy as np\n'), ((3178, 3199), 'numpy.stack', 'np.stack', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3186, 3199), True, 'import numpy as np\n'), ((1040, 1070), 'cv2.imread', 'cv2.imread', (['self.img_list[idx]'], {}), '(self.img_list[idx])\n', (1050, 1070), False, 'import cv2\n'), ((223, 260), 'os.path.join', 'os.path.join', (['dataset_path', 'data_list'], {}), '(dataset_path, data_list)\n', (235, 260), False, 'import os\n'), ((691, 731), 'os.path.join', 'os.path.join', (['dataset_path', 'data_list', 'x'], {}), '(dataset_path, data_list, x)\n', (703, 731), False, 'import os\n'), ((1185, 1239), 'cv2.imread', 'cv2.imread', (['self.label_list[idx]', 'cv2.IMREAD_UNCHANGED'], {}), '(self.label_list[idx], cv2.IMREAD_UNCHANGED)\n', (1195, 1239), False, 'import cv2\n'), ((2928, 2958), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (2940, 2958), True, 'import numpy as np\n'), ((2219, 2249), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (2231, 2249), True, 'import numpy as np\n')] |
# This script does the replication of [B&M 2011] component of the demoforestation paper
# Import required modules
import numpy as np
import pandas as pd
import statsmodels.api as stats
from matplotlib import pyplot as plt
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/demoforestation.csv')
# (1) Replicating Figure 1
# Structuring dataframes
# Linear (Xf11) vs quadratic (Xf12) democracy specifications for the
# 1990-2000 deforestation rate.
Yf1 = data['Rate_9000']
Xf11 = stats.add_constant(data['Democracy(20)_90'])
Xf12 = stats.add_constant(data[['Democracy(20)_90', 'Democracy(20)_90_2']])
f1m1 = stats.OLS(Yf1,Xf11)
f1m2 = stats.OLS(Yf1,Xf12)
f1r1 = f1m1.fit()
print(f1r1.summary())
file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1_model_1.txt', 'w')
file.write(f1r1.summary().as_text())
file.close()
f1r2 = f1m2.fit()
print(f1r2.summary())
file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1_model_2.txt', 'w')
file.write(f1r2.summary().as_text())
file.close()
# Recreating the plot
plt.figure()
plt.scatter(data['Democracy(20)_90'], data['Rate_9000'], s = 40)
plt.xlabel('Democracy Index')
plt.ylabel('Deforestation Rate')
plt.ylim(-15.5,10.5)
plt.xlim(-10.5,10.5)
# Overlay fitted lines; coefficients are hard-coded from the paper's
# reported estimates (linear in black, quadratic in red).
basis = [i/10 for i in range(-120,121)]
l1 = [0.0741 + 0.0041*(i/10) for i in range(-120,121)]
l2 = [0.9628 + 0.0480*(i/10) - 0.0220*(i/10)**2 for i in range(-120,121)]
plt.plot(basis, l1, 'k-', linewidth = 4)
plt.plot(basis, l2, 'r-', linewidth = 4)
plt.savefig('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1.eps')
# (2) Replicating the 7 regression models
# Each df drops rows missing any variable used by its model, so every
# model is estimated on its own complete-cases sample.
df1 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2']].dropna()
df2 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'CCI_90']].dropna()
df3 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land']].dropna()
df4 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']].dropna()
df5 = data[['Rate_9000', 'Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
df6 = data[['Rate_9000', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
df7 = data[['Rate_9000', 'GDP_cap_90', 'GDP_cap_90_2']].dropna()
X1 = stats.add_constant(df1[['Democracy(20)_90', 'Democracy(20)_90_2']])
X2 = stats.add_constant(df2[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'CCI_90']])
X3 = stats.add_constant(df3[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land']])
X4 = stats.add_constant(df4[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']])
X5 = stats.add_constant(df5[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']])
X6 = stats.add_constant(df6[['Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']])
X7 = stats.add_constant(df7[['GDP_cap_90', 'GDP_cap_90_2']])
mod1 = stats.OLS(df1['Rate_9000'],X1)
mod2 = stats.OLS(df2['Rate_9000'],X2)
mod3 = stats.OLS(df3['Rate_9000'],X3)
mod4 = stats.OLS(df4['Rate_9000'],X4)
mod5 = stats.OLS(df5['Rate_9000'],X5)
mod6 = stats.OLS(df6['Rate_9000'],X6)
mod7 = stats.OLS(df7['Rate_9000'],X7)
mods = [mod1, mod2, mod3, mod4, mod5, mod6, mod7]
res_list = []
# Fit each model with heteroskedasticity-robust (HC1) standard errors and
# dump every summary to its own text file.
for mod in mods:
    res = mod.fit(cov_type = 'HC1')
    res_list.append(res)
    print(res.summary())
    file = open('C:/Users/User/Documents/Data/Demoforestation/Replication/Model_' + str(mods.index(mod)+1) + '.txt', 'w')
    file.write(res.summary().as_text())
    file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Demoforestation/Replication/restab_replication.txt')
# (3) Replicating the cluster analyses
# Recreate the statistics in Table (3) in the original paper
# Record group level statistics
Type6 = pd.DataFrame(np.zeros((2,6)), columns = data.Type6.unique(), index = ['Democracy', 'Rate_9000'])
Type3 = pd.DataFrame(np.zeros((2,3)), columns = data.Type3.unique(), index = ['Democracy', 'Rate_9000'])
# NOTE(review): the chained assignment Type6[c]['Democracy'] = ... relies on
# the column selection returning a view; pandas flags this pattern with
# SettingWithCopyWarning — confirm it still writes through on the pandas
# version in use.
for c in Type6.columns:
    df = data[data['Type6'] == c]
    Type6[c]['Democracy'] = np.mean(df['Democracy(20)_90'])
    Type6[c]['Rate_9000'] = np.mean(df['Rate_9000'])
for c in Type3.columns:
    df = data[data['Type3'] == c]
    Type3[c]['Democracy'] = np.mean(df['Democracy(20)_90'])
    Type3[c]['Rate_9000'] = np.mean(df['Rate_9000'])
# Create scatter plots from these data frames
Type6labs = ['DEM-W', 'AR', 'DEM-S', 'TM', 'RDP', 'TOT']
plt.figure()
plt.scatter(Type6.iloc[0], Type6.iloc[1], c = 'k', s = 60)
# Per-label horizontal offsets to keep annotations off the markers.
v = [4,0,4,1.5,2,-1]
for idx, lab in enumerate(Type6labs):
    plt.annotate(lab, (Type6.iloc[0][idx]-.25*v[idx], Type6.iloc[1][idx]-.5))
plt.xlabel('Democracy Index')
plt.ylabel('Deforestation Rate')
# Connect the regime types in order of increasing democracy score.
Type6b = Type6[['Traditional Monarchy', 'Totalitarian Regime', 'Authoritarian', 'Restricted Democratic Practice', 'Weak Democracy', 'Strong Democracy']]
plt.plot(Type6b.iloc[0], Type6b.iloc[1], 'k--')
plt.xlim(-9.5,10.5)
plt.ylim(-4.7,1.8)
plt.savefig('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2a.eps')
plt.figure()
plt.scatter(Type3.iloc[0], Type3.iloc[1], c = 'k', s = 60)
for idx, lab in enumerate(Type3.columns):
    plt.annotate(lab, (Type3.iloc[0][idx]-.4, Type3.iloc[1][idx]-.067))
plt.xlabel('Democracy Index')
plt.ylabel('Deforestation Rate')
# Piecewise quadratic fits (coefficients hard-coded from the paper),
# split at the breakpoint near 0.35.
l3a = [0.46939 + 0.02778*(i/100) - 0.01203*(i/100)**2 for i in range(-625,35)]
l3b = [0.47038 - 0.00821*(i/100)**2 for i in range(35,833)]
plt.plot([(i/100) for i in range(-625,35)], l3a, 'k--')
plt.plot([(i/100) for i in range(35,833)], l3b, 'k--')
plt.xlim(-7.25,9.25)
plt.ylim(-0.3,0.55)
plt.savefig('C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2b.eps')
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ToTeX.restab",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"statsmodels.api.add_constant",
"numpy.zeros",
"matplotlib.pyplot.scatter",
... | [((292, 355), 'pandas.read_csv', 'pd.read_csv', (['"""C:/Users/User/Documents/Data/demoforestation.csv"""'], {}), "('C:/Users/User/Documents/Data/demoforestation.csv')\n", (303, 355), True, 'import pandas as pd\n'), ((451, 495), 'statsmodels.api.add_constant', 'stats.add_constant', (["data['Democracy(20)_90']"], {}), "(data['Democracy(20)_90'])\n", (469, 495), True, 'import statsmodels.api as stats\n'), ((504, 572), 'statsmodels.api.add_constant', 'stats.add_constant', (["data[['Democracy(20)_90', 'Democracy(20)_90_2']]"], {}), "(data[['Democracy(20)_90', 'Democracy(20)_90_2']])\n", (522, 572), True, 'import statsmodels.api as stats\n'), ((583, 603), 'statsmodels.api.OLS', 'stats.OLS', (['Yf1', 'Xf11'], {}), '(Yf1, Xf11)\n', (592, 603), True, 'import statsmodels.api as stats\n'), ((611, 631), 'statsmodels.api.OLS', 'stats.OLS', (['Yf1', 'Xf12'], {}), '(Yf1, Xf12)\n', (620, 631), True, 'import statsmodels.api as stats\n'), ((1049, 1061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1059, 1061), True, 'from matplotlib import pyplot as plt\n'), ((1063, 1125), 'matplotlib.pyplot.scatter', 'plt.scatter', (["data['Democracy(20)_90']", "data['Rate_9000']"], {'s': '(40)'}), "(data['Democracy(20)_90'], data['Rate_9000'], s=40)\n", (1074, 1125), True, 'from matplotlib import pyplot as plt\n'), ((1129, 1158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Democracy Index"""'], {}), "('Democracy Index')\n", (1139, 1158), True, 'from matplotlib import pyplot as plt\n'), ((1160, 1192), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Deforestation Rate"""'], {}), "('Deforestation Rate')\n", (1170, 1192), True, 'from matplotlib import pyplot as plt\n'), ((1194, 1215), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-15.5)', '(10.5)'], {}), '(-15.5, 10.5)\n', (1202, 1215), True, 'from matplotlib import pyplot as plt\n'), ((1216, 1237), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-10.5)', '(10.5)'], {}), '(-10.5, 10.5)\n', (1224, 1237), True, 'from matplotlib import 
pyplot as plt\n'), ((1410, 1448), 'matplotlib.pyplot.plot', 'plt.plot', (['basis', 'l1', '"""k-"""'], {'linewidth': '(4)'}), "(basis, l1, 'k-', linewidth=4)\n", (1418, 1448), True, 'from matplotlib import pyplot as plt\n'), ((1452, 1490), 'matplotlib.pyplot.plot', 'plt.plot', (['basis', 'l2', '"""r-"""'], {'linewidth': '(4)'}), "(basis, l2, 'r-', linewidth=4)\n", (1460, 1490), True, 'from matplotlib import pyplot as plt\n'), ((1494, 1583), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1.eps"""'], {}), "(\n 'C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_1.eps')\n", (1505, 1583), True, 'from matplotlib import pyplot as plt\n'), ((2460, 2527), 'statsmodels.api.add_constant', 'stats.add_constant', (["df1[['Democracy(20)_90', 'Democracy(20)_90_2']]"], {}), "(df1[['Democracy(20)_90', 'Democracy(20)_90_2']])\n", (2478, 2527), True, 'import statsmodels.api as stats\n'), ((2534, 2665), 'statsmodels.api.add_constant', 'stats.add_constant', (["df2[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90',\n 'Rural_Population_90', 'Ln_Land', 'CCI_90']]"], {}), "(df2[['Democracy(20)_90', 'Democracy(20)_90_2',\n 'Education_90', 'Rural_Population_90', 'Ln_Land', 'CCI_90']])\n", (2552, 2665), True, 'import statsmodels.api as stats\n'), ((2668, 2789), 'statsmodels.api.add_constant', 'stats.add_constant', (["df3[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90',\n 'Rural_Population_90', 'Ln_Land']]"], {}), "(df3[['Democracy(20)_90', 'Democracy(20)_90_2',\n 'Education_90', 'Rural_Population_90', 'Ln_Land']])\n", (2686, 2789), True, 'import statsmodels.api as stats\n'), ((2792, 2927), 'statsmodels.api.add_constant', 'stats.add_constant', (["df4[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90',\n 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']]"], {}), "(df4[['Democracy(20)_90', 'Democracy(20)_90_2',\n 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90']])\n", (2810, 2927), 
True, 'import statsmodels.api as stats\n'), ((2930, 3085), 'statsmodels.api.add_constant', 'stats.add_constant', (["df5[['Democracy(20)_90', 'Democracy(20)_90_2', 'Education_90',\n 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90', 'GDP_cap_90_2']]"], {}), "(df5[['Democracy(20)_90', 'Democracy(20)_90_2',\n 'Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90',\n 'GDP_cap_90_2']])\n", (2948, 3085), True, 'import statsmodels.api as stats\n'), ((3084, 3193), 'statsmodels.api.add_constant', 'stats.add_constant', (["df6[['Education_90', 'Rural_Population_90', 'Ln_Land', 'GDP_cap_90',\n 'GDP_cap_90_2']]"], {}), "(df6[['Education_90', 'Rural_Population_90', 'Ln_Land',\n 'GDP_cap_90', 'GDP_cap_90_2']])\n", (3102, 3193), True, 'import statsmodels.api as stats\n'), ((3196, 3251), 'statsmodels.api.add_constant', 'stats.add_constant', (["df7[['GDP_cap_90', 'GDP_cap_90_2']]"], {}), "(df7[['GDP_cap_90', 'GDP_cap_90_2']])\n", (3214, 3251), True, 'import statsmodels.api as stats\n'), ((3262, 3293), 'statsmodels.api.OLS', 'stats.OLS', (["df1['Rate_9000']", 'X1'], {}), "(df1['Rate_9000'], X1)\n", (3271, 3293), True, 'import statsmodels.api as stats\n'), ((3301, 3332), 'statsmodels.api.OLS', 'stats.OLS', (["df2['Rate_9000']", 'X2'], {}), "(df2['Rate_9000'], X2)\n", (3310, 3332), True, 'import statsmodels.api as stats\n'), ((3340, 3371), 'statsmodels.api.OLS', 'stats.OLS', (["df3['Rate_9000']", 'X3'], {}), "(df3['Rate_9000'], X3)\n", (3349, 3371), True, 'import statsmodels.api as stats\n'), ((3379, 3410), 'statsmodels.api.OLS', 'stats.OLS', (["df4['Rate_9000']", 'X4'], {}), "(df4['Rate_9000'], X4)\n", (3388, 3410), True, 'import statsmodels.api as stats\n'), ((3418, 3449), 'statsmodels.api.OLS', 'stats.OLS', (["df5['Rate_9000']", 'X5'], {}), "(df5['Rate_9000'], X5)\n", (3427, 3449), True, 'import statsmodels.api as stats\n'), ((3457, 3488), 'statsmodels.api.OLS', 'stats.OLS', (["df6['Rate_9000']", 'X6'], {}), "(df6['Rate_9000'], X6)\n", (3466, 3488), True, 'import 
statsmodels.api as stats\n'), ((3496, 3527), 'statsmodels.api.OLS', 'stats.OLS', (["df7['Rate_9000']", 'X7'], {}), "(df7['Rate_9000'], X7)\n", (3505, 3527), True, 'import statsmodels.api as stats\n'), ((3899, 4007), 'ToTeX.restab', 'restab', (['res_list', '"""C:/Users/User/Documents/Data/Demoforestation/Replication/restab_replication.txt"""'], {}), "(res_list,\n 'C:/Users/User/Documents/Data/Demoforestation/Replication/restab_replication.txt'\n )\n", (3905, 4007), False, 'from ToTeX import restab\n'), ((4830, 4842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4840, 4842), True, 'from matplotlib import pyplot as plt\n'), ((4844, 4898), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Type6.iloc[0]', 'Type6.iloc[1]'], {'c': '"""k"""', 's': '(60)'}), "(Type6.iloc[0], Type6.iloc[1], c='k', s=60)\n", (4855, 4898), True, 'from matplotlib import pyplot as plt\n'), ((5054, 5083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Democracy Index"""'], {}), "('Democracy Index')\n", (5064, 5083), True, 'from matplotlib import pyplot as plt\n'), ((5085, 5117), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Deforestation Rate"""'], {}), "('Deforestation Rate')\n", (5095, 5117), True, 'from matplotlib import pyplot as plt\n'), ((5273, 5320), 'matplotlib.pyplot.plot', 'plt.plot', (['Type6b.iloc[0]', 'Type6b.iloc[1]', '"""k--"""'], {}), "(Type6b.iloc[0], Type6b.iloc[1], 'k--')\n", (5281, 5320), True, 'from matplotlib import pyplot as plt\n'), ((5322, 5342), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-9.5)', '(10.5)'], {}), '(-9.5, 10.5)\n', (5330, 5342), True, 'from matplotlib import pyplot as plt\n'), ((5343, 5362), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-4.7)', '(1.8)'], {}), '(-4.7, 1.8)\n', (5351, 5362), True, 'from matplotlib import pyplot as plt\n'), ((5363, 5453), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2a.eps"""'], {}), "(\n 
'C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2a.eps')\n", (5374, 5453), True, 'from matplotlib import pyplot as plt\n'), ((5452, 5464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5462, 5464), True, 'from matplotlib import pyplot as plt\n'), ((5466, 5520), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Type3.iloc[0]', 'Type3.iloc[1]'], {'c': '"""k"""', 's': '(60)'}), "(Type3.iloc[0], Type3.iloc[1], c='k', s=60)\n", (5477, 5520), True, 'from matplotlib import pyplot as plt\n'), ((5652, 5681), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Democracy Index"""'], {}), "('Democracy Index')\n", (5662, 5681), True, 'from matplotlib import pyplot as plt\n'), ((5683, 5715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Deforestation Rate"""'], {}), "('Deforestation Rate')\n", (5693, 5715), True, 'from matplotlib import pyplot as plt\n'), ((5971, 5992), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-7.25)', '(9.25)'], {}), '(-7.25, 9.25)\n', (5979, 5992), True, 'from matplotlib import pyplot as plt\n'), ((5993, 6013), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.3)', '(0.55)'], {}), '(-0.3, 0.55)\n', (6001, 6013), True, 'from matplotlib import pyplot as plt\n'), ((6014, 6104), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2b.eps"""'], {}), "(\n 'C:/Users/User/Documents/Data/Demoforestation/Replication/Figure_2b.eps')\n", (6025, 6104), True, 'from matplotlib import pyplot as plt\n'), ((4164, 4180), 'numpy.zeros', 'np.zeros', (['(2, 6)'], {}), '((2, 6))\n', (4172, 4180), True, 'import numpy as np\n'), ((4270, 4286), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (4278, 4286), True, 'import numpy as np\n'), ((4451, 4482), 'numpy.mean', 'np.mean', (["df['Democracy(20)_90']"], {}), "(df['Democracy(20)_90'])\n", (4458, 4482), True, 'import numpy as np\n'), ((4512, 4536), 'numpy.mean', 'np.mean', (["df['Rate_9000']"], {}), "(df['Rate_9000'])\n", (4519, 4536), True, 
'import numpy as np\n'), ((4634, 4665), 'numpy.mean', 'np.mean', (["df['Democracy(20)_90']"], {}), "(df['Democracy(20)_90'])\n", (4641, 4665), True, 'import numpy as np\n'), ((4695, 4719), 'numpy.mean', 'np.mean', (["df['Rate_9000']"], {}), "(df['Rate_9000'])\n", (4702, 4719), True, 'import numpy as np\n'), ((4977, 5062), 'matplotlib.pyplot.annotate', 'plt.annotate', (['lab', '(Type6.iloc[0][idx] - 0.25 * v[idx], Type6.iloc[1][idx] - 0.5)'], {}), '(lab, (Type6.iloc[0][idx] - 0.25 * v[idx], Type6.iloc[1][idx] -\n 0.5))\n', (4989, 5062), True, 'from matplotlib import pyplot as plt\n'), ((5581, 5654), 'matplotlib.pyplot.annotate', 'plt.annotate', (['lab', '(Type3.iloc[0][idx] - 0.4, Type3.iloc[1][idx] - 0.067)'], {}), '(lab, (Type3.iloc[0][idx] - 0.4, Type3.iloc[1][idx] - 0.067))\n', (5593, 5654), True, 'from matplotlib import pyplot as plt\n')] |
# coding: utf8
# Demo script: fit a 1-D linear regression (y = w*x + b) with a single
# Dense layer in Keras and visualize the learned line against the data.
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from keras.layers import Input, Dense
from keras.optimizers import SGD
from keras.models import Model
X = random.uniform(0, 30, 100) # draw 100 samples uniformly distributed on [0, 30]
y = 1.85 * X + random.normal(0, 2, 100) # multiply X by a fixed slope and add Gaussian noise
plt.scatter(X, y)
plt.xlabel('X')
plt.ylabel('y')
# Build a one-neuron linear model: a single Dense(1) layer is exactly w*x + b.
input_shape = (1,)
input_tensor = Input(shape=input_shape)
predict = Dense(1, activation='linear', name='output')(input_tensor)
model = Model(inputs=input_tensor, outputs=predict)
print(model.summary())
model.compile(loss='mse', optimizer=SGD(lr=0.0001))
train_history = model.fit(X, y, validation_split=0.2,
                          epochs=100, batch_size=100, verbose=2)
# Plot training vs. validation loss over epochs.
plt.plot(train_history.history['loss'])
plt.plot(train_history.history['val_loss'])
plt.xlabel('epochs')
plt.ylabel('loss')
plt.title('model loss')
plt.legend(['train', 'val'], loc='upper right')
# Recover the learned slope w and intercept b from the Dense layer
# and overlay the fitted line on the scatter plot.
[w, b] = model.layers[1].get_weights()
print(w, b)
plt.scatter(X, y)
plt.xlabel('X')
plt.ylabel('y')
x1 = np.linspace(0, 30, 1000)
y1 = w[0][0] * x1 + b[0]
plt.plot(x1, y1, 'r')
| [
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.layers.Input",
"numpy.linspace",
"keras.optimizers.SGD",
"keras.models.Model",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"keras.layers.Dense",
"matplotlib.pyplot.title",
... | [((200, 226), 'numpy.random.uniform', 'random.uniform', (['(0)', '(30)', '(100)'], {}), '(0, 30, 100)\n', (214, 226), False, 'from numpy import random\n'), ((317, 334), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {}), '(X, y)\n', (328, 334), True, 'import matplotlib.pyplot as plt\n'), ((335, 350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (345, 350), True, 'import matplotlib.pyplot as plt\n'), ((351, 366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (361, 366), True, 'import matplotlib.pyplot as plt\n'), ((402, 426), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (407, 426), False, 'from keras.layers import Input, Dense\n'), ((504, 547), 'keras.models.Model', 'Model', ([], {'inputs': 'input_tensor', 'outputs': 'predict'}), '(inputs=input_tensor, outputs=predict)\n', (509, 547), False, 'from keras.models import Model\n'), ((744, 783), 'matplotlib.pyplot.plot', 'plt.plot', (["train_history.history['loss']"], {}), "(train_history.history['loss'])\n", (752, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 827), 'matplotlib.pyplot.plot', 'plt.plot', (["train_history.history['val_loss']"], {}), "(train_history.history['val_loss'])\n", (792, 827), True, 'import matplotlib.pyplot as plt\n'), ((828, 848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (838, 848), True, 'import matplotlib.pyplot as plt\n'), ((849, 867), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (859, 867), True, 'import matplotlib.pyplot as plt\n'), ((868, 891), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (877, 891), True, 'import matplotlib.pyplot as plt\n'), ((892, 939), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper right"""'}), "(['train', 'val'], loc='upper right')\n", (902, 939), True, 'import matplotlib.pyplot as plt\n'), ((993, 
1010), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {}), '(X, y)\n', (1004, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1021, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1037, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1072), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', '(1000)'], {}), '(0, 30, 1000)\n', (1059, 1072), True, 'import numpy as np\n'), ((1098, 1119), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1', '"""r"""'], {}), "(x1, y1, 'r')\n", (1106, 1119), True, 'import matplotlib.pyplot as plt\n'), ((272, 296), 'numpy.random.normal', 'random.normal', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (285, 296), False, 'from numpy import random\n'), ((437, 481), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'name': '"""output"""'}), "(1, activation='linear', name='output')\n", (442, 481), False, 'from keras.layers import Input, Dense\n'), ((608, 622), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (611, 622), False, 'from keras.optimizers import SGD\n')] |
import numpy as np
import unittest
from algorithm_ncs.benchmark import benchmark_func
from algorithm_ncs.problem import load_problem
def load_test_data(file_path):
    """Parse a whitespace-separated numeric reference file.

    The first ten rows become the 10xD matrix ``x``; the remaining row(s)
    are flattened into a length-10 vector ``v``.
    """
    with open(file_path, "r") as f:
        raw_lines = f.readlines()
    # Split on single spaces (matching how the files are written) and
    # drop the empty tokens produced by runs of spaces.
    parsed = [[float(tok) for tok in raw.split(" ") if tok != ""]
              for raw in raw_lines]
    x = np.asarray(parsed[0:10])
    v = np.asarray(parsed[10:]).reshape(10)
    return x, v
def test_func(problem_index):
    """Run benchmark function ``problem_index`` on stored inputs.

    Returns (reference_values, computed_values) for comparison.
    """
    x, v = load_test_data(
        "datasets_ncs/test_data_func{}.txt".format(problem_index))
    problem_params = load_problem(problem_index, 50)
    computed = benchmark_func(x, problem_index, problem_params)
    return v, computed
class FuncTest(unittest.TestCase):
    """Regression tests: benchmark_func output vs. stored reference values."""
    def setUp(self) -> None:
        pass
    def tearDown(self) -> None:
        pass
    def _check_relative_error(self, problem_index):
        # Every computed value must be within 0.1% relative error of the
        # stored reference value.
        expected, computed = test_func(problem_index)
        for idx in range(len(computed)):
            self.assertTrue(
                abs((expected[idx] - computed[idx]) / expected[idx]) < 0.001)
    def test_fun6(self):
        self._check_relative_error(6)
    def test_fun12(self):
        self._check_relative_error(12)
    def test_fun9(self):
        self._check_relative_error(9)
    def test_fun10(self):
        self._check_relative_error(10)
if __name__ == '__main__':
    # Run the regression test suite when executed as a script.
    unittest.main()
| [
"unittest.main",
"algorithm_ncs.benchmark.benchmark_func",
"numpy.asarray",
"algorithm_ncs.problem.load_problem"
] | [((457, 480), 'numpy.asarray', 'np.asarray', (['lines[0:10]'], {}), '(lines[0:10])\n', (467, 480), True, 'import numpy as np\n'), ((694, 725), 'algorithm_ncs.problem.load_problem', 'load_problem', (['problem_index', '(50)'], {}), '(problem_index, 50)\n', (706, 725), False, 'from algorithm_ncs.problem import load_problem\n'), ((736, 774), 'algorithm_ncs.benchmark.benchmark_func', 'benchmark_func', (['x', 'problem_index', 'para'], {}), '(x, problem_index, para)\n', (750, 774), False, 'from algorithm_ncs.benchmark import benchmark_func\n'), ((1563, 1578), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1576, 1578), False, 'import unittest\n'), ((489, 511), 'numpy.asarray', 'np.asarray', (['lines[10:]'], {}), '(lines[10:])\n', (499, 511), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This module defines built-in evaluation functions for segmentation applications
Segmentations can be evaluated at several scales:
'foreground' referring to metrics computed once for a foreground label
'label' referring to metrics computed once for each label (including background)
'cc' referring to metrics computed once for each connected component set
Connected components are defined by one-or-more connected
components on the reference segmentation and one-or-more connected
components on the infered segmentation.
These sets are defined by a cc_func. Currently
this is hard coded to be union_of_seg_for_each_ref_cc which takes each
connected component on the reference segmentation and all connected
components on the infered segmentation with any overlap. This will
eventually be made into a factory option for different cc set definitions
Overlap and distance measures can be computed at each of these levels by
deriving from PerComponentEvaluation, which handles the logic of identifying
which comparisons need to be done for each scale.
Overlap and distance measures are computed in two convenience functions
(compute_many_overlap_metrics and compute_many_distance_metrics) and wrapped
by Evaluation classes
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from scipy import ndimage
from niftynet.evaluation.base_evaluations import CachedSubanalysisEvaluation
from niftynet.utilities.util_common import MorphologyOps, \
CachedFunction, CachedFunctionByID
from niftynet.evaluation.base_evaluator import ScalarAggregator
class PerComponentEvaluation(CachedSubanalysisEvaluation):
    """
    This class represents evaluations performed on binary segmentation
    components computed per label or per connected component. It encodes the
    generation of evaluation tasks. Derived classes should define the
    metric_name constant and the function metric_from_binarized()
    """
    def subanalyses(self, subject_id, data):
        # Build one task dict per label (for 'foreground'/'label' units) or
        # per connected-component set (for 'cc' units).
        analyses = self.app_param.evaluation_units.split(',')
        tasks = []
        for analysis in analyses:
            if analysis in ['foreground', 'label']:
                labels = list(range(self.app_param.num_classes))
                if analysis == 'foreground':
                    # skip the background label for foreground-only analysis
                    labels.remove(0)
                for label in labels:
                    tasks.append({'subject_id': subject_id, 'label': label})
            elif analysis in ['cc']:
                # NOTE(review): connected_components documents its return
                # order as (reference, inferred), but the unpack below names
                # them the other way round. Downstream use appears internally
                # consistent with this swap — confirm the intended
                # orientation before relying on which map is which.
                cc_seg, cc_ref = \
                    connected_components(data['inferred'], data['label'],
                                         self.app_param.output_prob)
                cc_func = union_of_seg_for_each_ref_cc # TODO make into factory
                conncomps = cc_func(cc_seg, cc_ref)
                for conncomp in conncomps:
                    tasks.append({'subject_id': subject_id,
                                  'cc_labels': conncomps[conncomp]})
                # TODO save an index image from blobs_ref[0]
        return tasks
    def layer_op(self, subject_id, data, task):
        # We use a cached binarizer function so that the binarized
        # segmentation have the same python id
        # (identical parameters return the identical function object, so
        # CachedFunctionByID can share binarization results across metrics).
        # Returns a list containing one pandas DataFrame, or [] if the task
        # dict matches neither a per-label nor a per-cc task.
        if 'label' in task:
            binarizer = cached_label_binarizer(task['label'],
                                               self.app_param.output_prob)
            seg, ref = binarizer(data)
            metric_dict = {'subject_id': subject_id, 'label': task['label']}
            metric_dict.update(self.metric_dict_from_binarized(seg, ref))
            pdf = pd.DataFrame.from_records([metric_dict], ('subject_id', 'label'))
            return [pdf]
        elif 'cc_labels' in task:
            binarizer = cached_cc_binarizer(task['cc_labels'],
                                            self.app_param.output_prob)
            seg, ref = binarizer(data)
            # Human-readable id such as 'r1_s2&3' built from the component
            # label lists of the set.
            r_str = '&'.join([str(l) for l in task['cc_labels'][1]])
            s_str = '&'.join([str(l) for l in task['cc_labels'][0]])
            cc_id = 'r%s_s%s' % (r_str, s_str)
            metric_dict = {'subject_id': subject_id, 'cc_id': cc_id}
            metric_dict.update(self.metric_dict_from_binarized(seg, ref))
            pdf = pd.DataFrame.from_records([metric_dict], ('subject_id', 'cc_id'))
            return [pdf]
        return []
    def metric_dict_from_binarized(self, seg, ref):
        """
        Computes a metric from a binarized mask
        :param seg: numpy array with binary mask from inferred segmentation
        :param ref: numpy array with binary mask from reference segmentation
        :return: a dictionary of metric_name:metric_value
        """
        raise NotImplementedError('Not implemented in abstract base class')
class PerComponentScalarEvaluation(PerComponentEvaluation):
    """Specialization for metrics that return one scalar value whose name
    is simply the name of the evaluation class."""
    def __init__(self, *args, **kwargs):
        super(PerComponentScalarEvaluation, self).__init__(*args, **kwargs)
        # The reported metric is named after the concrete subclass.
        self.metric_name = self.__class__.__name__
    def metric_dict_from_binarized(self, seg, ref):
        """Wrap the scalar metric into a {metric_name: value} dictionary."""
        return {self.metric_name: self.metric_from_binarized(seg, ref)}
    def metric_from_binarized(self, seg, ref):
        """
        Compute the scalar metric value (abstract).
        :param seg: numpy array with binary mask from inferred segmentation
        :param ref: numpy array with binary mask from reference segmentation
        :return: scalar metric value
        """
    def get_aggregations(self):
        """Mean/stdev aggregators across subjects for per-label analyses."""
        aggregations = []
        for analysis in self.app_param.evaluation_units.split(','):
            if analysis in ('foreground', 'label'):
                for reducer, prefix in ((np.mean, 'mean_'),
                                        (np.std, 'stdev_')):
                    aggregations.append(
                        ScalarAggregator(self.metric_name,
                                         ('subject_id', 'label'),
                                         ('label',), reducer,
                                         prefix + self.metric_name))
            elif analysis in ('cc',):
                # No aggregation defined for connected-component analyses.
                pass
        return aggregations
class BuiltinOverlapEvaluation(PerComponentScalarEvaluation):
    """
    Wrapper class to encode many similar overlap metrics that can be computed
    from a 2x2 confusion matrix. Subclasses override
    metric_from_confusion_matrix to pick out the quantity of interest.
    """
    def metric_from_binarized(self, seg, ref):
        """
        Build the 2x2 confusion matrix of the two binary masks and delegate
        the metric computation to metric_from_confusion_matrix.
        Layout: M[0,0]=TN, M[0,1]=FN, M[1,0]=FP, M[1,1]=TP.
        :param seg: numpy array with binary mask from inferred segmentation
        :param ref: numpy array with binary mask from reference segmentation
        :return: scalar metric value
        """
        not_seg = np.logical_not(seg)
        not_ref = np.logical_not(ref)
        conf_mat = np.array(
            [[np.sum(np.logical_and(not_seg, not_ref)),
              np.sum(np.logical_and(not_seg, ref))],
             [np.sum(np.logical_and(seg, not_ref)),
              np.sum(np.logical_and(seg, ref))]])
        return self.metric_from_confusion_matrix(conf_mat)
    def metric_from_confusion_matrix(self, confusion_matrix):
        """
        Compute a metric from a 2x2 confusion matrix (abstract).
        :param confusion_matrix: 2x2 numpy array
        :return: scalar metric value
        """
#pylint: disable=missing-docstring,invalid-name
# Count metrics. Confusion-matrix layout (see BuiltinOverlapEvaluation):
# M[0,0]=TN, M[0,1]=FN, M[1,0]=FP, M[1,1]=TP.
class n_pos_ref(BuiltinOverlapEvaluation):
    # positives in the reference: TP + FN
    def metric_from_confusion_matrix(self, M):
        return M[0, 1] + M[1, 1]
class n_neg_ref(BuiltinOverlapEvaluation):
    # negatives in the reference: TN + FP
    def metric_from_confusion_matrix(self, M):
        return M[0, 0] + M[1, 0]
class n_pos_seg(BuiltinOverlapEvaluation):
    # positives in the inferred segmentation: FP + TP
    def metric_from_confusion_matrix(self, M):
        return M[1, 0] + M[1, 1]
class n_neg_seg(BuiltinOverlapEvaluation):
    # negatives in the inferred segmentation: TN + FN
    def metric_from_confusion_matrix(self, M):
        return M[0, 0] + M[0, 1]
class fp(BuiltinOverlapEvaluation):
    # false positives
    def metric_from_confusion_matrix(self, M):
        return M[1, 0]
class fn(BuiltinOverlapEvaluation):
    # false negatives
    def metric_from_confusion_matrix(self, M):
        return M[0, 1]
class tp(BuiltinOverlapEvaluation):
    # true positives
    def metric_from_confusion_matrix(self, M):
        return M[1, 1]
class tn(BuiltinOverlapEvaluation):
    # true negatives
    def metric_from_confusion_matrix(self, M):
        return M[0, 0]
class n_intersection(BuiltinOverlapEvaluation):
    # |seg AND ref| == TP
    def metric_from_confusion_matrix(self, M):
        return M[1, 1]
class n_union(BuiltinOverlapEvaluation):
    # |seg OR ref| == FN + FP + TP
    def metric_from_confusion_matrix(self, M):
        return M[0, 1] + M[1, 0] + M[1, 1]
# Ratio metrics derived from the same confusion matrix
# (M[0,0]=TN, M[0,1]=FN, M[1,0]=FP, M[1,1]=TP).
class specificity(BuiltinOverlapEvaluation):
    # TN / (TN + FP)
    def metric_from_confusion_matrix(self, M):
        return M[0, 0] / (M[0, 0] + M[1, 0])
class sensitivity(BuiltinOverlapEvaluation):
    # TP / (TP + FN), a.k.a. recall
    def metric_from_confusion_matrix(self, M):
        return M[1, 1] / (M[0, 1] + M[1, 1])
class accuracy(BuiltinOverlapEvaluation):
    # (TP + TN) / all voxels
    def metric_from_confusion_matrix(self, M):
        return (M[1, 1] + M[0, 0]) / sum(sum(M))
class false_positive_rate(BuiltinOverlapEvaluation):
    # FP / (TN + FP) == 1 - specificity
    def metric_from_confusion_matrix(self, M):
        return M[1, 0] / (M[0, 0] + M[1, 0])
class positive_predictive_values(BuiltinOverlapEvaluation):
    # TP / (TP + FP), a.k.a. precision
    def metric_from_confusion_matrix(self, M):
        return M[1, 1] / (M[1, 0] + M[1, 1])
class negative_predictive_values(BuiltinOverlapEvaluation):
    # TN / (TN + FN)
    def metric_from_confusion_matrix(self, M):
        return M[0, 0] / (M[0, 0] + M[0, 1])
class dice(BuiltinOverlapEvaluation):
    # 2*TP / (2*TP + FP + FN)
    def metric_from_confusion_matrix(self, M):
        return 2 * M[1, 1] / (M[1, 1] * 2 + M[1, 0] + M[0, 1])
Dice = dice
class jaccard(BuiltinOverlapEvaluation):
    # TP / (TP + FP + FN), i.e. intersection over union
    def metric_from_confusion_matrix(self, M):
        return M[1, 1] / (M[0, 1] + M[1, 0] + M[1, 1])
intersection_over_union = jaccard
Jaccard = jaccard
class informedness(BuiltinOverlapEvaluation):
    # sensitivity + specificity - 1 (Youden's J)
    def metric_from_confusion_matrix(self, M):
        return M[1, 1] / (M[0, 1] + M[1, 1]) + \
               M[0, 0] / (M[0, 0] + M[1, 0]) - 1
class markedness(BuiltinOverlapEvaluation):
    # PPV + NPV - 1
    def metric_from_confusion_matrix(self, M):
        return M[1, 1] / (M[1, 0] + M[1, 1]) + \
               M[0, 0] / (M[0, 0] + M[0, 1]) - 1
class vol_diff(BuiltinOverlapEvaluation):
    # NOTE(review): despite the name this is the volume *ratio*
    # (seg positives / ref positives), not a difference — confirm intended.
    def metric_from_confusion_matrix(self, M):
        return (M[1, 1] + M[1, 0]) / (M[0, 1] + M[1, 1])
# Distance metrics as e.g. in 10.3978/j.issn.2223-4292.2015.08.02
class average_distance(PerComponentScalarEvaluation):
    # Mean symmetric surface distance: border-to-border distances summed in
    # both directions, divided by the total number of border voxels.
    def metric_from_binarized(self, seg, ref):
        ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)
        border_ref, border_seg = borders(seg, ref, 8)
        return (np.sum(ref_border_dist) + np.sum(
            seg_border_dist)) / (np.sum(border_ref + border_seg))
class hausdorff_distance(PerComponentScalarEvaluation):
    # Symmetric Hausdorff distance: maximum border-to-border distance.
    # NOTE(review): when either border mask is empty this evaluates to 0
    # (max over an all-zero map) rather than nan, unlike
    # hausdorff95_distance below — confirm intended.
    def metric_from_binarized(self, seg, ref):
        ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)
        return np.max([np.max(ref_border_dist), np.max(seg_border_dist)])
class hausdorff95_distance(PerComponentScalarEvaluation):
    # 95th-percentile Hausdorff distance, robust to border outliers;
    # returns nan when either segmentation has no border voxels.
    def metric_from_binarized(self, seg, ref):
        ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)
        border_ref, border_seg = borders(seg, ref, 8)
        seg_values = ref_border_dist[border_seg > 0]
        ref_values = seg_border_dist[border_ref > 0]
        if seg_values.size == 0 or ref_values.size == 0:
            return np.nan
        return np.max([np.percentile(seg_values, 95),
                       np.percentile(ref_values, 95)])
#pylint: enable=missing-docstring,invalid-name
# Helper functions
@CachedFunction
def cached_label_binarizer(label, output_prob):
    """
    This class returns a function for binarizing an inferred segmentation
    for a specified label.
    This function is carefully designed to allow caching of unhashable numpy
    objects. Specifically, each call to cached_label_binarizer with the same
    (by-value) parameters will return the same (by python id) function
    object. This enables two calls to
    cached_label_binarizer(...)(numpy_object_1)
    with the same parameters from different metrics to use the cached result
    :param label: Which label to make foreground in the binary mask
    :param output_prob: Is the segmentation probabilistic (if so,
    argmax is used first to compute a label map)
    :return: a function for computing a binary label map
    """
    @CachedFunctionByID
    def binarizer(data):
        """
        This function binarizes a segmentation based on a specified
        label (defined by outer function)
        :param data: a data dictionary as built by ImageReader
        :return: a numpy array representing a binary label map
        """
        if output_prob:
            # probabilistic output: take per-voxel argmax over the last
            # (class) axis to obtain a label map first
            out = np.argmax(data['inferred'], -1)
        else:
            out = data['inferred']
        # NOTE(review): only the inferred map is binarized (== label); the
        # reference is returned as the raw label map, so downstream logical
        # ops treat *any* non-zero reference voxel as foreground. For
        # multi-class references confirm this is intended.
        return out == label, data['label']
    return binarizer
@CachedFunction
def cached_cc_binarizer(cc_labels, output_prob):
    """
    This class returns a function for binarizing inferred and reference
    segmentations for a specified connected component set.
    This function is carefully designed to allow caching of unhashable numpy
    objects. Specifically, each call to cached_label_binarizer with the same
    (by-value) parameters will return the same (by python id) function
    object. This enables two calls to
    cached_cc_binarizer(...)(numpy_object_1)
    with the same parameters from different metrics to use the cached result
    :param cc_labels: [seg_label_list, ref_label_list] where each is a
    list of values to be considered foreground for this cc set
    :param output_prob: Is the segmentation probabilistic (if so,
    argmax is used first to compute a label map)
    :return: a function for computing a binary label map pair
    """
    @CachedFunctionByID
    def binarizer(data):
        """
        This function binarizes a multi-object segmentation and reference
        into a specified connected component set (defined by outer function)
        :param data: a data dictionary as built by ImageReader
        :return: two numpy arrays representing binary masks (from
        inferred and reference segmentations) for a connected component set
        """
        cc_func = connected_components
        # NOTE(review): connected_components documents returning
        # (reference, inferred); the unpack below names them the other way
        # round, mirroring the call site in PerComponentEvaluation — the two
        # swaps appear mutually consistent, but verify the orientation.
        cc_seg, cc_ref = cc_func(data['inferred'], data['label'], output_prob)
        cc_seg_in = np.zeros_like(cc_seg[0])
        cc_ref_in = np.zeros_like(cc_ref[0])
        # Mark the selected component labels as foreground (1) in each mask.
        for i in cc_labels[0]:
            cc_seg_in[cc_seg[0] == i] = 1
        for i in cc_labels[1]:
            cc_ref_in[cc_ref[0] == i] = 1
        return cc_seg_in, cc_ref_in
    return binarizer
def union_of_seg_for_each_ref_cc(blobs_seg, blobs_ref):
    """
    Construct the connected-component sets to evaluate: each reference
    component is paired with the union of all inferred components that
    overlap it (background label 0 excluded).
    :param blobs_seg: tuple (numbered_cc_array, number_of_ccs)
    :param blobs_ref: tuple (numbered_cc_array, number_of_ccs)
    :return: dictionary {key_string: ((ref_cc_id,), tuple_of_seg_cc_ids)}
    """
    seg_map = blobs_seg[0]
    ref_map = blobs_ref[0]
    cc_sets = {}
    for ref_cc in range(1, blobs_ref[1] + 1):
        # inferred component labels overlapping this reference component
        overlapping = [s for s in np.unique(seg_map[ref_map == ref_cc])
                       if s != 0]
        key = 'r' + str(ref_cc) + '_c' + '_'.join(
            [str(s) for s in overlapping])
        cc_sets[key] = ((ref_cc,), tuple(overlapping))
    return cc_sets
@CachedFunctionByID
def borders(seg, ref, neigh=8):
    """
    Extract the border voxels of the reference and inferred segmentations.
    :param seg: numpy array with binary mask from inferred segmentation
    :param ref: numpy array with binary mask from reference segmentation
    :param neigh: connectivity 4 or 8
    :return: (reference border map, inferred border map)
    """
    # Drop the trailing singleton (modality/time) axes before morphology.
    ref_border_map = MorphologyOps(ref[:, :, :, 0, 0], neigh).border_map()
    seg_border_map = MorphologyOps(seg[:, :, :, 0, 0], neigh).border_map()
    return ref_border_map, seg_border_map
@CachedFunctionByID
def border_distance(seg, ref, neigh=8):
    """
    For every border voxel of one segmentation, the Euclidean distance to
    the nearest border voxel of the other segmentation.
    :param seg: numpy array with binary mask from inferred segmentation
    :param ref: numpy array with binary mask from reference segmentation
    :param neigh: connectivity 4 or 8
    :return: (distances at seg-border voxels to the nearest ref border,
              distances at ref-border voxels to the nearest seg border)
    """
    border_ref, border_seg = borders(seg, ref, neigh)
    # EDT of the inverted border map = distance of each voxel to that border.
    edt_from_ref_border = ndimage.distance_transform_edt(1 - border_ref)
    edt_from_seg_border = ndimage.distance_transform_edt(1 - border_seg)
    # Masking by the other border keeps distances only at border voxels.
    return border_seg * edt_from_ref_border, border_ref * edt_from_seg_border
@CachedFunctionByID
def connected_components(seg, ref, output_prob, neigh=8):
    """
    Numbers connected components in the reference and inferred segmentations
    :param seg: numpy array with binary mask from inferred segmentation
    :param ref: numpy array with binary mask from reference segmentation
    :param output_prob: Is the segmentation probabilistic (if so,
    argmax is used first to compute a label map)
    :param neigh: connectivity 4 or 8
    :return: (cc_map_ref, count) numbered connected components from reference
    :return: (cc_map_seg, count) numbered connected components from inferred
    """
    if output_prob:
        # collapse class probabilities to a label map first
        seg = np.argmax(seg, -1)
    blobs_ref = MorphologyOps(ref[:, :, :, 0, 0], neigh).foreground_component()
    blobs_seg = MorphologyOps(seg[:, :, :, 0, 0], neigh).foreground_component()
    # NOTE(review): the return order is (reference, inferred) as documented,
    # but call sites in this file unpack it as `cc_seg, cc_ref = ...` — the
    # names appear swapped there; verify the intended orientation.
    # The singleton axes dropped above are restored with np.newaxis.
    return (blobs_ref[0][:, :, :, np.newaxis, np.newaxis], blobs_ref[1]), \
           (blobs_seg[0][:, :, :, np.newaxis, np.newaxis], blobs_seg[1]),
# TODO
# per subject connected component related metrics
# 'connected_elements': (self.connected_elements, 'TPc,FPc,FNc'),
# 'outline_error': (self.outline_error, 'OER,OEFP,OEFN'),
# 'detection_error': (self.detection_error, 'DE,DEFP,DEFN'),
# list_labels
# list connected components
# TODO image_map outputs
| [
"pandas.DataFrame.from_records",
"scipy.ndimage.distance_transform_edt",
"numpy.unique",
"niftynet.evaluation.base_evaluator.ScalarAggregator",
"niftynet.utilities.util_common.MorphologyOps",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.percentile",
"numpy.zeros_like"
] | [((17202, 17248), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['(1 - border_ref)'], {}), '(1 - border_ref)\n', (17232, 17248), False, 'from scipy import ndimage\n'), ((17268, 17314), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['(1 - border_seg)'], {}), '(1 - border_seg)\n', (17298, 17314), False, 'from scipy import ndimage\n'), ((14994, 15018), 'numpy.zeros_like', 'np.zeros_like', (['cc_seg[0]'], {}), '(cc_seg[0])\n', (15007, 15018), True, 'import numpy as np\n'), ((15039, 15063), 'numpy.zeros_like', 'np.zeros_like', (['cc_ref[0]'], {}), '(cc_ref[0])\n', (15052, 15063), True, 'import numpy as np\n'), ((18131, 18149), 'numpy.argmax', 'np.argmax', (['seg', '(-1)'], {}), '(seg, -1)\n', (18140, 18149), True, 'import numpy as np\n'), ((3633, 3698), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['[metric_dict]', "('subject_id', 'label')"], {}), "([metric_dict], ('subject_id', 'label'))\n", (3658, 3698), True, 'import pandas as pd\n'), ((11317, 11348), 'numpy.sum', 'np.sum', (['(border_ref + border_seg)'], {}), '(border_ref + border_seg)\n', (11323, 11348), True, 'import numpy as np\n'), ((13361, 13392), 'numpy.argmax', 'np.argmax', (["data['inferred']", '(-1)'], {}), "(data['inferred'], -1)\n", (13370, 13392), True, 'import numpy as np\n'), ((15809, 15855), 'numpy.unique', 'np.unique', (['blobs_seg[0][blobs_ref[0] == cc_id]'], {}), '(blobs_seg[0][blobs_ref[0] == cc_id])\n', (15818, 15855), True, 'import numpy as np\n'), ((16505, 16545), 'niftynet.utilities.util_common.MorphologyOps', 'MorphologyOps', (['ref[:, :, :, 0, 0]', 'neigh'], {}), '(ref[:, :, :, 0, 0], neigh)\n', (16518, 16545), False, 'from niftynet.utilities.util_common import MorphologyOps, CachedFunction, CachedFunctionByID\n'), ((16576, 16616), 'niftynet.utilities.util_common.MorphologyOps', 'MorphologyOps', (['seg[:, :, :, 0, 0]', 'neigh'], {}), '(seg[:, :, :, 0, 0], neigh)\n', (16589, 16616), False, 'from 
niftynet.utilities.util_common import MorphologyOps, CachedFunction, CachedFunctionByID\n'), ((18166, 18206), 'niftynet.utilities.util_common.MorphologyOps', 'MorphologyOps', (['ref[:, :, :, 0, 0]', 'neigh'], {}), '(ref[:, :, :, 0, 0], neigh)\n', (18179, 18206), False, 'from niftynet.utilities.util_common import MorphologyOps, CachedFunction, CachedFunctionByID\n'), ((18246, 18286), 'niftynet.utilities.util_common.MorphologyOps', 'MorphologyOps', (['seg[:, :, :, 0, 0]', 'neigh'], {}), '(seg[:, :, :, 0, 0], neigh)\n', (18259, 18286), False, 'from niftynet.utilities.util_common import MorphologyOps, CachedFunction, CachedFunctionByID\n'), ((4278, 4343), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['[metric_dict]', "('subject_id', 'cc_id')"], {}), "([metric_dict], ('subject_id', 'cc_id'))\n", (4303, 4343), True, 'import pandas as pd\n'), ((5989, 6102), 'niftynet.evaluation.base_evaluator.ScalarAggregator', 'ScalarAggregator', (['self.metric_name', "('subject_id', 'label')", "('label',)", 'np.mean', "('mean_' + self.metric_name)"], {}), "(self.metric_name, ('subject_id', 'label'), ('label',), np.\n mean, 'mean_' + self.metric_name)\n", (6005, 6102), False, 'from niftynet.evaluation.base_evaluator import ScalarAggregator\n'), ((6256, 6369), 'niftynet.evaluation.base_evaluator.ScalarAggregator', 'ScalarAggregator', (['self.metric_name', "('subject_id', 'label')", "('label',)", 'np.std', "('stdev_' + self.metric_name)"], {}), "(self.metric_name, ('subject_id', 'label'), ('label',), np.\n std, 'stdev_' + self.metric_name)\n", (6272, 6369), False, 'from niftynet.evaluation.base_evaluator import ScalarAggregator\n'), ((11250, 11273), 'numpy.sum', 'np.sum', (['ref_border_dist'], {}), '(ref_border_dist)\n', (11256, 11273), True, 'import numpy as np\n'), ((11276, 11299), 'numpy.sum', 'np.sum', (['seg_border_dist'], {}), '(seg_border_dist)\n', (11282, 11299), True, 'import numpy as np\n'), ((11550, 11573), 'numpy.max', 'np.max', (['ref_border_dist'], {}), 
'(ref_border_dist)\n', (11556, 11573), True, 'import numpy as np\n'), ((11575, 11598), 'numpy.max', 'np.max', (['seg_border_dist'], {}), '(seg_border_dist)\n', (11581, 11598), True, 'import numpy as np\n'), ((12046, 12075), 'numpy.percentile', 'np.percentile', (['seg_values', '(95)'], {}), '(seg_values, 95)\n', (12059, 12075), True, 'import numpy as np\n'), ((12100, 12129), 'numpy.percentile', 'np.percentile', (['ref_values', '(95)'], {}), '(ref_values, 95)\n', (12113, 12129), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
from numpy import sin,cos,pi
from scipy.integrate import quad
import matplotlib.pyplot as plt
import scipy.constants as C
import healpy as hp
import h5py
import scipy.optimize as optimize
from scipy.integrate import quad
from pylab import cm
import time
from LFSM.fitting_params.how_to_smooth_SSM import smooth
from LFSM.I_E_term.I_E_equation import I_E
from LFSM.interpolate_sky.interpolate_sky_map import produce_index
class free_free(object):
    def __init__(self, v, nside, index_type, dist,emi_form,I_E_form,R0_R1_equal,using_raw_diffuse,only_fit_Anu):
        """Configure the free-free emissivity model.

        :param v: observing frequency; used as MHz throughout this class
        :param nside: HEALPix resolution parameter
        :param index_type: spectral-index map variant passed to smooth()
        :param dist: upper limit (line-of-sight distance) of the emissivity
            integrals, in the same units as the 8.5 kpc solar radius
        :param emi_form: emissivity functional form selector
        :param I_E_form: extragalactic-term form passed to smooth()
        :param R0_R1_equal: whether R_0 and R_1 are tied in the fit
        :param using_raw_diffuse: use the raw diffuse map in smooth()
        :param only_fit_Anu: fit only the amplitude, fixing the shape params
        """
        self.v = v  # observing frequency in MHz (despite older "hz" note)
        self.nside = nside
        self.index_type = index_type
        self.dist = dist
        self.I_E_form = I_E_form
        self.emi_form = emi_form
        self.R0_R1_equal = R0_R1_equal
        self.using_raw_diffuse = using_raw_diffuse
        self.only_fit_Anu = only_fit_Anu
        # Shape parameters [A_v, R_0, alpha, Z_0, gamma] fitted at 408 MHz;
        # reused by fun_for_curvefit_only_fit_Anu.
        self.params_408 = np.array([71.19, 4.23, 0.03, 0.47, 0.77])
def plot_mollview(self,total,filename = '',log=True):
cool_cmap = cm.jet
cool_cmap.set_under("w") # sets background to white
plt.figure(1)
if log==True:
m = np.log10(total)
Min = None
Max = None
hp.mollview(m,title="The frequency in"+' '+str(self.v)+'MHz', min = Min, max = Max,cmap = cool_cmap)
if log==False:
m = np.log10(total)
Min = None
Max = None
hp.mollview(total,title="The frequency in"+' '+str(self.v)+'MHz', min = Min, max = Max,cmap = cool_cmap)
plt.savefig(str(self.v)+'MHz'+ filename +'.eps',format = 'eps')
return
    def produce_xyz(self):
        """Return the fitting data at self.v (in MHz) from the smoothed sky
        model, via smooth(...).add_5().
        Rows are presumably (l, b, value) triples consumed by the
        curve-fit helpers — TODO confirm against smooth.add_5().
        """
        #v in Mhz
        result = smooth(self.nside,self.v, self.index_type,self.I_E_form,self.using_raw_diffuse).add_5()
        return result
    def diffuse_raw(self):
        """Return the diffuse sky map at self.v via smooth(...).produce_data()."""
        diffuse_x = smooth(self.nside, self.v, self.index_type,self.I_E_form,self.using_raw_diffuse).produce_data()
        return diffuse_x
def fun_for_curvefit(self, xyz, A_v, R_0, alpha, R_1, beta, Z_0, gamma, form = 'n_HII'):
#produce for input for curve_fit
result = []
for l,b in xyz:
self.l = l * np.pi / 180.0
self.b = b * np.pi / 180.0
def fun_inte(r):
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
#integrate along the sight direction
#return h * np.square(a * np.exp(-np.abs(Z)/b - np.square(R/c)) + d * np.exp(-np.abs(Z)/e - np.square(R/f - g)))
#return a * (R/b)**c * np.exp(-d*(R-e/e) - np.abs(Z)/f)
#integrate along sight direction
#return e * np.square(a * np.exp(-R/b) * np.square(2/(np.exp((Z-d)/c)+ np.exp(-(Z-d)/c))))
#ne = f * np.exp(-np.abs(d)/1e3 - (r_1/(2e4*A +1))**2) + g * np.exp(-np.abs(d)/(0.15*1e3) - (r_1/(2e3*B+1) - 2)**2)
#ne = f * np.exp(-np.abs(d)/(1e3*B+1) - (r_1/(2e4*A +1))**2)
#ne = f * np.exp(-np.abs(d)/(1e3*B+1) - (r_1/(2e4*A +1))**4)
#ne = np.square(a * np.exp(-R/b) * np.square(2.0/(np.exp(Z/(1e3*c+1))+ np.exp(-Z/(1e3*c+1)))))
#ne = a * np.exp(-np.abs(d) * 2/(B+0.1) - 2*(r_1/(20*c + 0.1))**2) + D * np.exp(-np.abs(d)*2/(e * 0.15 + 0.01) - 2*(r_1/(2*f+0.1))**2)
#ne = a * np.exp(-np.abs(d) * 2/(B+0.1) - 2*(r_1/(20*c + 0.1))**2) + D
#ne = (R/(R_0+0.1))**alpha * a * np.exp(-np.abs(Z) * 2/(B+0.1) - 2*(r_1/(20*c + 0.1))**2) + D
emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
result.append(quad(fun_inte, 0, self.dist)[0])
return np.array(result)
def fun_for_curvefit_only_R0(self, xyz, A_v, R_0, alpha, Z_0, gamma, form = 'n_HII'):
#produce for input for curve_fit
beta = 1
result = []
for l,b in xyz:
self.l = l * np.pi / 180.0
self.b = b * np.pi / 180.0
def fun_inte(r):
#x = r * np.sin(np.pi/2. - self.b) * np.cos(self.l)
#y = r * np.sin(np.pi/2. - self.b) * np.sin(self.l)
#z = r * np.cos(np.pi/2. - self.b)
#x_1 = x - 8.5
#y_1 = y
#z_1 = z
#r_1 = np.sqrt(np.square(x_1) + np.square(y_1) + np.square(z_1))
#b_1 = np.pi/2.0 - np.arccos(z_1/r_1)
#l_1 = np.arctan(y_1/x_1)
##R = r_1
#R = np.sqrt(r_1**2 - z**2)
#Z = r_1 * np.sin(b_1)
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_0)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
result.append(quad(fun_inte, 0, self.dist)[0])
return np.array(result)
def fun_for_curvefit_R0R2(self, xyz, A_v, R_0, alpha, Z_0, gamma, form = 'n_HII',R_2 = 0.1):
#produce for input for curve_fit
beta = 1
R_2 = R_2
result = []
for l,b in xyz:
self.l = l * np.pi / 180.0
self.b = b * np.pi / 180.0
def fun_inte(r):
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_0)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
result.append(quad(fun_inte, 0, self.dist)[0])
return np.array(result)
def fun_for_curvefit_only_fit_Anu(self, xyz, A_v):
#produce for input for curve_fit relatively error
##params = np.array([1.66983971e+06, 5.50771193e+00, -4.82064326e-02, 6.42470460e-01, 5.20197943e-01])
#fitting params using absolute error
##params = np.array([1.70973109e+08, 3.13073265e+00, 6.75317432e-01, 8.80354845e-01, 1.10987383e+00])
params = self.params_408
R_0 = params[1]
alpha = params[2]
Z_0 = params[3]
gamma = params[4]
beta = 1
R_2 = 0.1
result = []
for l,b in xyz:
self.l = l * np.pi / 180.0
self.b = b * np.pi / 180.0
def fun_inte(r):
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_0)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
result.append(quad(fun_inte, 0, self.dist)[0])
return np.array(result)
def curve_fit_for_Anu(self):
#A_v
guess = [1]
func = self.fun_for_curvefit_only_fit_Anu
xyz = self.produce_xyz()
#print 'xyz.shape',xyz.shape
# bug report using absolute error
result, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess, bounds=(np.array([0]),np.array([1e10])), method='trf')
##params = np.array([1.66983971e+06, 5.50771193e+00, -4.82064326e-02, 6.42470460e-01, 5.20197943e-01])
##params = np.array([1.70973109e+08, 3.13073265e+00, 6.75317432e-01, 8.80354845e-01, 1.10987383e+00])
params = self.params_408
params[0] = result[0]
with h5py.File(str(self.v)+'Mhz_fitted_param.hdf5','w') as f:
f.create_dataset('params',data = params)
f.create_dataset('v',data = self.v)
f.create_dataset('pcov', data = pcov)
#print 'frequency',self.v
#print 'params',params
#print 'pcov',pcov
return params
def model_m3(self,l,b,abcz0,form='two_componant',R_2=0.1):
self.l = l * np.pi / 180.0
self.b = b * np.pi /180.0
A_v, R_0,alpha, Z_0, gamma = abcz0
R_1 = R_0.copy()
R_2 = R_2
beta = 1
def fun_inte(r):
#integrate along the sight direction
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
return quad(fun_inte, 0, self.dist)[0]
def curve_fit(self,form='CRs'):
#if self.R0_R1_equal == False:
if False:
#R_0, alpha, a,B,c,D,g
#A_v, R_0, alpha, R_1, beta, Z_0, gamma
guess = [100,8.5,2,4,2,3,1]
func = self.fun_for_curvefit
xyz = self.produce_xyz()
#print 'xyz.shape',xyz.shape
params, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess, bounds=(np.array([0,1e-5,-3.1,1e-5,-3.1,1e-5,-3.1]),np.array([1e10,100,3.1,100,3.1,20,3.1])), method='trf')
# if self.R0_R1_equal == True:
# #A_v, R_0, alpha, Z_0, gamma
# guess = [1e7,5,2,1.6,1]
# func = self.fun_for_curvefit_only_R0
# xyz = self.produce_xyz()
# print 'xyz.shape',xyz.shape
# params, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess,sigma=xyz[:,2], bounds=(np.array([0,1e-5,-3.1,1e-5,-3.1]),np.array([1e10,100,3.1,20,3.1])), method='trf')
#if self.R0_R1_equal == True:
if True:
#A_v, R_0,R_2=0.1, alpha, Z_0, gamma
guess = [80,5,2,1.6,1]
func = self.fun_for_curvefit_R0R2
xyz = self.produce_xyz()
#print 'xyz.shape',xyz.shape
#params, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess,sigma = xyz[:,2], bounds=(np.array([0,1e-5,-3.1,1e-5,-3.1]),np.array([1e10,100,3.1,20,3.1])), method='trf')
#params, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess, bounds=(np.array([0,1e-5,-3.1,1e-5,-3.1]),np.array([1e10,100,3.1,20,3.1])), method='trf')
params, pcov = optimize.curve_fit(func, xyz[:,:2], xyz[:,2], guess, bounds=(np.array([0,1e-5,1e-5,1e-5,1e-5]),np.array([150,5,3.1,2,3.1])), method='trf')
with h5py.File(str(self.v)+'Mhz_fitted_param.hdf5','w') as f:
f.create_dataset('params',data = params)
f.create_dataset('v',data = self.v)
f.create_dataset('pcov', data = pcov)
#print 'frequency',self.v
#print 'params',params
#print 'pcov',pcov
return params
def model_m2(self,l,b,abcz0,form='two_componant',R_2=0.1):
self.l = l * np.pi / 180.0
self.b = b * np.pi /180.0
if self.R0_R1_equal == False:
A_v, R_0, alpha, R_1, beta, Z_0, gamma = abcz0
R_2 = 0
if self.R0_R1_equal == True:
#A_v, R_0, alpha, Z_0, gamma = abcz0
#R_1 = R_0.copy()
#beta = 1
A_v, R_0,alpha, Z_0, gamma = abcz0
R_1 = R_0.copy()
R_2 = R_2
beta = 1
def fun_inte(r):
#integrate along the sight direction
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
#get rid of square
return emissivity
return quad(fun_inte, 0, self.dist)[0]
def model_m4(self,l,b,params_408,pix_number,R_2 = 0.1):
self.l = l * np.pi / 180.0
self.b = b * np.pi /180.0
A_v, R_0,alpha, Z_0, gamma = params_408
R_1 = R_0.copy()
R_2 = R_2
beta = 1
#pix_number = hp.ang2pix(self.nside, l, b, lonlat = True)
f = produce_index(Nside = self.nside, freq = self.v, index_type = self.index_type,I_E_form = self.I_E_form)
Beta_G = f.pixel_dependence_index_minus_I_E()
def fun_inte(r):
#integrate along the sight direction
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)*(self.v/408.)**Beta_G[pix_number]
#get rid of square
return emissivity
return quad(fun_inte, 0, self.dist)[0]
def model_m5(self,l,b,params_408,R_2 = 0.1):
self.l = l * np.pi / 180.0
self.b = b * np.pi /180.0
A_v, R_0,alpha, Z_0, gamma = params_408
R_1 = R_0.copy()
R_2 = R_2
beta = 1
#pix_number = hp.ang2pix(self.nside, l, b, lonlat = True)
f = produce_index(Nside = self.nside, freq = self.v, index_type = self.index_type,I_E_form = self.I_E_form)
Beta_G = f.constant_index_minus_I_E()
def fun_inte(r):
#integrate along the sight direction
R = np.sqrt(8.5**2 + (r*np.cos(self.b))**2 -2*8.5*(r*np.cos(self.b))*np.cos(self.l))
Z = r * np.sin(self.b)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)*(self.v/408.)**Beta_G[0]
#get rid of square
return emissivity
return quad(fun_inte, 0, self.dist)[0]
def I_E(self, v):
f = I_E(v,self.I_E_form)
result = f.I_E()
#print 'I_E result',result
return result
def delta_m(self):
try:
with h5py.File('./'+str(self.v)+'Mhz_fitted_param.hdf5','r') as f:
abcz0 = f['params'][:]
print ('this is using fitted params')
self.produce_xyz()
except:
print ('i am here doing fitting now')
if self.index_type == 'pixel_dependence_index_minus_I_E':
#params_408 = np.array([71.19, 4.23, 0.03, 0.47, 0.77])
abcz0 = self.params_408
elif self.index_type == 'constant_index_minus_I_E':
if int(self.v) == int(408):
try:
with h5py.File('408Mhz_fitted_param.hdf5','r') as f:
abcz0 = f['params'][:]
except:
abcz0 = self.curve_fit()
abcz0 = self.params_408
else:
abcz0 = self.params_408
else:
if self.only_fit_Anu == True:
abcz0 = self.curve_fit_for_Anu()
else:
abcz0 = self.curve_fit()
#print 'params',abcz0
nside = self.nside
m = np.zeros(hp.nside2npix(nside))
I_E = self.I_E(self.v)
for pix_number in range(0,hp.nside2npix(nside)):
l,b = hp.pixelfunc.pix2ang(nside, pix_number, nest = False, lonlat = True)
#the emissivity after absorption and plus the term of extragalaxy
a = time.time()
if self.index_type == 'pixel_dependence_index_minus_I_E':
pix_value = self.model_m4(l,b,abcz0,pix_number) + I_E
elif self.index_type == 'constant_index_minus_I_E':
if int(self.v) == int(408):
pix_value = self.model_m2(l,b,abcz0) + I_E
else:
pix_value = self.model_m5(l,b,abcz0) + I_E
else:
if self.only_fit_Anu == True:
pix_value = self.model_m3(l,b,abcz0) + I_E
else:
pix_value = self.model_m2(l,b,abcz0) + I_E
m[pix_number] = pix_value
b = time.time()
#print 'delt_m delay',b-a
#print 'produce delt_m and params is over'
#self.plot_mollview(m,filename= 'integrated_temperature')
diffuse_raw = self.diffuse_raw()
delt_m = diffuse_raw + I_E - m
#residual between fitted emissivity and raw input not the smoothed raw input
I_E_value = self.I_E(self.v)
delt_m_percentage = delt_m / diffuse_raw * 100
#self.plot_mollview(delt_m,filename = 'delt_m',log=False)
#self.plot_mollview(delt_m_percentage,filename = 'delt_m_percentage',log=False)
with h5py.File('./'+str(self.emi_form)+str(self.v)+'Mhz' + '_delt_m_and_unabsorb_and_delt_m_percentage.hdf5','w') as f:
f.create_dataset('delt_m',data = delt_m)
f.create_dataset('delt_m_percentage',data = delt_m_percentage)
f.create_dataset('integrated_temperature_total_m', data = m)
f.create_dataset('diffuse_raw', data = diffuse_raw)
f.create_dataset('I_E_value', data = I_E_value)
#back to delt_m and params
return delt_m, abcz0
| [
"LFSM.interpolate_sky.interpolate_sky_map.produce_index",
"numpy.abs",
"numpy.log10",
"scipy.integrate.quad",
"h5py.File",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"LFSM.I_E_term.I_E_equation.I_E",
"numpy.cos",
"LFSM.fitting_params.how_to_smooth_SSM.smooth",
"healpy.nside2npix",... | [((1021, 1062), 'numpy.array', 'np.array', (['[71.19, 4.23, 0.03, 0.47, 0.77]'], {}), '([71.19, 4.23, 0.03, 0.47, 0.77])\n', (1029, 1062), True, 'import numpy as np\n'), ((1217, 1230), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1227, 1230), True, 'import matplotlib.pyplot as plt\n'), ((3931, 3947), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3939, 3947), True, 'import numpy as np\n'), ((5200, 5216), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (5208, 5216), True, 'import numpy as np\n'), ((5943, 5959), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (5951, 5959), True, 'import numpy as np\n'), ((7065, 7081), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (7073, 7081), True, 'import numpy as np\n'), ((12081, 12181), 'LFSM.interpolate_sky.interpolate_sky_map.produce_index', 'produce_index', ([], {'Nside': 'self.nside', 'freq': 'self.v', 'index_type': 'self.index_type', 'I_E_form': 'self.I_E_form'}), '(Nside=self.nside, freq=self.v, index_type=self.index_type,\n I_E_form=self.I_E_form)\n', (12094, 12181), False, 'from LFSM.interpolate_sky.interpolate_sky_map import produce_index\n'), ((13006, 13106), 'LFSM.interpolate_sky.interpolate_sky_map.produce_index', 'produce_index', ([], {'Nside': 'self.nside', 'freq': 'self.v', 'index_type': 'self.index_type', 'I_E_form': 'self.I_E_form'}), '(Nside=self.nside, freq=self.v, index_type=self.index_type,\n I_E_form=self.I_E_form)\n', (13019, 13106), False, 'from LFSM.interpolate_sky.interpolate_sky_map import produce_index\n'), ((13644, 13665), 'LFSM.I_E_term.I_E_equation.I_E', 'I_E', (['v', 'self.I_E_form'], {}), '(v, self.I_E_form)\n', (13647, 13665), False, 'from LFSM.I_E_term.I_E_equation import I_E\n'), ((1269, 1284), 'numpy.log10', 'np.log10', (['total'], {}), '(total)\n', (1277, 1284), True, 'import numpy as np\n'), ((1483, 1498), 'numpy.log10', 'np.log10', (['total'], {}), '(total)\n', (1491, 1498), True, 'import numpy 
as np\n'), ((8698, 8726), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (8702, 8726), False, 'from scipy.integrate import quad\n'), ((11733, 11761), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (11737, 11761), False, 'from scipy.integrate import quad\n'), ((12670, 12698), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (12674, 12698), False, 'from scipy.integrate import quad\n'), ((13577, 13605), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (13581, 13605), False, 'from scipy.integrate import quad\n'), ((14941, 14961), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (14954, 14961), True, 'import healpy as hp\n'), ((15029, 15049), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (15042, 15049), True, 'import healpy as hp\n'), ((15070, 15134), 'healpy.pixelfunc.pix2ang', 'hp.pixelfunc.pix2ang', (['nside', 'pix_number'], {'nest': '(False)', 'lonlat': '(True)'}), '(nside, pix_number, nest=False, lonlat=True)\n', (15090, 15134), True, 'import healpy as hp\n'), ((15233, 15244), 'time.time', 'time.time', ([], {}), '()\n', (15242, 15244), False, 'import time\n'), ((15956, 15967), 'time.time', 'time.time', ([], {}), '()\n', (15965, 15967), False, 'import time\n'), ((1822, 1909), 'LFSM.fitting_params.how_to_smooth_SSM.smooth', 'smooth', (['self.nside', 'self.v', 'self.index_type', 'self.I_E_form', 'self.using_raw_diffuse'], {}), '(self.nside, self.v, self.index_type, self.I_E_form, self.\n using_raw_diffuse)\n', (1828, 1909), False, 'from LFSM.fitting_params.how_to_smooth_SSM import smooth\n'), ((1980, 2067), 'LFSM.fitting_params.how_to_smooth_SSM.smooth', 'smooth', (['self.nside', 'self.v', 'self.index_type', 'self.I_E_form', 'self.using_raw_diffuse'], {}), '(self.nside, self.v, self.index_type, self.I_E_form, 
self.\n using_raw_diffuse)\n', (1986, 2067), False, 'from LFSM.fitting_params.how_to_smooth_SSM import smooth\n'), ((8493, 8507), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (8499, 8507), True, 'import numpy as np\n'), ((11528, 11542), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (11534, 11542), True, 'import numpy as np\n'), ((12431, 12445), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (12437, 12445), True, 'import numpy as np\n'), ((13348, 13362), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (13354, 13362), True, 'import numpy as np\n'), ((2514, 2528), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (2520, 2528), True, 'import numpy as np\n'), ((3883, 3911), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (3887, 3911), False, 'from scipy.integrate import quad\n'), ((4931, 4945), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (4937, 4945), True, 'import numpy as np\n'), ((5152, 5180), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (5156, 5180), False, 'from scipy.integrate import quad\n'), ((5668, 5682), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (5674, 5682), True, 'import numpy as np\n'), ((5895, 5923), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (5899, 5923), False, 'from scipy.integrate import quad\n'), ((6790, 6804), 'numpy.sin', 'np.sin', (['self.b'], {}), '(self.b)\n', (6796, 6804), True, 'import numpy as np\n'), ((7017, 7045), 'scipy.integrate.quad', 'quad', (['fun_inte', '(0)', 'self.dist'], {}), '(fun_inte, 0, self.dist)\n', (7021, 7045), False, 'from scipy.integrate import quad\n'), ((7395, 7408), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7403, 7408), True, 'import numpy as np\n'), ((7409, 7434), 'numpy.array', 'np.array', (['[10000000000.0]'], {}), '([10000000000.0])\n', (7417, 7434), True, 'import numpy as 
np\n'), ((8562, 8588), 'numpy.exp', 'np.exp', (['(-(R / R_1) ** beta)'], {}), '(-(R / R_1) ** beta)\n', (8568, 8588), True, 'import numpy as np\n'), ((11597, 11623), 'numpy.exp', 'np.exp', (['(-(R / R_1) ** beta)'], {}), '(-(R / R_1) ** beta)\n', (11603, 11623), True, 'import numpy as np\n'), ((3729, 3755), 'numpy.exp', 'np.exp', (['(-(R / R_1) ** beta)'], {}), '(-(R / R_1) ** beta)\n', (3735, 3755), True, 'import numpy as np\n'), ((4998, 5024), 'numpy.exp', 'np.exp', (['(-(R / R_0) ** beta)'], {}), '(-(R / R_0) ** beta)\n', (5004, 5024), True, 'import numpy as np\n'), ((5741, 5767), 'numpy.exp', 'np.exp', (['(-(R / R_0) ** beta)'], {}), '(-(R / R_0) ** beta)\n', (5747, 5767), True, 'import numpy as np\n'), ((6863, 6889), 'numpy.exp', 'np.exp', (['(-(R / R_0) ** beta)'], {}), '(-(R / R_0) ** beta)\n', (6869, 6889), True, 'import numpy as np\n'), ((8457, 8471), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (8463, 8471), True, 'import numpy as np\n'), ((9165, 9217), 'numpy.array', 'np.array', (['[0, 1e-05, -3.1, 1e-05, -3.1, 1e-05, -3.1]'], {}), '([0, 1e-05, -3.1, 1e-05, -3.1, 1e-05, -3.1])\n', (9173, 9217), True, 'import numpy as np\n'), ((9209, 9263), 'numpy.array', 'np.array', (['[10000000000.0, 100, 3.1, 100, 3.1, 20, 3.1]'], {}), '([10000000000.0, 100, 3.1, 100, 3.1, 20, 3.1])\n', (9217, 9263), True, 'import numpy as np\n'), ((10413, 10454), 'numpy.array', 'np.array', (['[0, 1e-05, 1e-05, 1e-05, 1e-05]'], {}), '([0, 1e-05, 1e-05, 1e-05, 1e-05])\n', (10421, 10454), True, 'import numpy as np\n'), ((10447, 10478), 'numpy.array', 'np.array', (['[150, 5, 3.1, 2, 3.1]'], {}), '([150, 5, 3.1, 2, 3.1])\n', (10455, 10478), True, 'import numpy as np\n'), ((11492, 11506), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (11498, 11506), True, 'import numpy as np\n'), ((12395, 12409), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (12401, 12409), True, 'import numpy as np\n'), ((12500, 12526), 'numpy.exp', 'np.exp', (['(-(R / R_1) ** beta)'], {}), 
'(-(R / R_1) ** beta)\n', (12506, 12526), True, 'import numpy as np\n'), ((13312, 13326), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (13318, 13326), True, 'import numpy as np\n'), ((13417, 13443), 'numpy.exp', 'np.exp', (['(-(R / R_1) ** beta)'], {}), '(-(R / R_1) ** beta)\n', (13423, 13443), True, 'import numpy as np\n'), ((2474, 2488), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (2480, 2488), True, 'import numpy as np\n'), ((4891, 4905), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (4897, 4905), True, 'import numpy as np\n'), ((5628, 5642), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (5634, 5642), True, 'import numpy as np\n'), ((6750, 6764), 'numpy.cos', 'np.cos', (['self.l'], {}), '(self.l)\n', (6756, 6764), True, 'import numpy as np\n'), ((8412, 8426), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (8418, 8426), True, 'import numpy as np\n'), ((8441, 8455), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (8447, 8455), True, 'import numpy as np\n'), ((8596, 8605), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (8602, 8605), True, 'import numpy as np\n'), ((11447, 11461), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (11453, 11461), True, 'import numpy as np\n'), ((11476, 11490), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (11482, 11490), True, 'import numpy as np\n'), ((11631, 11640), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (11637, 11640), True, 'import numpy as np\n'), ((12350, 12364), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (12356, 12364), True, 'import numpy as np\n'), ((12379, 12393), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (12385, 12393), True, 'import numpy as np\n'), ((13267, 13281), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (13273, 13281), True, 'import numpy as np\n'), ((13296, 13310), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (13302, 13310), True, 'import numpy as np\n'), ((2429, 2443), 'numpy.cos', 'np.cos', 
(['self.b'], {}), '(self.b)\n', (2435, 2443), True, 'import numpy as np\n'), ((2458, 2472), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (2464, 2472), True, 'import numpy as np\n'), ((3763, 3772), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (3769, 3772), True, 'import numpy as np\n'), ((4846, 4860), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (4852, 4860), True, 'import numpy as np\n'), ((4875, 4889), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (4881, 4889), True, 'import numpy as np\n'), ((5032, 5041), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5038, 5041), True, 'import numpy as np\n'), ((5583, 5597), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (5589, 5597), True, 'import numpy as np\n'), ((5612, 5626), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (5618, 5626), True, 'import numpy as np\n'), ((5775, 5784), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5781, 5784), True, 'import numpy as np\n'), ((6705, 6719), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (6711, 6719), True, 'import numpy as np\n'), ((6734, 6748), 'numpy.cos', 'np.cos', (['self.b'], {}), '(self.b)\n', (6740, 6748), True, 'import numpy as np\n'), ((6897, 6906), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (6903, 6906), True, 'import numpy as np\n'), ((12534, 12543), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (12540, 12543), True, 'import numpy as np\n'), ((13451, 13460), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (13457, 13460), True, 'import numpy as np\n'), ((14392, 14434), 'h5py.File', 'h5py.File', (['"""408Mhz_fitted_param.hdf5"""', '"""r"""'], {}), "('408Mhz_fitted_param.hdf5', 'r')\n", (14401, 14434), False, 'import h5py\n')] |
import numpy as np
arr = np.divide(1,2,3)
print(arr) | [
"numpy.divide"
] | [((25, 43), 'numpy.divide', 'np.divide', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (34, 43), True, 'import numpy as np\n')] |
import logging
import numpy as np
from .internal import Shape
DEFAULT_DIRECTIONAL_LIGHTS = (
[ 0.4, -0.4, -0.4 ],
[-0.25, -0.0625, -0.25 ],
[ 0, 0.125, -0.125]
)
logger = logging.getLogger(__name__)
class Scene:
"""A container to hold and display collections of primitives.
`Scene` keeps track of global information about a set of things to
be rendered and handles configuration of optional (possibly
backend-specific) rendering parameters.
Global information managed by a `Scene` includes the `size` of the
viewing window, `translation` and `rotation` applied to the scene
as a whole, and a `zoom` level.
Primitives can be added to a scene through the `primitives`
argument of the constructor or the `add_primitive`
method. Primitives can be retrieved by iterating over the scene::
for prim in scene:
# (do something with prim)
Optional rendering arguments are enabled as *features*, which are
name-value pairs identifying a feature by name and any
configuration of the feature in the value.
"""
def __init__(self, primitives=[], features={}, size=(40, 30),
translation=(0, 0, -50), rotation=(1, 0, 0, 0), zoom=1,
pixel_scale=20, **kwargs):
"""Initialize a `Scene` object.
:param primitives: List of primitives to include in the scene, or a single primitive
:param features: Dictionary mapping names of features to feature configuration options. Options can be single values (which will be converted to `dict(value=given_value)` or dicts.
:param size: Width and height, in scene units, of the viewport (before scaling by `zoom`)
:param translation: (x, y, z) translation to be applied to the scene as a whole after rotating. `x` is to the right, `y` is up, and `z` comes toward you out of the screen.
:param rotation: (r, x, y, z) rotation quaternion to be applied to the scene as a whole.
:param zoom: Zoom scaling factor to be applied to the scene.
:param pixel_scale: Number of pixels per scene unit length.
"""
# map each enabled feature's name to a configuration object
self._enabled_features = dict()
self._primitives = []
self._size = np.ones((2,), dtype=np.float32)
self._translation = np.zeros((3,), dtype=np.float32)
self._rotation = np.array([1, 0, 0, 0], dtype=np.float32)
self.pixel_scale = pixel_scale
self.size = size
self.zoom = zoom
self.translation = translation
self.rotation = rotation
if isinstance(primitives, Shape):
# Convert an individual primitive object to a list of primitives
primitives = [primitives]
for prim in primitives:
self.add_primitive(prim)
if 'directional_light' not in features:
self.enable('directional_light', DEFAULT_DIRECTIONAL_LIGHTS)
for feature in features:
config = features[feature]
if isinstance(config, dict):
if 'name' in config:
raise ValueError('Feature parameters can\'t be named "name"')
self.enable(feature, **config)
else:
self.enable(feature, value=config)
for name in kwargs:
setattr(self, name, kwargs[name])
def __iter__(self):
for prim in self._primitives:
yield prim
@property
def translation(self):
"""(x, y, z) translation to be applied to the scene as a whole after rotating.
`x` is to the right, `y` is up, and `z` comes toward you out
of the screen.
"""
return self._translation
@translation.setter
def translation(self, value):
self._translation[:] = value
for prim in self._primitives:
prim.translation = self._translation
@property
def rotation(self):
"""(r, x, y, z) rotation quaternion to be applied to the scene as a whole."""
return self._rotation
@rotation.setter
def rotation(self, value):
self._rotation[:] = value
for prim in self._primitives:
prim.rotation = self._rotation
if 'link_rotation' in self.enabled_features:
for target in self.get_feature_config('link_rotation')['targets']:
if target is not self and not np.allclose(target.rotation, value):
target.rotation = value
@property
def size(self):
"""Width and height, in scene units, of the viewport."""
return self._size
@size.setter
def size(self, value):
self._size[:] = value
@property
def size_pixels(self):
"""Width and height, in pixels, of the viewport."""
return self.size*self.pixel_scale
@size_pixels.setter
def size_pixels(self, value):
self.size = np.asarray(value, dtype=np.float32)/self.pixel_scale
def add_primitive(self, primitive):
"""Adds a primitive to the scene."""
self._primitives.append(primitive)
primitive.translation = self.translation
primitive.rotation = self.rotation
def remove_primitive(self, primitive, strict=True):
"""Removes a primitive from the scene.
:param primitive: primitive to (attempt to) remove
:param strict: If True, raise an IndexError if the primitive was not in the scene
"""
try:
self._primitives.remove(primitive)
except ValueError:
if strict:
raise
@property
def enabled_features(self):
return set(self._enabled_features)
def get_feature_config(self, name):
"""Return the configuration dictionary for a given feature.
If the feature has not been enabled, return None.
"""
if name in self._enabled_features:
return self._enabled_features[name]
else:
return None
def enable(self, name, auto_value=None, **parameters):
"""Enable an optional rendering feature.
:param name: Name of the feature to enable
:param auto_value: Shortcut for features with single-value configuration. If given as a positional argument, will be given the default configuration name 'value'.
:param parameters: Keyword arguments specifying additional configuration options for the given feature
"""
if auto_value is not None:
parameters['value'] = auto_value
if name == 'link_rotation':
targets = parameters.setdefault('targets', [])
if 'target' in parameters:
targets.append(parameters['target'])
if 'value' in parameters:
targets.append(parameters['value'])
for target in targets:
target.rotation = self._rotation
self._enabled_features[name] = dict(parameters)
def disable(self, name, strict=True):
"""Disable an optional rendering feature.
:param name: Name of the feature to disable
:param strict: if True, raise a KeyError if the feature was not enabled
"""
if not strict and name not in self._enabled_features:
return
if name == 'link_rotation':
targets = self.get_feature_config('link_rotation')['targets']
for target in targets:
target._rotation = target._rotation.copy()
del self._enabled_features[name]
def convert(self, backend, compatibility='warn'):
"""Convert this scene and all of its primitives to another backend.
:param backend: Backend plato.draw.* module to use in the new scene
:param compatibility: Behavior when unsupported primitives are encountered: 'warn', 'ignore', or 'error'
"""
backend_scene = backend.Scene(
features=self._enabled_features, size=self.size, translation=self.translation,
rotation=self.rotation, zoom=self.zoom, pixel_scale=self.pixel_scale)
for prim in self:
name = type(prim).__name__
try:
backend_cls = getattr(backend, name)
except AttributeError as e:
msg = 'Incompatible primitive {} for backend {}'.format(
name, backend)
if compatibility == 'warn':
logger.warning(msg)
elif compatibility == 'ignore':
continue
else:
raise TypeError(msg)
backend_scene.add_primitive(backend_cls.copy(prim))
return backend_scene
| [
"logging.getLogger",
"numpy.allclose",
"numpy.ones",
"numpy.asarray",
"numpy.array",
"numpy.zeros"
] | [((194, 221), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (211, 221), False, 'import logging\n'), ((2289, 2320), 'numpy.ones', 'np.ones', (['(2,)'], {'dtype': 'np.float32'}), '((2,), dtype=np.float32)\n', (2296, 2320), True, 'import numpy as np\n'), ((2349, 2381), 'numpy.zeros', 'np.zeros', (['(3,)'], {'dtype': 'np.float32'}), '((3,), dtype=np.float32)\n', (2357, 2381), True, 'import numpy as np\n'), ((2407, 2447), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {'dtype': 'np.float32'}), '([1, 0, 0, 0], dtype=np.float32)\n', (2415, 2447), True, 'import numpy as np\n'), ((4928, 4963), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'np.float32'}), '(value, dtype=np.float32)\n', (4938, 4963), True, 'import numpy as np\n'), ((4423, 4458), 'numpy.allclose', 'np.allclose', (['target.rotation', 'value'], {}), '(target.rotation, value)\n', (4434, 4458), True, 'import numpy as np\n')] |
"""
Created on Fri Aug 9 15:26:45 2019
@author: Bogdan
"""
import copy
import os
import sys
import numpy as np
project_path = os.getcwd()
while os.path.basename(project_path) != 'image-tinkering':
project_path = os.path.dirname(project_path)
sys.path.append(project_path)
from backend import utils
def split_channels(image, extra_inputs, parameters):
""" Splits an image into its channels and returns them.
Arguments:
*image* (NumPy array) -- the image to be split
*extra_inputs* (dictionary) -- a dictionary holding any extra inputs
for the call (empty)
*parameters* (dictionary) -- a dictionary containing following keys:
*spectrum* (str, optional) -- the spectrum in which the channels
will be represented; possible values are *grayscale* and *color*;
default value is *color*
Returns:
list of NumPy array uint8 -- list containing the channels of the image
"""
if utils.is_color(image):
b = image[:, :, 0]
g = image[:, :, 1]
r = image[:, :, 2]
if 'spectrum' in parameters:
spectrum = parameters['spectrum']
else:
spectrum = 'color'
if spectrum == 'color':
zeros = np.zeros((image.shape[:2]), dtype=np.uint8)
b = utils.merge_channels([b, zeros, zeros])
g = utils.merge_channels([zeros, g, zeros])
r = utils.merge_channels([zeros, zeros, r])
return [b, g, r]
return [image]
def remove_channels(image, extra_inputs, parameters):
""" Zeroes out channels from an image.
Arguments:
*image* (NumPy array) -- the image from which to remove channels
*extra_inputs* (dictionary) -- a dictionary holding any extra inputs
for the call (empty)
*parameters* (dictionary) -- a dictionary containing following keys:
*channel(s)* (str) -- the channel(s) to be removed from the image;
possible values are *red*, *green*, *blue*, *red & green*, *red &
blue*, *green & blue*
Returns:
list of NumPy array uint8 -- list containing the image having the
requested channels removed
"""
channels_information = parameters['channel(s)']
image_copy = copy.deepcopy(image)
if utils.is_color(image):
if '&' in channels_information:
# Zero out the two specified channels
if 'red' in channels_information:
image_copy[:, :, 2] = 0
if 'green' in channels_information:
image_copy[:, :, 1] = 0
if 'blue' in channels_information:
image_copy[:, :, 0] = 0
else:
# Zero out the specified channel
if channels_information == 'red':
image_copy[:, :, 2] = 0
elif channels_information == 'green':
image_copy[:, :, 1] = 0
else:
image_copy[:, :, 0] = 0
return [image_copy]
| [
"backend.utils.merge_channels",
"os.getcwd",
"os.path.dirname",
"numpy.zeros",
"os.path.basename",
"copy.deepcopy",
"sys.path.append",
"backend.utils.is_color"
] | [((128, 139), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (137, 139), False, 'import os\n'), ((248, 277), 'sys.path.append', 'sys.path.append', (['project_path'], {}), '(project_path)\n', (263, 277), False, 'import sys\n'), ((146, 176), 'os.path.basename', 'os.path.basename', (['project_path'], {}), '(project_path)\n', (162, 176), False, 'import os\n'), ((218, 247), 'os.path.dirname', 'os.path.dirname', (['project_path'], {}), '(project_path)\n', (233, 247), False, 'import os\n'), ((976, 997), 'backend.utils.is_color', 'utils.is_color', (['image'], {}), '(image)\n', (990, 997), False, 'from backend import utils\n'), ((2283, 2303), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (2296, 2303), False, 'import copy\n'), ((2312, 2333), 'backend.utils.is_color', 'utils.is_color', (['image'], {}), '(image)\n', (2326, 2333), False, 'from backend import utils\n'), ((1262, 1303), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': 'np.uint8'}), '(image.shape[:2], dtype=np.uint8)\n', (1270, 1303), True, 'import numpy as np\n'), ((1322, 1361), 'backend.utils.merge_channels', 'utils.merge_channels', (['[b, zeros, zeros]'], {}), '([b, zeros, zeros])\n', (1342, 1361), False, 'from backend import utils\n'), ((1378, 1417), 'backend.utils.merge_channels', 'utils.merge_channels', (['[zeros, g, zeros]'], {}), '([zeros, g, zeros])\n', (1398, 1417), False, 'from backend import utils\n'), ((1434, 1473), 'backend.utils.merge_channels', 'utils.merge_channels', (['[zeros, zeros, r]'], {}), '([zeros, zeros, r])\n', (1454, 1473), False, 'from backend import utils\n')] |
import math
import select
import struct
from socket import socket, gethostbyname, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR
from collections import deque
import numpy as np
from operator import xor
import cv2
import imutils
import time
import scipy.io as sio
from scipy import linalg
import math
from math import sin, cos
import matplotlib.pyplot as plt
import sympy
import scipy.io
from matplotlib import style
from IPython.display import display, Latex
from scipy.io import loadmat
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sympy.init_printing(use_latex=True)
import threading
import queue
from numpy.linalg import multi_dot
def current_time():
    """Return the current wall-clock time in seconds (wraps time.time()).

    Replaces the original lambda assignment (`current_time = lambda: ...`),
    which PEP 8 / flake8 E731 discourages; behavior is identical.
    """
    return time.time()
# Rotation matrix about x-axis
def rotx(theta):
    """Return the 3x3 rotation matrix for an angle `theta` (radians) about the x-axis."""
    c = math.cos(theta)
    s = math.sin(theta)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, c, -s],
                     [0.0, s, c]])
# Rotation matrix about y-axis
def roty(theta):
    """Return the 3x3 rotation matrix for an angle `theta` (radians) about the y-axis."""
    c = math.cos(theta)
    s = math.sin(theta)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
# Rotation matrix about z-axis
def rotz(theta):
    """Return the 3x3 rotation matrix for an angle `theta` (radians) about the z-axis."""
    c = math.cos(theta)
    s = math.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s, c, 0.0],
                     [0.0, 0.0, 1.0]])
# Homogeneous transformation matrix
def TransMat(R , t) :
    """Assemble a 4x4 homogeneous transform from a 3x3 rotation R and a 3-vector t."""
    rows = [[R[i][0], R[i][1], R[i][2], t[i]] for i in range(3)]
    rows.append([0.0, 0.0, 0.0, 1.0])
    return np.array(rows)
def draw(img, corners, imgpts):
    """Draw a coordinate-axis triad on `img` rooted at the first corner.

    corners : detected corner array; corners[0] is the axis origin.
    imgpts  : projected endpoints of the three 3-D axes (one per row).
    Returns the image with the three axis lines drawn.
    NOTE(review): cv2.line requires integer pixel coordinates — assumes
    corners/imgpts are integer-valued; confirm for newer OpenCV builds.
    """
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)  # first axis in blue (BGR order)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)  # second axis in green
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)  # third axis in red
    return img
# discrete xkp1=fk(xk,uk,w,L)
def fk(x,u,w,L):
    """Continuous-time state derivative of the pendulum model (xdot = fk(x, u, w, L)).

    x : state vector — appears to be [phi_x, phi_y, dphi_x, dphi_y, bias_x,
        bias_y] per the EKF caller's comments; TODO confirm.
    u : crane-tip accelerations [u[0], u[1]]
    w : frequency-like term; the EKF passes g/r here
    L : pendulum length
    Returns the 6-element derivative; the last two (bias) states have zero
    dynamics.
    """
    fk_out=np.array([x[2],
                      x[3],
                      (2*x[2]*x[3]*sin(x[1]) - w*sin(x[0]) + (u[1]*cos(x[0]))/L)/cos(x[1]),
                      - cos(x[1])*sin(x[1])*np.square(x[2]) - (u[0]*cos(x[1]) + u[1]*sin(x[0])*sin(x[1]))/L - w*cos(x[0])*sin(x[1]),
                      0,
                      0])
    return fk_out
# Transition matrix
def Fk(x,u,w,L,dt):
    """Discrete-time state-transition matrix for the EKF.

    Euler-discretised Jacobian of `fk` with respect to the state, evaluated
    at x: F = I + dt * d(fk)/dx.  Rows 5-6 are identity because the two bias
    states are modelled as constants.  Arguments mirror `fk`, plus the time
    step `dt`.
    """
    Fk_out = np.array([[ 1, 0, dt, 0,0,0],
                      [ 0, 1, 0, dt,0,0],
                      [ -(dt*(w*cos(x[0]) + (u[1]*sin(x[0]))/L))/cos(x[1]), 2*dt*x[2]*x[3] + (dt*sin(x[1])*(2*x[2]*x[3]*sin(x[1]) - w*sin(x[0]) + (u[1]*cos(x[0]))/L))/np.square(cos(x[1])) , (2*dt*x[3]*sin(x[1]))/cos(x[1]) + 1, (2*dt*x[2]*sin(x[1]))/cos(x[1]),0,0],
                      [ dt*(w*sin(x[0])*sin(x[1]) - (u[1]*cos(x[0])*sin(x[1]))/L), -dt*(np.square(x[2])*np.square(cos(x[1])) - np.square(x[2])*np.square(sin(x[1])) - (u[0]*sin(x[1]) - u[1]*cos(x[1])*sin(x[0]))/L + w*cos(x[0])*cos(x[1])), -2*dt*x[2]*cos(x[1])*sin(x[1]), 1,0,0],
                      [0,0,0,0,1,0],
                      [0,0,0,0,0,1]])
    return Fk_out
# Extended Kalman Filter
def EKF(Lvec,uk,hat_Pkm1,hat_thetakm1,theta,r,dt):
    """One predict/update cycle of the extended Kalman filter.

    Lvec         : unit direction vector of the pendulum wire (inertial frame)
    uk           : crane-tip accelerations [ddx, ddy]
    hat_Pkm1     : previous 6x6 state covariance
    hat_thetakm1 : previous state estimate (angles, rates, biases)
    theta        : NOTE(review): unused in this body (caller passes Obj.q1)
    r            : pendulum length estimate
    dt           : elapsed time since the previous update
    Returns (new state estimate, new covariance, raw angle measurement).
    """
    D = 10 # number of times to do repeated Euler's method
    g = 9.81 # gravity
    L = r # length of pendulum
    u = uk # acceleration of the crane tip
    x = hat_thetakm1 # estimated pendulum oscillation angles and rates, and bias of pendulum oscillation angles
    R = np.array([[ 0.00377597,-0.00210312],[-0.00210312,0.00125147]]) # Covariance matrix for measurement noise
    Q = np.diag([0.00003,0.00003,0.0005,0.0005,0.0001,0.0001]) # Covariance matrix for process noise
    H = np.array([[1,0,0,0,1,0],[0,1,0,0,0,1]]) # Observation matrix
    Fi = Fk(x,u,g/r,L,dt)
    zkp1 = np.array([math.atan2(-Lvec[1],Lvec[2]),math.atan2(Lvec[0],math.sqrt(np.square(Lvec[1])+np.square(Lvec[2])))]) # Measurement of payload oscillation angles
    # Repeated Euler's method (state prediction)
    # NOTE(review): the loop runs D-1 times with step dt/D, so it integrates
    # only (D-1)/D of the interval dt — confirm this is intended.
    for i in range(D-1):
        x=fk(x,u,g/r,L)*dt/D+x
    # Covariance prediction, then the standard EKF gain / update equations
    barP_kp1=multi_dot([Fi,hat_Pkm1,Fi.T])+Q
    K_kp1=multi_dot([barP_kp1,H.T,np.linalg.inv(R+multi_dot([H,barP_kp1,H.T]))])
    hat_thetak=x+np.dot(K_kp1,zkp1-np.dot(H,x))
    hat_Pk=np.dot((np.diag([1,1,1,1,1,1])-np.dot(K_kp1,H)),barP_kp1)
    return hat_thetak, hat_Pk, zkp1
# Direct linear triangulation
def DLT(c0,c1,c2):
    """Direct linear triangulation of one 3-D point from three camera views.

    c0, c1, c2 : (u, v) pixel coordinates of the same point in cameras 0/1/2.
    Uses the module-level 3x4 camera matrices P0, P1, P2.
    Returns the homogeneous 4-vector X, de-homogenised when the scale
    component is safely non-zero.
    """
    # Stack the six DLT constraint rows (two per camera) and take the SVD
    uL, sL, vL = linalg.svd(np.array([np.dot(c0[0],P0[2,:])-P0[0,:],np.dot(c0[1],P0[2,:])-P0[1,:],np.dot(c1[0],P1[2,:])-P1[0,:],np.dot(c1[1],P1[2,:])-P1[1,:],np.dot(c2[0],P2[2,:])-P2[0,:],np.dot(c2[1],P2[2,:])-P2[1,:]]))
    # scipy returns V^H; conjugate-transpose so columns are the right singular vectors
    vL=vL.T.conj()
    # Solution = right singular vector belonging to the smallest singular value
    X=vL[:,3]
    if np.absolute(vL[3][3]) > 0.000000000000000001:
        # Normalise so the homogeneous scale component is 1
        X = np.divide(vL[:,3],vL[3,3])
    else:
        pass
    return X
# Find direction vector of the line through the spheres, given in camera coordinates
def FindLine(center01,center02,center11,center12,center21,center22):
    """Direction vector (camera-0 frame) of the line through the two spheres.

    Triangulates each sphere from its three pixel observations and returns
    the 3-D difference of the two triangulated points.
    """
    lower_sphere = DLT(center01, center11, center21)
    upper_sphere = DLT(center02, center12, center22)
    return upper_sphere[0:3] - lower_sphere[0:3]
# Class of parameters that are transfered between MATLAB and Python ov UDP
class ParamSendRecieve(object):
    """Mutable bundle of values exchanged with MATLAB over UDP."""

    def __init__(self):
        self.Lhat = 0.5   # pendulum length estimate
        self.q1 = 0.37    # slew joint angle
        self.ddx = 0.0    # crane-tip acceleration, x-direction
        self.ddy = 0.0    # crane-tip acceleration, y-direction
        # eight-float payload streamed to MATLAB (state estimate + measurements)
        self.senddata = np.zeros(8)
# Class for storing data
class StoreData(object):
    """Accumulates time series of measurements and EKF estimates (degrees)."""

    def __init__(self):
        # estimated oscillation angles and rates
        self.hat_phix = []
        self.hat_phiy = []
        self.hat_dphix = []
        self.hat_dphiy = []
        self._dphix = []
        self._dphiy = []
        self.xList = []
        # raw angle measurements
        self.yList = []
        self.yList2 = []
        self.List_phix = 0
        self.List_phiy = 0
        # absolute and per-step timestamps
        self.tid = []
        self.delta_tid = []
        # estimated angle biases
        self.bias_x = []
        self.bias_y = []

    def update(self, z, Phi, start, tk):
        """Append one sample: z = measured angles (rad), Phi = EKF state (rad)."""
        self.tid.append(current_time() - start)
        self.delta_tid.append(current_time() - tk)
        series = ((self.yList, z[0]),
                  (self.yList2, z[1]),
                  (self.hat_phix, Phi[0]),
                  (self.hat_phiy, Phi[1]),
                  (self.hat_dphix, Phi[2]),
                  (self.hat_dphiy, Phi[3]),
                  (self.bias_x, Phi[4]),
                  (self.bias_y, Phi[5]))
        for bucket, radians in series:
            bucket.append(math.degrees(radians))
# Downscale resolution of the cameras
def cam1_set720p():
    """Set camera 1 capture resolution to 1280x720 (property ids 3/4 are
    CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT)."""
    vs1.set(3, 1280)
    vs1.set(4, 720)
def cam2_set720p():
    """Set camera 2 capture resolution to 1280x720 (property ids 3/4 are
    CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT)."""
    vs2.set(3, 1280)
    vs2.set(4, 720)
def cam3_set720p():
    """Set camera 3 capture resolution to 1280x720 (property ids 3/4 are
    CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT)."""
    vs3.set(3, 1280)
    vs3.set(4, 720)
# Find pixel coordinates that corresponds to the spheres
def FindCenter(frame1,c1,c2):
    """Locate the pixel centroids of the two coloured spheres in `frame1`.

    c1, c2 : fallback centroids (typically the previous frame's result) used
             when detection fails or a moment is degenerate.
    Returns (center01, center02) ordered so that center01 has the smaller
    y-coordinate (is higher in the image).
    """
    # HSV colourspace parameters (thresholds select the sphere colour)
    v1_min = 43
    v2_min = 54
    v3_min = 86
    v1_max = 73
    v2_max = 250
    v3_max = 255
    blur_param = 5
    #Gaussian blurred image
    blur1 = cv2.GaussianBlur(frame1, (3+(2*blur_param-2), 3+(2*blur_param-2)), 0)
    #Convert from RGB to HSV
    hsv1 = cv2.cvtColor(blur1, cv2.COLOR_BGR2HSV)
    #Binary image based on colours
    mask1 = cv2.inRange(hsv1, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))
    #Find objects in the image
    cnts1 = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #Cancel if none objects
    # NOTE(review): OpenCV 4 returns (contours, hierarchy), with contours at
    # index 0 — this index choice assumes OpenCV 3; verify the installed cv2.
    cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]
    if len(cnts1) > 1:
        # Sort objects by area, keep the five largest
        cnts1 = sorted(cnts1, key=cv2.contourArea, reverse=True)[:5]
        #The two objects corresponding to the spheres
        c01 = cnts1[0]
        c02 = cnts1[1]
        #Calculate the moments of the spheres
        M01 = cv2.moments(c01)
        M02 = cv2.moments(c02)
        #Calculate the centroids of the spheres; fall back when the area is ~0
        if M01["m00"] < 0.000001:
            center01 = c1
        else:
            center01 = (float(M01["m10"] / M01["m00"]), float(M01["m01"] / M01["m00"]))
        if M02["m00"] < 0.000001:
            center02 = c2
        else:
            center02 = (float(M02["m10"] / M02["m00"]), float(M02["m01"] / M02["m00"]))
    else:
        center01 = c1
        center02 = c2
    # Order the pair so center01 is the upper sphere (smaller pixel y)
    if center01[1] > center02[1]:
        tmp = center01
        center01 = center02
        center02 = tmp
    else:
        pass
    return center01, center02 # returns the two pixels that corresponds to the two spheres
# Shared state: written by fnFromMATLAB and the main loop, read by fnToMATLAB.
Obj=ParamSendRecieve()
# object for sending messages to MATLAB
def fnToMATLAB(msgToMatlab, *args):
    """Stream Obj.senddata to MATLAB over UDP (~12.5 Hz) until the module
    flag `stop` is set.

    msgToMatlab/*args are accepted for thread-start compatibility but unused.

    Fix: the module does `from socket import socket, AF_INET, ...`, so the
    name `socket` is the class itself — the original
    `socket.socket(socket.AF_INET, ...)` raised AttributeError at runtime.
    Use the imported names directly.
    """
    cs = socket(AF_INET, SOCK_DGRAM)
    cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    ti = current_time()
    while stop < 1:
        # Send the latest packed state (raw float64 bytes of the numpy array)
        cs.sendto(Obj.senddata, ('127.0.0.1', 5001))
        # Pace the loop at roughly 0.08 s per send
        if 0.08 - current_time() + ti < 0:
            ti = current_time()
        else:
            time.sleep(0.08 - current_time() + ti)
            ti = current_time()
    cs.close()
# object for reading messages from MATLAB
def fnFromMATLAB(msgfromMatlab, *args):
    """Receive 32-byte UDP packets from MATLAB on port 5002 until `stop` is set.

    Each packet carries four native-endian float64 values — pendulum length
    estimate, slew angle, and crane-tip accelerations — stored on the shared
    `Obj` as one-element numpy arrays, as before.

    Fix: the module does `from socket import socket, AF_INET, SOCK_DGRAM`, so
    `socket` names the class; the original `socket.socket(socket.AF_INET, ...)`
    raised AttributeError at runtime.
    """
    s = socket(AF_INET, SOCK_DGRAM)
    s.bind(('', 5002))
    while stop < 1:
        data, addr = s.recvfrom(32)
        # Unpack all four float64 fields in one call instead of four slices
        Lhat, q1, ddx, ddy = struct.unpack('dddd', data)
        Obj.Lhat = np.array([Lhat])
        Obj.q1 = np.array([q1])
        Obj.ddx = np.array([ddx])
        Obj.ddy = np.array([ddy])
        time.sleep(0.002)
    s.close()
# Declare messages for sending and recieving over UDP
# LIFO queues shared with the UDP threads (largely unused by the thread bodies)
msgfromMatlab = queue.LifoQueue()
msgfromMatlab.put(0.0)
msgToMatlab = queue.LifoQueue()
msgToMatlab.put(np.array([0.0,0.0,0.0,0.0,0.0,0.0]))
# Global run flag read by both UDP threads; set to 1 below to shut them down
stop =0.0
# Constructors for calling the objects for sending and recieving messages over UDP
threadsend=threading.Thread(target=fnToMATLAB, args=(msgToMatlab, 1))
threadread=threading.Thread(target=fnFromMATLAB, args=(msgfromMatlab,1))
threadsend.start()
threadread.start()
# Declare camera objects
vs1 = cv2.VideoCapture(0)
vs2 = cv2.VideoCapture(2)
vs3 = cv2.VideoCapture(1)
cam1_set720p()
cam2_set720p()
cam3_set720p()
# Camera calibration matrices (intrinsics: focal lengths and principal points)
K0=np.array([[ 937,0 , 637.21],
           [0 ,937, 381.54],
           [0 , 0, 1.0]])
K1=np.array([[ 941,0 , 637.21],
           [0 ,941, 349.9],
           [0 , 0, 1.0]])
K2=np.array([[ 942,0 , 623.66],
           [0 ,942, 345.69],
           [0 , 0, 1.0]])
# Translation vectors between cameras (baselines along x; units presumably mm — TODO confirm)
t_21=-np.array([ 233.8, 0, 0])
t_31 = -np.array([ 467, 0,0])
PII=np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]])
# Camera matrices P = K [I | 0] T (camera 0 is the reference frame)
P0=np.dot(np.dot(K0,PII),TransMat(np.eye(3),np.array([0,0,0])))
P1=np.dot(np.dot(K1,PII),TransMat(np.eye(3),t_21))
P2=np.dot(np.dot(K2,PII),TransMat(np.eye(3),t_31))
cv2.namedWindow("Camera 1",cv2.WINDOW_NORMAL)
cv2.resizeWindow("Camera 1", 768, 432)
#init pixels and states for EKF
center01 = (0, 0)
center02 = (0, 0)
center11 = (0, 0)
center12 = (0, 0)
center21 = (0, 0)
center22 = (0, 0)
hat_Pkm1 = np.diag([0,0,0,0,0,0])
hat_thetakm1 = np.array([0,0,0,0,2.5*math.pi/180,-1.7*math.pi/180])
#Declare store data object
StoredData=StoreData()
# sleep for 2 seconds (start-up cameras)
time.sleep(2.0)
start = current_time()
tk = start
while 1:
    # Read images from the cameras
    frame1 = vs1.read()
    frame2 = vs2.read()
    frame3 = vs3.read()
    # VideoCapture.read returns (ok, frame); keep the frame only
    frame1 = frame1[1]
    frame2 = frame2[1]
    frame3 = frame3[1]
    if frame1 is None:
        break
    if frame2 is None:
        break
    if frame3 is None:
        break
    # Find pixels that corresponds to the spheres
    center01, center02 = FindCenter(frame1,center01,center02)
    center11, center12 = FindCenter(frame2,center11,center12)
    center21, center22 = FindCenter(frame3,center21,center22)
    # find direction vector of a line through the spheres, given in camera coordinates
    Lc0 = FindLine(center01,center02,center11,center12,center21,center22)
    # find direction vector of a line through the spheres, given in inertial coordinates
    # (rotate by the crane slew angle q1)
    Lvec=np.dot(np.array([[ -cos(Obj.q1), 0, sin(Obj.q1)],[sin(Obj.q1), 0, cos(Obj.q1)],[0, 1, 0]]),Lc0)
    if linalg.norm(Lvec) > 0.00001:
        Lvec = Lvec/linalg.norm(Lvec)
    # Extended Kalman Filter
    hat_thetak, hat_Pk, zk = EKF(Lvec,np.array([Obj.ddx,Obj.ddy]),hat_Pkm1,hat_thetakm1,Obj.q1, Obj.Lhat,current_time()-tk)
    # Collect estimated payload oscillation angles and rates, bias of payload oscillation angle, and measurement of payload oscillation angle
    Obj.senddata = np.array([hat_thetak[0],hat_thetak[1],hat_thetak[2],hat_thetak[3],hat_thetak[4],hat_thetak[5],zk[0],zk[1]])
    msgToMatlab.put(hat_thetak)
    # NOTE(review): the next line is a bare expression with no effect
    Obj.senddata
    hat_thetakm1 = hat_thetak
    hat_Pkm1 = hat_Pk
    StoredData.update(zk,hat_thetak,start,tk)
    tk = current_time()
    # NOTE(review): waitKey without a matching imshow may never see key
    # presses on some platforms — confirm 'q'-to-quit actually works.
    k = cv2.waitKey(1) & 0xFF
    if k == ord('q'):
        break
# Signal both UDP threads to exit (they poll the module-level `stop`)
stop=1
vs1.release()
vs2.release()
vs3.release()
cv2.destroyAllWindows()
| [
"numpy.linalg.multi_dot",
"imutils.is_cv2",
"time.sleep",
"math.cos",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.divide",
"cv2.resizeWindow",
"queue.LifoQueue",
"numpy.dot",
"cv2.waitKey",
"numpy.eye",
"math.degrees",
"sympy.init_printing",
"numpy.square",
"struct.unpack",
"math.... | [((583, 618), 'sympy.init_printing', 'sympy.init_printing', ([], {'use_latex': '(True)'}), '(use_latex=True)\n', (602, 618), False, 'import sympy\n'), ((10891, 10908), 'queue.LifoQueue', 'queue.LifoQueue', ([], {}), '()\n', (10906, 10908), False, 'import queue\n'), ((10950, 10967), 'queue.LifoQueue', 'queue.LifoQueue', ([], {}), '()\n', (10965, 10967), False, 'import queue\n'), ((11131, 11189), 'threading.Thread', 'threading.Thread', ([], {'target': 'fnToMATLAB', 'args': '(msgToMatlab, 1)'}), '(target=fnToMATLAB, args=(msgToMatlab, 1))\n', (11147, 11189), False, 'import threading\n'), ((11202, 11264), 'threading.Thread', 'threading.Thread', ([], {'target': 'fnFromMATLAB', 'args': '(msgfromMatlab, 1)'}), '(target=fnFromMATLAB, args=(msgfromMatlab, 1))\n', (11218, 11264), False, 'import threading\n'), ((11347, 11366), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (11363, 11366), False, 'import cv2\n'), ((11374, 11393), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(2)'], {}), '(2)\n', (11390, 11393), False, 'import cv2\n'), ((11401, 11420), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (11417, 11420), False, 'import cv2\n'), ((11518, 11577), 'numpy.array', 'np.array', (['[[937, 0, 637.21], [0, 937, 381.54], [0, 0, 1.0]]'], {}), '([[937, 0, 637.21], [0, 937, 381.54], [0, 0, 1.0]])\n', (11526, 11577), True, 'import numpy as np\n'), ((11617, 11675), 'numpy.array', 'np.array', (['[[941, 0, 637.21], [0, 941, 349.9], [0, 0, 1.0]]'], {}), '([[941, 0, 637.21], [0, 941, 349.9], [0, 0, 1.0]])\n', (11625, 11675), True, 'import numpy as np\n'), ((11720, 11779), 'numpy.array', 'np.array', (['[[942, 0, 623.66], [0, 942, 345.69], [0, 0, 1.0]]'], {}), '([[942, 0, 623.66], [0, 942, 345.69], [0, 0, 1.0]])\n', (11728, 11779), True, 'import numpy as np\n'), ((11933, 11985), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])\n', (11941, 11985), True, 'import numpy as 
np\n'), ((12168, 12214), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Camera 1"""', 'cv2.WINDOW_NORMAL'], {}), "('Camera 1', cv2.WINDOW_NORMAL)\n", (12183, 12214), False, 'import cv2\n'), ((12215, 12253), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""Camera 1"""', '(768)', '(432)'], {}), "('Camera 1', 768, 432)\n", (12231, 12253), False, 'import cv2\n'), ((12417, 12444), 'numpy.diag', 'np.diag', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (12424, 12444), True, 'import numpy as np\n'), ((12456, 12521), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 2.5 * math.pi / 180, -1.7 * math.pi / 180]'], {}), '([0, 0, 0, 0, 2.5 * math.pi / 180, -1.7 * math.pi / 180])\n', (12464, 12521), True, 'import numpy as np\n'), ((12608, 12623), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (12618, 12623), False, 'import time\n'), ((14463, 14486), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14484, 14486), False, 'import cv2\n'), ((712, 723), 'time.time', 'time.time', ([], {}), '()\n', (721, 723), False, 'import time\n'), ((1785, 1927), 'numpy.array', 'np.array', (['[[R[0][0], R[0][1], R[0][2], t[0]], [R[1][0], R[1][1], R[1][2], t[1]], [R[2\n ][0], R[2][1], R[2][2], t[2]], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[R[0][0], R[0][1], R[0][2], t[0]], [R[1][0], R[1][1], R[1][2], t[\n 1]], [R[2][0], R[2][1], R[2][2], t[2]], [0.0, 0.0, 0.0, 1.0]])\n', (1793, 1927), True, 'import numpy as np\n'), ((4542, 4606), 'numpy.array', 'np.array', (['[[0.00377597, -0.00210312], [-0.00210312, 0.00125147]]'], {}), '([[0.00377597, -0.00210312], [-0.00210312, 0.00125147]])\n', (4550, 4606), True, 'import numpy as np\n'), ((4656, 4711), 'numpy.diag', 'np.diag', (['[3e-05, 3e-05, 0.0005, 0.0005, 0.0001, 0.0001]'], {}), '([3e-05, 3e-05, 0.0005, 0.0005, 0.0001, 0.0001])\n', (4663, 4711), True, 'import numpy as np\n'), ((4758, 4808), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1]])\n', (4766, 4808), True, 
'import numpy as np\n'), ((8268, 8354), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame1', '(3 + (2 * blur_param - 2), 3 + (2 * blur_param - 2))', '(0)'], {}), '(frame1, (3 + (2 * blur_param - 2), 3 + (2 * blur_param - 2\n )), 0)\n', (8284, 8354), False, 'import cv2\n'), ((8382, 8420), 'cv2.cvtColor', 'cv2.cvtColor', (['blur1', 'cv2.COLOR_BGR2HSV'], {}), '(blur1, cv2.COLOR_BGR2HSV)\n', (8394, 8420), False, 'import cv2\n'), ((8470, 8539), 'cv2.inRange', 'cv2.inRange', (['hsv1', '(v1_min, v2_min, v3_min)', '(v1_max, v2_max, v3_max)'], {}), '(hsv1, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))\n', (8481, 8539), False, 'import cv2\n'), ((9901, 9949), 'socket.socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (9914, 9949), False, 'from socket import socket, gethostbyname, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR\n'), ((10389, 10437), 'socket.socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (10402, 10437), False, 'from socket import socket, gethostbyname, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR\n'), ((10985, 11025), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (10993, 11025), True, 'import numpy as np\n'), ((11872, 11895), 'numpy.array', 'np.array', (['[233.8, 0, 0]'], {}), '([233.8, 0, 0])\n', (11880, 11895), True, 'import numpy as np\n'), ((11906, 11927), 'numpy.array', 'np.array', (['[467, 0, 0]'], {}), '([467, 0, 0])\n', (11914, 11927), True, 'import numpy as np\n'), ((12007, 12022), 'numpy.dot', 'np.dot', (['K0', 'PII'], {}), '(K0, PII)\n', (12013, 12022), True, 'import numpy as np\n'), ((12072, 12087), 'numpy.dot', 'np.dot', (['K1', 'PII'], {}), '(K1, PII)\n', (12078, 12087), True, 'import numpy as np\n'), ((12124, 12139), 'numpy.dot', 'np.dot', (['K2', 'PII'], {}), '(K2, PII)\n', (12130, 12139), True, 'import numpy as np\n'), ((14010, 14128), 
'numpy.array', 'np.array', (['[hat_thetak[0], hat_thetak[1], hat_thetak[2], hat_thetak[3], hat_thetak[4],\n hat_thetak[5], zk[0], zk[1]]'], {}), '([hat_thetak[0], hat_thetak[1], hat_thetak[2], hat_thetak[3],\n hat_thetak[4], hat_thetak[5], zk[0], zk[1]])\n', (14018, 14128), True, 'import numpy as np\n'), ((5121, 5152), 'numpy.linalg.multi_dot', 'multi_dot', (['[Fi, hat_Pkm1, Fi.T]'], {}), '([Fi, hat_Pkm1, Fi.T])\n', (5130, 5152), False, 'from numpy.linalg import multi_dot\n'), ((5727, 5748), 'numpy.absolute', 'np.absolute', (['vL[3][3]'], {}), '(vL[3][3])\n', (5738, 5748), True, 'import numpy as np\n'), ((5786, 5815), 'numpy.divide', 'np.divide', (['vL[:, 3]', 'vL[3, 3]'], {}), '(vL[:, 3], vL[3, 3])\n', (5795, 5815), True, 'import numpy as np\n'), ((6505, 6555), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (6513, 6555), True, 'import numpy as np\n'), ((8714, 8730), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (8728, 8730), False, 'import imutils\n'), ((9029, 9045), 'cv2.moments', 'cv2.moments', (['c01'], {}), '(c01)\n', (9040, 9045), False, 'import cv2\n'), ((9061, 9077), 'cv2.moments', 'cv2.moments', (['c02'], {}), '(c02)\n', (9072, 9077), False, 'import cv2\n'), ((10782, 10799), 'time.sleep', 'time.sleep', (['(0.002)'], {}), '(0.002)\n', (10792, 10799), False, 'import time\n'), ((12031, 12040), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (12037, 12040), True, 'import numpy as np\n'), ((12041, 12060), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (12049, 12060), True, 'import numpy as np\n'), ((12096, 12105), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (12102, 12105), True, 'import numpy as np\n'), ((12148, 12157), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (12154, 12157), True, 'import numpy as np\n'), ((13622, 13639), 'scipy.linalg.norm', 'linalg.norm', (['Lvec'], {}), '(Lvec)\n', (13633, 13639), False, 'from scipy import linalg\n'), ((13761, 
13789), 'numpy.array', 'np.array', (['[Obj.ddx, Obj.ddy]'], {}), '([Obj.ddx, Obj.ddy])\n', (13769, 13789), True, 'import numpy as np\n'), ((14313, 14327), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14324, 14327), False, 'import cv2\n'), ((4874, 4903), 'math.atan2', 'math.atan2', (['(-Lvec[1])', 'Lvec[2]'], {}), '(-Lvec[1], Lvec[2])\n', (4884, 4903), False, 'import math\n'), ((5304, 5331), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (5311, 5331), True, 'import numpy as np\n'), ((5327, 5343), 'numpy.dot', 'np.dot', (['K_kp1', 'H'], {}), '(K_kp1, H)\n', (5333, 5343), True, 'import numpy as np\n'), ((7338, 7356), 'math.degrees', 'math.degrees', (['z[0]'], {}), '(z[0])\n', (7350, 7356), False, 'import math\n'), ((7386, 7404), 'math.degrees', 'math.degrees', (['z[1]'], {}), '(z[1])\n', (7398, 7404), False, 'import math\n'), ((7436, 7456), 'math.degrees', 'math.degrees', (['Phi[0]'], {}), '(Phi[0])\n', (7448, 7456), False, 'import math\n'), ((7488, 7508), 'math.degrees', 'math.degrees', (['Phi[1]'], {}), '(Phi[1])\n', (7500, 7508), False, 'import math\n'), ((7541, 7561), 'math.degrees', 'math.degrees', (['Phi[2]'], {}), '(Phi[2])\n', (7553, 7561), False, 'import math\n'), ((7594, 7614), 'math.degrees', 'math.degrees', (['Phi[3]'], {}), '(Phi[3])\n', (7606, 7614), False, 'import math\n'), ((7644, 7664), 'math.degrees', 'math.degrees', (['Phi[4]'], {}), '(Phi[4])\n', (7656, 7664), False, 'import math\n'), ((7694, 7714), 'math.degrees', 'math.degrees', (['Phi[5]'], {}), '(Phi[5])\n', (7706, 7714), False, 'import math\n'), ((13672, 13689), 'scipy.linalg.norm', 'linalg.norm', (['Lvec'], {}), '(Lvec)\n', (13683, 13689), False, 'from scipy import linalg\n'), ((893, 908), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (901, 908), False, 'import math\n'), ((963, 978), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (971, 978), False, 'import math\n'), ((980, 995), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', 
(988, 995), False, 'import math\n'), ((1131, 1146), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (1139, 1146), False, 'import math\n'), ((1159, 1174), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (1167, 1174), False, 'import math\n'), ((1305, 1320), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (1313, 1320), False, 'import math\n'), ((1467, 1482), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (1475, 1482), False, 'import math\n'), ((1534, 1549), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (1542, 1549), False, 'import math\n'), ((1554, 1569), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (1562, 1569), False, 'import math\n'), ((2554, 2563), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (2557, 2563), False, 'from math import sin, cos\n'), ((5271, 5283), 'numpy.dot', 'np.dot', (['H', 'x'], {}), '(H, x)\n', (5277, 5283), True, 'import numpy as np\n'), ((911, 926), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (919, 926), False, 'import math\n'), ((1278, 1293), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (1286, 1293), False, 'import math\n'), ((1488, 1503), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (1496, 1503), False, 'import math\n'), ((2687, 2696), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (2690, 2696), False, 'from math import sin, cos\n'), ((3518, 3527), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3521, 3527), False, 'from math import sin, cos\n'), ((3748, 3757), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3751, 3757), False, 'from math import sin, cos\n'), ((4035, 4044), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (4038, 4044), False, 'from math import sin, cos\n'), ((5204, 5233), 'numpy.linalg.multi_dot', 'multi_dot', (['[H, barP_kp1, H.T]'], {}), '([H, barP_kp1, H.T])\n', (5213, 5233), False, 'from numpy.linalg import multi_dot\n'), ((5501, 5524), 'numpy.dot', 'np.dot', (['c0[0]', 'P0[2, :]'], {}), '(c0[0], P0[2, :])\n', (5507, 5524), True, 'import numpy as np\n'), 
((5531, 5554), 'numpy.dot', 'np.dot', (['c0[1]', 'P0[2, :]'], {}), '(c0[1], P0[2, :])\n', (5537, 5554), True, 'import numpy as np\n'), ((5561, 5584), 'numpy.dot', 'np.dot', (['c1[0]', 'P1[2, :]'], {}), '(c1[0], P1[2, :])\n', (5567, 5584), True, 'import numpy as np\n'), ((5591, 5614), 'numpy.dot', 'np.dot', (['c1[1]', 'P1[2, :]'], {}), '(c1[1], P1[2, :])\n', (5597, 5614), True, 'import numpy as np\n'), ((5621, 5644), 'numpy.dot', 'np.dot', (['c2[0]', 'P2[2, :]'], {}), '(c2[0], P2[2, :])\n', (5627, 5644), True, 'import numpy as np\n'), ((5651, 5674), 'numpy.dot', 'np.dot', (['c2[1]', 'P2[2, :]'], {}), '(c2[1], P2[2, :])\n', (5657, 5674), True, 'import numpy as np\n'), ((10548, 10577), 'struct.unpack', 'struct.unpack', (['"""d"""', 'data[0:8]'], {}), "('d', data[0:8])\n", (10561, 10577), False, 'import struct\n'), ((10609, 10639), 'struct.unpack', 'struct.unpack', (['"""d"""', 'data[8:16]'], {}), "('d', data[8:16])\n", (10622, 10639), False, 'import struct\n'), ((10672, 10703), 'struct.unpack', 'struct.unpack', (['"""d"""', 'data[16:24]'], {}), "('d', data[16:24])\n", (10685, 10703), False, 'import struct\n'), ((10736, 10767), 'struct.unpack', 'struct.unpack', (['"""d"""', 'data[24:32]'], {}), "('d', data[24:32])\n", (10749, 10767), False, 'import struct\n'), ((13542, 13553), 'math.sin', 'sin', (['Obj.q1'], {}), '(Obj.q1)\n', (13545, 13553), False, 'from math import sin, cos\n'), ((13556, 13567), 'math.sin', 'sin', (['Obj.q1'], {}), '(Obj.q1)\n', (13559, 13567), False, 'from math import sin, cos\n'), ((13572, 13583), 'math.cos', 'cos', (['Obj.q1'], {}), '(Obj.q1)\n', (13575, 13583), False, 'from math import sin, cos\n'), ((2609, 2624), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (2618, 2624), True, 'import numpy as np\n'), ((2677, 2686), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (2680, 2686), False, 'from math import sin, cos\n'), ((3711, 3720), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3714, 3720), False, 'from math import sin, cos\n'), 
((3737, 3746), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3740, 3746), False, 'from math import sin, cos\n'), ((4025, 4034), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (4028, 4034), False, 'from math import sin, cos\n'), ((4932, 4950), 'numpy.square', 'np.square', (['Lvec[1]'], {}), '(Lvec[1])\n', (4941, 4950), True, 'import numpy as np\n'), ((4951, 4969), 'numpy.square', 'np.square', (['Lvec[2]'], {}), '(Lvec[2])\n', (4960, 4969), True, 'import numpy as np\n'), ((13526, 13537), 'math.cos', 'cos', (['Obj.q1'], {}), '(Obj.q1)\n', (13529, 13537), False, 'from math import sin, cos\n'), ((2508, 2517), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (2511, 2517), False, 'from math import sin, cos\n'), ((2522, 2531), 'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (2525, 2531), False, 'from math import sin, cos\n'), ((2540, 2549), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (2543, 2549), False, 'from math import sin, cos\n'), ((2599, 2608), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (2602, 2608), False, 'from math import sin, cos\n'), ((3643, 3652), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3646, 3652), False, 'from math import sin, cos\n'), ((3700, 3709), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3703, 3709), False, 'from math import sin, cos\n'), ((3806, 3815), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3809, 3815), False, 'from math import sin, cos\n'), ((3992, 4001), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3995, 4001), False, 'from math import sin, cos\n'), ((2589, 2598), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (2592, 2598), False, 'from math import sin, cos\n'), ((2633, 2642), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (2636, 2642), False, 'from math import sin, cos\n'), ((2660, 2669), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (2663, 2669), False, 'from math import sin, cos\n'), ((3563, 3572), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3566, 3572), False, 'from math import sin, cos\n'), ((3796, 3805), 
'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (3799, 3805), False, 'from math import sin, cos\n'), ((3834, 3843), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3837, 3843), False, 'from math import sin, cos\n'), ((3982, 3991), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (3985, 3991), False, 'from math import sin, cos\n'), ((2650, 2659), 'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (2653, 2659), False, 'from math import sin, cos\n'), ((3485, 3494), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (3488, 3494), False, 'from math import sin, cos\n'), ((3824, 3833), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (3827, 3833), False, 'from math import sin, cos\n'), ((3854, 3869), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (3863, 3869), True, 'import numpy as np\n'), ((3893, 3908), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (3902, 3908), True, 'import numpy as np\n'), ((3503, 3512), 'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (3506, 3512), False, 'from math import sin, cos\n'), ((3586, 3595), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3589, 3595), False, 'from math import sin, cos\n'), ((3600, 3609), 'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (3603, 3609), False, 'from math import sin, cos\n'), ((3618, 3627), 'math.cos', 'cos', (['x[0]'], {}), '(x[0])\n', (3621, 3627), False, 'from math import sin, cos\n'), ((3880, 3889), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3883, 3889), False, 'from math import sin, cos\n'), ((3919, 3928), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3922, 3928), False, 'from math import sin, cos\n'), ((3938, 3947), 'math.sin', 'sin', (['x[1]'], {}), '(x[1])\n', (3941, 3947), False, 'from math import sin, cos\n'), ((3965, 3974), 'math.sin', 'sin', (['x[0]'], {}), '(x[0])\n', (3968, 3974), False, 'from math import sin, cos\n'), ((3955, 3964), 'math.cos', 'cos', (['x[1]'], {}), '(x[1])\n', (3958, 3964), False, 'from math import sin, cos\n')] |
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import warnings
import sys
import matplotlib.pyplot as plt
from tqdm import tqdm
asteroid = sys.argv[1]
d2r = np.pi / 180.
r2d = 180. / np.pi
def _p1(x):
return np.exp(-90.56*np.square(np.tan(x/2)))*(1 - (0.986*np.sin(x))/(0.119 + 1.341*np.sin(x) - 0.754*np.square(np.sin(x)))) + (1-np.exp(-90.56*np.square(np.tan(x/2))))*np.exp(-3.332*np.power(np.tan(1/2 * x), 0.631))
def _p2(x):
return np.exp(-90.56*np.square(np.tan(x/2)))*(1 - (0.238*np.sin(x))/(0.119 + 1.341*np.sin(x) - 0.754*np.square(np.sin(x)))) + (1-np.exp(-90.56*np.square(np.tan(x/2))))*np.exp(-1.862*np.power(np.tan(1/2 * x), 1.218))
def HG(alpha, H, G):
    """Reduced magnitude predicted by the H,G phase model at phase angle alpha (rad)."""
    phi_mix = (1 - G) * _p1(alpha) + G * _p2(alpha)
    return H - 2.5 * np.log10(phi_mix)
# Monte-Carlo model photometry for the asteroid given on the command line
C_type = pd.read_csv(f'{asteroid}-C_type_models.txt', delimiter='\t', names=['ModelNo', 'PhaseAngle', 'ReducedMag'])
S_type = pd.read_csv(f'{asteroid}-S_type_models.txt', delimiter='\t', names=['ModelNo', 'PhaseAngle', 'ReducedMag'])
model_numbers_C = C_type.ModelNo.unique()
model_numbers_S = S_type.ModelNo.unique()
# Per-observation magnitude uncertainties (column 7 of the geometry file)
uncertainties = pd.read_csv(f'{asteroid}-geom.txt', delimiter='\t', header=None)[6].values
C_H_arr = np.zeros(shape=len(model_numbers_C))
C_G_arr = np.zeros(shape=len(model_numbers_C))
x_plot_range = np.linspace(0, C_type.PhaseAngle.max(), 100)
failed_models_C = []
# Fit H and G to every C-type model curve (warnings from curve_fit suppressed)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    for i in tqdm(model_numbers_C):
        failed_to_fit = False
        model_data = C_type[C_type.ModelNo == i]
        try:
            (H, G), _ = curve_fit(HG, model_data.PhaseAngle.values, model_data.ReducedMag.values, sigma=uncertainties, absolute_sigma=True, bounds=([0, -1], [30, 1]))
        except RuntimeError:
            print(f"Model {i} could not fit. Skipping.")
            failed_to_fit = True
            failed_models_C.append(i)
        if not failed_to_fit:
            # NOTE(review): indexing by the model number assumes ModelNo
            # values are exactly 0..N-1 — TODO confirm.
            C_H_arr[i], C_G_arr[i] = H, G
# NOTE(review): these stds are computed while failed models still hold 0.0,
# and before the (ineffective) np.delete calls below — they include the zeros.
C_H_std = np.std(C_H_arr)
C_G_std = np.std(C_G_arr)
S_H_arr = np.zeros(shape=len(model_numbers_S))
S_G_arr = np.zeros(shape=len(model_numbers_S))
x_plot_range = np.linspace(0, C_type.PhaseAngle.max(), 100)
failed_models_S = []
# Same H,G fit for every S-type model curve
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    for i in tqdm(model_numbers_S):
        failed_to_fit = False
        model_data = S_type[S_type.ModelNo == i]
        try:
            (H, G), _ = curve_fit(HG, model_data.PhaseAngle.values, model_data.ReducedMag.values, sigma=uncertainties, absolute_sigma=True, bounds=([0, -1], [30, 1]))
        except RuntimeError:
            print(f"Model {i} could not fit. Skipping.")
            failed_to_fit = True
            failed_models_S.append(i)
        if not failed_to_fit:
            S_H_arr[i], S_G_arr[i] = H, G
S_H_std = np.std(S_H_arr)
S_G_std = np.std(S_G_arr)
if failed_models_S or failed_models_C:
    failed_models_all = list(set(failed_models_S + failed_models_C))
    # NOTE(review): np.delete returns a NEW array and does not modify its
    # argument — these four results are discarded, so the failed entries are
    # never actually removed; assign the results back (and recompute the stds
    # afterwards) to get the intended behavior.
    np.delete(C_H_arr, failed_models_all)
    np.delete(C_G_arr, failed_models_all)
    np.delete(S_H_arr, failed_models_all)
    np.delete(S_G_arr, failed_models_all)
# Taxonomy-weighted average of the C- and S-type scatter
H_uncs = [C_H_std, S_H_std]
G_uncs = [C_G_std, S_G_std]
weights = [0.476, 0.524]
H_aspect_uncertainty = np.average(H_uncs, weights=weights)
G_aspect_uncertainty = np.average(G_uncs, weights=weights)
print(f'H Aspect Uncertainty: {H_aspect_uncertainty:.2f} mag')
print(f'G Aspect Uncertainty: {G_aspect_uncertainty:.2f}') | [
"scipy.optimize.curve_fit",
"numpy.tan",
"pandas.read_csv",
"numpy.average",
"numpy.delete",
"numpy.sin",
"tqdm.tqdm",
"warnings.catch_warnings",
"numpy.std",
"warnings.simplefilter"
] | [((785, 897), 'pandas.read_csv', 'pd.read_csv', (['f"""{asteroid}-C_type_models.txt"""'], {'delimiter': '"""\t"""', 'names': "['ModelNo', 'PhaseAngle', 'ReducedMag']"}), "(f'{asteroid}-C_type_models.txt', delimiter='\\t', names=[\n 'ModelNo', 'PhaseAngle', 'ReducedMag'])\n", (796, 897), True, 'import pandas as pd\n'), ((902, 1014), 'pandas.read_csv', 'pd.read_csv', (['f"""{asteroid}-S_type_models.txt"""'], {'delimiter': '"""\t"""', 'names': "['ModelNo', 'PhaseAngle', 'ReducedMag']"}), "(f'{asteroid}-S_type_models.txt', delimiter='\\t', names=[\n 'ModelNo', 'PhaseAngle', 'ReducedMag'])\n", (913, 1014), True, 'import pandas as pd\n'), ((1977, 1992), 'numpy.std', 'np.std', (['C_H_arr'], {}), '(C_H_arr)\n', (1983, 1992), True, 'import numpy as np\n'), ((2003, 2018), 'numpy.std', 'np.std', (['C_G_arr'], {}), '(C_G_arr)\n', (2009, 2018), True, 'import numpy as np\n'), ((2825, 2840), 'numpy.std', 'np.std', (['S_H_arr'], {}), '(S_H_arr)\n', (2831, 2840), True, 'import numpy as np\n'), ((2851, 2866), 'numpy.std', 'np.std', (['S_G_arr'], {}), '(S_G_arr)\n', (2857, 2866), True, 'import numpy as np\n'), ((3258, 3293), 'numpy.average', 'np.average', (['H_uncs'], {'weights': 'weights'}), '(H_uncs, weights=weights)\n', (3268, 3293), True, 'import numpy as np\n'), ((3317, 3352), 'numpy.average', 'np.average', (['G_uncs'], {'weights': 'weights'}), '(G_uncs, weights=weights)\n', (3327, 3352), True, 'import numpy as np\n'), ((1368, 1393), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1391, 1393), False, 'import warnings\n'), ((1399, 1430), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1420, 1430), False, 'import warnings\n'), ((1444, 1465), 'tqdm.tqdm', 'tqdm', (['model_numbers_C'], {}), '(model_numbers_C)\n', (1448, 1465), False, 'from tqdm import tqdm\n'), ((2200, 2225), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2223, 2225), False, 'import warnings\n'), ((2231, 2262), 
'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2252, 2262), False, 'import warnings\n'), ((2276, 2297), 'tqdm.tqdm', 'tqdm', (['model_numbers_S'], {}), '(model_numbers_S)\n', (2280, 2297), False, 'from tqdm import tqdm\n'), ((2985, 3022), 'numpy.delete', 'np.delete', (['C_H_arr', 'failed_models_all'], {}), '(C_H_arr, failed_models_all)\n', (2994, 3022), True, 'import numpy as np\n'), ((3027, 3064), 'numpy.delete', 'np.delete', (['C_G_arr', 'failed_models_all'], {}), '(C_G_arr, failed_models_all)\n', (3036, 3064), True, 'import numpy as np\n'), ((3069, 3106), 'numpy.delete', 'np.delete', (['S_H_arr', 'failed_models_all'], {}), '(S_H_arr, failed_models_all)\n', (3078, 3106), True, 'import numpy as np\n'), ((3111, 3148), 'numpy.delete', 'np.delete', (['S_G_arr', 'failed_models_all'], {}), '(S_G_arr, failed_models_all)\n', (3120, 3148), True, 'import numpy as np\n'), ((1112, 1176), 'pandas.read_csv', 'pd.read_csv', (['f"""{asteroid}-geom.txt"""'], {'delimiter': '"""\t"""', 'header': 'None'}), "(f'{asteroid}-geom.txt', delimiter='\\t', header=None)\n", (1123, 1176), True, 'import pandas as pd\n'), ((1585, 1731), 'scipy.optimize.curve_fit', 'curve_fit', (['HG', 'model_data.PhaseAngle.values', 'model_data.ReducedMag.values'], {'sigma': 'uncertainties', 'absolute_sigma': '(True)', 'bounds': '([0, -1], [30, 1])'}), '(HG, model_data.PhaseAngle.values, model_data.ReducedMag.values,\n sigma=uncertainties, absolute_sigma=True, bounds=([0, -1], [30, 1]))\n', (1594, 1731), False, 'from scipy.optimize import curve_fit\n'), ((2433, 2579), 'scipy.optimize.curve_fit', 'curve_fit', (['HG', 'model_data.PhaseAngle.values', 'model_data.ReducedMag.values'], {'sigma': 'uncertainties', 'absolute_sigma': '(True)', 'bounds': '([0, -1], [30, 1])'}), '(HG, model_data.PhaseAngle.values, model_data.ReducedMag.values,\n sigma=uncertainties, absolute_sigma=True, bounds=([0, -1], [30, 1]))\n', (2442, 2579), False, 'from scipy.optimize import curve_fit\n'), 
((268, 281), 'numpy.tan', 'np.tan', (['(x / 2)'], {}), '(x / 2)\n', (274, 281), True, 'import numpy as np\n'), ((294, 303), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (300, 303), True, 'import numpy as np\n'), ((428, 445), 'numpy.tan', 'np.tan', (['(1 / 2 * x)'], {}), '(1 / 2 * x)\n', (434, 445), True, 'import numpy as np\n'), ((501, 514), 'numpy.tan', 'np.tan', (['(x / 2)'], {}), '(x / 2)\n', (507, 514), True, 'import numpy as np\n'), ((527, 536), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (533, 536), True, 'import numpy as np\n'), ((661, 678), 'numpy.tan', 'np.tan', (['(1 / 2 * x)'], {}), '(1 / 2 * x)\n', (667, 678), True, 'import numpy as np\n'), ((390, 403), 'numpy.tan', 'np.tan', (['(x / 2)'], {}), '(x / 2)\n', (396, 403), True, 'import numpy as np\n'), ((623, 636), 'numpy.tan', 'np.tan', (['(x / 2)'], {}), '(x / 2)\n', (629, 636), True, 'import numpy as np\n'), ((320, 329), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (326, 329), True, 'import numpy as np\n'), ((348, 357), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (354, 357), True, 'import numpy as np\n'), ((553, 562), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (559, 562), True, 'import numpy as np\n'), ((581, 590), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (587, 590), True, 'import numpy as np\n')] |
if __name__ == '__main__':
import os
from glob import glob
from shutil import copy
import numpy as np
image_root = './Images/PSPT'
dir_dst = './Images/PSPT/DailyBest'
os.makedirs(dir_dst, exist_ok=True)
list_raw_paths = list()
list_grad_paths = list()
list_HMI = sorted(glob(os.path.join('./Images/HMI_100', '*.png')))
list_dir = sorted(os.listdir(image_root))
for dir in list_dir:
list_raw_paths.extend(sorted((glob(os.path.join(image_root, dir, 'Raw', '*.png')))))
list_grad_paths.extend(sorted((glob(os.path.join(image_root, dir, 'Gradient/2', '*.png')))))
list_dates = list()
for i in range(len(list_raw_paths)):
name = os.path.split(os.path.splitext(list_raw_paths[i])[0])[-1]
list_dates.append(name[:8])
tuple_dates = sorted(frozenset(list_dates))
for date in tuple_dates:
list_raw_same_date = list()
list_grads_same_date = list()
switch = False
for i, raw in enumerate(list_raw_paths):
if raw.find(date) != -1:
list_raw_same_date.append(list_raw_paths[i])
list_grads_same_date.append(np.fromfile(list_grad_paths[i]).sum())
switch = True
else:
if not switch:
continue
else:
break
np_grads_same_date = np.asarray(list_grads_same_date)
index = np_grads_same_date.argmax()
print(os.path.splitext(os.path.split(list_raw_same_date[index])[-1])[0][:15])
copy(list_raw_same_date[index], dir_dst)
| [
"numpy.fromfile",
"os.listdir",
"os.makedirs",
"os.path.join",
"numpy.asarray",
"os.path.splitext",
"os.path.split",
"shutil.copy"
] | [((196, 231), 'os.makedirs', 'os.makedirs', (['dir_dst'], {'exist_ok': '(True)'}), '(dir_dst, exist_ok=True)\n', (207, 231), False, 'import os\n'), ((382, 404), 'os.listdir', 'os.listdir', (['image_root'], {}), '(image_root)\n', (392, 404), False, 'import os\n'), ((1392, 1424), 'numpy.asarray', 'np.asarray', (['list_grads_same_date'], {}), '(list_grads_same_date)\n', (1402, 1424), True, 'import numpy as np\n'), ((1564, 1604), 'shutil.copy', 'copy', (['list_raw_same_date[index]', 'dir_dst'], {}), '(list_raw_same_date[index], dir_dst)\n', (1568, 1604), False, 'from shutil import copy\n'), ((316, 357), 'os.path.join', 'os.path.join', (['"""./Images/HMI_100"""', '"""*.png"""'], {}), "('./Images/HMI_100', '*.png')\n", (328, 357), False, 'import os\n'), ((475, 520), 'os.path.join', 'os.path.join', (['image_root', 'dir', '"""Raw"""', '"""*.png"""'], {}), "(image_root, dir, 'Raw', '*.png')\n", (487, 520), False, 'import os\n'), ((569, 621), 'os.path.join', 'os.path.join', (['image_root', 'dir', '"""Gradient/2"""', '"""*.png"""'], {}), "(image_root, dir, 'Gradient/2', '*.png')\n", (581, 621), False, 'import os\n'), ((721, 756), 'os.path.splitext', 'os.path.splitext', (['list_raw_paths[i]'], {}), '(list_raw_paths[i])\n', (737, 756), False, 'import os\n'), ((1167, 1198), 'numpy.fromfile', 'np.fromfile', (['list_grad_paths[i]'], {}), '(list_grad_paths[i])\n', (1178, 1198), True, 'import numpy as np\n'), ((1500, 1540), 'os.path.split', 'os.path.split', (['list_raw_same_date[index]'], {}), '(list_raw_same_date[index])\n', (1513, 1540), False, 'import os\n')] |
from matplotlib import pyplot as plt
import numpy as np
from abmarl.sim.components.state import ContinuousPositionState, SpeedAngleState
from abmarl.sim.components.actor import SpeedAngleMovementActor
from abmarl.sim.components.observer import SpeedObserver, AngleObserver
from abmarl.sim.components.done import TooCloseDone
from abmarl.sim.components.agent import SpeedAngleAgent, SpeedAngleActingAgent, \
SpeedAngleObservingAgent
from abmarl.sim import AgentBasedSimulation
from abmarl.tools.matplotlib_utils import mscatter
class BirdAgent(SpeedAngleAgent, SpeedAngleActingAgent, SpeedAngleObservingAgent): pass
class Flight(AgentBasedSimulation):
def __init__(self, **kwargs):
self.agents = kwargs['agents']
# State
self.position_state = ContinuousPositionState(**kwargs)
self.speed_angle_state = SpeedAngleState(**kwargs)
# Actor
self.move_actor = SpeedAngleMovementActor(
position_state=self.position_state, speed_angle_state=self.speed_angle_state, **kwargs
)
# Observer
self.speed_observer = SpeedObserver(**kwargs)
self.angle_observer = AngleObserver(**kwargs)
# Done
self.done = TooCloseDone(position=self.position_state, **kwargs)
self.finalize()
def reset(self, **kwargs):
self.position_state.reset(**kwargs)
self.speed_angle_state.reset(**kwargs)
def step(self, action_dict, **kwargs):
for agent, action in action_dict.items():
self.move_actor.process_move(
self.agents[agent], action.get('accelerate', np.zeros(1)),
action.get('bank', np.zeros(1)), **kwargs
)
def render(self, fig=None, **kwargs):
fig.clear()
# Draw the resources
ax = fig.gca()
# Draw the agents
ax.set(xlim=(0, self.position_state.region), ylim=(0, self.position_state.region))
ax.set_xticks(np.arange(0, self.position_state.region, 1))
ax.set_yticks(np.arange(0, self.position_state.region, 1))
agents_x = [agent.position[0] for agent in self.agents.values()]
agents_y = [agent.position[1] for agent in self.agents.values()]
mscatter(agents_x, agents_y, ax=ax, m='o', s=100, edgecolor='black', facecolor='gray')
plt.plot()
plt.pause(1e-6)
def get_obs(self, agent_id, **kwargs):
agent = self.agents[agent_id]
return {
**self.speed_observer.get_obs(agent, **kwargs),
**self.angle_observer.get_obs(agent, **kwargs),
}
def get_reward(self, agent_id, **kwargs):
pass
def get_done(self, agent_id, **kwargs):
return self.done.get_done(self.agents[agent_id], **kwargs)
def get_all_done(self, **kwargs):
return self.done.get_all_done(**kwargs)
def get_info(self, agent_id, **kwargs):
pass
if __name__ == "__main__":
agents = {
f'bird{i}': BirdAgent(
id=f'bird{i}', min_speed=0.5, max_speed=1.0, max_acceleration=0.1,
max_banking_angle=90, max_banking_angle_change=90,
initial_banking_angle=30
) for i in range(24)
}
sim = Flight(
region=20,
agents=agents,
collision_distance=1.0,
)
fig = plt.figure()
sim.reset()
sim.render(fig=fig)
print(sim.get_obs('bird0'))
for i in range(50):
sim.step({agent.id: agent.action_space.sample() for agent in agents.values()})
sim.render(fig=fig)
for agent in agents:
print(agent, ': ', sim.get_done(agent))
print('\n')
print(sim.get_all_done())
| [
"abmarl.sim.components.observer.AngleObserver",
"matplotlib.pyplot.plot",
"abmarl.sim.components.state.SpeedAngleState",
"abmarl.sim.components.actor.SpeedAngleMovementActor",
"abmarl.sim.components.state.ContinuousPositionState",
"matplotlib.pyplot.figure",
"abmarl.sim.components.observer.SpeedObserver... | [((3296, 3308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3306, 3308), True, 'from matplotlib import pyplot as plt\n'), ((780, 813), 'abmarl.sim.components.state.ContinuousPositionState', 'ContinuousPositionState', ([], {}), '(**kwargs)\n', (803, 813), False, 'from abmarl.sim.components.state import ContinuousPositionState, SpeedAngleState\n'), ((847, 872), 'abmarl.sim.components.state.SpeedAngleState', 'SpeedAngleState', ([], {}), '(**kwargs)\n', (862, 872), False, 'from abmarl.sim.components.state import ContinuousPositionState, SpeedAngleState\n'), ((916, 1031), 'abmarl.sim.components.actor.SpeedAngleMovementActor', 'SpeedAngleMovementActor', ([], {'position_state': 'self.position_state', 'speed_angle_state': 'self.speed_angle_state'}), '(position_state=self.position_state,\n speed_angle_state=self.speed_angle_state, **kwargs)\n', (939, 1031), False, 'from abmarl.sim.components.actor import SpeedAngleMovementActor\n'), ((1100, 1123), 'abmarl.sim.components.observer.SpeedObserver', 'SpeedObserver', ([], {}), '(**kwargs)\n', (1113, 1123), False, 'from abmarl.sim.components.observer import SpeedObserver, AngleObserver\n'), ((1154, 1177), 'abmarl.sim.components.observer.AngleObserver', 'AngleObserver', ([], {}), '(**kwargs)\n', (1167, 1177), False, 'from abmarl.sim.components.observer import SpeedObserver, AngleObserver\n'), ((1214, 1266), 'abmarl.sim.components.done.TooCloseDone', 'TooCloseDone', ([], {'position': 'self.position_state'}), '(position=self.position_state, **kwargs)\n', (1226, 1266), False, 'from abmarl.sim.components.done import TooCloseDone\n'), ((2221, 2311), 'abmarl.tools.matplotlib_utils.mscatter', 'mscatter', (['agents_x', 'agents_y'], {'ax': 'ax', 'm': '"""o"""', 's': '(100)', 'edgecolor': '"""black"""', 'facecolor': '"""gray"""'}), "(agents_x, agents_y, ax=ax, m='o', s=100, edgecolor='black',\n facecolor='gray')\n", (2229, 2311), False, 'from abmarl.tools.matplotlib_utils 
import mscatter\n'), ((2317, 2327), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (2325, 2327), True, 'from matplotlib import pyplot as plt\n'), ((2336, 2352), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-06)'], {}), '(1e-06)\n', (2345, 2352), True, 'from matplotlib import pyplot as plt\n'), ((1954, 1997), 'numpy.arange', 'np.arange', (['(0)', 'self.position_state.region', '(1)'], {}), '(0, self.position_state.region, 1)\n', (1963, 1997), True, 'import numpy as np\n'), ((2021, 2064), 'numpy.arange', 'np.arange', (['(0)', 'self.position_state.region', '(1)'], {}), '(0, self.position_state.region, 1)\n', (2030, 2064), True, 'import numpy as np\n'), ((1612, 1623), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1620, 1623), True, 'import numpy as np\n'), ((1661, 1672), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1669, 1672), True, 'import numpy as np\n')] |
# Oriented FAST and Rotated BRIEF
import cv2
import numpy as np
from matplotlib import pyplot as plt
def run(image_path):
img = cv2.imread(image_path, 0)
orb = cv2.ORB_create()
kp = orb.detect(img, None)
kp, desc = orb.compute(img, kp)
# img2 = cv2.drawKeypoints(img, kp, img, color=(0, 255, 0), flags=0)
# convert key points to a normal list
points = []
for p in kp:
points.append([p.pt[0], p.pt[1]])
points = np.array(points, dtype='float32')
hull = cv2.convexHull(points)
# x, y, w, h = cv2.boundingRect(hull)
transformed_hull = np.array(hull).reshape((-1, 1, 2)).astype(np.int32) # need to change for output
out = cv2.drawKeypoints(img, kp, img, color=(0, 0, 255), flags=0)
out = cv2.drawContours(img, [transformed_hull], 0, (0, 255, 0), 2)
# rotate to provide proper output
rows, cols = out.shape
m = cv2.getRotationMatrix2D((cols / 2, rows / 2), 180, 1)
out = cv2.warpAffine(out, m, (cols, rows))
# cv2.drawContours(img, hull, 0, (0, 255, 0), 2)
# plt.plot(hull)
plt.imshow(out), plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.convexHull",
"cv2.warpAffine",
"cv2.drawContours",
"cv2.drawKeypoints",
"numpy.array",
"cv2.ORB_create",
"cv2.getRotationMatrix2D",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((135, 160), 'cv2.imread', 'cv2.imread', (['image_path', '(0)'], {}), '(image_path, 0)\n', (145, 160), False, 'import cv2\n'), ((171, 187), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (185, 187), False, 'import cv2\n'), ((463, 496), 'numpy.array', 'np.array', (['points'], {'dtype': '"""float32"""'}), "(points, dtype='float32')\n", (471, 496), True, 'import numpy as np\n'), ((509, 531), 'cv2.convexHull', 'cv2.convexHull', (['points'], {}), '(points)\n', (523, 531), False, 'import cv2\n'), ((689, 748), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img', 'kp', 'img'], {'color': '(0, 0, 255)', 'flags': '(0)'}), '(img, kp, img, color=(0, 0, 255), flags=0)\n', (706, 748), False, 'import cv2\n'), ((759, 819), 'cv2.drawContours', 'cv2.drawContours', (['img', '[transformed_hull]', '(0)', '(0, 255, 0)', '(2)'], {}), '(img, [transformed_hull], 0, (0, 255, 0), 2)\n', (775, 819), False, 'import cv2\n'), ((895, 948), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(180)', '(1)'], {}), '((cols / 2, rows / 2), 180, 1)\n', (918, 948), False, 'import cv2\n'), ((959, 995), 'cv2.warpAffine', 'cv2.warpAffine', (['out', 'm', '(cols, rows)'], {}), '(out, m, (cols, rows))\n', (973, 995), False, 'import cv2\n'), ((1075, 1090), 'matplotlib.pyplot.imshow', 'plt.imshow', (['out'], {}), '(out)\n', (1085, 1090), True, 'from matplotlib import pyplot as plt\n'), ((1092, 1102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1100, 1102), True, 'from matplotlib import pyplot as plt\n'), ((597, 611), 'numpy.array', 'np.array', (['hull'], {}), '(hull)\n', (605, 611), True, 'import numpy as np\n')] |
"""
Particles data from [1] PDG
Notes
-----
The data copied from [1] is stored in data/particles.csv file
References
----------
[1] <NAME>., & others. (2020). Review of Particle Physics. PTEP, 2020(8), 083C01.
https://doi.org/10.1093/ptep/ptaa104
"""
import importlib.resources
from .. import resources
import numpy as np
import pandas as pd
from scipy.constants import hbar, eV, c, giga, centi
class Particle:
"""
Particle information and utilities
Parameters
----------
name : str
String name of the particle as stated in the README.md file
tau : tuple[float, float]
Life time in second unit
mass : tuple[float, float]
Mass in MeV unit
Attributes
----------
name: str
String name of the particle as stated in the README.md file
mass: tuple[float, float]
mass in GeV unit
tau: tuple[float, float]
Life time in second unit
Notes
-----
* all attributes in units of GeV or seconds
* all sizes of the form tuple[float, float] stands for: (value, uncertainty)
* SEC is the factor to convert time units from [s] to [GeV^-1] i.e. SEC * t [s] = t [GeV^-1]
* METER factor to convert length units from [m] to [GeV^-1] i.e. METER * x [m] = x [GeV^-1]
"""
SEC = (hbar / (giga * eV)) ** -1
METER = ((hbar * c) / (giga * eV)) ** -1
def __init__(self, name: str, mass: tuple[float, float], tau: tuple[float, float]):
self.name = name
# converting units to GeV
self.mass = mass[0] * 10**-3, mass[1] * 10**-3
self.tau = tau
def __repr__(self):
return self.name
def get_momentum_in_range(self, lb: float, rb: float) -> float:
"""
Calculate the most probable momentum for `self` particle to decay within the interval [`lb`, `rb`]
Parameters
----------
lb
left bound for desired decay in cm unit
rb
right bound for desired decay in cm unit
Returns
-------
momentum
momentum in GeV units
"""
if not (0 <= lb < rb):
raise ValueError("Boundaries should sustain the inequality 0 <= lb < rb")
if np.isinf(self.tau[0]):
raise ValueError("This particle will not decay at all")
if lb == 0:
return 0
t = self.tau[0] * Particle.SEC
m = self.mass[0]
left = lb * centi * Particle.METER
right = rb * centi * Particle.METER
return (m * (right - left)) / (t * np.log(right / left))
def get_momentum_probability(self, momentum: float, lb: float, rb: float) -> float:
"""
Calculate the probability for `self` particle to decay within the interval [`lb`, `rb`]
with momentum `momentum`
Parameters
----------
momentum
momentum in GeV units
lb
left boundary for desired decay in cm unit
rb
right boundary for desired decay in cm unit
Returns
-------
probability to decay
"""
if not (0 <= lb < rb):
raise ValueError("Boundaries should sustain the inequality 0 <= lb < rb")
if np.isinf(self.tau[0]):
# particle will not decay in any circumstances
return 0
t = self.tau[0] * Particle.SEC
m = self.mass[0]
left = lb * centi * Particle.METER
right = rb * centi * Particle.METER
if momentum == 0:
return 1
return np.exp(-t ** -1 * (m * left) / momentum) - np.exp(-t ** -1 * (m * right) / momentum)
def create_particles_dic():
"""feel free to add particles to particles.csv file if necessary"""
with importlib.resources.open_text(resources, 'particles.csv') as path:
df = pd.read_csv(path)
dic = dict()
for _, row in df.iterrows():
dic[row['name']] = Particle(row['name'],
(row['mass[MeV]'], row['sd_mass[MeV]']),
(row['tau[s]'], row['sd_tau[s]']))
return dic
| [
"numpy.exp",
"numpy.log",
"numpy.isinf",
"pandas.read_csv"
] | [((2223, 2244), 'numpy.isinf', 'np.isinf', (['self.tau[0]'], {}), '(self.tau[0])\n', (2231, 2244), True, 'import numpy as np\n'), ((3226, 3247), 'numpy.isinf', 'np.isinf', (['self.tau[0]'], {}), '(self.tau[0])\n', (3234, 3247), True, 'import numpy as np\n'), ((3818, 3835), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (3829, 3835), True, 'import pandas as pd\n'), ((3542, 3582), 'numpy.exp', 'np.exp', (['(-t ** -1 * (m * left) / momentum)'], {}), '(-t ** -1 * (m * left) / momentum)\n', (3548, 3582), True, 'import numpy as np\n'), ((3585, 3626), 'numpy.exp', 'np.exp', (['(-t ** -1 * (m * right) / momentum)'], {}), '(-t ** -1 * (m * right) / momentum)\n', (3591, 3626), True, 'import numpy as np\n'), ((2549, 2569), 'numpy.log', 'np.log', (['(right / left)'], {}), '(right / left)\n', (2555, 2569), True, 'import numpy as np\n')] |
import random
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
m = 32
n = 32
num_vertices = m * n
num_edges = 2 * m * n - 1
G = nx.Graph()
for i in range(m):
for j in range(n):
if i < (m - 1):
G.add_edge((i,j), (i+1, j))
if j < (n - 1):
G.add_edge((i,j), (i, j+1))
num_mazes = 1
mazes = []
# Generate maps.
for _ in range(num_mazes):
# Reset maze matrix.
maze = np.zeros((2*m+1, 2*n+1), dtype=np.int)
# Reset visited nodes.
for node in G.nodes():
maze[2*node[0]+1, 2*node[1]+1] = 1
G.nodes[node]['visited'] = False
# Pick a random starting node.
current = random.randint(0, m-1), random.randint(0, n-1)
G.nodes[current]['visited'] = True
history = [current]
# Break the walls!
while True:
# Get all reachable neighbors from current node.
reachable = [node for node in nx.neighbors(G, current) if not G.nodes[node]['visited']]
# If there are reachable neighbors from current node, get a
# random neighbor, set it to be visited and put node in
# history.
if len(reachable) > 0:
prev = current
current = random.choice(reachable)
dm = current[0] - prev[0]
dn = current[1] - prev[1]
maze[2 * prev[0] + dm + 1, 2 * prev[1] + dn + 1] = 1
G.nodes[current]['visited'] = True
history.append(current)
# If there are no reachable neighbors, check if there are nodes
# left to pop from history
elif len(history) > 0:
current = history.pop()
else:
break
start = random.choice([node for node in G.nodes])
end = random.choice([node for node in G.nodes])
mazes.append(maze.flatten())
plt.imshow(maze)
plt.show()
mazes = np.array(mazes)
| [
"matplotlib.pyplot.imshow",
"random.choice",
"networkx.neighbors",
"networkx.Graph",
"numpy.array",
"numpy.zeros",
"random.randint",
"matplotlib.pyplot.show"
] | [((153, 163), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (161, 163), True, 'import networkx as nx\n'), ((1845, 1860), 'numpy.array', 'np.array', (['mazes'], {}), '(mazes)\n', (1853, 1860), True, 'import numpy as np\n'), ((442, 488), 'numpy.zeros', 'np.zeros', (['(2 * m + 1, 2 * n + 1)'], {'dtype': 'np.int'}), '((2 * m + 1, 2 * n + 1), dtype=np.int)\n', (450, 488), True, 'import numpy as np\n'), ((1673, 1714), 'random.choice', 'random.choice', (['[node for node in G.nodes]'], {}), '([node for node in G.nodes])\n', (1686, 1714), False, 'import random\n'), ((1725, 1766), 'random.choice', 'random.choice', (['[node for node in G.nodes]'], {}), '([node for node in G.nodes])\n', (1738, 1766), False, 'import random\n'), ((1804, 1820), 'matplotlib.pyplot.imshow', 'plt.imshow', (['maze'], {}), '(maze)\n', (1814, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1835), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1833, 1835), True, 'import matplotlib.pyplot as plt\n'), ((669, 693), 'random.randint', 'random.randint', (['(0)', '(m - 1)'], {}), '(0, m - 1)\n', (683, 693), False, 'import random\n'), ((693, 717), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (707, 717), False, 'import random\n'), ((1203, 1227), 'random.choice', 'random.choice', (['reachable'], {}), '(reachable)\n', (1216, 1227), False, 'import random\n'), ((914, 938), 'networkx.neighbors', 'nx.neighbors', (['G', 'current'], {}), '(G, current)\n', (926, 938), True, 'import networkx as nx\n')] |
#!/usr/bin/python3
# coding=utf8
#from datacube.storage import netcdf_writer
#from datacube.model import Variable
import datacube
import numpy as np
from datacube.drivers.netcdf import writer as netcdf_writer
from datacube.utils.geometry import CRS
from datacube.utils import geometry, data_resolution_and_offset
from rasterio.transform import from_bounds
from rasterio.warp import reproject, Resampling
from rasterio.coords import BoundingBox
from subprocess import CalledProcessError, Popen, PIPE, check_output
from affine import Affine
import rasterio
import os, glob
import re
import xarray as xr
import itertools
import rasterio
import time
import logging
logging.basicConfig(
format='%(levelname)s : %(asctime)s : %(message)s',
level=logging.DEBUG
)
# To print loggin information in the console
logging.getLogger().addHandler(logging.StreamHandler())
ALGORITHMS_FOLDER = "/web_storage/algorithms/workflows"
COMPLETE_ALGORITHMS_FOLDER="/web_storage/algorithms"
RESULTS_FOLDER = "/web_storage/results"
LOGS_FOLDER = "/web_storage/logs"
nodata=-9999
#def saveNC(output,filename,history):
# output.to_netcdf(filename,format='NETCDF3_CLASSIC')
def saveNC(output,filename,history):
logging.info('saveNC: dataset {} - {}'.format(
type(output),output
)
)
start = time.time()
nco=netcdf_writer.create_netcdf(filename)
nco.history = (history.encode('ascii','replace'))
coords=output.coords
cnames=()
# This 3 lines were created by Aurelio
# if an error occurs in this function please
# check this 3 lines first.
# we reorder the coordinates system to match
coord_names = list(output.coords.keys())
print('coord_names_antes',coord_names)
sample_coords = []
if 'time' in coord_names:
sample_coords.append('time')
coord_names.remove('time')
if 'latitude' in coord_names:
sample_coords.append('latitude')
coord_names.remove('latitude')
else:
raise Exception("No hay 'latitude' como coordenada en el dataset")
if 'longitude' in coord_names:
sample_coords.append('longitude')
coord_names.remove('longitude')
else:
raise Exception("No hay 'longitude' como coordenada en el dataset")
sample_coords = sample_coords + coord_names
coord_names = sample_coords
print('coord_names_despues',coord_names)
#for x in coords:
for x in coord_names:
if not 'units' in coords[x].attrs:
if x == "time":
coords[x].attrs["units"]=u"seconds since 1970-01-01 00:00:00"
netcdf_writer.create_coordinate(nco, x, coords[x].values, coords[x].units)
cnames=cnames+(x,)
_crs=output.crs
if isinstance(_crs, xr.DataArray):
_crs=CRS(str(_crs.spatial_ref))
netcdf_writer.create_grid_mapping_variable(nco, _crs)
for band in output.data_vars:
#Para evitar problemas con xarray <0.11
if band in coords.keys() or band == 'crs':
continue
output.data_vars[band].values[np.isnan(output.data_vars[band].values)]=nodata
var= netcdf_writer.create_variable(nco, band, netcdf_writer.Variable(output.data_vars[band].dtype, nodata, cnames, None) ,set_crs=True)
var[:] = netcdf_writer.netcdfy_data(output.data_vars[band].values)
nco.close()
end = time.time()
logging.info('TIEMPO SALIDA NC:' + str((end - start)))
def readNetCDF(file):
start = time.time()
try:
_xarr=xr.open_dataset(file, mask_and_scale=True)
if _xarr.data_vars['crs'] is not None:
_xarr.attrs['crs']= _xarr.data_vars['crs']
_xarr = _xarr.drop('crs')
end = time.time()
logging.info('TIEMPO CARGA NC:' + str((end - start)))
logging.info('readNetCDF: dataset {} - {}'.format(
type(_xarr),_xarr
)
)
return _xarr
except Exception as e:
logging.info('ERROR CARGA NC:' + str(e))
def getUpstreamVariable(task, context,key='return_value'):
start = time.time()
task_instance = context['task_instance']
upstream_tasks = task.get_direct_relatives(upstream=True)
upstream_task_ids = [task.task_id for task in upstream_tasks]
#upstream_task_ids = task.get_direct_relatives(upstream=True)
upstream_variable_values = task_instance.xcom_pull(task_ids=upstream_task_ids, key=key)
end = time.time()
logging.info('TIEMPO UPSTREAM:' + str((end - start)))
return list(itertools.chain.from_iterable(filter(None.__ne__,upstream_variable_values)))
def _get_transform_from_xr(dataset):
"""Create a geotransform from an xarray dataset.
"""
geobox=calculate_bounds_geotransform(dataset)
# geotransform = from_bounds(dataset.longitude[0], dataset.latitude[-1], dataset.longitude[-1], dataset.latitude[0],
# len(dataset.longitude), len(dataset.latitude))
geotransform = from_bounds(geobox['left'], geobox['top'], geobox['right'], geobox['bottom'],
len(dataset.longitude), len(dataset.latitude))
print(geotransform)
return geotransform
def calculate_bounds_geotransform(dataset):
# Convert rasterio CRS into datacube CRS object
if isinstance(dataset.crs,xr.DataArray):
crs_dict = dataset.crs.to_dict()
_crs = CRS(str(crs_dict['attrs']['spatial_ref']))
# Leave the CRS as it is (datacube CRS object)
elif isinstance(dataset.crs,datacube.utils.geometry._base.CRS):
_crs = dataset.crs
else:
raise Exception('dataset.crs datatype not know (please check calculate_bounds_geotransform)')
#crs_dict = dataset.crs.to_dict()
#_crs = CRS(str(crs_dict['attrs']['spatial_ref']))
dims = _crs.dimensions
xres, xoff = data_resolution_and_offset(dataset[dims[1]])
yres, yoff = data_resolution_and_offset(dataset[dims[0]])
GeoTransform = [xoff, xres, 0.0, yoff, 0.0, yres]
left, right = dataset[dims[1]][0] - 0.5 * xres, dataset[dims[1]][-1] + 0.5 * xres
bottom, top = dataset[dims[0]][0] - 0.5 * yres, dataset[dims[0]][-1] + 0.5 * yres
return {'left':left, 'right':right,'bottom':bottom, 'top':top, 'GeoTransform': GeoTransform}
def write_geotiff_from_xr(tif_path, dataset, bands=None, no_data=-9999, crs="EPSG:4326"):
    """Write a geotiff from an xarray dataset.
    Args:
        tif_path: path for the tif to be written to.
        dataset: xarray dataset
        bands: list of strings representing the bands in the order they should be written.
            NOTE(review): this argument is currently ignored -- the band list is always
            rebuilt from ``dataset.data_vars`` below; confirm whether callers rely on that.
        no_data: nodata value for the dataset
        crs: requested crs; overridden below from ``dataset.crs`` when it is present.
        Affine(a,b,c,d,e,f)
        a = width of a pixel
        b = row rotation (typically zero)
        c = x-coordinate of the upper-left corner of the upper-left pixel
        d = column rotation (typically zero)
        e = height of a pixel (typically negative)
        f = y-coordinate of the of the upper-left corner of the upper-left pixel
    """
    from rasterio.crs import CRS as CRS_rasterio
    # Always write every data variable, in dataset order (overrides any caller-supplied list).
    bands = list(dataset.data_vars.keys())
    assert isinstance(bands, list), "Bands must a list of strings"
    assert len(bands) > 0 and isinstance(bands[0], str), "You must supply at least one band."
    logging.info('write_geotiff_from_xr: dataset {} - {}'.format(
        type(dataset), dataset
        )
    )
    # Resolve the output CRS from dataset.crs: either an xr.DataArray carrying WKT attrs,
    # or a datacube CRS object.
    if isinstance(dataset.crs, xr.DataArray):
        crs_dict = dataset.crs.to_dict()
        crs = CRS_rasterio.from_wkt(crs_dict['attrs']['crs_wkt'])
    elif isinstance(dataset.crs, datacube.utils.geometry._base.CRS):
        crs = CRS_rasterio.from_string(dataset.crs.crs_str)
    else:
        raise Exception('dataset.crs datatype not known (please check write_geotiff_from_xr)')
    geobox = calculate_bounds_geotransform(dataset)
    bounds = BoundingBox(left=geobox['left'], bottom=geobox['bottom'], right=geobox['right'], top=geobox['top'])
    transform = _get_transform_from_xr(dataset)
    with rasterio.open(tif_path, 'w',
                       driver='GTiff',
                       height=dataset.dims['latitude'],
                       width=dataset.dims['longitude'],
                       count=len(bands),
                       dtype=dataset[bands[0]].dtype,
                       crs=crs,
                       transform=transform,
                       bounds=bounds,
                       nodata=no_data) as dst:
        for index, band in enumerate(bands):
            # GeoTIFF bands are 1-indexed; cast every band to the first band's dtype.
            dst.write_band(index + 1, dataset[band].values.astype(dataset[bands[0]].dtype))
            tag = {'Band_' + str(index + 1): bands[index]}
            dst.update_tags(**tag)
            dst.set_band_description(index + 1, band)
    # No explicit dst.close(): the context manager closes the file on exit.
def translate_netcdf_to_tiff(task_id, algorithm, folder, files):
    """Run the generate-geotiff shell script over the given NetCDF files.

    Prints the script's stdout and returns the produced file paths matching the
    task id on success; when the script emits no stdout, prints stderr and raises
    AirflowSkipException.
    """
    script = os.path.join(ALGORITHMS_FOLDER, "generate-geotiff", "generate-geotiff_1.0.sh")
    try:
        proc = Popen([script, task_id, algorithm, folder] + files, stdout=PIPE, stderr=PIPE)
        raw_out, raw_err = proc.communicate()
        out = raw_out.decode('utf-8')
        err = raw_err.decode('utf-8')
        if not out:
            print(err)
            raise AirflowSkipException("ERROR")
        print(out)
        return glob.glob("{}*{}*".format(folder, task_id))
    except CalledProcessError as cpe:
        print('Error: No se pudo generarel geotiff ')
| [
"logging.basicConfig",
"logging.getLogger",
"datacube.drivers.netcdf.writer.Variable",
"logging.StreamHandler",
"datacube.drivers.netcdf.writer.netcdfy_data",
"datacube.drivers.netcdf.writer.create_coordinate",
"subprocess.Popen",
"rasterio.crs.CRS.from_string",
"os.path.join",
"xarray.open_datase... | [((663, 759), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s : %(asctime)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s : %(asctime)s : %(message)s',\n level=logging.DEBUG)\n", (682, 759), False, 'import logging\n'), ((843, 866), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (864, 866), False, 'import logging\n'), ((1309, 1320), 'time.time', 'time.time', ([], {}), '()\n', (1318, 1320), False, 'import time\n'), ((1329, 1366), 'datacube.drivers.netcdf.writer.create_netcdf', 'netcdf_writer.create_netcdf', (['filename'], {}), '(filename)\n', (1356, 1366), True, 'from datacube.drivers.netcdf import writer as netcdf_writer\n'), ((2792, 2845), 'datacube.drivers.netcdf.writer.create_grid_mapping_variable', 'netcdf_writer.create_grid_mapping_variable', (['nco', '_crs'], {}), '(nco, _crs)\n', (2834, 2845), True, 'from datacube.drivers.netcdf import writer as netcdf_writer\n'), ((3332, 3343), 'time.time', 'time.time', ([], {}), '()\n', (3341, 3343), False, 'import time\n'), ((3439, 3450), 'time.time', 'time.time', ([], {}), '()\n', (3448, 3450), False, 'import time\n'), ((4031, 4042), 'time.time', 'time.time', ([], {}), '()\n', (4040, 4042), False, 'import time\n'), ((4384, 4395), 'time.time', 'time.time', ([], {}), '()\n', (4393, 4395), False, 'import time\n'), ((5762, 5806), 'datacube.utils.data_resolution_and_offset', 'data_resolution_and_offset', (['dataset[dims[1]]'], {}), '(dataset[dims[1]])\n', (5788, 5806), False, 'from datacube.utils import geometry, data_resolution_and_offset\n'), ((5824, 5868), 'datacube.utils.data_resolution_and_offset', 'data_resolution_and_offset', (['dataset[dims[0]]'], {}), '(dataset[dims[0]])\n', (5850, 5868), False, 'from datacube.utils import geometry, data_resolution_and_offset\n'), ((8454, 8558), 'rasterio.coords.BoundingBox', 'BoundingBox', ([], {'left': "geobox['left']", 'bottom': "geobox['bottom']", 'right': "geobox['right']", 
'top': "geobox['top']"}), "(left=geobox['left'], bottom=geobox['bottom'], right=geobox[\n 'right'], top=geobox['top'])\n", (8465, 8558), False, 'from rasterio.coords import BoundingBox\n'), ((9508, 9586), 'os.path.join', 'os.path.join', (['ALGORITHMS_FOLDER', '"""generate-geotiff"""', '"""generate-geotiff_1.0.sh"""'], {}), "(ALGORITHMS_FOLDER, 'generate-geotiff', 'generate-geotiff_1.0.sh')\n", (9520, 9586), False, 'import os, glob\n'), ((812, 831), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (829, 831), False, 'import logging\n'), ((2587, 2661), 'datacube.drivers.netcdf.writer.create_coordinate', 'netcdf_writer.create_coordinate', (['nco', 'x', 'coords[x].values', 'coords[x].units'], {}), '(nco, x, coords[x].values, coords[x].units)\n', (2618, 2661), True, 'from datacube.drivers.netcdf import writer as netcdf_writer\n'), ((3247, 3304), 'datacube.drivers.netcdf.writer.netcdfy_data', 'netcdf_writer.netcdfy_data', (['output.data_vars[band].values'], {}), '(output.data_vars[band].values)\n', (3273, 3304), True, 'from datacube.drivers.netcdf import writer as netcdf_writer\n'), ((3474, 3516), 'xarray.open_dataset', 'xr.open_dataset', (['file'], {'mask_and_scale': '(True)'}), '(file, mask_and_scale=True)\n', (3489, 3516), True, 'import xarray as xr\n'), ((3671, 3682), 'time.time', 'time.time', ([], {}), '()\n', (3680, 3682), False, 'import time\n'), ((8095, 8146), 'rasterio.crs.CRS.from_wkt', 'CRS_rasterio.from_wkt', (["crs_dict['attrs']['crs_wkt']"], {}), "(crs_dict['attrs']['crs_wkt'])\n", (8116, 8146), True, 'from rasterio.crs import CRS as CRS_rasterio\n'), ((9608, 9699), 'subprocess.Popen', 'Popen', (['([bash_script_path, task_id, algorithm, folder] + files)'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '([bash_script_path, task_id, algorithm, folder] + files, stdout=PIPE,\n stderr=PIPE)\n', (9613, 9699), False, 'from subprocess import CalledProcessError, Popen, PIPE, check_output\n'), ((3038, 3077), 'numpy.isnan', 'np.isnan', 
(['output.data_vars[band].values'], {}), '(output.data_vars[band].values)\n', (3046, 3077), True, 'import numpy as np\n'), ((3140, 3214), 'datacube.drivers.netcdf.writer.Variable', 'netcdf_writer.Variable', (['output.data_vars[band].dtype', 'nodata', 'cnames', 'None'], {}), '(output.data_vars[band].dtype, nodata, cnames, None)\n', (3162, 3214), True, 'from datacube.drivers.netcdf import writer as netcdf_writer\n'), ((8230, 8275), 'rasterio.crs.CRS.from_string', 'CRS_rasterio.from_string', (['dataset.crs.crs_str'], {}), '(dataset.crs.crs_str)\n', (8254, 8275), True, 'from rasterio.crs import CRS as CRS_rasterio\n')] |
#!/usr/bin/env python3

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

"""Plot mean benchmark sampling rates: raw vs. preprocessed data, cached vs. not cached."""

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df1 = pd.read_csv("original-benchmark-results.csv")
df2 = pd.read_csv("warped-benchmark-results.csv")

# Mean metrics per sampler, excluding the resnet18 baseline rows,
# split by whether the data was cached.
cached1 = (
    df1[(df1["cached"]) & (df1["sampler"] != "resnet18")].groupby("sampler").mean()
)
cached2 = (
    df2[(df2["cached"]) & (df2["sampler"] != "resnet18")].groupby("sampler").mean()
)
not_cached1 = (
    df1[(~df1["cached"]) & (df1["sampler"] != "resnet18")].groupby("sampler").mean()
)
not_cached2 = (
    df2[(~df2["cached"]) & (df2["sampler"] != "resnet18")].groupby("sampler").mean()
)

print("cached, original\n", cached1)
print("cached, warped\n", cached2)
print("not cached, original\n", not_cached1)
print("not cached, warped\n", not_cached2)

cmap = sns.color_palette()
labels = ["GridGeoSampler", "RandomBatchGeoSampler", "RandomGeoSampler"]

fig, ax = plt.subplots()
x = np.arange(3)
width = 0.2
# Four grouped bars per sampler; the bar-container handles are not needed afterwards
# (the original bound them to rects1/rects2/... and reused rects2 twice by mistake).
ax.bar(
    x - width * 3 / 2,
    not_cached1["rate"],
    width,
    label="Raw Data, Not Cached",
    color=cmap[0],
)
ax.bar(
    x - width * 1 / 2,
    not_cached2["rate"],
    width,
    label="Preprocessed, Not Cached",
    color=cmap[1],
)
ax.bar(
    x + width * 1 / 2, cached1["rate"], width, label="Raw Data, Cached", color=cmap[2]
)
ax.bar(
    x + width * 3 / 2,
    cached2["rate"],
    width,
    label="Preprocessed, Cached",
    color=cmap[3],
)
ax.set_ylabel("sampling rate (patches/sec)", fontsize=12)
ax.set_xticks(x)
ax.set_xticklabels(labels, fontsize=12)
ax.tick_params(axis="x", labelrotation=10)
ax.legend(fontsize="large")
plt.gca().spines.right.set_visible(False)
plt.gca().spines.top.set_visible(False)
plt.tight_layout()
plt.show()
| [
"seaborn.color_palette",
"pandas.read_csv",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((219, 264), 'pandas.read_csv', 'pd.read_csv', (['"""original-benchmark-results.csv"""'], {}), "('original-benchmark-results.csv')\n", (230, 264), True, 'import pandas as pd\n'), ((271, 314), 'pandas.read_csv', 'pd.read_csv', (['"""warped-benchmark-results.csv"""'], {}), "('warped-benchmark-results.csv')\n", (282, 314), True, 'import pandas as pd\n'), ((964, 983), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (981, 983), True, 'import seaborn as sns\n'), ((1069, 1083), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1081, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1100), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1097, 1100), True, 'import numpy as np\n'), ((1883, 1901), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1899, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1910, 1912), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1810), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1808, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1852), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1850, 1852), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import copy
import importlib.resources
import json
import logging
import os
import platform
import random
import tempfile
import time
import cv2
import networkx as nx
import numpy as np
from ai2thor.build import arch_platform_map, build_name
from ai2thor.controller import Controller
from fuzzywuzzy import fuzz
import teach.meta_data_files.ai2thor_resources as ai2thor_resources
import teach.meta_data_files.config as config_directory
from teach.dataset.initialization import Initialization
from teach.dataset.pose import Pose
from teach.logger import create_logger
from teach.settings import get_settings
from teach.simulators.simulator_base import SimulatorBase
# AI2-THOR build pinned for reproducibility.
# Commit where FillLiquid bug is fixed: https://github.com/allenai/ai2thor/issues/844
COMMIT_ID = "fdc047690ee0ab7a91ede50d286bd387d379713a"
# debug manual flag: when True, every action dict is logged just before controller.step()
debug_print_all_sim_steps = False
# Module-level logger shared by all classes in this file.
logger = create_logger(__name__)
class TEAChController(Controller):
    """AI2-THOR Controller that keeps its build artifacts under a caller-chosen directory."""

    def __init__(self, base_dir: str, **kwargs):
        """Remember and create the base directory, then start the AI2-THOR controller."""
        self._base_dir = base_dir
        os.makedirs(base_dir, exist_ok=True)
        super().__init__(**kwargs)

    @staticmethod
    def build_local_executable_path(base_dir: str, commit_id: str, release_dir: str = "releases"):
        """Helper method to build the path to the local executable. Useful when executable is pre-downloaded."""
        build = build_name(arch_platform_map[platform.system()], commit_id)
        return os.path.join(base_dir, release_dir, build, build)

    @staticmethod
    def base_dir_in_tmp():
        """Return (creating it if needed) an 'ai2thor' directory under the system temp dir."""
        ai2thor_dir = os.path.join(tempfile.gettempdir(), "ai2thor")
        os.makedirs(ai2thor_dir, exist_ok=True)
        return ai2thor_dir

    @property
    def base_dir(self):
        """Directory under which AI2-THOR releases are stored."""
        return self._base_dir
class SimulatorTHOR(SimulatorBase):
    def __init__(
        self,
        task_type="eqa_complex",
        comments=None,
        fps=25,
        logger_name=__name__,
        logger_level=logging.DEBUG,
        dir_out=None,
        s3_bucket_name=None,
        web_window_size=900,
        commander_embodied=False,
        visibility_distance=1.5,
    ):
        """
        Constructor for Simulator_THOR - a wrapper over AI2-THOR
        :param task_type: Type of task. This is currently user-defined. Default = 'eqa_complex'
        :type task_type: String
        :param comments: Informative comments for the entire data collection session. Default = None (use current day, time)
        :type comments: String
        :param fps: Maximum frame rate for video feed. Default = 25
        :type fps: Integer
        :param logger_name: Name of logger. Default = __name__ (name of the current module)
        :type logger_name: String
        :param logger_level: Level for logger. Default = logging.DEBUG
        :type logger_level: Enumeration. See logging.setLevel()
        :param dir_out: Output directory for logging
        :type dir_out: String
        :param s3_bucket_name: S3 bucket for logging
        :type s3_bucket_name: String
        :param web_window_size: Window/ image sizes (square) to be used by simulator; 900 for TEACh data collection
        :type web_window_size: Int
        :param commander_embodied: True if the Commander should also be allowed to interact with objects; False for
        TEACh data collection
        :type commander_embodied: Bool
        :param visibility_distance: Max distance an agent can be from an object to successfully interact with it; 1.5
        for TEACh data collection
        :type visibility_distance: Float
        """
        time_start = time.time()
        super().__init__(
            task_type,
            comments,
            fps=fps,
            logger_name=logger_name,
            logger_level=logger_level,
            dir_out=dir_out,
            s3_bucket_name=s3_bucket_name,
        )
        time_base_init = time.time()
        logger.info("Initializing simulator... time to init Simulator_base: %s sec" % (time_base_init - time_start))
        # AI2-THOR controller is created lazily in __launch_simulator (see start_new_episode)
        self.controller = None
        # Resolve where the AI2-THOR executable lives from TEACh settings
        teach_settings = get_settings()
        self.controller_base_dir = teach_settings.AI2THOR_BASE_DIR
        use_local_exe = teach_settings.AI2THOR_USE_LOCAL_EXE
        self.controller_local_executable_path = (
            TEAChController.build_local_executable_path(self.controller_base_dir, COMMIT_ID) if use_local_exe else None
        )
        # Scene defaults; overwritten when an episode is started
        self.world_type = "Kitchen"
        self.world = None
        self.grid_size = 0.25
        self.hotspot_pixel_width = 10
        self.web_window_size = web_window_size
        self.commander_embodied = commander_embodied
        self.randomize_object_search = False
        self.visibility_distance = visibility_distance
        self.object_target_camera_idx = None
        # Navigation structures built per-scene for teleporting/aiming cameras
        self.navigation_graph = self.navigation_points = None
        self.topdown_cam_orth_size = self.topdown_lower_left_xz = None  # Used for MapGoals
        self.floor_oid = None  # used for handoffs to temporarily store objects on the floor
        # The following is a dictionary for custom object metadata. When adding custom object properties, DO NOT use
        # property names already used by AI2-THOR. If the same property is needed here, prefix the property name with
        # the project for which you are using it. For example, the AI2-THOR property isSliced could be changed to
        # simbotIsSliced if the project simbot needed custom behaviour from isSliced
        self.__custom_object_metadata = dict()
        # Affordances by action type - identifies what properties an object must satisfy for it to be possible to take
        # an action on it; Used in highlighting valid objects in TEACh data collection interface to assist annotators
        self.action_to_affordances = {
            "Pickup": [{"pickupable": True, "isPickedUp": False}],
            "Place": [{"receptacle": True}],
            "Open": [{"openable": True, "isOpen": False}],
            "Close": [{"openable": True, "isOpen": True}],
            "ToggleOn": [{"toggleable": True, "isToggled": False}],
            "ToggleOff": [{"toggleable": True, "isToggled": True}],
            "Slice": [{"sliceable": True, "isSliced": False}],
            "Dirty": [{"dirtyable": True, "isDirty": False}],
            "Clean": [{"dirtyable": True, "isDirty": True}],
            "Fill": [{"canFillWithLiquid": True, "isFilledWithLiquid": False}],
            "Empty": [{"canFillWithLiquid": True, "isFilledWithLiquid": True}],
            "Pour": [
                {"canFillWithLiquid": True, "isFilledWithLiquid": False},
                {"objectType": "Sink"},
                {"objectType": "SinkBasin"},
                {"objectType": "Bathtub"},
                {"objectType": "BathtubBasin"},
            ],
            "Break": [{"breakable": True, "isBroken": False}],
        }
        time_end = time.time()
        logger.info("Finished initializing simulator. Total time: %s sec" % (time_end - time_start))
def set_task(self, task, task_params=None, comments=""):
"""
Set the current task to provided Task_THOR object
Tasks are defined in json files under task_definitions
:param task: instance of Task_THOR class
:param task_params list of parameters to the task, possibly empty; must match definition nparams in length
:param comments: Informative comments for the current task. Default = ''
:type comments: String
"""
logger.debug("Setting task = %s" % str(task))
new_task = copy.deepcopy(task)
if task_params is not None:
new_task.task_params = task_params
new_task.comments = comments
new_task.episodes = [] if self.current_episode is None else [self.current_episode]
self._dataset.add_task(new_task)
self.current_task = new_task
self.logger.debug("New task: %d, %s, %s, %s" % (task.task_id, task.task_name, comments, str(task.task_params)))
self.to_broadcast["info"] = {"message": ""}
logger.info("SimulatorTHOR set_task done New task: %d, %s, %s" % (task.task_id, task.task_name, comments))
    def set_task_by_id(self, task_id: int, task_params=None, comments=""):
        """
        Set the current task to task defined in default_definitions.json with provided task_id
        :param task_id: task id number from task definition json file
        :param task_params list of parameters to the task, possibly empty; must match definition nparams in length
        :param comments: Informative comments for the current task. Default = ''
        :type comments: String
        """
        task = self._dataset.definitions.map_tasks_id2info[task_id]
        # NOTE(review): this mutates the shared definition object in map_tasks_id2info, so the stored
        # definition retains the last-used task_params across calls; set_task deep-copies the task
        # afterwards, but confirm this shared-state mutation is intended.
        task.task_params = task_params
        self.set_task(task=task, task_params=task_params, comments=comments)
    def set_task_by_name(self, task_name: str, task_params=None, comments=""):
        """
        Set the current task to task defined in default_definitions.json with provided task_name
        :param task_name task name from task definition json file
        :param task_params list of parameters to the task, possibly empty; must match definition nparams in length
        :param comments: Informative comments for the current task. Default = ''
        :type comments: String
        """
        task = self._dataset.definitions.map_tasks_name2info[task_name]
        # NOTE(review): this mutates the shared definition object in map_tasks_name2info, so the stored
        # definition retains the last-used task_params across calls; set_task deep-copies the task
        # afterwards, but confirm this shared-state mutation is intended.
        task.task_params = task_params
        self.set_task(task=task, task_params=task_params, comments=comments)
def __add_obj_classes_for_objs(self):
"""
For each object in AI2-THOR metadata, update with manually defined object classes to be tracked in custom
properties
"""
# Load custom object classes
with importlib.resources.open_text(ai2thor_resources, "custom_object_classes.json") as file_handle:
custom_object_classes = json.load(file_handle)
# Assign custom classes to each object
all_objects = self.get_objects(self.controller.last_event)
for obj in all_objects:
cur_obj_classes = [obj["objectType"]]
if obj["objectType"] == "Sink":
cur_obj_classes += ["SinkBasin"]
if obj["objectType"] == "SinkBasin":
cur_obj_classes += ["Sink"]
if obj["objectType"] == "Bathtub":
cur_obj_classes += ["BathtubBasin"]
if obj["objectType"] == "BathtubBasin":
cur_obj_classes += ["Bathtub"]
if obj["objectType"] in custom_object_classes:
cur_obj_classes += custom_object_classes[obj["objectType"]]
self.__update_custom_object_metadata(obj["objectId"], "simbotObjectClass", cur_obj_classes)
    def __init_custom_object_metadata(self):
        """
        Reset custom object metadata to initial state: erase previously tracked properties, add manual classes for all
        objects and check for custom property updates from current state
        """
        # Order matters: classes must be reassigned before per-step property checks run
        self.__custom_object_metadata = dict()
        self.__add_obj_classes_for_objs()
        self.__check_per_step_custom_properties()
    def __check_per_step_custom_properties(self, objs_before_step=None):
        """
        Check whether any custom object properties need to be updated; Should be called after taking each action

        :param objs_before_step: output of get_objects() captured before the action was taken, or
            None; used by the helpers below to detect state transitions
        """
        # Update whether things got cleaned and filled with water
        self.__update_sink_interaction_outcomes(self.controller.last_event)
        # Update whether a mug should be filled with coffee
        self.__update_custom_coffee_prop(self.controller.last_event, objs_before_step)
        # Update whether things got cooked
        self.__update_custom_property_cooked(self.controller.last_event)
        # Check for objects that are boiled at the start of the episode
        self.__update_custom_property_boiled(objs_before_step, self.controller.last_event)
def __update_custom_object_metadata(self, object_id, custom_property_name, custom_property_value):
"""
Update custom properties
"""
if object_id not in self.__custom_object_metadata:
self.__custom_object_metadata[object_id] = dict()
self.__custom_object_metadata[object_id][custom_property_name] = custom_property_value
def __append_to_custom_object_metadata_list(self, object_id, custom_property_name, custom_property_value):
"""
Add values to custom properties that are lists
"""
if object_id not in self.__custom_object_metadata:
self.__custom_object_metadata[object_id] = dict()
if custom_property_name not in self.__custom_object_metadata[object_id]:
self.__custom_object_metadata[object_id][custom_property_name] = list()
if custom_property_value not in self.__custom_object_metadata[object_id][custom_property_name]:
self.__custom_object_metadata[object_id][custom_property_name].append(custom_property_value)
def __delete_from_custom_object_metadata_list(self, object_id, custom_property_name, custom_property_value):
"""
Delete values from custom properties that are lists
"""
if (
object_id in self.__custom_object_metadata
and custom_property_name in self.__custom_object_metadata[object_id]
and custom_property_value in self.__custom_object_metadata[object_id][custom_property_name]
):
del self.__custom_object_metadata[object_id][custom_property_name][
self.__custom_object_metadata[object_id][custom_property_name].index(custom_property_value)
]
def __delete_object_from_custom_object_metadata(self, object_id):
"""
Delete custom properties of an object
:param object_id: ID of object whose properties are to be deleted
"""
if object_id in self.__custom_object_metadata:
del self.__custom_object_metadata[object_id]
for oid in self.__custom_object_metadata:
for prop in self.__custom_object_metadata[oid]:
if (
type(self.__custom_object_metadata[oid][prop]) is list
and object_id in self.__custom_object_metadata[oid][prop]
):
del self.__custom_object_metadata[oid][prop][
self.__custom_object_metadata[oid][prop].index(object_id)
]
elif object_id == self.__custom_object_metadata[oid][prop]:
self.__custom_object_metadata[oid][prop] = None
def __transfer_custom_metadata_on_slicing_cracking(self, objects):
"""
When objects get sliced or cracked, their object IDs change because one object may become multiple objects.
Transfer custom properties from the original object to the new object(s)
:param objects: Output of get_objects()
"""
objects_to_delete = set()
for obj in objects:
transfer_needed = False
orig_obj_id = None
if "Sliced" in obj["objectId"]:
transfer_needed = True
orig_obj_id = "|".join(obj["objectId"].split("|")[:-1])
if "Cracked" in obj["objectId"]:
transfer_needed = True
orig_obj_id = "|".join(obj["objectId"].split("|")[:-1])
if transfer_needed and orig_obj_id is not None and orig_obj_id in self.__custom_object_metadata:
self.__custom_object_metadata[obj["objectId"]] = copy.deepcopy(
self.__custom_object_metadata[orig_obj_id]
)
if (
"simbotLastParentReceptacle" in self.__custom_object_metadata[obj["objectId"]]
and self.__custom_object_metadata[obj["objectId"]]["simbotLastParentReceptacle"] is not None
):
poid = self.__custom_object_metadata[obj["objectId"]]["simbotLastParentReceptacle"]
self.__append_to_custom_object_metadata_list(poid, "simbotIsReceptacleOf", obj["objectId"])
objects_to_delete.add(orig_obj_id)
for obj_id in objects_to_delete:
self.__delete_object_from_custom_object_metadata(obj_id)
def get_objects(self, event=None):
"""
Return objects augmented by custom properties
:param event: Simulator event to be used to obtain object properties, usually self.controller.last_event to get
current object states
"""
if event is None:
if self.commander_embodied:
event = self.controller.last_event.events[0]
else:
event = self.controller.last_event
for obj in event.metadata["objects"]:
if obj["objectId"] in self.__custom_object_metadata:
obj.update(self.__custom_object_metadata[obj["objectId"]])
return event.metadata["objects"]
def get_inventory_objects(self, event):
"""
Return objects held in hand by agents
:param event: Simulator event to be used to obtain object properties, usually self.controller.last_event to get
current object states
"""
for obj in event.metadata["inventoryObjects"]:
if obj["objectId"] in self.__custom_object_metadata:
obj.update(self.__custom_object_metadata[obj["objectId"]])
return event.metadata["inventoryObjects"]
    def start_new_episode(
        self,
        world=None,
        world_type=None,
        object_tuples=None,
        commander_embodied=None,
        episode_id=None,
        randomize_object_search=False,
    ):
        """
        Start a new episode in a random scene
        :param world: AI2-THOR floor plan to be used or None; if None a random scene (matching specified world_type
        if provided) is used
        :param world_type: One of "Kitchen", "Bedroom", "Bathroom", "Living room" or None; if world is None and
        world_type is specified, a random world of the specified world_type is used
        :param object_tuples: Used to specify initial states of objects
        :param commander_embodied: True if the Commander should also be allowed to interact with objects; False for
        TEACh data collection
        :param episode_id: Used to specify a custom episode ID
        :param randomize_object_search: If True, attempts to search for objects will return a random object of type
        matching the search string; if false, the object closest to the agent is always returned on search
        """
        logger.info("In simulator_THOR.start_new_episode, world = %s world_type = %s" % (world, world_type))
        self.randomize_object_search = randomize_object_search
        if commander_embodied is not None:
            self.commander_embodied = commander_embodied
        else:
            self.commander_embodied = False
            logger.info("SimulatorTHOR warning: commander_embodied was not set on first episode init; default to False")
        if world is None:
            world_type, world = self.select_random_world(world_type=world_type)
        super().start_new_episode(
            world=world,
            world_type=world_type,
            object_tuples=object_tuples,
            commander_embodied=commander_embodied,
            episode_id=episode_id,
            randomize_object_search=randomize_object_search,
        )
        logger.info("In SimulatorTHOR.start_new_episode, before __launch_simulator")
        self.__launch_simulator(world=world, world_type=world_type)
        logger.info("In SimulatorTHOR.start_new_episode, completed __launch_simulator")
        # Reset custom metadata for the new scene, then snapshot it as the episode's initial state
        self.__init_custom_object_metadata()
        state = self.get_scene_object_locs_and_states()
        self.current_episode.initial_state = Initialization(
            time_start=0,
            agents=state["agents"],
            objects=state["objects"],
            custom_object_metadata=self.__custom_object_metadata,
        )
def save(self, file_name=None):
"""
Save the session using the current state as the final simulator state. This does not shut down the simulator.
Call done() instead if simulator should be shut down after this
:param file_name: If file_name is not None, the simulator session is saved in the same format as original games
"""
# Add final state to log.
state = self.get_scene_object_locs_and_states()
self.current_episode.final_state = Initialization(
time_start=time.time() - self.start_time,
agents=state["agents"],
objects=state["objects"],
custom_object_metadata=self.__custom_object_metadata,
)
# Save log file
super().save(file_name=file_name)
def done(self, file_name=None):
"""
Shut down the simulator and save the session with final simulator state; Should be called at end of collection/
replay of an episode
:param file_name: If file_name is not None, the simulator session is saved in the same format as original games
"""
# Add final state to log.
state = self.get_scene_object_locs_and_states()
self.current_episode.final_state = Initialization(
time_start=time.time() - self.start_time,
agents=state["agents"],
objects=state["objects"],
custom_object_metadata=self.__custom_object_metadata,
)
# End AI2-THOR Unity process
self.controller.stop()
self.controller = None
# Save log file and change current_episode metadata in the base
super().done(file_name=file_name)
def __argmin(self, lst):
"""
Return the index of the least element in l
"""
return lst.index(min(lst))
def __get_nearest_object_face_to_position(self, obj, pos):
"""
Examine the AI2-THOR property 'axisAlignedBoundingBox'['cornerPoints'] and return the pose closest to target
pose specified in param pos
:param obj: the object to examine the faces of
:param pos: the target position to get near
"""
coords = ["x", "y", "z"]
if obj["pickupable"]:
# For pickupable objects we don't actually need to examine corner points and doing so sometimes causes
# errors with clones
return obj["position"]
xzy_obj_face = {
c: obj["axisAlignedBoundingBox"]["cornerPoints"][
self.__argmin(
[
np.abs(obj["axisAlignedBoundingBox"]["cornerPoints"][pidx][coords.index(c)] - pos[c])
for pidx in range(len(obj["axisAlignedBoundingBox"]["cornerPoints"]))
]
)
][coords.index(c)]
for c in coords
}
return xzy_obj_face
    def __aim_camera_at_object(self, obj, camera_id):
        """
        Position camera specified by camera_id such that object obj is visible; Used to set target object view for
        TEACh data collection interface
        :param obj: Object to face - an element of the output of get_objects()
        :param camera_id: A valid camera ID
        :return: (nav_point_idx, face_obj_rot) - index of the navigation point the camera was placed
            at, and the (x, z) facing direction used
        """
        nav_point_idx = self.__get_nav_graph_point(obj["position"]["x"], obj["position"]["z"])
        face_obj_rot = self.__get_nav_graph_rot(
            self.navigation_points[nav_point_idx]["x"],
            self.navigation_points[nav_point_idx]["z"],
            obj["position"]["x"],
            obj["position"]["z"],
        )
        # Calculate the angle at which to look at the object to center it.
        # We look from the head height of the agent [https://github.com/allenai/ai2thor/issues/266]
        # Head gaze is the hypotenuse of a right triangle whose legs are the xz (floor) distance to the obj and the
        # difference in gaze versus object height.
        # To get the object 'face' instead of center (which could be out of frame, especially for large objects like
        # drawers and cabinets), we decide the x,z,y position of the obj as the min distance to its corners.
        xzy_obj_face = self.__get_nearest_object_face_to_position(obj, self.navigation_points[nav_point_idx])
        xz_dist = np.sqrt(
            np.power(xzy_obj_face["x"] - self.navigation_points[nav_point_idx]["x"], 2)
            + np.power(xzy_obj_face["z"] - self.navigation_points[nav_point_idx]["z"], 2)
        )
        # 1.8 is the assumed camera/head height (see issue link above)
        y_diff = 1.8 - xzy_obj_face["y"]
        # Guard against division by zero when the camera is directly above/at the object face
        theta = np.arctan(y_diff / xz_dist) * 180.0 / np.pi if not np.isclose(xz_dist, 0) else 0
        action = dict(
            action="UpdateThirdPartyCamera",
            thirdPartyCameraId=camera_id,
            rotation=dict(x=theta, y=self.__get_y_rot_from_xz(face_obj_rot[0], face_obj_rot[1]), z=0),
            position=dict(
                x=self.navigation_points[nav_point_idx]["x"], y=1.8, z=self.navigation_points[nav_point_idx]["z"]
            ),
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", action)
        self.controller.step(action)
        return nav_point_idx, face_obj_rot
    def teleport_agent_to_face_object(self, obj, agent_id, force_face=None, get_closest=True):
        """
        Move agent to a position where object obj is visible
        :param obj: Object to face - an element of the output of get_objects()
        :param agent_id: 0 for Commander and 1 for Driver/ Follower
        :param force_face: Specify a particular target rotation
        :param get_closest: If True the agent is always placed at closest position; if false, nucleus sampling within
        a distance radius around the target object is used
        :return: (success:bool, nav_point_idx, face_obj_rot); nav_point_idx/face_obj_rot are None when no
            candidate navigation point could be found at all
        """
        # Get point and facing direction.
        tried_points = set()
        face_obj_rot = nav_point_idx = None
        # Sample a candidate navigation point and compute the rotation needed to face the object from it.
        # Each candidate is recorded in tried_points so it is excluded from subsequent draws.
        while face_obj_rot is None or (force_face is not None and face_obj_rot != force_face):
            nav_point_idx = self.__get_nav_graph_point(
                obj["position"]["x"], obj["position"]["z"], exclude_points=tried_points, get_closest=get_closest
            )
            if nav_point_idx is None:
                # All navigation points exhausted without finding a usable one.
                return False, None, None
            face_obj_rot = self.__get_nav_graph_rot(
                self.navigation_points[nav_point_idx]["x"],
                self.navigation_points[nav_point_idx]["z"],
                obj["position"]["x"],
                obj["position"]["z"],
            )
            tried_points.add(nav_point_idx)
            # NOTE(review): this returns after the FIRST candidate whose rotation mismatches force_face,
            # so the while loop effectively runs a single pass - confirm the early exit is intended.
            if force_face is not None and force_face != face_obj_rot:
                return False, nav_point_idx, face_obj_rot
        # Teleport
        agent_pose = (
            self.controller.last_event.events[agent_id].metadata["agent"]
            if self.commander_embodied
            else self.controller.last_event.metadata["agent"]
        )
        # Keep the agent's current x/z body tilt and height; only position, yaw, and horizon are set.
        action = dict(
            action="Teleport",
            agentId=agent_id,
            rotation=dict(
                x=agent_pose["rotation"]["x"],
                y=self.__get_y_rot_from_xz(face_obj_rot[0], face_obj_rot[1]),
                z=agent_pose["rotation"]["z"],
            ),
            position=dict(
                x=self.navigation_points[nav_point_idx]["x"],
                y=agent_pose["position"]["y"],
                z=self.navigation_points[nav_point_idx]["z"],
            ),
            horizon=0,
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", action)
        event = self.controller.step(action)
        if not event.metadata["lastActionSuccess"]:
            return False, nav_point_idx, face_obj_rot
        return True, nav_point_idx, face_obj_rot
def obj_dist_to_nearest_agent(self, obj):
"""
Return Euclidean distance between a given object and the nearest agent in the sim.
"""
if self.commander_embodied:
# For immobile commander, only check what object is closest to driver.
events = [self.controller.last_event.events[0]]
else:
events = [self.controller.last_event]
ds = [
np.linalg.norm(
[
obj["position"]["x"] - e.metadata["agent"]["position"]["x"],
obj["position"]["y"] - e.metadata["agent"]["position"]["y"],
obj["position"]["z"] - e.metadata["agent"]["position"]["z"],
]
)
for e in events
]
return min(ds)
def __agent_dist_to_agent(self, agent_id_a, agent_id_b):
"""
Return Euclidean distance between two agents in the sim.
"""
a_agent_pos = self.controller.last_event.events[agent_id_a].metadata["agent"]["position"]
b_agent_pos = self.controller.last_event.events[agent_id_b].metadata["agent"]["position"]
return np.linalg.norm([a_agent_pos[c] - b_agent_pos[c] for c in ["x", "y", "z"]])
def check_episode_preconditions(self, task):
"""
Check whether the current simulator state is one in which the input task can be completed
:param task: Instance of Task_THOR; task to be checked
"""
return task.check_episode_preconditions(self, self.get_objects(self.controller.last_event))
def check_episode_progress(self, task):
"""
Check completion status of input task given the current simulator state
:param task: Instance of Task_THOR; task to be checked
:return: (task_desc:str, success:bool, subgoal_status:list)
Each element of subgoal_status is a dict with keys 'success':bool, 'description':str and 'steps':list
Each element of subgoal_status[idx]['steps'] is a dict with keys 'success':bool, 'objectId':str,
'objectType':str, 'desc':str
"""
progress_check_output = task.check_episode_progress(self.get_objects(self.controller.last_event), self)
return (
progress_check_output["description"],
progress_check_output["success"],
progress_check_output["subgoals"],
progress_check_output["goal_conditions_total"],
progress_check_output["goal_conditions_satisfied"],
)
def __get_nearest_object_matching_search_str(self, query, exclude_inventory=False):
"""
Obtain the nearest object to the commander OR driver matching the given search string.
:param query: the search string to check against AI2-THOR objectType of objects (uses fuzzy matching)
:param exclude_inventory: if True, don't include inventory objects as candidates (e.g., nothing held will return)
"""
closest_obj = closest_str_ratio = closet_obj_d_to_agent = None
if self.commander_embodied:
le = self.controller.last_event.events[0]
inv_objs = self.get_inventory_objects(self.controller.last_event.events[0])
inv_objs.extend(self.get_inventory_objects(self.controller.last_event.events[1]))
else:
le = self.controller.last_event
inv_objs = self.get_inventory_objects(le)
inv_obj_ids = [o["objectId"] for o in inv_objs]
for obj in le.metadata["objects"]:
if exclude_inventory and obj["objectId"] in inv_obj_ids:
logger.info("%s in inv; skipping" % obj["objectId"])
continue
str_ratio = fuzz.ratio(obj["objectType"], query)
if (
str_ratio > 0
and
# Closer string match or equal string match but closer to agent
(
closest_obj is None
or str_ratio > closest_str_ratio
or
# Physically closer to closest agent.
(str_ratio == closest_str_ratio and self.obj_dist_to_nearest_agent(obj) < closet_obj_d_to_agent)
)
):
closest_obj = obj
closest_str_ratio = str_ratio
closet_obj_d_to_agent = self.obj_dist_to_nearest_agent(obj)
return closest_obj
def __get_random_object_matching_search_str(self, query, exclude_inventory=False):
"""
Obtain a random object to the commander OR driver matching the given search string.
:param query: the search string to check against AI2-THOR objectType of objects (uses fuzzy matching)
:param exclude_inventory: if True, don't include inventory objects as candidates (e.g., nothing held will return)
"""
if self.commander_embodied:
le = self.controller.last_event.events[0]
inv_objs = self.get_inventory_objects(self.controller.last_event.events[0])
inv_objs.extend(self.get_inventory_objects(self.controller.last_event.events[1]))
else:
le = self.controller.last_event
inv_objs = self.get_inventory_objects(le)
inv_obj_ids = [o["objectId"] for o in inv_objs]
candidate_objects = self.get_objects(le)
if exclude_inventory:
candidate_objects = [obj for obj in candidate_objects if obj["objectId"] not in inv_obj_ids]
str_ratios = [fuzz.ratio(obj["objectType"], query) for obj in candidate_objects]
max_ratio = np.max(str_ratios)
max_ratio_idxs = [idx for idx in range(len(str_ratios)) if np.isclose(max_ratio, str_ratios[idx])]
closest_match_objects = [candidate_objects[idx] for idx in max_ratio_idxs]
return np.random.choice(closest_match_objects)
def get_target_object_seg_mask(self, oid):
"""
Get a numpy array with 1s on oid segmentation mask and 0s elsewhere.
:param oid: ID of object to be highlighted in the mask
"""
r = self.get_hotspots(
agent_id=None, camera_id=self.object_target_camera_idx, object_id=oid, return_full_seg_mask=True
)
return r
    def set_target_object_view(self, oid, search):
        """
        Move target object third party camera to look at specified objectId and returns associated hotspots
        :param oid: ID of object to be shown or None
        :param search: if oid is None, search string to use for fuzzy matching of object type
        :return: False if no matching object was found; otherwise a dict with keys 'success', 'oid', 'shown_oid',
            'view_pos_norm', 'view_rot_norm', 'pos_norm' plus 'view_'-prefixed entries of the hotspot data
        """
        # Exactly one of oid / search may be provided.
        assert oid is None or search is None
        le = self.controller.last_event.events[0] if self.commander_embodied else self.controller.last_event
        if oid is None:  # need to choose an oid via search first
            if self.randomize_object_search:
                obj = self.__get_random_object_matching_search_str(search, exclude_inventory=True)
            else:
                obj = self.__get_nearest_object_matching_search_str(search, exclude_inventory=True)
            if obj is None:
                return False
        else:
            obj = self.__get_object_by_id(le.metadata["objects"], oid)
            if obj is False:
                return False
        # First, teleport the camera to the nearest navigable point to the object of interest.
        if self.navigation_graph is None:
            self.__generate_navigation_graph()
        nav_point_idx, face_obj_rot = self.__aim_camera_at_object(obj, self.object_target_camera_idx)
        # Get hotspots of the object from this vantage point.
        shown_obj_id = obj["objectId"]
        enc_obj_hotspots = self.get_hotspots(
            agent_id=None, camera_id=self.object_target_camera_idx, object_id=obj["objectId"]
        )
        parent_receptacles = self.get_parent_receptacles(obj, self.get_objects(self.controller.last_event))
        # Back off to container if object is fully occluded.
        if len(enc_obj_hotspots["hotspots"]) == 0:
            # First fallback: try each parent receptacle (skipping the floor) until one is visible.
            if parent_receptacles is not None and len(parent_receptacles) > 0:
                logger.warning('no hotspots for obj "%s", so checking parentReceptacles' % obj["objectId"])
                for receptacle_obj in parent_receptacles:
                    if "Floor" in receptacle_obj:  # ignore the floor as a parent since hotspotting it isn't helpful
                        continue
                    logger.info("... trying %s" % receptacle_obj)
                    shown_obj_id = receptacle_obj
                    enc_obj_hotspots = self.get_hotspots(
                        agent_id=None, camera_id=self.object_target_camera_idx, object_id=receptacle_obj
                    )
                    if len(enc_obj_hotspots["hotspots"]) == 0:
                        # Couldn't see receptacle, so recenter camera and get a new frame
                        nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                            le.get_object(receptacle_obj), self.object_target_camera_idx
                        )
                        enc_obj_hotspots = self.get_hotspots(
                            agent_id=None, camera_id=self.object_target_camera_idx, object_id=receptacle_obj
                        )
                        if len(enc_obj_hotspots["hotspots"]) == 0:
                            # Put camera back on target object.
                            nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                obj, self.object_target_camera_idx
                            )
                    if len(enc_obj_hotspots["hotspots"]) > 0:
                        break  # got a hotspot view for this parent
            # Second fallback: probe up to 5 nearby receptacle objects (nn_objs starts with the target itself).
            if len(enc_obj_hotspots["hotspots"]) == 0:
                logger.warning(
                    'no hotspots for obj "%s", and no parentReceptacles hotspots,' % obj["objectId"]
                    + "so getting hotspots for nearest receptacle..."
                )
                nn_objs = [obj["objectId"]]
                while len(nn_objs) < 6:  # try limited number of nearby objects
                    nn_obj = self.__get_object_by_position(
                        le.metadata["objects"], obj["position"], ignore_object_ids=nn_objs
                    )
                    logger.info("... trying %s" % nn_obj["objectId"])
                    if nn_obj["receptacle"]:
                        if "Floor" not in nn_obj["objectId"]:  # ignore the floor as a parent
                            shown_obj_id = nn_obj["objectId"]
                            enc_obj_hotspots = self.get_hotspots(
                                agent_id=None, camera_id=self.object_target_camera_idx, object_id=nn_obj["objectId"]
                            )
                            if len(enc_obj_hotspots["hotspots"]) == 0:
                                # Couldn't see receptacle, so recenter camera and get a new frame
                                nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                    nn_obj, self.object_target_camera_idx
                                )
                                enc_obj_hotspots = self.get_hotspots(
                                    agent_id=None, camera_id=self.object_target_camera_idx, object_id=nn_obj["objectId"]
                                )
                                if len(enc_obj_hotspots["hotspots"]) == 0:
                                    # Put camera back on target object.
                                    nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                        obj, self.object_target_camera_idx
                                    )
                            if len(enc_obj_hotspots["hotspots"]) > 0:
                                break  # got a hotspot view for this candidate receptacle
                    nn_objs.append(nn_obj["objectId"])
            # If no receptacle hotspots can be found at all, just return the frame looking "at" the object.
            if len(enc_obj_hotspots["hotspots"]) == 0:
                logger.warning("no hotspots for parentReceptacles %s" % parent_receptacles)
                shown_obj_id = ""
        # Prep metadata to be sent up for UI.
        obj_view_pos_norm = self.__get_click_normalized_position_from_xz(
            self.navigation_points[nav_point_idx]["x"], self.navigation_points[nav_point_idx]["z"]
        )
        obj_data = {
            "success": True,
            "oid": obj["objectId"],  # the object matching the query
            "shown_oid": shown_obj_id,  # The object whose hotspots are shown
            "view_pos_norm": obj_view_pos_norm,  # Location of the viewing camera on the topdown map
            "view_rot_norm": [face_obj_rot[0], -face_obj_rot[1]],  # flip y from thor coords
            "pos_norm": self.__get_click_normalized_position_from_xz(obj["position"]["x"], obj["position"]["z"]),
        }
        obj_data.update({"view_%s" % k: enc_obj_hotspots[k] for k in enc_obj_hotspots})  # hotspot array and width data
        return obj_data
def encode_image(self, img):
return super().encode_image(img)
def get_parent_receptacles(self, obj, objects):
"""
Recursively traces custom properties that track where objects were placed to identify receptacles of an object
when AI2-THOR's property parentReceptacles fails
:param obj: The object whose receptacles need to be identified
:param objects: Output of get_objects()
"""
if "parentReceptacles" in obj and obj["parentReceptacles"] is not None:
return obj["parentReceptacles"]
elif "simbotLastParentReceptacle" in obj:
immediate_parent_receptacle = obj["simbotLastParentReceptacle"]
if immediate_parent_receptacle is not None and immediate_parent_receptacle != obj["objectId"]:
# Second clause is to prevent infinite recursion in weird corner cases that should ideally never happen
parent_receptacles = [immediate_parent_receptacle]
immediate_parent_receptacle_obj = self.__get_object_by_id(objects, immediate_parent_receptacle)
if type(immediate_parent_receptacle_obj) == dict:
further_parent_receptacles = self.get_parent_receptacles(immediate_parent_receptacle_obj, objects)
if further_parent_receptacles is not None:
parent_receptacles += further_parent_receptacles
return parent_receptacles
return None
def success(self):
"""
When an episode ends, the parent function of done() will call this to see whether the episode can stop.
"""
return True # with the THOR backend, we can just say go ahead and stop
def __get_agent_poses(self):
"""
Return current poses of agents
"""
if self.controller is None:
return None
if self.commander_embodied:
cmd_xy = self.__get_agent_click_normalized_position(agent_id=0)
cmd_r = self.__get_agent_click_rotation(agent_id=0)
dri_xy = self.__get_agent_click_normalized_position(agent_id=1)
dri_r = self.__get_agent_click_rotation(agent_id=1)
return [(cmd_xy[0], cmd_xy[1], cmd_r[0], cmd_r[1]), (dri_xy[0], dri_xy[1], dri_r[0], dri_r[1])]
else:
e = self.controller.last_event
cmd_xy = self.__get_agent_click_normalized_position(agent_metadata=e.metadata["thirdPartyCameras"][0])
cmd_r = self.__get_agent_click_rotation(agent_metadata=e.metadata["thirdPartyCameras"][0])
dri_xy = self.__get_agent_click_normalized_position()
dri_r = self.__get_agent_click_rotation()
return [(cmd_xy[0], cmd_xy[1], cmd_r[0], cmd_r[1]), (dri_xy[0], dri_xy[1], dri_r[0], dri_r[1])]
def __get_nav_graph_point(self, thor_x, thor_z, exclude_points=None, get_closest=True):
"""
Get the index in the navigation graph nearest to the given x,z coord in AI2-THOR coordinates
:param thor_x: x coordinate on AI2-THOR floor plan
:param thor_z: z coordinate on AI2-THOR floor plan
:param exclude_points: Any navigation graph points that cannot be used
:param get_closest: If false, instead of returning closest navigation graph point, do nucleus sampling around
the coordinate; if True return the closest navigation graph point
"""
if self.navigation_graph is None:
self.__generate_navigation_graph()
t_point = nearest_t_d = None
distances = []
for idx in range(len(self.navigation_points)):
if exclude_points is not None and idx in exclude_points:
distances.append(float("inf"))
continue
d = np.abs(self.navigation_points[idx]["x"] - thor_x) + np.abs(self.navigation_points[idx]["z"] - thor_z)
distances.append(d)
if t_point is None or d < nearest_t_d:
t_point = idx
nearest_t_d = d
if not get_closest: # rather than returning closest point, do nucleus sampling on softmax of 1/d
scores = [np.exp(1.0 / d) for d in distances]
dps = {idx: scores[idx] / sum(scores) for idx in range(len(scores))}
dnps = {}
nucleus_density = 0.1
nucleus_sum = 0
for k, v in sorted(dps.items(), key=lambda item: item[1], reverse=True):
dnps[k] = v if nucleus_sum < nucleus_density or len(dnps) == 0 else 0
nucleus_sum += v
nps = [dnps[idx] for idx in range(len(scores))]
nps = [p / sum(nps) for p in nps]
t_point = np.random.choice(list(range(len(self.navigation_points))), p=nps)
return t_point
def __get_nav_graph_rot(self, thor_x, thor_z, thor_facing_x, thor_facing_z):
"""
Get the cardinal direction to rotate to, to be facing (thor_facing_x, thor_facing_x=z) when standing at
(thor_x, thor_z)
:param thor_x: x Coordinate on Ai2-THOR floor plan where agent is standing
:param thor_z: z Coordinate on Ai2-THOR floor plan where agent is standing
:param thor_facing_x: x Coordinate on Ai2-THOR floor plan where agent is desired to face
:param thor_facing_z: z Coordinate on Ai2-THOR floor plan where agent is desired to face
"""
# Determine target rotation.
if np.abs(thor_x - thor_facing_x) > np.abs(thor_z - thor_facing_z): # Difference is greater in the x direction.
if thor_x - thor_facing_x > 0: # Destination to the x left
t_rot = (-1, 0)
else:
t_rot = (1, 0)
else: # Difference is greater in the z direction
if thor_z - thor_facing_z > 0: # Destination to the z above
t_rot = (0, -1)
else:
t_rot = (0, 1)
return t_rot
    def __generate_navigation_graph(self):
        """
        Generate navigation graph: We construct a directed graph with nodes representing agent
        position and rotation. For every occupiable grid point on the map, we create four nodes for each orientation.
        Orientation nodes at a single occupiable point are connected with directed edges for turns.
        Occupiable positions are connected with directed edges that preserve orientation.
        Side effects: sets self.navigation_graph (a DiGraph) and self.navigation_points (list of position dicts).
        """
        if debug_print_all_sim_steps:
            logger.info("step %s", "GetReachablePositions")
        event = self.controller.step(action="GetReachablePositions")
        p = event.metadata["actionReturn"]
        ng = nx.DiGraph()
        # Unit (x, z) orientations; each graph node is a (point_idx, rot_x, rot_z) tuple.
        rotations = [[1, 0], [-1, 0], [0, 1], [0, -1]]
        for idx in range(len(p)):
            for rx, rz in rotations:
                ng.add_node((idx, rx, rz))
        for idx in range(len(p)):
            # Turn edges: the condition skips orientation pairs that share an axis (identical or
            # antipodal), keeping exactly the 90-degree turns at the same position.
            for irx, irz in rotations:
                for jrx, jrz in rotations:
                    if irx + jrx == 0 or irz + jrz == 0:
                        continue  # antipodal or identical
                    ng.add_edge((idx, irx, irz), (idx, jrx, jrz))
            # Translation edges: connect idx -> jdx with the same orientation when jdx lies exactly one
            # grid step from idx in the direction faced (i.e., a "move ahead").
            for jdx in range(len(p)):
                if idx == jdx:
                    continue
                rx = rz = None
                if np.isclose(p[idx]["z"] - p[jdx]["z"], 0):
                    if np.isclose(p[idx]["x"] - p[jdx]["x"], self.grid_size):
                        rx = -1
                        rz = 0
                    elif np.isclose(p[idx]["x"] - p[jdx]["x"], -self.grid_size):
                        rx = 1
                        rz = 0
                elif np.isclose(p[idx]["x"] - p[jdx]["x"], 0):
                    if np.isclose(p[idx]["z"] - p[jdx]["z"], self.grid_size):
                        rx = 0
                        rz = -1
                    elif np.isclose(p[idx]["z"] - p[jdx]["z"], -self.grid_size):
                        rx = 0
                        rz = 1
                if rx is not None and rz is not None:
                    ng.add_edge((idx, rx, rz), (jdx, rx, rz))
        self.navigation_graph = ng
        self.navigation_points = p
    def __update_custom_property_cooked(self, event):
        """
        Check whether objects are cooked and update custom property. Augments objects marked as cooked according to the
        AI2-THOR property "cooked" by using get_parent_receptacles() to track if any objects were newly placed on a
        hot burner or in an switched on microwave
        :param event: simulator event whose object metadata is inspected
        """
        cur_event_objects = self.get_objects(event)
        # Mark all objects detected by THOR as cooked
        thor_cooked = [obj for obj in cur_event_objects if obj["isCooked"]]
        for obj in thor_cooked:
            self.__update_custom_object_metadata(obj["objectId"], "simbotIsCooked", True)
        # Cookable objects that neither THOR nor the custom metadata considers cooked yet.
        candidate_objs = [
            obj
            for obj in cur_event_objects
            if obj["cookable"] and not obj["isCooked"] and ("simbotIsCooked" not in obj or not obj["simbotIsCooked"])
        ]
        for cur_obj in candidate_objs:
            parent_receptacle_ids = self.get_parent_receptacles(cur_obj, cur_event_objects)
            if parent_receptacle_ids is not None and len(parent_receptacle_ids) > 0:
                parent_receptacle_ids = set(parent_receptacle_ids)
                # Cooked if contained in any switched-on microwave...
                parent_microwaves_on = [
                    obj["isToggled"]
                    for obj in cur_event_objects
                    if obj["objectId"] in parent_receptacle_ids and obj["objectType"] == "Microwave"
                ]
                if np.any(parent_microwaves_on):
                    self.__update_custom_object_metadata(cur_obj["objectId"], "simbotIsCooked", True)
                    continue
                # ... or if resting on any hot stove burner.
                burners = [
                    obj
                    for obj in cur_event_objects
                    if obj["objectId"] in parent_receptacle_ids and obj["objectType"] == "StoveBurner"
                ]
                # Depending on ai2thor version need to check either ObjectTemperature or temperature
                parent_burners_hot = list()
                for burner in burners:
                    if "ObjectTemperature" in burner and "Hot" in burner["ObjectTemperature"]:
                        parent_burners_hot.append(True)
                    elif "temperature" in burner and "Hot" in burner["temperature"]:
                        parent_burners_hot.append(True)
                    else:
                        parent_burners_hot.append(False)
                if np.any(parent_burners_hot):
                    self.__update_custom_object_metadata(cur_obj["objectId"], "simbotIsCooked", True)
    def __update_custom_property_boiled(self, last_event_objects, event):
        """
        Check whether objects are boiled and update custom property. An object is considered boiled if it just got
        cooked in the last time step, and was in a container filled with liquid at this time
        :param last_event_objects: object list from the previous event; may be None on the first step
        :param event: current simulator event
        """
        cur_event_objects = self.get_objects(event)
        # Find objects whose isCooked property flipped after the last action
        just_got_cooked = [
            obj
            for obj in cur_event_objects
            if obj["isCooked"]
            and (
                last_event_objects is None
                or type(self.__get_object_by_id(last_event_objects, obj["objectId"])) != dict
                or not self.__get_object_by_id(last_event_objects, obj["objectId"])["isCooked"]
            )
        ]
        for obj in just_got_cooked:
            parent_receptacles = self.get_parent_receptacles(obj, cur_event_objects)
            if parent_receptacles is not None and last_event_objects is not None:
                for parent_receptacle_id in parent_receptacles:
                    parent_receptacle = self.__get_object_by_id(cur_event_objects, parent_receptacle_id)
                    # Boiled if any containing receptacle holds liquid (THOR flag or the custom water flag).
                    if type(parent_receptacle) == dict and (
                        parent_receptacle["isFilledWithLiquid"]
                        or (
                            "simbotIsFilledWithWater" in parent_receptacle
                            and parent_receptacle["simbotIsFilledWithWater"]
                        )
                    ):
                        self.__update_custom_object_metadata(obj["objectId"], "simbotIsBoiled", True)
                        break
    def __get_oid_at_frame_xy_with_affordance(
        self,
        x,
        y,
        le,
        sim_agent_id,
        candidate_affordance_properties,
        region_backoff=False,
        region_radius=None,
        allow_agent_as_target=False,
    ):
        """
        Identify an object around relative coordinate (x, y) in the egocentric frame that satisfies required affordances
        :param x: Relative coordinate x in [0, 1) at which to find an object from the segmentation frame
        :param y: Relative coordinate y in [0, 1) at which to find an object from the segmentation frame
        :param le: the last event from the simulator
        :param sim_agent_id: 0 for Commander and 1 for Driver/ Follower
        :param candidate_affordance_properties: a list of dictionaries, an object's metadata must match key-value pairs
        in one of these to be returned; see values in self.action_to_affordances for examples
        :param region_backoff: if True and (x, y) gives no oid or an oid lacking the given affordance, do a radial
        search in a region around (x, y) using IOU between the region and objects
        :param region_radius: pixel half-width of the square search region used when region_backoff is True
        :param allow_agent_as_target: if True, allow object ids starting with `agent_` to be returned as valid if
        they're at the EXACT (x, y) click position only; will return oid `agent_[bodypart]` and obj None; False for
        TEACh dataset
        :return: (oid, obj) for the matched object, or (None, None) when nothing matches
        """
        assert not region_backoff or region_radius is not None
        interacted_oid = interacted_obj = None
        # if last event doesn't have a segmentation frame, get one
        if le.instance_segmentation_frame is None:
            if debug_print_all_sim_steps:
                logger.info("step %s", dict(action="Pass", agentId=sim_agent_id, renderObjectImage=True))
            self.controller.step(action="Pass", agentId=sim_agent_id, renderObjectImage=True)
            le = (
                self.controller.last_event.events[sim_agent_id]
                if self.commander_embodied
                else self.controller.last_event
            )
        # Check if we can get a matching object at exactly (x, y)
        instance_segs = np.array(le.instance_segmentation_frame)
        color_to_object_id = le.color_to_object_id
        # Map relative [0, 1) click coordinates to pixel coordinates in the rendered frame.
        pixel_x, pixel_y = int(np.round(x * self.web_window_size)), int(np.round(y * self.web_window_size))
        instance_color_id = tuple(instance_segs[pixel_y, pixel_x])
        xy_match = False
        if instance_color_id in color_to_object_id:
            oid = color_to_object_id[instance_color_id]
            # Agent body parts are only ever valid on an exact pixel hit.
            if allow_agent_as_target and oid[: len("agent_")] == "agent_":
                return oid, None
            if oid in le.instance_detections2D:
                obj = self.__get_object_by_id(self.get_objects(self.controller.last_event), oid)
                if obj:
                    # Accept the object if its metadata fully matches any one candidate affordance dict.
                    for affordance_properties in candidate_affordance_properties:
                        if np.all([k in obj and obj[k] == affordance_properties[k] for k in affordance_properties]):
                            interacted_oid = oid
                            interacted_obj = obj
                            xy_match = True
                            break
        # Do a radial search in a region around (x, y)
        if not xy_match and region_backoff:
            # Count pixels of affordance-matching objects in the interaction region.
            affordance_matching_oid_pixel_counts = {}
            affordance_matching_oid_total_pixels = {}
            affordance_matching_oid_to_obj = {}
            affordance_nonmatching_oids = set()
            for rx in range(max(0, pixel_x - region_radius), min(self.web_window_size, pixel_x + region_radius)):
                for ry in range(max(0, pixel_y - region_radius), min(self.web_window_size, pixel_y + region_radius)):
                    instance_color_id = tuple(instance_segs[ry, rx])
                    if instance_color_id in color_to_object_id:
                        oid = color_to_object_id[instance_color_id]
                        if oid in affordance_nonmatching_oids:  # seen oid, obj metadata does not match affordances
                            continue
                        if oid in affordance_matching_oid_pixel_counts:  # seen oid with matching affordance
                            affordance_matching_oid_pixel_counts[oid] += 1
                        else:  # Unseen oid, so find obj and check affordances
                            if oid in le.instance_detections2D:
                                obj = self.__get_object_by_id(self.get_objects(self.controller.last_event), oid)
                                obj_affordance_match = False
                                if obj:
                                    for affordance_properties in candidate_affordance_properties:
                                        if np.all(
                                            [
                                                k in obj and obj[k] == affordance_properties[k]
                                                for k in affordance_properties
                                            ]
                                        ):
                                            affordance_matching_oid_pixel_counts[oid] = 1
                                            affordance_matching_oid_to_obj[oid] = obj
                                            # Get the total pixel count for this object's mask in the frame.
                                            affordance_matching_oid_total_pixels[oid] = np.sum(
                                                np.all(instance_segs == instance_color_id, axis=2).astype(np.uint8)
                                            )
                                            obj_affordance_match = True
                                            break
                                if not obj_affordance_match:
                                    affordance_nonmatching_oids.add(oid)
            # Tiebreak using IOU
            if len(affordance_matching_oid_pixel_counts) > 0:
                # Ratio of (object pixels inside the region) to (object pixels in the whole frame), which
                # favors objects mostly contained within the clicked region.
                oid_ious = {
                    oid: affordance_matching_oid_pixel_counts[oid] / affordance_matching_oid_total_pixels[oid]
                    for oid in affordance_matching_oid_pixel_counts
                }
                oid_ious_s = sorted(oid_ious.items(), key=lambda k: k[1], reverse=True)
                interacted_oid = oid_ious_s[0][0]
                interacted_obj = affordance_matching_oid_to_obj[interacted_oid]
        return interacted_oid, interacted_obj
def add_interaction(self, interaction, on_oid=None, force=False):
"""
Execute an Interaction - a formatted action - and add it to the current episode
:param interaction: instance of class Interaction defined in dataset.py
:param on_oid: To be used only during replay; allows forcing an action to take place on a specific object
:param force: To be used only during replay; force the action to be successful even if the agent is not near
enough to it
"""
if on_oid is not None:
logger.info("SimulatorTHOR add_interaction invoked with an on_oid; disallowed outside replay scripts")
if self.controller is None:
message = "Simulator was not initialized. Possible resolution: Start new episode."
self.logger.warning(message)
raise Exception(message)
sim_agent_id = interaction.agent_id if self.commander_embodied else 0
le = self.controller.last_event.events[sim_agent_id] if self.commander_embodied else self.controller.last_event
objects_before_cur_event = copy.deepcopy(self.get_objects(le))
action_definition = self._dataset.definitions.map_actions_id2info[interaction.action.action_id]
action_type = action_definition["action_type"]
action_name = action_definition["action_name"]
pose_delta = action_definition["pose_delta"]
if interaction.action.action_type == "Motion":
if not self.commander_embodied and interaction.agent_id == 0: # Commander third party camera motion
event = self.controller.last_event
current_position = event.metadata["thirdPartyCameras"][0]["position"]
current_rotation = event.metadata["thirdPartyCameras"][0]["rotation"]
new_position = current_position
new_rotation = current_rotation
# Get a rotation unit vector in the direction the camera is facing along the xz-plane.
unit_rot = self.__get_xz_rot_from_y(current_rotation["y"])
# Implement each movement as a function of current rotation direction.
if action_name == "Forward":
new_position["x"] += self.grid_size * unit_rot[0]
new_position["z"] += self.grid_size * unit_rot[1]
elif action_name == "Backward":
new_position["x"] += self.grid_size * -unit_rot[0]
new_position["z"] += self.grid_size * -unit_rot[1]
elif action_name == "Turn Left":
new_rotation["y"] = (new_rotation["y"] - 90) % 360
elif action_name == "Turn Right":
new_rotation["y"] = (new_rotation["y"] + 90) % 360
elif action_name == "Look Up": # strafe
new_position["y"] += self.grid_size
pass
elif action_name == "Look Down": # strafe
new_position["y"] -= self.grid_size
pass
elif action_name == "Pan Left": # strafe
rot_facing_left = self.__get_xz_rot_from_y((current_rotation["y"] - 90) % 360)
new_position["x"] += self.grid_size * rot_facing_left[0]
new_position["z"] += self.grid_size * rot_facing_left[1]
elif action_name == "Pan Right": # strafe
rot_facing_right = self.__get_xz_rot_from_y((current_rotation["y"] + 90) % 360)
new_position["x"] += self.grid_size * rot_facing_right[0]
new_position["z"] += self.grid_size * rot_facing_right[1]
elif action_name == "Stop":
pass
else:
logger.warning("%s: Motion not supported" % action_name)
interaction.action.success = 0
return False, "", None
tpc_ac = dict(
action="UpdateThirdPartyCamera", thirdPartyCameraId=0, rotation=new_rotation, position=new_position
)
if debug_print_all_sim_steps:
logger.info("step %s", tpc_ac)
self.controller.step(tpc_ac)
event = self.controller.last_event
super().add_interaction(interaction)
if event.metadata["lastActionSuccess"]:
interaction.action.success = 1
return True, "", None
else:
interaction.action.success = 0
return (
event.metadata["lastActionSuccess"],
event.metadata["errorMessage"],
self.__thor_error_to_help_message(event.metadata["errorMessage"]),
)
else: # Agent motion
# Note on events returned with multiagent: accessing e metadata directly keys into the event
# corresponding to the agent who just took the action, so logic like that below does not need any
# special hooks for specifying the agent id.
if action_name == "Forward":
ac = dict(action="MoveAhead", agentId=sim_agent_id, moveMagnitude=pose_delta[0], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Backward":
ac = dict(action="MoveBack", agentId=sim_agent_id, moveMagnitude=-pose_delta[0], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Look Up":
ac = dict(action="LookUp", agentId=sim_agent_id, degrees=-pose_delta[4], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Look Down":
ac = dict(action="LookDown", agentId=sim_agent_id, degrees=pose_delta[4], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Turn Left":
ac = dict(action="RotateLeft", agentId=sim_agent_id, degrees=pose_delta[5], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Turn Right":
ac = dict(action="RotateRight", agentId=sim_agent_id, degrees=-pose_delta[5], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Pan Left": # strafe left
ac = dict(action="MoveLeft", agentId=sim_agent_id, forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Pan Right": # strafe right
ac = dict(action="MoveRight", agentId=sim_agent_id, forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
elif action_name == "Stop": # do nothing
ac = dict(action="Pass", agentId=sim_agent_id)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
e = self.controller.step(ac)
else:
logger.warning("%s: Motion not supported" % action_name)
interaction.action.success = 0
return False, "", None
# Pose returned should be the one for the agent who just took an action based on behavior of event
# returns.
interaction.action.pose = self.get_current_pose(agent_id=sim_agent_id)
super().add_interaction(interaction)
# Return action success data.
if e.metadata["lastActionSuccess"]:
interaction.action.success = 1
return True, "", None
else:
interaction.action.success = 0
return (
e.metadata["lastActionSuccess"],
e.metadata["errorMessage"],
self.__thor_error_to_help_message(e.metadata["errorMessage"]),
)
elif action_type == "MapGoal":
if action_name == "Navigation":
# Get the latest reachable positions for this scene and build navigation graph.
if self.navigation_graph is None:
self.__generate_navigation_graph()
# Determine target grid cell based on click (x, y).
graph_constrained = True # whether to abide by navigation graph.
if self.commander_embodied:
agent_data = self.controller.last_event.events[sim_agent_id].metadata["agent"]
else:
if interaction.agent_id == 0: # it's the floating camera
graph_constrained = False # camera can fly over/through anything
agent_data = self.controller.last_event.metadata["thirdPartyCameras"][0]
else: # it's the driver robot agent
agent_data = self.controller.last_event.metadata["agent"]
# Topdown camera mapping derived from
# https://github.com/allenai/ai2thor/issues/445#issuecomment-713916052
# z is flipped from top-to-bottom y of UI, so 1 - y = z
sx, sz = interaction.action.start_x, (1 - interaction.action.start_y)
tx, tz = self.topdown_lower_left_xz + 2 * self.topdown_cam_orth_size * np.array((sx, sz))
t_face_x, t_face_z = self.topdown_lower_left_xz + 2 * self.topdown_cam_orth_size * np.array(
(interaction.action.end_x, (1 - interaction.action.end_y))
)
t_rot = self.__get_nav_graph_rot(tx, tz, t_face_x, t_face_z)
s_point = nearest_s_d = None
if graph_constrained: # Only need to find start graph node if graph constrained.
t_point = self.__get_nav_graph_point(tx, tz)
for idx in range(len(self.navigation_points)):
d = np.abs(self.navigation_points[idx]["x"] - agent_data["position"]["x"]) + np.abs(
self.navigation_points[idx]["z"] - agent_data["position"]["z"]
)
if s_point is None or d < nearest_s_d:
s_point = idx
nearest_s_d = d
else:
t_point = None
# Determine current rotation.
s_rot = self.__get_xz_rot_from_y(agent_data["rotation"]["y"])
if s_rot is None:
msg = "%.4f source rotation failed to align" % agent_data["rotation"]["y"]
logger.info(msg)
interaction.action.success = 0
return False, msg
# Build shortest path and unpack actions needed to execute it.
action_sequence = []
lrots = [(-1, 0, 0, -1), (0, -1, 1, 0), (1, 0, 0, 1), (0, 1, -1, 0)]
rrots = [(0, 1, 1, 0), (1, 0, 0, -1), (0, -1, -1, 0), (-1, 0, 0, 1)]
if graph_constrained:
node_path = nx.shortest_path(
self.navigation_graph, (s_point, s_rot[0], s_rot[1]), (t_point, t_rot[0], t_rot[1])
)
# Decode action sequence from graph path.
for idx in range(len(node_path) - 1):
# Determine action to get from node idx to node idx + 1.
if node_path[idx][0] != node_path[idx + 1][0]: # moving forward to a new node.
action_sequence.append("forward") # use web UI names to facilitate feedback thru it.
else:
rot_trans = (
node_path[idx][1],
node_path[idx][2],
node_path[idx + 1][1],
node_path[idx + 1][2],
)
if rot_trans in lrots: # rotate left
action_sequence.append("turn_left")
elif rot_trans in rrots: # rotate right
action_sequence.append("turn_right")
else:
msg = "could not determine action from points:", node_path[idx], node_path[idx + 1]
logger.info(msg)
interaction.action.success = 0
return False, msg
else:
# Create action sequence directly from source and target world coordinates.
# Without graph constraints, the camera will fly in fixed orientation to its destination (strafing)
# and then orient to target orientation once at the destination.
cx = agent_data["position"]["x"]
cz = agent_data["position"]["z"]
while tx - cx > self.grid_size: # target is x right
action_sequence.append(
"forward"
if s_rot[0] == 1
else "backward"
if s_rot[0] == -1
else "pan_left"
if s_rot[1] == -1
else "pan_right"
)
cx += self.grid_size
while cx - tx > self.grid_size: # target is x left
action_sequence.append(
"forward"
if s_rot[0] == -1
else "backward"
if s_rot[0] == 1
else "pan_left"
if s_rot[1] == 1
else "pan_right"
)
cx -= self.grid_size
while tz - cz > self.grid_size: # target is z right
action_sequence.append(
"forward"
if s_rot[1] == 1
else "backward"
if s_rot[1] == -1
else "pan_left"
if s_rot[0] == 1
else "pan_right"
)
cz += self.grid_size
while cz - tz > self.grid_size: # target is z left
action_sequence.append(
"forward"
if s_rot[1] == -1
else "backward"
if s_rot[1] == 1
else "pan_left"
if s_rot[0] == -1
else "pan_right"
)
cz -= self.grid_size
if s_rot != t_rot:
rot_trans = (s_rot[0], s_rot[1], t_rot[0], t_rot[1])
if rot_trans in lrots: # just need to rotate left
action_sequence.append("turn_left")
elif rot_trans in rrots: # just need to rotate right
action_sequence.append("turn_right")
else: # need to turn around
action_sequence.extend(["turn_left", "turn_left"])
else:
msg = "%s: NavigationGoal not supported" % action_name
logger.info(msg)
interaction.action.success = 0
return False, msg
super().add_interaction(interaction) # log successful nav action sequence initiated.
# Create and return error and message structure.
interaction.action.success = 1
return True, action_sequence
# Take the specified action on the target object at (x, y) on the screen.
elif action_type == "ObjectInteraction":
interacted_oid = None
x, y = interaction.action.x, interaction.action.y
msg = action = event = None
# This is a list of possible dictionaries of affordance a valid target object for a manipulation needs to
# have and is populated by the actual action we are attempting
candidate_affordance_properties = list()
# check if agent is holding knife in hand
inventory_objects_before_action = self.get_inventory_objects(le)
handoff = False # whether we should bypass normal action taking and do a handoff instead
if action_name == "Pickup":
action = dict(action="PickupObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Pickup"]
elif action_name == "Place":
# Check whether holding anything.
if len(inventory_objects_before_action) == 0:
event = None
msg = "%s: ObjectInteraction only supported when holding an object" % action_name
else:
# Check whether the click position is the other agent, in which case we instead do a handoff.
if (
self.commander_embodied
and len(self.get_inventory_objects(self.controller.last_event.events[(sim_agent_id + 1) % 2]))
== 0
):
interacted_oid, _ = self.__get_oid_at_frame_xy_with_affordance(
x, y, le, sim_agent_id, {}, allow_agent_as_target=True
)
if interacted_oid is not None and "agent_" in interacted_oid:
# Check that agent target is close enough for a handoff.
if (
self.__agent_dist_to_agent(sim_agent_id, (sim_agent_id + 1) % 2)
<= self.visibility_distance
):
# Place the held object on the floor so other agent can pick it up.
floor_place = dict(
action="PutObject", objectId=self.floor_oid, agentId=sim_agent_id, forceAction=True
)
if debug_print_all_sim_steps:
logger.info("step %s", floor_place)
drop_e = self.controller.step(floor_place)
if drop_e.metadata["lastActionSuccess"]:
handoff = True
action = dict(
action="PickupObject", agentId=(sim_agent_id + 1) % 2, forceAction=True
)
interacted_oid = inventory_objects_before_action[0]["objectId"]
else:
msg = "You are unable to hand off the object to your partner."
else:
msg = "Your partner is too far away for a handoff."
if not handoff:
action = dict(action="PutObject", agentId=sim_agent_id, forceAction=True, placeStationary=True)
candidate_affordance_properties = self.action_to_affordances["Place"]
elif action_name == "Open":
action = dict(action="OpenObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Open"]
elif action_name == "Close":
action = dict(action="CloseObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Close"]
elif action_name == "ToggleOn":
action = dict(action="ToggleObjectOn", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["ToggleOn"]
elif action_name == "ToggleOff":
action = dict(action="ToggleObjectOff", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["ToggleOff"]
elif action_name == "Slice":
if (
len(inventory_objects_before_action) == 0
or "Knife" not in inventory_objects_before_action[0]["objectType"]
):
event = None
msg = "%s: ObjectInteraction only supported for held object Knife" % action_name
else:
action = dict(action="SliceObject", agentId=sim_agent_id, x=x, y=y)
candidate_affordance_properties = self.action_to_affordances["Slice"]
elif action_name == "Dirty":
action = dict(action="DirtyObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Dirty"]
elif action_name == "Clean":
action = dict(action="CleanObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Clean"]
elif action_name == "Fill":
action = dict(action="FillObjectWithLiquid", agentId=sim_agent_id, fillLiquid="water")
candidate_affordance_properties = self.action_to_affordances["Fill"]
elif action_name == "Empty":
action = dict(action="EmptyLiquidFromObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Empty"]
elif action_name == "Pour":
if len(inventory_objects_before_action) == 0:
event = None
msg = "%s: ObjectInteraction only supported for held object filled with liquid" % action_name
else:
held_obj = self.__get_object_by_id(
self.get_objects(self.controller.last_event), inventory_objects_before_action[0]["objectId"]
)
if not held_obj["isFilledWithLiquid"]:
event = None
msg = "%s: ObjectInteraction only supported for held object filled with liquid" % action_name
else:
fillLiquid = (
"coffee"
if "simbotIsFilledWithCoffee" in held_obj and held_obj["simbotIsFilledWithCoffee"]
else "water"
)
action = dict(action="FillObjectWithLiquid", agentId=sim_agent_id, fillLiquid=fillLiquid)
candidate_affordance_properties = self.action_to_affordances["Pour"]
elif action_name == "Break":
action = dict(action="BreakObject", agentId=sim_agent_id)
candidate_affordance_properties = self.action_to_affordances["Break"]
else:
event = None
msg = "%s: ObjectInteraction not supported" % action_name
if action is not None: # action was recognized and dict is prepped, so get oid for interaction and step
# Get interaction oid and associated object from the segmentation mask.
if handoff:
interacted_obj = None
else:
interacted_oid, interacted_obj = self.__get_oid_at_frame_xy_with_affordance(
x,
y,
le,
sim_agent_id,
candidate_affordance_properties,
region_backoff=True,
region_radius=self.hotspot_pixel_width,
)
if interacted_oid is not None:
action["objectId"] = interacted_oid
if not interacted_obj and not handoff:
msg = "Could not retrieve object metadata for '%s'" % interacted_oid
# Need to do a manual visibilityDistance check because we're using forceAction=True to cause
# put into any receptacle regardless of metadata constraint (e.g., no sponge in microwave).
raycast_action = dict(action="GetCoordinateFromRaycast", x=x, y=y, agentId=sim_agent_id)
if debug_print_all_sim_steps:
logger.info("step %s", raycast_action)
raycast_e = self.controller.step(raycast_action)
clicked_xyz = raycast_e.metadata["actionReturn"]
if (
np.linalg.norm([clicked_xyz[c] - le.metadata["agent"]["position"][c] for c in ["x", "y", "z"]])
> self.visibility_distance
):
msg = "%s is too far away to be interacted with" % interacted_oid
del action["objectId"] # don't take the action because the obj is too far away.
# Override objectId if specified
if on_oid is not None:
action["objectId"] = on_oid
if force:
action["forceAction"] = True
# Actually take the simulator step.
if "objectId" in action:
# If PutObject, add back in (x, y) so raycast is used to inform final position on target.
if action["action"] == "PutObject":
action["x"] = x
action["y"] = y
action["putNearXY"] = True
ac = {k: action[k] for k in action if k != "objectId"}
if debug_print_all_sim_steps:
logger.info("step %s", ac)
event = self.controller.step(ac)
# If we're about to slice an object held by the other agent, cancel the action.
# If slice happens with a held object, THOR doesn't de-register inventory.
# We tried DropHandObject, but then the slices scatter around the robot base and trap it.
elif (
self.commander_embodied
and action["action"] == "SliceObject"
and action["objectId"]
in [
obj["objectId"]
for obj in self.get_inventory_objects(
self.controller.last_event.events[(sim_agent_id + 1) % 2]
)
]
):
msg = "You cannot slice something while your partner is holding it."
# Else, just take the action we prepared already.
else:
if debug_print_all_sim_steps:
logger.info("step %s", action)
event = self.controller.step(action)
# If it is a pour action empty the inventory object
if action_name == "Pour":
interacted_obj = self.__get_object_by_id(
self.get_objects(self.controller.last_event), action["objectId"]
)
if event.metadata["lastActionSuccess"] or interacted_obj["objectType"] in [
"Sink",
"SinkBasin",
"Bathtub",
"BathtubBasin",
]:
held_obj = self.__get_object_by_id(
self.get_objects(self.controller.last_event),
self.get_inventory_objects(self.controller.last_event)[0]["objectId"],
)
empty_action = dict(
action="EmptyLiquidFromObject", agentId=sim_agent_id, objectId=held_obj["objectId"]
)
if debug_print_all_sim_steps:
logger.info("step %s", empty_action)
event = self.controller.step(empty_action)
if event.metadata["lastActionSuccess"]:
self.__update_custom_object_metadata(
held_obj["objectId"], "simbotIsFilledWithCoffee", False
)
# Set custom message for Pickup action on success.
# Note: action taken is actually the pickup by the partner agent on a handoff
if action_name == "Pickup" or (handoff and action_name == "Place"):
inventory_objects = self.get_inventory_objects(event)
if event is not None and event.metadata["lastActionSuccess"] and len(inventory_objects) > 0:
msg = "Picked up %s" % inventory_objects[0]["objectType"]
# Update parent/child relationships in inventory.
for obj in inventory_objects:
self.__update_custom_object_metadata(obj["objectId"], "simbotPickedUp", 1)
if "simbotLastParentReceptacle" in self.__custom_object_metadata[obj["objectId"]]:
parent_receptacle = self.__custom_object_metadata[obj["objectId"]][
"simbotLastParentReceptacle"
]
self.__delete_from_custom_object_metadata_list(
parent_receptacle, "simbotIsReceptacleOf", obj["objectId"]
)
self.__update_custom_object_metadata(
obj["objectId"], "simbotLastParentReceptacle", None
)
elif action_name == "Place":
if event is not None and "objectId" in action and event.metadata["lastActionSuccess"]:
msg = "Placed in %s" % action["objectId"]
for obj in inventory_objects_before_action:
self.__update_custom_object_metadata(
obj["objectId"], "simbotLastParentReceptacle", action["objectId"]
)
self.__append_to_custom_object_metadata_list(
action["objectId"], "simbotIsReceptacleOf", obj["objectId"]
)
elif msg is None:
msg = "Could not find a target object at the specified location"
super().add_interaction(interaction) # log attempt, regardless of success.
# If the event succeeded, do manual simulation updates based on fixed state change rules.
if event is not None and event.metadata["lastActionSuccess"]:
if action["action"] == "SliceObject":
self.__transfer_custom_metadata_on_slicing_cracking(self.get_objects(event))
# Update custom properties in case actions changed things up.
self.__check_per_step_custom_properties(objects_before_cur_event)
# If the event was created and succeeded, return with default msg.
if event is not None and event.metadata["lastActionSuccess"]:
interaction.action.success = 1
# if we successfully interacted, we need to set with what oid
assert interacted_oid is not None or on_oid is not None
interaction.action.oid = interacted_oid
return True, "%s @ (%.2f, %.2f)" % (action_name, x, y) if msg is None else msg, None
else:
interaction.action.success = 0
if event is None: # If the event call never even got made, use custom message.
return False, msg, self.__thor_error_to_help_message(msg)
else: # If the event call failed, use error from AI2THOR and try to generate human-readable system msg
if event.metadata["errorMessage"]:
return (
False,
event.metadata["errorMessage"],
self.__thor_error_to_help_message(event.metadata["errorMessage"]),
)
elif msg is not None:
return False, msg, self.__thor_error_to_help_message(msg)
else:
logger.warning(
"action was taken that failed but produced no custom or system error message: %s", action
)
return False, "", None
elif action_type == "ChangeCamera":
if not self.commander_embodied and interaction.agent_id == 0:
interaction.action.success = 0
return False, "Floating camera cannot perform a ChangeCamera action"
if action_name == "BehindAboveOn":
interaction.action.success = 0
raise NotImplementedError("CameraChange functions are being phased out")
elif action_name == "BehindAboveOff":
interaction.action.success = 0
raise NotImplementedError("CameraChange functions are being phased out")
return # noqa R502
elif action_type == "ProgressCheck":
# ProgressCheck actions are handled via calls made directly from simulator_base.
super().add_interaction(interaction)
return # noqa R502
elif action_type == "Keyboard":
if interaction.agent_id == 0: # Commander
self.logger.debug("*** Commander - Keyboard: %s ***" % interaction.action.utterance)
else:
self.logger.debug("*** Driver - Keyboard: %s ***" % interaction.action.utterance)
super().add_interaction(interaction)
interaction.action.success = 1
return # noqa R502
elif action_type == "Audio":
if interaction.agent_id == 0: # Commander
self.logger.info("*** Commander - Audio: %s ***" % interaction.action.utterance)
else:
self.logger.info("*** Driver - Audio: %s ***" % interaction.action.utterance)
super().add_interaction(interaction)
interaction.action.success = 1
return # noqa R502
else:
logger.warning("%s: Not supported" % interaction.action.action_type)
interaction.action.success = 0
return # noqa R502
def __update_custom_coffee_prop(self, event, objs_before_event=None):
"""
Check whether coffee has been made and update custom property - this uses get_parent_receptacles() for extra
reliability and checks that a container just got placed in a coffee maker and the coffee maker was on
"""
cur_objects = self.get_objects(event)
coffee_maker_ids = set(
[obj["objectId"] for obj in cur_objects if "CoffeeMachine" in obj["objectType"] and obj["isToggled"]]
)
for obj in cur_objects:
prev_filled_with_liquid = False
if objs_before_event is not None:
prev_state = self.__get_object_by_id(objs_before_event, obj["objectId"])
if prev_state:
prev_filled_with_liquid = prev_state["isFilledWithLiquid"]
parent_receptacles = self.get_parent_receptacles(obj, cur_objects)
placed_in_toggled_coffee_maker = False
if parent_receptacles is not None and len(set(parent_receptacles).intersection(coffee_maker_ids)) > 0:
placed_in_toggled_coffee_maker = True
if (
placed_in_toggled_coffee_maker
and obj["canFillWithLiquid"]
and obj["isFilledWithLiquid"]
and not prev_filled_with_liquid
):
self.__update_custom_object_metadata(obj["objectId"], "simbotIsFilledWithCoffee", True)
def __update_sink_interaction_outcomes(self, event):
"""
Force sink behaviour to be deterministic - if a faucet is turned on, clean all objects in the sink and
fill objects that can be filled with water
"""
cur_objects = self.get_objects(event)
sink_objects = list()
for obj in cur_objects:
# Check if any sink basin is filled with water and clean all dirty objects in.
if (
"SinkBasin" in obj["objectType"]
or "Sink" in obj["objectType"]
or "BathtubBasin" in obj["objectType"]
or "Bathtub" in obj["objectType"]
):
# Fetch the faucet near the sink
faucet_obj = self.__get_object_by_position(self.get_objects(event), obj["position"], obj_type="Faucet")
if faucet_obj["isToggled"]:
sink_objects.append(obj)
sink_obj_ids = set([obj["objectId"] for obj in sink_objects])
objs_in_sink = list()
for obj in cur_objects:
parent_receptacles = self.get_parent_receptacles(obj, cur_objects)
if parent_receptacles is not None:
if len(set(parent_receptacles).intersection(sink_obj_ids)) > 0:
objs_in_sink.append(obj)
for child_obj in objs_in_sink:
if child_obj["isDirty"]:
ac = dict(action="CleanObject", objectId=child_obj["objectId"], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
if child_obj["canFillWithLiquid"]:
ac = dict(
action="FillObjectWithLiquid", objectId=child_obj["objectId"], fillLiquid="water", forceAction=True
)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
self.__update_custom_object_metadata(child_obj["objectId"], "simbotIsFilledWithWater", 1)
def __thor_error_to_help_message(self, msg):
"""
Translate AI2-THOR errorMessage field into something that can be shown as prompts to annotators for TEACh data
collection
"""
# Example: "Floor|+00.00|+00.00|+00.00 must have the property CanPickup to be picked up." # noqa: E800
if "CanPickup to be" in msg:
return 'Object "%s" can\'t be picked up.' % msg.split()[0].split("|")[0]
# Example: "Object ID appears to be invalid." # noqa: E800
if ("Object ID" in msg and "invalid" in msg) or "Could not retrieve object" in msg:
return "Could not determine what object was clicked."
# Example "Can't place an object if Agent isn't holding anything # noqa: E800
if "if Agent isn't holding" in msg:
return "Must be holding an object first."
# Example: "Slice: ObjectInteraction only supported for held object Knife" # noqa: E800
if "Slice: ObjectInteraction" in msg:
return "Must be holding a knife."
# Example: "object is not toggleable" # noqa: E800
if "not toggleable" in msg:
return "Object cannot be turned on or off."
# Example: "can't toggle object off if it's already off!" # noqa: E800
if "toggle object off if" in msg:
return "Object is already turned off."
# Example: "can't toggle object on if it's already on!" # noqa: E800
if "toggle object on if" in msg:
return "Object is already turned on."
# Example: "CounterTop|-00.08|+01.15|00.00 is not an Openable object" # noqa: E800
if "is not an Openable object" in msg:
return 'Object "%s" can\'t be opened.' % msg.split()[0].split("|")[0]
# Example: "CounterTop_d7cc8dfe Does not have the CanBeSliced property!" # noqa: E800
if "Does not have the CanBeSliced" in msg:
return "Object cannot be sliced."
# Example: "Object failed to open/close successfully." # noqa: E800
if "failed to open/close" in msg:
return "Something is blocking the object from opening or closing. Move farther away or remove obstruction."
# Example: "StandardIslandHeight is blocking Agent 0 from moving 0" # noqa: E800
if "is blocking" in msg:
return "Something is blocking the robot from moving in that direction."
# Example: "a held item: Book_3d15d052 with something if agent rotates Right 90 degrees" # noqa: E800
if "a held item" in msg and "if agent rotates" in msg:
return "The held item will collide with something if the robot turns that direction."
# Example: "No valid positions to place object found" # noqa: E800
if "No valid positions to place" in msg:
return "The receptacle is too full or too small to contain the held item."
# Example: "This target object is NOT a receptacle!" # noqa: E800
if "NOT a receptacle" in msg:
return "Object is not a receptacle the robot can place items in."
# Example: "Target must be OFF to open!" # noqa: E800
if "OFF to open!" in msg:
return "Object must be turned off before it can be opened."
# Example: "cracked_egg_5(Clone) is not a valid Object Type to be placed in StoveBurner_58b674c4" # noqa: E800
if "not a valid Object Type to be placed" in msg:
return "Held object cannot be placed there."
# Example: "No target found" # noqa: E800
if "No target found" in msg:
return "No reachable object at that location."
# Example: "Knife|-01.70|+01.71|+04.01 is not interactable and (perhaps it is occluded by something)." # noqa: E800
if "it is occluded by something" in msg:
return "An object is blocking you from interacting with the selected object."
# "Could not find a target object at the specified location" # noqa: E800
if "Could not find a target object" in msg:
return "No valid object at that location."
# "another object's collision is blocking held object from being placed" # noqa: E800
if "another object's collision is blocking" in msg:
return "The target area is too cluttered or the held object is already colliding with something."
# "CounterTop|+00.69|+00.95|-02.48 is too far away to be interacted with" # noqa: E800
if "too far away to" in msg:
return "That object is too far away to interact with."
# "Your partner is too far away for a handoff." # noqa: E800
if "too far away for" in msg:
return "Your partner is too far away for a handoff."
# "Place: ObjectInteraction only supported when holding an object" # noqa: E800
if "only supported when holding" in msg:
return "You are not holding an object."
# "Picking up object would cause it to collide and clip into something!" # noqa: E800
if "would cause it to collide and" in msg:
return "Cannot grab object from here without colliding with something."
# "You cannot slice something while your partner is holding it." # noqa: E800
if "cannot slice something while" in msg:
return msg
# If msg couldn't be handled, don't create a readable system message
return None
    def get_hotspots(
        self,
        agent_id,
        hotspot_pixel_width=None,
        action_str=None,
        object_id=None,
        camera_id=None,
        return_full_seg_mask=False,
    ):
        """
        Return a segmentation mask highlighting object(s) in an egocentric image
        :param agent_id: the agent whose image needs to be highlighted; 0 for Commander and 1 for Driver/ Follower
        :param hotspot_pixel_width: Minimum hotspot size
        :param action_str: Highlight objects on which this action can be performed
        :param object_id: Specify object to be highlighted using object ID
        :param camera_id: Generate segmentation mask for a disembodied camera with this ID instead of for an agent
        :param return_full_seg_mask: additional flag to highlight a single object specified by object_id
        :return: when return_full_seg_mask is set, {"mask": binary 0/1 int array}; otherwise
            {"hotspot_width": normalized cell width, "hotspots": list of normalized [x, y] cell origins}
        """
        # Exactly one search mode may be active: by action affordance OR by object id, never both;
        # the agent-camera and disembodied-camera paths are likewise mutually exclusive.
        assert not return_full_seg_mask or object_id is not None
        assert (action_str is None or object_id is None) and not (action_str is not None and object_id is not None)
        assert agent_id is None or camera_id is None
        if hotspot_pixel_width is None:
            hotspot_pixel_width = self.hotspot_pixel_width
        if agent_id is not None:
            # Agent path: pick the event stream for the requested agent. When the commander is not
            # embodied, a single shared event stream (sim agent 0) backs both UIs.
            sim_agent_id = agent_id if self.commander_embodied else 0
            le = (
                self.controller.last_event.events[sim_agent_id]
                if self.commander_embodied
                else self.controller.last_event
            )
            # Take a no-op step to render the object segmentation frame for hotspots.
            if le.instance_segmentation_frame is None:
                ac = dict(action="Pass", agentId=sim_agent_id, renderObjectImage=True)
                if debug_print_all_sim_steps:
                    logger.info("step %s", ac)
                self.controller.step(ac)
                if self.commander_embodied:
                    le = self.controller.last_event.events[sim_agent_id]
                    instance_segs = np.array(le.instance_segmentation_frame)
                elif agent_id == 0:  # commander camera
                    le = self.controller.last_event
                    instance_segs = np.array(le.third_party_instance_segmentation_frames[0])
                else:  # driver camera
                    le = self.controller.last_event
                    instance_segs = np.array(le.instance_segmentation_frame)
            color_to_object_id = le.color_to_object_id
            object_id_to_color = le.object_id_to_color
        else:
            # Disembodied camera path: read the third-party camera's segmentation frame instead.
            le = self.controller.last_event.events[0] if self.commander_embodied else self.controller.last_event
            if le.instance_segmentation_frame is None:
                ac = dict(action="Pass", agentId=0, renderObjectImage=True)
                if debug_print_all_sim_steps:
                    logger.info("step %s", ac)
                self.controller.step(ac)
                le = self.controller.last_event.events[0] if self.commander_embodied else self.controller.last_event
            instance_segs = np.array(le.third_party_instance_segmentation_frames[camera_id])
            color_to_object_id = le.color_to_object_id
            object_id_to_color = le.object_id_to_color
        if return_full_seg_mask:
            # Single-object mode: build a full-resolution binary mask for object_id only.
            mask = np.zeros_like(instance_segs, dtype=np.uint8)
            if object_id in object_id_to_color:
                color = object_id_to_color[object_id]
                # cv2.inRange yields 0/255 per pixel; normalize to a 0/1 int mask.
                mask = cv2.inRange(instance_segs, color, color)
                mask = np.array(mask) / 255
                mask = mask.astype(int)
            return {"mask": mask}
        else:
            # Hotspot mode: sample the segmentation frame at the center of each
            # hotspot_pixel_width x hotspot_pixel_width cell over the whole window.
            hotspots = list()
            for x in range(0, self.web_window_size, hotspot_pixel_width):
                for y in range(0, self.web_window_size, hotspot_pixel_width):
                    instance_color_id = tuple(
                        instance_segs[y + hotspot_pixel_width // 2, x + hotspot_pixel_width // 2]
                    )  # coordinate system is y x
                    is_hotspot = False
                    if instance_color_id in color_to_object_id:  # anecdotally, some colors are missing from this map.
                        oid = color_to_object_id[instance_color_id]
                        obj = le.get_object(oid)
                        if action_str is not None:  # search by action str
                            # The object is a hotspot if it is visible and satisfies any one of the
                            # affordance-property dicts registered for this action.
                            affordance_lists = self.action_to_affordances[action_str]
                            if obj is not None and oid in le.instance_detections2D and obj["visible"]:
                                if np.any(
                                    [
                                        np.all([obj[prop] == affordances[prop] for prop in affordances])
                                        for affordances in affordance_lists
                                    ]
                                ):
                                    is_hotspot = True
                            elif (
                                self.commander_embodied
                                and action_str == "Place"
                                and oid[: len("agent_")] == "agent_"
                                and self.__agent_dist_to_agent(agent_id, (agent_id + 1) % 2) <= self.visibility_distance
                            ):
                                is_hotspot = True  # handoff to partner agent
                        elif object_id is not None:  # search by objectId
                            if obj is not None and obj["objectId"] == object_id:
                                is_hotspot = True
                    if is_hotspot:
                        # Store the cell origin normalized to [0, 1) screen coordinates.
                        hotspots.append([float(x) / self.web_window_size, float(y) / self.web_window_size])
            return {"hotspot_width": float(hotspot_pixel_width) / self.web_window_size, "hotspots": hotspots}
def reset(self):
"""
Reset the simulator to the initial state of self.current_episode
"""
self.__launch_simulator(world=self.world, world_type=self.world_type)
super().reset()
def info(self, include_scenes=False, include_objects=False):
"""
Obtain information about the current task and episode
"""
d = super().info(include_scenes=include_scenes, include_objects=include_objects)
d.update({"world_type": self.world_type, "world": self.world, "agent_poses": self.__get_agent_poses()})
return d
def select_random_world(self, world_type=None):
"""
Select a random AI2-THOR floor plan, constrained to the specified world_type if provided
:param world_type: One of "Kitchen", "Bedroom", "Bathroom" and "Living room" or None; if None all rooms will
be considered
"""
if world_type is None:
world_type = random.choice(["Kitchen", "Living room", "Bedroom", "Bathroom"])
world_type, scene_names = self.__get_available_scene_names(world_type=world_type)
return world_type, random.choice(scene_names)
def get_latest_images(self):
"""
Return current images
:return: {
"ego": Egocentric frame of driver/ follower,
"allo": Egocentric frame fo commander,
"targetobject": Target object view seen by commander
"semantic": Mask used to highlight an object in commander's target object view
}
"""
if self.controller is None:
return {}
# Allows animations by getting newest frame (water, fire, etc.)
ac = dict(action="Pass", agentId=0)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
if self.commander_embodied:
ac = dict(action="Pass", agentId=1)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
if self.commander_embodied:
return {
"ego": self.controller.last_event.events[1].frame,
"allo": self.controller.last_event.events[0].frame,
"targetobject": self.controller.last_event.events[0].third_party_camera_frames[
self.object_target_camera_idx
],
"semantic": self.controller.last_event.events[1].instance_segmentation_frame,
}
else:
return {
"ego": self.controller.last_event.frame,
"allo": self.controller.last_event.third_party_camera_frames[0],
"targetobject": self.controller.last_event.third_party_camera_frames[self.object_target_camera_idx],
"semantic": self.controller.last_event.instance_segmentation_frame,
}
def go_to_pose(self, pose):
"""
Teleport the agent to a desired pose
:param pose: Desired target pose; instance of class Pose defined in dataset.py
"""
if pose is None:
return
ac = dict(
action="TeleportFull",
agentId=1 if self.commander_embodied else 0,
x=pose.x,
y=pose.y,
z=pose.z,
rotation=dict(x=0.0, y=pose.y_rot, z=0.0),
horizon=pose.x_rot,
)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
def get_current_pose(self, agent_id=None):
"""
Return agent's current pose in the form of a Pose object
:param agent_id: 0 for Commander and 1 for Driver/ Follower
"""
event = self.controller.last_event.events[agent_id] if self.commander_embodied else self.controller.last_event
position = event.metadata["agent"]["position"]
rotation = event.metadata["agent"]["rotation"]
horizon = event.metadata["agent"]["cameraHorizon"]
return Pose.from_array([position["z"], -position["x"], position["y"], 0, horizon, -rotation["y"]])
def get_available_scenes(self):
"""
Load list of AI2-THOR floor plans
"""
with importlib.resources.open_text(config_directory, "metadata_ai2thor.json") as f:
data = json.load(f)
return data
def get_available_objects(self):
"""
Load list of AI2-THOR objects
"""
data = None
with importlib.resources.open_text(config_directory, "metadata_google_scanned_objects.json") as f:
data = json.load(f)
return data
def __get_agent_click_normalized_position(self, agent_id=None, agent_metadata=None):
"""
Convert agent position to a visual coordinate on topdown map for TEACh data collection
:param agent_id: 0 for Commander and 1 for Driver/ Follower
:param agent_metadata: Pass agent metadata from a specific simulator event if desired
"""
if agent_id is None:
e = self.controller.last_event
else:
e = self.controller.last_event.events[agent_id]
if agent_metadata is None:
agent_metadata = e.metadata["agent"]
ax = agent_metadata["position"]["x"]
az = agent_metadata["position"]["z"]
return self.__get_click_normalized_position_from_xz(ax, az)
def __get_click_normalized_position_from_xz(self, x, z):
"""
Convert AI2-THOR x, z coordinate to a visual coordinate on topdown map for
TEACh data collection
:param x: x coordinate on AI2-THOR floor plan
:param z: z coordinate on AI2-THOR floor plan
"""
norm_x, norm_z = (np.array((x, z)) - self.topdown_lower_left_xz) / (2 * self.topdown_cam_orth_size)
click_x, click_y = norm_x, (1 - norm_z) # z is flipped from top-to-bottom y of UI, so 1 - y = z
return click_x, click_y
def __get_agent_click_rotation(self, agent_id=None, agent_metadata=None):
"""
Convert agent rotation to a visual view cone on topdown map for TEACh data collection
:param agent_id: 0 for Commander and 1 for Driver/ Follower
:param agent_metadata: Pass agent metadata from a specific simulator event if desired
"""
if agent_metadata is None:
if agent_id is None:
e = self.controller.last_event
else:
e = self.controller.last_event.events[agent_id]
agent_metadata = e.metadata["agent"]
agent_y_rot = agent_metadata["rotation"]["y"]
s_rot = self.__get_xz_rot_from_y(agent_y_rot)
return s_rot[0], -s_rot[1] # y flips z in AI2THOR
def __get_xz_rot_from_y(self, y):
"""
Given degrees y in [0, 359], return the closest cardinal direction as a tuple (x_dir, z_dir) in {-1, 0, 1}^2
:param y: Input angle in [0, 359]
"""
dir_degrees = [270, 180, 90, 0]
closest_degree = dir_degrees[min(range(len(dir_degrees)), key=lambda i: abs(dir_degrees[i] - y))]
if closest_degree == 270: # facing x negative, z neutral
s_rot = (-1, 0)
elif closest_degree == 180: # facing x neutral, z negative
s_rot = (0, -1)
elif closest_degree == 90: # facing x positive, z neutral
s_rot = (1, 0)
else: # facing x neutral, z positive
s_rot = (0, 1)
return s_rot
def __get_y_rot_from_xz(self, x, z):
"""
Given (x, z) norm rotation (e.g., (0, 1)), return the closest degrees in [270, 180, 90, 0] matching.
"""
s_rot_to_dir_degree = {(-1, 0): 270, (0, -1): 180, (1, 0): 90, (0, 1): 0}
return s_rot_to_dir_degree[(x, z)]
def __get_available_scene_names(self, world_type=None):
"""
Return available AI2-THOR floor plans, restricting to world_type if provided
:param world_type: One of "Kitchen", "Bedroom", "Bathroom", "Living room" or None
"""
if world_type is None:
world_type = self.world_type
data = self.get_available_scenes()
scene_names = []
if data is not None:
for group in data["supported_worlds"]:
if group["world_type"] == world_type:
for world in group["worlds"]:
scene_names.append(world["name"])
break # done
return world_type, scene_names
def __get_world_type(self, world):
"""
Given an AI2-THOR floor plan name, return the world type
:param world: input floor plan name
:return: One of "Kitchen", "Bedroom", "Bathroom", "Living room"
"""
world_type = None
try:
number = int(world.split("_")[0][9:]) # Example: floor plan27_physics
room_lo_hi = [("Kitchen", 1, 31), ("Living room", 201, 231), ("Bedroom", 301, 331), ("Bathroom", 401, 431)]
for current_world_type, low, high in room_lo_hi:
if number >= low and number <= high:
world_type = current_world_type
break
except Exception as e:
self.logger.warning(str(e))
return world_type
def __initialize_oracle_view(self):
"""
Set up third party camera for TEACh data collection
"""
pose_robot = self.get_current_pose(agent_id=0)
ac = dict(
action="AddThirdPartyCamera",
rotation=dict(x=30, y=-pose_robot.z_rot, z=0), # Look down at 30 degrees
position=dict(x=-pose_robot.y, y=pose_robot.z + 1, z=pose_robot.x),
fieldOfView=90,
)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
def shutdown_simulator(self):
"""
Stop AI2-THOR Unity process; call when done using simulator
"""
if self.controller is not None:
self.controller.stop()
self.controller = None
    def __launch_simulator(self, world=None, world_type=None):
        """
        Initialize simulator for a new episode

        :param world: AI2-THOR floor plan name to load, or None
        :param world_type: room type used when no world is preset or passed
        """
        time_start = time.time()
        need_new_map = False
        if self.world is None and world is None: # no presets and no args, so choose randomly.
            if world_type in ["Kitchen", "Living room", "Bedroom", "Bathroom"]:
                self.world_type, self.world = self.__get_available_scene_names(world_type=world_type)
                # NOTE(review): __get_available_scene_names returns
                # (world_type, list of scene names), so self.world is a list
                # here rather than a single scene name — confirm intended.
            else:
                self.world_type, self.world = self.select_random_world()
            need_new_map = True
        elif world is not None: # set world/type by args.
            if self.world != world:
                need_new_map = True
            self.world = world
            self.world_type = self.__get_world_type(world)
        # Tear down any previous Unity process before launching a new one.
        if self.controller is not None:
            self.controller.stop()
        init_params = dict(
            base_dir=self.controller_base_dir,
            local_executable_path=self.controller_local_executable_path,
            scene=self.world,
            gridSize=self.grid_size,
            snapToGrid=True,
            visibilityDistance=self.visibility_distance,
            width=self.web_window_size,
            height=self.web_window_size,
            agentCount=2 if self.commander_embodied else 1,
            commit_id=COMMIT_ID,
        )
        logger.info("In SimulatorTHOR.__launch_simulator, creating ai2thor controller (unity process)")
        time_start_controller = time.time()
        if debug_print_all_sim_steps:
            logger.info("init %s", init_params)
        self.controller = TEAChController(**init_params)
        time_end_controller = time.time()
        self.logger.info("Time to create controller: %s sec" % (time_end_controller - time_start_controller))
        # Tilt agents down.
        ac = dict(action="LookDown", agentId=0, degrees=30)
        if debug_print_all_sim_steps:
            logger.info("step %s", ac)
        self.controller.step(ac)
        if self.commander_embodied:
            ac = dict(action="LookDown", agentId=1, degrees=30)
            if debug_print_all_sim_steps:
                logger.info("step %s", ac)
            self.controller.step(ac)
        # Get topdown map camera details used to turn MapGoal (x, y) clicks into (x, z) sim coords.
        if need_new_map:
            # Toggle into map view to read the orthographic camera parameters,
            # then toggle back out (second step with the same action).
            ac = {"action": "ToggleMapView"}
            if debug_print_all_sim_steps:
                logger.info("step %s", ac)
            self.controller.step(ac)
            topdown_cam_position = self.controller.last_event.metadata["cameraPosition"]
            self.topdown_cam_orth_size = self.controller.last_event.metadata["cameraOrthSize"]
            if debug_print_all_sim_steps:
                logger.info("step %s", ac)
            self.controller.step(ac)
            self.topdown_lower_left_xz = (
                np.array((topdown_cam_position["x"], topdown_cam_position["z"])) - self.topdown_cam_orth_size
            )
            self.navigation_graph = None # Clear cached nav graph if any
        self.is_ready = True
        # Initialize 3rd party camera for disembodied commander.
        if not self.commander_embodied:
            self.__initialize_oracle_view()
            self.object_target_camera_idx = 1
        else:
            self.object_target_camera_idx = 0
        # Initialize a 3rd party camera for object targetting (idx 0 if embodied commander, 1 else).
        self.controller.step(
            "AddThirdPartyCamera", rotation=dict(x=0, y=0, z=90), position=dict(x=-1.0, z=-2.0, y=1.0), fieldOfView=90
        )
        # Get floor oid.
        self.floor_oid = self.__get_nearest_object_matching_search_str("Floor")["objectId"]
        time_end = time.time()
        self.logger.info("Time to launch simulator: %s sec" % (time_end - time_start))
        self.logger.debug("Launched world: %s; commander embodied: %s" % (world, str(self.commander_embodied)))
    def randomize_agent_positions(self):
        """
        Randomize the positions of the agents in the current scene.

        :return: (success, message) tuple from set_agent_poses
        """
        ac = dict(action="GetReachablePositions")
        if debug_print_all_sim_steps:
            logger.info("step %s", ac)
        event = self.controller.step(ac)
        all_points = event.metadata["actionReturn"]
        target_points = None
        d = None
        # Re-sample until the two spawn points are more than two grid cells
        # apart in the x-z plane, so the agents do not start on top of each
        # other.
        while d is None or d <= self.grid_size * 2:
            target_points = list(np.random.choice(all_points, size=2, replace=False))
            d = np.linalg.norm([target_points[0][c] - target_points[1][c] for c in ["x", "z"]])
        locs = [
            {
                "position": {
                    "x": target_points[idx]["x"],
                    "y": event.metadata["agent"]["position"]["y"],
                    "z": target_points[idx]["z"],
                },
                "rotation": {
                    "x": event.metadata["agent"]["rotation"]["x"],
                    # Uniformly-random cardinal facing direction.
                    "y": self.__get_y_rot_from_xz(*[(-1, 0), (0, -1), (1, 0), (0, 1)][np.random.randint(0, 4)]),
                    "z": event.metadata["agent"]["rotation"]["z"],
                },
                "cameraHorizon": event.metadata["agent"]["cameraHorizon"],
            }
            for idx in range(2)
        ]
        return self.set_agent_poses(locs)
    def randomize_scene_objects_locations(
        self, n_placement_attempts=5, min_duplicates=1, max_duplicates=4, duplicate_overrides=None
    ):
        """
        Randomize the objects in the current scene.
        Resets the scene, so object states will not survive this call.
        https://ai2thor.allenai.org/ithor/documentation/actions/initialization/#object-position-randomization
        :param n_placement_attempts: how many times to try to place each object (larger just takes longer)
        :param min_duplicates: minimum number of each object type in the original scene to keep
        :param max_duplicates: maximum number of each object type in the original scene to duplicate
        :param duplicate_overrides: dict; override numDuplicatesOfType with key value pairs here for keys in override
        :return: (lastActionSuccess, errorMessage) from the InitialRandomSpawn step
        """
        # Collect the set of object types present in the scene.
        otypes = set()
        for obj in self.get_objects(self.controller.last_event):
            otypes.add(obj["objectType"])
        # For each type, draw a duplicate count; both ends of the range are
        # overridable per type via duplicate_overrides.
        duplicates = [
            {
                "objectType": ot,
                "count": np.random.randint(
                    duplicate_overrides[ot]
                    if duplicate_overrides is not None and ot in duplicate_overrides
                    else min_duplicates,
                    max(max_duplicates, duplicate_overrides[ot] + 1)
                    if duplicate_overrides is not None and ot in duplicate_overrides
                    else max_duplicates,
                ),
            }
            for ot in otypes
        ]
        ac = dict(
            action="InitialRandomSpawn",
            randomSeed=np.random.randint(0, 1000),
            numPlacementAttempts=n_placement_attempts,
            placeStationary=True,
            numDuplicatesOfType=duplicates,
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", ac)
        event = self.controller.step(ac)
        # Make objects unbreakable to prevent shattering plates, etc on Place that uses PutObjectAtPoint.
        breakable_ots = list(set([obj["objectType"] for obj in self.get_objects() if obj["breakable"]]))
        for ot in breakable_ots:
            ac = dict(action="MakeObjectsOfTypeUnbreakable", objectType=ot)
            if debug_print_all_sim_steps:
                logger.info("step %s", ac)
            self.controller.step(ac)
        return event.metadata["lastActionSuccess"], event.metadata["errorMessage"]
    def randomize_scene_objects_states(self):
        """
        Randomize some object states for objects in the current scene.

        :return: (success flag, newline-joined error messages)
        """
        otypes_to_states = {}
        # Per attribute: [state key, action to set True, action to set False
        # (None when the state cannot be reached/undone by an action)].
        randomize_attrs = {
            "toggleable": ["isToggled", "ToggleObjectOn", "ToggleObjectOff"],
            "canFillWithLiquid": ["isFilledWithLiquid", "FillObjectWithLiquid", "EmptyLiquidFromObject"],
            "dirtyable": ["isDirty", "DirtyObject", "CleanObject"],
            "canBeUsedUp": ["isUsedUp", "UseUpObject", None],
        }
        # Cache, per object type, which of the attributes apply to that type.
        for obj in self.get_objects(self.controller.last_event):
            ot = obj["objectType"]
            if ot not in otypes_to_states:
                otypes_to_states[ot] = {attr for attr in randomize_attrs if obj[attr]}
        success = True
        msgs = []
        for obj in self.get_objects(self.controller.last_event):
            for attr in otypes_to_states[obj["objectType"]]:
                # Target state is a fair coin flip; only act when an action
                # exists to reach the drawn state and the object is not
                # already in it.
                state = np.random.random() < 0.5
                if (state and randomize_attrs[attr][1] is not None) or (
                    not state and randomize_attrs[attr][2] is not None
                ):
                    if obj[randomize_attrs[attr][0]] != state:
                        action = dict(
                            action=randomize_attrs[attr][1 if state else 2], objectId=obj["objectId"], forceAction=True
                        )
                        if action["action"] == "FillObjectWithLiquid":
                            action["fillLiquid"] = "water"
                        if action["action"] == "ToggleObjectOn" and obj["breakable"] and obj["isBroken"]:
                            continue # e.g., if a laptop is broken, it cannot be turned on
                        if debug_print_all_sim_steps:
                            logger.info("step %s", action)
                        event = self.controller.step(action)
                        if not event.metadata["lastActionSuccess"]:
                            success = False
                            msgs.append([action, event.metadata["errorMessage"]])
        return success, "\n".join(["%s: %s" % (msgs[idx][0], msgs[idx][1]) for idx in range(len(msgs))])
def get_scene_object_locs_and_states(self):
"""
Return all the object metadata and agent position data from AI2-THOR.
"""
if self.commander_embodied:
a = [
self.controller.last_event.events[0].metadata["agent"],
self.controller.last_event.events[1].metadata["agent"],
]
else:
a = [
self.controller.last_event.metadata["thirdPartyCameras"][0],
self.controller.last_event.metadata["agent"],
]
return {"objects": self.get_objects(self.controller.last_event), "agents": a}
    def restore_scene_object_locs_and_states(self, objs):
        """
        Restore all the object positions, rotations, and states saved.
        :param objs: Object positions and states
        :return: (success flag, newline-joined error messages)
        """
        # Restore instances and locations.
        success = True
        msgs = []
        object_poses = []
        scene_objs = self.get_objects()
        for obj in objs:
            if obj["pickupable"] or obj["moveable"]:
                # Strip the "(Copy)..." suffix to get the base object name.
                obj_name = obj["name"][: obj["name"].index("(") if "(" in obj["name"] else len(obj["name"])]
                # Only restore poses for objects still present in the scene.
                if np.any(
                    [
                        obj_name
                        == s_obj["name"][: s_obj["name"].index("(") if "(" in s_obj["name"] else len(s_obj["name"])]
                        for s_obj in scene_objs
                    ]
                ):
                    object_poses.append(
                        {"objectName": obj_name, "rotation": dict(obj["rotation"]), "position": dict(obj["position"])}
                    )
        action = dict(
            action="SetObjectPoses",
            # cut off "(Copy)..." from object name
            objectPoses=object_poses,
            placeStationary=True,
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", action)
        event = self.controller.step(action)
        if not event.metadata["lastActionSuccess"]:
            success = False
            msgs.append([action["action"], event.metadata["errorMessage"]])
        # Restore object states.
        # Per attribute: [state key, action to set True, action to set False
        # (None when the state is irreversible)].
        restore_attrs = {
            "toggleable": ["isToggled", "ToggleObjectOn", "ToggleObjectOff"],
            "canFillWithLiquid": ["isFilledWithLiquid", "FillObjectWithLiquid", "EmptyLiquidFromObject"],
            "dirtyable": ["isDirty", "DirtyObject", "CleanObject"],
            "openable": ["isOpen", "OpenObject", "CloseObject"],
            "canBeUsedUp": ["isUsedUp", "UseUpObject", None],
            "sliceable": ["isSliced", "SliceObject", None],
            "cookable": ["isCooked", "CookObject", None],
            "breakable": ["isBroken", "BreakObject", None],
        }
        scene_objs = self.get_objects(self.controller.last_event)
        for obj in objs:
            for attr in restore_attrs:
                attr_state, attr_on, attr_off = restore_attrs[attr]
                if obj[attr]:
                    # Match the saved object to a live scene object by id,
                    # falling back to nearest-by-position (type must agree).
                    scene_obj = self.__get_object_by_id(scene_objs, obj["objectId"])
                    if not scene_obj:
                        scene_obj = self.__get_object_by_position(scene_objs, obj["position"])
                        if scene_obj["objectType"] != obj["objectType"]:
                            success = False
                            msgs.append(["restore states", "could not find scene obj for %s" % obj["objectId"]])
                            continue
                    if obj[attr_state] != scene_obj[attr_state]:
                        action = dict(
                            action=attr_on if obj[attr_state] else attr_off,
                            objectId=scene_obj["objectId"],
                            forceAction=True,
                        )
                        if action["action"] is None:
                            # Irreversible state (e.g. sliced/cooked/broken);
                            # no action can restore it — record the failure.
                            success = False
                            msgs.append(
                                [
                                    "restore states",
                                    "unable to take action to remedy object "
                                    + "%s wants state %s=%s while scene obj has state %s"
                                    % (obj["objectId"], attr_state, str(obj[attr_state]), str(scene_obj[attr_state])),
                                ]
                            )
                            continue
                        if action["action"] == "FillObjectWithLiquid":
                            action["fillLiquid"] = "water"
                        if debug_print_all_sim_steps:
                            logger.info("step %s", action)
                        event = self.controller.step(action)
                        if not event.metadata["lastActionSuccess"]:
                            success = False
                            msgs.append([action, event.metadata["errorMessage"]])
        return success, "\n".join(["%s: %s" % (msgs[idx][0], msgs[idx][1]) for idx in range(len(msgs))])
def restore_initial_state(self):
"""
Reset the simulator to initial state of current episode
"""
_, succ = self.load_scene_state(init_state=self.current_episode.initial_state)
return succ
def load_scene_state(self, fn=None, init_state=None):
"""
Reset start time and init state.
:param fn: Filename to load initial state from
:param init_state: Valid initial state to initialize simulator with; must be an instance of class
Initialization in dataset.py
"""
loaded_fn, succ = super().load_scene_state(fn=fn, init_state=init_state)
# Make objects unbreakable to prevent shattering plates, etc on Place that uses PutObjectAtPoint.
breakable_ots = list(set([obj["objectType"] for obj in self.get_objects() if obj["breakable"]]))
for ot in breakable_ots:
ac = dict(action="MakeObjectsOfTypeUnbreakable", objectType=ot)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
return loaded_fn, succ
    def set_init_state(self):
        """
        Set the initial state of the episode to current state.
        """
        # Snapshot the full simulator state first, then delegate the base
        # class's bookkeeping (runs after the snapshot on purpose).
        self.current_episode.initial_state = self.get_current_state()
        super().set_init_state()
    def get_current_state(self):
        """
        Return current state in the form of an instance of class Initialization defined in dataset.py
        """
        self.__add_obj_classes_for_objs() # Add object classes for any newly created objects (eg: slices)
        self.__check_per_step_custom_properties() # Confirm whether any updates to custom properties need to be made
        # from last time step
        state = self.get_scene_object_locs_and_states()
        return Initialization(
            time_start=self.start_time,
            agents=state["agents"],
            objects=state["objects"],
            custom_object_metadata=self.__custom_object_metadata,
        )
def __get_object_by_position(self, m, pos, obj_type=None, ignore_object_ids=None):
"""
Get the object closet to the given position.
:param m: output of get_objects()
:param pos: object x, y, z dict pose
:param obj_type: nearest object of a particular type, or None for any
"""
o = None
d = None
for obj in [_obj for _obj in m if obj_type is None or _obj["objectType"] == obj_type]:
if ignore_object_ids is not None and obj["objectId"] in ignore_object_ids:
continue
obj_pos = obj["position"]
_d = np.linalg.norm([pos["x"] - obj_pos["x"], pos["y"] - obj_pos["y"], pos["z"] - obj_pos["z"]])
if d is None or _d < d:
d = _d
o = obj
return o
def __get_object_by_id(self, m, obj_id):
"""
Get the object matching the id.
:param m: output of get_objects()
:param obj_id: id to match
"""
for obj in m:
if obj["objectId"] == obj_id:
return obj
return False
    def set_agent_poses(self, locs):
        """
        Set agents to specified poses
        :param locs: Desired agent poses
        :return: (success flag, newline-joined error messages)
        """
        success = True
        msgs = []
        if self.commander_embodied:
            # Both agents are physical: teleport each one.
            for idx in range(2):
                action = dict(
                    action="Teleport",
                    agentId=idx,
                    x=locs[idx]["position"]["x"],
                    y=locs[idx]["position"]["y"],
                    z=locs[idx]["position"]["z"],
                    rotation=dict(
                        x=locs[idx]["rotation"]["x"], y=locs[idx]["rotation"]["y"], z=locs[idx]["rotation"]["z"]
                    ),
                    horizon=locs[idx]["cameraHorizon"],
                )
                if debug_print_all_sim_steps:
                    logger.info("step %s", action)
                event = self.controller.step(action)
                if not event.metadata["lastActionSuccess"]:
                    success = False
                    msgs.append([action["action"], event.metadata["errorMessage"]])
        else:
            # Disembodied commander: move its third-party camera (locs[0]),
            # then teleport the single physical agent (locs[1]).
            action = dict(
                action="UpdateThirdPartyCamera",
                thirdPartyCameraId=0,
                rotation=dict(x=locs[0]["rotation"]["x"], y=locs[0]["rotation"]["y"], z=locs[0]["rotation"]["z"]),
                position=dict(x=locs[0]["position"]["x"], y=locs[0]["position"]["y"], z=locs[0]["position"]["z"]),
            )
            if debug_print_all_sim_steps:
                logger.info("step %s", action)
            event = self.controller.step(action)
            if not event.metadata["lastActionSuccess"]:
                success = False
                msgs.append([action["action"], event.metadata["errorMessage"]])
            action = dict(
                action="Teleport",
                x=locs[1]["position"]["x"],
                y=locs[1]["position"]["y"],
                z=locs[1]["position"]["z"],
                rotation=dict(x=locs[1]["rotation"]["x"], y=locs[1]["rotation"]["y"], z=locs[1]["rotation"]["z"]),
                horizon=locs[1]["cameraHorizon"],
            )
            if debug_print_all_sim_steps:
                logger.info("step %s", action)
            event = self.controller.step(action)
            if not event.metadata["lastActionSuccess"]:
                success = False
                msgs.append([action["action"], event.metadata["errorMessage"]])
        return success, "\n".join(["%s: %s" % (msgs[idx][0], msgs[idx][1]) for idx in range(len(msgs))])
| [
"ai2thor.build.build_name",
"numpy.array",
"networkx.shortest_path",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.random.random",
"networkx.DiGraph",
"teach.dataset.initialization.Initialization",
"numpy.max",
"numpy.exp",
"platform.system",
"numpy.round",
"numpy.arctan",
"numpy.abs",
"r... | [((977, 1000), 'teach.logger.create_logger', 'create_logger', (['__name__'], {}), '(__name__)\n', (990, 1000), False, 'from teach.logger import create_logger\n'), ((1129, 1165), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (1140, 1165), False, 'import os\n'), ((1500, 1527), 'ai2thor.build.build_name', 'build_name', (['arch', 'commit_id'], {}), '(arch, commit_id)\n', (1510, 1527), False, 'from ai2thor.build import arch_platform_map, build_name\n'), ((1543, 1590), 'os.path.join', 'os.path.join', (['base_dir', 'release_dir', 'name', 'name'], {}), '(base_dir, release_dir, name, name)\n', (1555, 1590), False, 'import os\n'), ((1655, 1676), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1674, 1676), False, 'import tempfile\n'), ((1696, 1728), 'os.path.join', 'os.path.join', (['tempdir', '"""ai2thor"""'], {}), "(tempdir, 'ai2thor')\n", (1708, 1728), False, 'import os\n'), ((1737, 1773), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (1748, 1773), False, 'import os\n'), ((3692, 3703), 'time.time', 'time.time', ([], {}), '()\n', (3701, 3703), False, 'import time\n'), ((3979, 3990), 'time.time', 'time.time', ([], {}), '()\n', (3988, 3990), False, 'import time\n'), ((4165, 4179), 'teach.settings.get_settings', 'get_settings', ([], {}), '()\n', (4177, 4179), False, 'from teach.settings import get_settings\n'), ((6961, 6972), 'time.time', 'time.time', ([], {}), '()\n', (6970, 6972), False, 'import time\n'), ((7630, 7649), 'copy.deepcopy', 'copy.deepcopy', (['task'], {}), '(task)\n', (7643, 7649), False, 'import copy\n'), ((19914, 20051), 'teach.dataset.initialization.Initialization', 'Initialization', ([], {'time_start': '(0)', 'agents': "state['agents']", 'objects': "state['objects']", 'custom_object_metadata': 'self.__custom_object_metadata'}), "(time_start=0, agents=state['agents'], objects=state[\n 'objects'], 
custom_object_metadata=self.__custom_object_metadata)\n", (19928, 20051), False, 'from teach.dataset.initialization import Initialization\n'), ((28959, 29035), 'numpy.linalg.norm', 'np.linalg.norm', (["[(a_agent_pos[c] - b_agent_pos[c]) for c in ['x', 'y', 'z']]"], {}), "([(a_agent_pos[c] - b_agent_pos[c]) for c in ['x', 'y', 'z']])\n", (28973, 29035), True, 'import numpy as np\n'), ((33398, 33416), 'numpy.max', 'np.max', (['str_ratios'], {}), '(str_ratios)\n', (33404, 33416), True, 'import numpy as np\n'), ((33622, 33661), 'numpy.random.choice', 'np.random.choice', (['closest_match_objects'], {}), '(closest_match_objects)\n', (33638, 33661), True, 'import numpy as np\n'), ((47503, 47515), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (47513, 47515), True, 'import networkx as nx\n'), ((55352, 55392), 'numpy.array', 'np.array', (['le.instance_segmentation_frame'], {}), '(le.instance_segmentation_frame)\n', (55360, 55392), True, 'import numpy as np\n'), ((114730, 114826), 'teach.dataset.pose.Pose.from_array', 'Pose.from_array', (["[position['z'], -position['x'], position['y'], 0, horizon, -rotation['y']]"], {}), "([position['z'], -position['x'], position['y'], 0, horizon, \n -rotation['y']])\n", (114745, 114826), False, 'from teach.dataset.pose import Pose\n'), ((120918, 120929), 'time.time', 'time.time', ([], {}), '()\n', (120927, 120929), False, 'import time\n'), ((122280, 122291), 'time.time', 'time.time', ([], {}), '()\n', (122289, 122291), False, 'import time\n'), ((122465, 122476), 'time.time', 'time.time', ([], {}), '()\n', (122474, 122476), False, 'import time\n'), ((124532, 124543), 'time.time', 'time.time', ([], {}), '()\n', (124541, 124543), False, 'import time\n'), ((137624, 137775), 'teach.dataset.initialization.Initialization', 'Initialization', ([], {'time_start': 'self.start_time', 'agents': "state['agents']", 'objects': "state['objects']", 'custom_object_metadata': 'self.__custom_object_metadata'}), "(time_start=self.start_time, 
agents=state['agents'], objects=\n state['objects'], custom_object_metadata=self.__custom_object_metadata)\n", (137638, 137775), False, 'from teach.dataset.initialization import Initialization\n'), ((1466, 1483), 'platform.system', 'platform.system', ([], {}), '()\n', (1481, 1483), False, 'import platform\n'), ((9965, 9987), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (9974, 9987), False, 'import json\n'), ((28227, 28435), 'numpy.linalg.norm', 'np.linalg.norm', (["[obj['position']['x'] - e.metadata['agent']['position']['x'], obj[\n 'position']['y'] - e.metadata['agent']['position']['y'], obj['position'\n ]['z'] - e.metadata['agent']['position']['z']]"], {}), "([obj['position']['x'] - e.metadata['agent']['position']['x'],\n obj['position']['y'] - e.metadata['agent']['position']['y'], obj[\n 'position']['z'] - e.metadata['agent']['position']['z']])\n", (28241, 28435), True, 'import numpy as np\n'), ((31519, 31555), 'fuzzywuzzy.fuzz.ratio', 'fuzz.ratio', (["obj['objectType']", 'query'], {}), "(obj['objectType'], query)\n", (31529, 31555), False, 'from fuzzywuzzy import fuzz\n'), ((33311, 33347), 'fuzzywuzzy.fuzz.ratio', 'fuzz.ratio', (["obj['objectType']", 'query'], {}), "(obj['objectType'], query)\n", (33321, 33347), False, 'from fuzzywuzzy import fuzz\n'), ((46311, 46341), 'numpy.abs', 'np.abs', (['(thor_x - thor_facing_x)'], {}), '(thor_x - thor_facing_x)\n', (46317, 46341), True, 'import numpy as np\n'), ((46344, 46374), 'numpy.abs', 'np.abs', (['(thor_z - thor_facing_z)'], {}), '(thor_z - thor_facing_z)\n', (46350, 46374), True, 'import numpy as np\n'), ((107885, 107949), 'numpy.array', 'np.array', (['le.third_party_instance_segmentation_frames[camera_id]'], {}), '(le.third_party_instance_segmentation_frames[camera_id])\n', (107893, 107949), True, 'import numpy as np\n'), ((108113, 108157), 'numpy.zeros_like', 'np.zeros_like', (['instance_segs'], {'dtype': 'np.uint8'}), '(instance_segs, dtype=np.uint8)\n', (108126, 108157), True, 'import 
numpy as np\n'), ((111627, 111691), 'random.choice', 'random.choice', (["['Kitchen', 'Living room', 'Bedroom', 'Bathroom']"], {}), "(['Kitchen', 'Living room', 'Bedroom', 'Bathroom'])\n", (111640, 111691), False, 'import random\n'), ((111809, 111835), 'random.choice', 'random.choice', (['scene_names'], {}), '(scene_names)\n', (111822, 111835), False, 'import random\n'), ((115036, 115048), 'json.load', 'json.load', (['f'], {}), '(f)\n', (115045, 115048), False, 'import json\n'), ((115315, 115327), 'json.load', 'json.load', (['f'], {}), '(f)\n', (115324, 115327), False, 'import json\n'), ((125297, 125382), 'numpy.linalg.norm', 'np.linalg.norm', (["[(target_points[0][c] - target_points[1][c]) for c in ['x', 'z']]"], {}), "([(target_points[0][c] - target_points[1][c]) for c in ['x',\n 'z']])\n", (125311, 125382), True, 'import numpy as np\n'), ((138456, 138551), 'numpy.linalg.norm', 'np.linalg.norm', (["[pos['x'] - obj_pos['x'], pos['y'] - obj_pos['y'], pos['z'] - obj_pos['z']]"], {}), "([pos['x'] - obj_pos['x'], pos['y'] - obj_pos['y'], pos['z'] -\n obj_pos['z']])\n", (138470, 138551), True, 'import numpy as np\n'), ((15614, 15671), 'copy.deepcopy', 'copy.deepcopy', (['self.__custom_object_metadata[orig_obj_id]'], {}), '(self.__custom_object_metadata[orig_obj_id])\n', (15627, 15671), False, 'import copy\n'), ((24419, 24494), 'numpy.power', 'np.power', (["(xzy_obj_face['x'] - self.navigation_points[nav_point_idx]['x'])", '(2)'], {}), "(xzy_obj_face['x'] - self.navigation_points[nav_point_idx]['x'], 2)\n", (24427, 24494), True, 'import numpy as np\n'), ((24509, 24584), 'numpy.power', 'np.power', (["(xzy_obj_face['z'] - self.navigation_points[nav_point_idx]['z'])", '(2)'], {}), "(xzy_obj_face['z'] - self.navigation_points[nav_point_idx]['z'], 2)\n", (24517, 24584), True, 'import numpy as np\n'), ((24703, 24725), 'numpy.isclose', 'np.isclose', (['xz_dist', '(0)'], {}), '(xz_dist, 0)\n', (24713, 24725), True, 'import numpy as np\n'), ((33484, 33522), 'numpy.isclose', 
'np.isclose', (['max_ratio', 'str_ratios[idx]'], {}), '(max_ratio, str_ratios[idx])\n', (33494, 33522), True, 'import numpy as np\n'), ((44663, 44712), 'numpy.abs', 'np.abs', (["(self.navigation_points[idx]['x'] - thor_x)"], {}), "(self.navigation_points[idx]['x'] - thor_x)\n", (44669, 44712), True, 'import numpy as np\n'), ((44715, 44764), 'numpy.abs', 'np.abs', (["(self.navigation_points[idx]['z'] - thor_z)"], {}), "(self.navigation_points[idx]['z'] - thor_z)\n", (44721, 44764), True, 'import numpy as np\n'), ((45038, 45053), 'numpy.exp', 'np.exp', (['(1.0 / d)'], {}), '(1.0 / d)\n', (45044, 45053), True, 'import numpy as np\n'), ((48131, 48171), 'numpy.isclose', 'np.isclose', (["(p[idx]['z'] - p[jdx]['z'])", '(0)'], {}), "(p[idx]['z'] - p[jdx]['z'], 0)\n", (48141, 48171), True, 'import numpy as np\n'), ((50419, 50447), 'numpy.any', 'np.any', (['parent_microwaves_on'], {}), '(parent_microwaves_on)\n', (50425, 50447), True, 'import numpy as np\n'), ((51381, 51407), 'numpy.any', 'np.any', (['parent_burners_hot'], {}), '(parent_burners_hot)\n', (51387, 51407), True, 'import numpy as np\n'), ((55475, 55509), 'numpy.round', 'np.round', (['(x * self.web_window_size)'], {}), '(x * self.web_window_size)\n', (55483, 55509), True, 'import numpy as np\n'), ((55516, 55550), 'numpy.round', 'np.round', (['(y * self.web_window_size)'], {}), '(y * self.web_window_size)\n', (55524, 55550), True, 'import numpy as np\n'), ((106852, 106892), 'numpy.array', 'np.array', (['le.instance_segmentation_frame'], {}), '(le.instance_segmentation_frame)\n', (106860, 106892), True, 'import numpy as np\n'), ((108283, 108323), 'cv2.inRange', 'cv2.inRange', (['instance_segs', 'color', 'color'], {}), '(instance_segs, color, color)\n', (108294, 108323), False, 'import cv2\n'), ((116442, 116458), 'numpy.array', 'np.array', (['(x, z)'], {}), '((x, z))\n', (116450, 116458), True, 'import numpy as np\n'), ((123666, 123730), 'numpy.array', 'np.array', (["(topdown_cam_position['x'], 
topdown_cam_position['z'])"], {}), "((topdown_cam_position['x'], topdown_cam_position['z']))\n", (123674, 123730), True, 'import numpy as np\n'), ((125228, 125279), 'numpy.random.choice', 'np.random.choice', (['all_points'], {'size': '(2)', 'replace': '(False)'}), '(all_points, size=2, replace=False)\n', (125244, 125279), True, 'import numpy as np\n'), ((127687, 127713), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (127704, 127713), True, 'import numpy as np\n'), ((20649, 20660), 'time.time', 'time.time', ([], {}), '()\n', (20658, 20660), False, 'import time\n'), ((21399, 21410), 'time.time', 'time.time', ([], {}), '()\n', (21408, 21410), False, 'import time\n'), ((24652, 24679), 'numpy.arctan', 'np.arctan', (['(y_diff / xz_dist)'], {}), '(y_diff / xz_dist)\n', (24661, 24679), True, 'import numpy as np\n'), ((48196, 48249), 'numpy.isclose', 'np.isclose', (["(p[idx]['x'] - p[jdx]['x'])", 'self.grid_size'], {}), "(p[idx]['x'] - p[jdx]['x'], self.grid_size)\n", (48206, 48249), True, 'import numpy as np\n'), ((48478, 48518), 'numpy.isclose', 'np.isclose', (["(p[idx]['x'] - p[jdx]['x'])", '(0)'], {}), "(p[idx]['x'] - p[jdx]['x'], 0)\n", (48488, 48518), True, 'import numpy as np\n'), ((107025, 107081), 'numpy.array', 'np.array', (['le.third_party_instance_segmentation_frames[0]'], {}), '(le.third_party_instance_segmentation_frames[0])\n', (107033, 107081), True, 'import numpy as np\n'), ((107197, 107237), 'numpy.array', 'np.array', (['le.instance_segmentation_frame'], {}), '(le.instance_segmentation_frame)\n', (107205, 107237), True, 'import numpy as np\n'), ((108347, 108361), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (108355, 108361), True, 'import numpy as np\n'), ((129448, 129466), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (129464, 129466), True, 'import numpy as np\n'), ((48339, 48393), 'numpy.isclose', 'np.isclose', (["(p[idx]['x'] - p[jdx]['x'])", '(-self.grid_size)'], {}), "(p[idx]['x'] - 
p[jdx]['x'], -self.grid_size)\n", (48349, 48393), True, 'import numpy as np\n'), ((48543, 48596), 'numpy.isclose', 'np.isclose', (["(p[idx]['z'] - p[jdx]['z'])", 'self.grid_size'], {}), "(p[idx]['z'] - p[jdx]['z'], self.grid_size)\n", (48553, 48596), True, 'import numpy as np\n'), ((56138, 56232), 'numpy.all', 'np.all', (['[(k in obj and obj[k] == affordance_properties[k]) for k in\n affordance_properties]'], {}), '([(k in obj and obj[k] == affordance_properties[k]) for k in\n affordance_properties])\n', (56144, 56232), True, 'import numpy as np\n'), ((71826, 71932), 'networkx.shortest_path', 'nx.shortest_path', (['self.navigation_graph', '(s_point, s_rot[0], s_rot[1])', '(t_point, t_rot[0], t_rot[1])'], {}), '(self.navigation_graph, (s_point, s_rot[0], s_rot[1]), (\n t_point, t_rot[0], t_rot[1]))\n', (71842, 71932), True, 'import networkx as nx\n'), ((48686, 48740), 'numpy.isclose', 'np.isclose', (["(p[idx]['z'] - p[jdx]['z'])", '(-self.grid_size)'], {}), "(p[idx]['z'] - p[jdx]['z'], -self.grid_size)\n", (48696, 48740), True, 'import numpy as np\n'), ((70080, 70098), 'numpy.array', 'np.array', (['(sx, sz)'], {}), '((sx, sz))\n', (70088, 70098), True, 'import numpy as np\n'), ((70198, 70264), 'numpy.array', 'np.array', (['(interaction.action.end_x, 1 - interaction.action.end_y)'], {}), '((interaction.action.end_x, 1 - interaction.action.end_y))\n', (70206, 70264), True, 'import numpy as np\n'), ((70685, 70755), 'numpy.abs', 'np.abs', (["(self.navigation_points[idx]['x'] - agent_data['position']['x'])"], {}), "(self.navigation_points[idx]['x'] - agent_data['position']['x'])\n", (70691, 70755), True, 'import numpy as np\n'), ((70758, 70828), 'numpy.abs', 'np.abs', (["(self.navigation_points[idx]['z'] - agent_data['position']['z'])"], {}), "(self.navigation_points[idx]['z'] - agent_data['position']['z'])\n", (70764, 70828), True, 'import numpy as np\n'), ((85658, 85759), 'numpy.linalg.norm', 'np.linalg.norm', (["[(clicked_xyz[c] - le.metadata['agent']['position'][c]) 
for c in ['x', 'y',\n 'z']]"], {}), "([(clicked_xyz[c] - le.metadata['agent']['position'][c]) for\n c in ['x', 'y', 'z']])\n", (85672, 85759), True, 'import numpy as np\n'), ((125807, 125830), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (125824, 125830), True, 'import numpy as np\n'), ((58061, 58155), 'numpy.all', 'np.all', (['[(k in obj and obj[k] == affordance_properties[k]) for k in\n affordance_properties]'], {}), '([(k in obj and obj[k] == affordance_properties[k]) for k in\n affordance_properties])\n', (58067, 58155), True, 'import numpy as np\n'), ((109493, 109559), 'numpy.all', 'np.all', (['[(obj[prop] == affordances[prop]) for prop in affordances]'], {}), '([(obj[prop] == affordances[prop]) for prop in affordances])\n', (109499, 109559), True, 'import numpy as np\n'), ((58808, 58858), 'numpy.all', 'np.all', (['(instance_segs == instance_color_id)'], {'axis': '(2)'}), '(instance_segs == instance_color_id, axis=2)\n', (58814, 58858), True, 'import numpy as np\n')] |
import numpy as np
from torch.utils.data import Sampler
from data_reader.dataset_v1 import SpoofDatsetSystemID
class CustomSampler(Sampler):
    """Sampler that interleaves genuine (label == 0) and spoofed samples.

    The genuine class is oversampled (tiled and padded) so that both
    classes contribute the same number of indices; the iterator then
    alternates genuine/spoofed pairwise.
    """

    def __init__(self, data_source, shuffle):
        """
        Parameters
        ----------
        data_source : dataset
            Dataset exposing a ``labels`` array (0 = genuine, != 0 = spoofed)
        shuffle : bool
            Shuffle within each class before balancing
        """
        self.df = data_source
        self.shuffle = shuffle

    def getIndices(self):
        """Return [genuine_indices, spoofed_indices] of equal length."""
        digit_indices = [
            np.where(self.df.labels == 0)[0],
            np.where(self.df.labels != 0)[0],
        ]
        num_genu = len(digit_indices[0])
        num_spoofed = len(digit_indices[1])
        if self.shuffle:
            for i in range(len(digit_indices)):
                np.random.shuffle(digit_indices[i])
        # Oversample the genuine class to match the spoofed count:
        # tile whole repetitions, then pad with a leading slice.
        repetition = num_spoofed // num_genu
        digit_indices[0] = np.tile(digit_indices[0], repetition)
        rest_part = num_spoofed % num_genu
        rest = digit_indices[0][:rest_part]
        digit_indices[0] = np.concatenate((digit_indices[0], rest), axis=0)
        return digit_indices

    def __iter__(self):
        digit_indices = self.getIndices()
        assert len(digit_indices[0]) == len(digit_indices[1]), \
            'The amount of genuine and spoofed audios does not match!'
        num_samples = len(digit_indices[0])
        indices = []
        for i in range(num_samples):
            # Alternate genuine / spoofed
            indices += [digit_indices[n][i] for n in range(2)]
        return iter(indices)

    def __len__(self):
        digit_indices = self.getIndices()
        return len(digit_indices[0]) + len(digit_indices[1])
if __name__ == '__main__':
    # Smoke test: build the sampler on a real multi-class spoof dataset and
    # print the first ~30 labels, which should alternate genuine / spoofed.
    data_scp = 'feats/test_samples.scp'
    data_utt2index = 'utt2systemID/test_samples_utt2index8_spectensor1'
    data = SpoofDatsetSystemID(data_scp, data_utt2index, binary_class=False, leave_one_out=False)
    sampler = CustomSampler(data, shuffle=False)
    count = 0
    for i in sampler.__iter__():
        uttid, x, y = data[i]
        print(y)
        if count == 30: break
        count += 1
| [
"numpy.tile",
"numpy.where",
"data_reader.dataset_v1.SpoofDatsetSystemID",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((2176, 2266), 'data_reader.dataset_v1.SpoofDatsetSystemID', 'SpoofDatsetSystemID', (['data_scp', 'data_utt2index'], {'binary_class': '(False)', 'leave_one_out': '(False)'}), '(data_scp, data_utt2index, binary_class=False,\n leave_one_out=False)\n', (2195, 2266), False, 'from data_reader.dataset_v1 import SpoofDatsetSystemID\n'), ((792, 829), 'numpy.tile', 'np.tile', (['digit_indices[0]', 'repetition'], {}), '(digit_indices[0], repetition)\n', (799, 829), True, 'import numpy as np\n'), ((954, 1002), 'numpy.concatenate', 'np.concatenate', (['(digit_indices[0], rest)'], {'axis': '(0)'}), '((digit_indices[0], rest), axis=0)\n', (968, 1002), True, 'import numpy as np\n'), ((334, 363), 'numpy.where', 'np.where', (['(self.df.labels == 0)'], {}), '(self.df.labels == 0)\n', (342, 363), True, 'import numpy as np\n'), ((393, 422), 'numpy.where', 'np.where', (['(self.df.labels != 0)'], {}), '(self.df.labels != 0)\n', (401, 422), True, 'import numpy as np\n'), ((689, 724), 'numpy.random.shuffle', 'np.random.shuffle', (['digit_indices[i]'], {}), '(digit_indices[i])\n', (706, 724), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding=utf-8
"""
Script to sample uncertain user parameters
"""
from __future__ import division
import random as rd
import numpy as np
from scipy.stats import nakagami
def main():
    """Demo: draw samples for every uncertain user parameter and plot
    the resulting histograms (set temperature, air exchange rate, number
    of occupants, electric demand, domestic hot water volume).

    NOTE: nb_samples = 100000 makes this demo slow; it is meant for
    interactive inspection only.
    """
    nb_samples = 100000

    import matplotlib.pyplot as plt

    # Get samples of set temperatures within building
    list_set_temp = calc_set_temp_samples(nb_samples=nb_samples)

    print('List of set temperatures in degree Celsius:')
    print(list_set_temp)
    print()

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_set_temp, bins='auto')
    plt.xlabel('Set temperatures in degree Celsius')
    plt.ylabel('Number of temperatures')
    plt.show()
    plt.close()

    # Create constant user air exchange rates
    list_usr_airx = calc_user_air_ex_rates(nb_samples)

    print('List of user air exchange rates in 1/h:')
    print(list_usr_airx)
    print()

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_usr_airx, bins='auto')
    plt.xlabel('User air exchange rates in 1/h')
    plt.ylabel('Number of values')
    plt.show()
    plt.close()

    method = 'destatis'
    # method = 'equal'

    # Sample number of occupants in apartments:
    list_occ_in_app = calc_sampling_occ_per_app(nb_samples=nb_samples,
                                                method=method)

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_occ_in_app, 5)
    plt.xlabel('Number of occupants per apartment')
    plt.ylabel('Number of values')
    plt.show()
    plt.close()

    # Annual electric demand sampling per apartment (3 persons, SFH)
    list_el_dem = calc_sampling_el_demand_per_apartment(nb_samples=nb_samples,
                                                        nb_persons=3,
                                                        type='sfh')

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_el_dem, bins='auto')
    plt.xlabel('Number of electric energy demands in kWh')
    plt.ylabel('Number of values')
    plt.title('Electric energy demand for\napartment with '
              '3 occupants')
    plt.show()
    plt.close()

    # Electric demand with occupant numbers drawn from the Destatis samples
    list_el_dem_2 = []
    for nb_occ in list_occ_in_app:
        sample_el = \
            calc_sampling_el_demand_per_apartment(nb_samples=1,
                                                  nb_persons=nb_occ,
                                                  type='sfh')[0]
        list_el_dem_2.append(sample_el)

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_el_dem_2, bins='auto')
    plt.xlabel('Number of electric energy demands in kWh')
    plt.ylabel('Number of values')
    plt.title('Electric energy demand for\napartment with '
              'different number of occupants')
    plt.show()
    plt.close()

    # list_dhw = calc_sampling_dhw_per_person(nb_samples=nb_samples)
    #
    # fig = plt.figure()
    # # the histogram of the data
    # plt.hist(list_dhw, bins='auto')
    # plt.xlabel('Hot water volumes per person and day in liters')
    # plt.ylabel('Number of values')
    # plt.show()
    # plt.close()

    # Hot water volume for a fixed apartment size
    nb_persons = 5

    list_dhw_vol_per_app = \
        calc_sampling_dhw_per_apartment(nb_samples=nb_samples,
                                        nb_persons=nb_persons)

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_dhw_vol_per_app, bins='auto')
    plt.xlabel('Hot water volumes per apartment and day in liters')
    plt.ylabel('Number of values')
    plt.title('Hot water volumes per person and day for ' + str(nb_persons)
              + ' person apartment')
    plt.show()
    plt.close()

    # Hot water volume with occupant numbers drawn from the Destatis samples
    list_dhw_per_app_2 = []
    for nb_occ in list_occ_in_app:
        sample_dhw = calc_sampling_dhw_per_apartment(nb_samples=1,
                                                     nb_persons=nb_occ)[0]
        list_dhw_per_app_2.append(sample_dhw)

    fig = plt.figure()
    # the histogram of the data
    plt.hist(list_dhw_per_app_2, bins='auto')
    plt.xlabel('Hot water volumes per apartment and day in liters')
    plt.ylabel('Number of values')
    plt.title('Hot water volumes per person and day for\napartment with '
              'different number of occupants')
    plt.show()
    plt.close()

    # # Create environment
    # # ####################################################################
    #
    # # Create extended environment of pycity_calc
    # year = 2010
    # timestep = 3600  # Timestep in seconds
    # location = (51.529086, 6.944689)  # (latitude, longitute) of Bottrop
    # altitude = 55  # Altitude of Bottrop
    #
    # # Generate timer object
    # timer = time.TimerExtended(timestep=timestep, year=year)
    #
    # # Generate weather object
    # weather = Weather.Weather(timer, useTRY=True, location=location,
    #                           altitude=altitude)
    #
    # # Generate market object
    # market = mark.Market()
    #
    # # Generate co2 emissions object
    # co2em = co2.Emissions(year=year)
    #
    # # Generate environment
    # environment = env.EnvironmentExtended(timer, weather, prices=market,
    #                                       location=location, co2em=co2em)
    #
    # # # Create occupancy profile
    # # #####################################################################
    #
    # num_occ = 3
    #
    # print('Calculate occupancy.\n')
    # # Generate occupancy profile
    # occupancy_obj = occ.Occupancy(environment, number_occupants=num_occ)
    #
    # print('Finished occupancy calculation.\n')
    # # Generate user air exchange rate profiles
    # # #####################################################################
    # list_air_ex_profiles = \
    #     calc_user_air_ex_profiles_factors(nb_samples=
    #                                       nb_samples,
    #                                       occ_profile=occupancy_obj.occupancy,
    #                                       temp_profile=
    #                                       environment.weather.tAmbient,
    #                                       random_gauss=True)
    #
    # list_av_air_ex_rates = []
    #
    # for profile in list_air_ex_profiles:
    #     plt.plot(profile, alpha=0.5)
    #
    #     av_rate = np.mean(profile)
    #
    #     print('Average air exchange rate in 1/h:')
    #     print(av_rate)
    #
    #     list_av_air_ex_rates.append(av_rate)
    #
    # plt.xlabel('Time in hours')
    # plt.ylabel('User air exchange rate in 1/h')
    # plt.show()
    # plt.close()
    #
    # fig2 = plt.figure()
    # # the histogram of the data
    # plt.hist(list_av_air_ex_rates, 50)
    # plt.xlabel('Average user air exchange rate in 1/h')
    # plt.ylabel('Number of air exchange rates')
    # plt.show()
    # plt.close()
def calc_set_temp_samples(nb_samples, mean=20, sdev=2.5):
    """Draw indoor set temperature samples from a normal distribution.

    Parameters
    ----------
    nb_samples : int
        Number of samples to draw
    mean : float, optional
        Mean set temperature in degree Celsius (default: 20)
    sdev : float, optional
        Standard deviation in degree Celsius (default: 2.5)

    Returns
    -------
    np.array (of floats)
        Indoor set temperatures in degree Celsius
    """
    return np.random.normal(loc=mean, scale=sdev, size=nb_samples)
def calc_user_air_ex_rates(nb_samples, min_value=0, max_value=1.2,
                           pdf='nakagami'):
    """
    Calculate array of user air exchange rate samples

    Parameters
    ----------
    nb_samples : int
        Number of samples
    min_value : float, optional
        Minimum user air exchange rate (default: 0)
    max_value : float, optional
        Maximum user air exchange rate (default: 1.2)
    pdf : str, optional
        Probability density function to choose samples (default: 'nakagami')
        Options:
        - 'equal' : Equal distribution between min_value and max_value
        - 'triangle' : Triangular distribution
        - 'nakagami' : Nakagami distribution

    Returns
    -------
    array_usr_inf : np.array (of floats)
        Numpy array holding user infiltration rates in 1/h
    """
    assert pdf in ['equal', 'triangle', 'nakagami'], \
        'Unknown value for pdf input.'

    array_usr_inf = np.zeros(nb_samples)

    if pdf == 'equal':
        # rd.randint requires integer bounds (float bounds raise ValueError
        # on modern Python); sample with a resolution of 1/1000 without
        # mutating the input parameters.
        low = int(round(min_value * 1000))
        high = int(round(max_value * 1000))
        for i in range(nb_samples):
            array_usr_inf[i] = rd.randint(low, high) / 1000
    elif pdf == 'triangle':
        # Mode located at 20 % of the span above the minimum
        mode = min_value + (max_value - min_value) * 0.2
        for i in range(nb_samples):
            array_usr_inf[i] = np.random.triangular(left=min_value,
                                                  right=max_value,
                                                  mode=mode)
    elif pdf == 'nakagami':
        array_usr_inf = nakagami.rvs(0.6, scale=0.4, size=nb_samples)

    return array_usr_inf
# Led to problems within monte-carlo simulation, as extrem air exchange
# rates can lead to unrealistic thermal peak loads within space heating
# profiles
# def calc_user_air_ex_profiles_factors(nb_samples, occ_profile, temp_profile,
# random_gauss=True):
# """
# Calculate set of user air exchange rate profiles. Uses stochastic
# air exchange rate user profile generation, based on user_air_exchange.py.
# Moreover, random rescaling, based on gaussion distribution, can be
# activated
#
# Parameters
# ----------
# nb_samples : int
# Number of samples
# occ_profile : array (of ints)
# Occupancy profile per timestep
# temp_profile : array (of floats)
# Outdoor temperature profile in degree Celsius per timestep
# random_gauss : bool, optional
# Defines, if resulting profile should randomly be rescaled with
# gaussian distribution rescaling factor (default: True)
#
# Returns
# -------
# list_air_ex_profiles : list (of arrays)
# List of air exchange profiles
# """
#
# list_air_ex_profiles = []
#
# for i in range(nb_samples):
#
# air_exch = usair.gen_user_air_ex_rate(occ_profile=occ_profile,
# temp_profile=temp_profile,
# b_type='res',
# inf_rate=None)
#
# if random_gauss:
# rescale_factor = np.random.normal(loc=1, scale=0.25)
# if rescale_factor < 0:
# rescale_factor = 0
# air_exch *= rescale_factor
#
# list_air_ex_profiles.append(air_exch)
#
# return list_air_ex_profiles
def calc_sampling_occ_per_app(nb_samples, method='destatis',
                              min_occ=1, max_occ=5):
    """
    Calculate array of nb. of occupants samples

    Parameters
    ----------
    nb_samples : int
        Number of samples
    method : str, optional
        Method to calculate occupants per apartment samples
        (default: 'destatis')
        Options:
        - 'equal' : Select samples between min_occ and max_occ from equal
        distribution
        - 'destatis' : Select samples with random numbers from Destatis
        statistics from 2015
    min_occ : int, optional
        Minimal possible number of occupants per apartment (default: 1)
        Only relevant for method == 'equal'
    max_occ : int, optional
        Maximal possible number of occupants per apartment (default: 5)
        Only relevant for method == 'equal'

    Returns
    -------
    array_nb_occ : np.array (of ints)
        Numpy array holding number of occupants per apartment

    Reference
    ---------
    Statistisches Bundesamt (Destatis) (2017): Bevoelkerung in Deutschland.
    https://www.destatis.de/DE/ZahlenFakten/Indikatoren/LangeReihen/
    Bevoelkerung/lrbev05.html
    """
    assert method in ['equal', 'destatis']

    # Occupant counts are whole numbers; use an int array so downstream
    # consumers (e.g. range(nb_persons)) receive integers, as documented.
    array_nb_occ = np.zeros(nb_samples, dtype=int)

    if method == 'equal':
        for i in range(nb_samples):
            array_nb_occ[i] = rd.randint(int(min_occ), int(max_occ))
    elif method == 'destatis':
        for i in range(nb_samples):
            rand_nb = rd.randint(0, 100)
            # Destatis 2015 household-size shares (percent):
            # 1p: 41.4, 2p: 34.2, 3p: 12.1, 4p: 9.0, 5+p: 3.2
            if rand_nb <= 41.4:
                array_nb_occ[i] = 1
            elif rand_nb <= 41.4 + 34.2:
                array_nb_occ[i] = 2
            elif rand_nb <= 41.4 + 34.2 + 12.1:
                array_nb_occ[i] = 3
            elif rand_nb <= 41.4 + 34.2 + 12.1 + 9:
                array_nb_occ[i] = 4
            else:
                array_nb_occ[i] = 5

    return array_nb_occ
def calc_sampling_el_demand_per_apartment(nb_samples, nb_persons, type,
                                          method='stromspiegel2017'):
    """
    Choose samples for electric energy demand, depending on nb_of_persons.

    Parameters
    ----------
    nb_samples : int
        Number of samples
    nb_persons : int
        Total number of persons within apartment (1..5)
    type : str
        Residential building type ('sfh' single-family / 'mfh' multi-family)
    method : str, optional
        Method to estimate electrical demand (default: 'stromspiegel2017')
        Options:
        - 'stromspiegel2017' : co2online Stromspiegel Deutschland 2017

    Returns
    -------
    array_el_demands : np.array (of floats)
        Numpy array holding annual electric demand values in kWh per apartment
    """
    assert type in ['sfh', 'mfh']
    assert method in ['stromspiegel2017']
    assert nb_persons > 0
    assert nb_persons <= 5

    # Stromspiegel 2017 [min, max] annual electric demand in kWh
    # (hot water generation excluded), keyed by building type and occupants.
    bounds = {
        'sfh': {1: [1300, 4000], 2: [2100, 4400], 3: [2600, 5200],
                4: [2900, 5900], 5: [3500, 7500]},
        'mfh': {1: [800, 2200], 2: [1300, 3100], 3: [1700, 3900],
                4: [1900, 4500], 5: [2200, 5700]},
    }[type][nb_persons]

    minv, maxv = bounds
    array_el_demands = np.zeros(nb_samples)
    for i in range(nb_samples):
        array_el_demands[i] = rd.randint(minv, maxv)

    return array_el_demands
def calc_sampling_dhw_per_person(nb_samples, pdf='equal', equal_diff=34,
                                 mean=64, std=10):
    """
    Perform domestic hot water sampling (hot water volume in liters per person
    and day; temperature split of 35 Kelvin, according to Annex 42 results).

    Parameters
    ----------
    nb_samples : int
        Number of samples
    pdf : str, optional
        Probability density function (default: 'equal')
        Options:
        'equal' : Equal distribution
        'gaussian' : Gaussian distribution
    equal_diff : float, optional
        Difference from mean within equal distribution (default: 34)
    mean : float, optional
        Mean domestic hot water volume per person and day in liter
        (default: 64)
    std : float, optional
        Standard deviation of domestic hot water volume per person and day
        in liter (default: 10)

    Returns
    -------
    array_dhw_vol : np.array (of floats)
        Numpy array of hot water volumes per person and day in liters
    """
    assert pdf in ['gaussian', 'equal']

    array_dhw_vol = np.zeros(nb_samples)

    if pdf == 'gaussian':
        # Bugfix: the gaussian draw was previously assigned to an unused
        # local (list_dhw_vol), so the zero-filled array was returned.
        array_dhw_vol = np.random.normal(loc=mean, scale=std, size=nb_samples)
    elif pdf == 'equal':
        # Sample with a resolution of 1/1000 liter
        for i in range(nb_samples):
            array_dhw_vol[i] = rd.randint(int((mean - equal_diff) * 1000),
                                         int((
                                             mean + equal_diff) * 1000)) / 1000

    return array_dhw_vol
def calc_dhw_ref_volume_for_multiple_occ(nb_occ, ref_one_occ=64):
    """Return the reference hot water volume per person and day, scaled
    by the number of occupants in the apartment.

    Larger households use slightly less hot water per person
    (64 l -> 60 / 57 / 55 / 54 l for 2 / 3 / 4 / 5+ occupants).

    Parameters
    ----------
    nb_occ : int
        Number of occupants within apartment
    ref_one_occ : float, optional
        Reference hot water demand in liter per person and day (for single
        person apartment) (default: 64)

    Returns
    -------
    float
        Hot water volume per person and day
    """
    if nb_occ >= 5:
        return ref_one_occ * 0.9259  # 64 --> 54 Liters
    if nb_occ == 4:
        return ref_one_occ * 0.9298  # 64 --> 55 Liters
    if nb_occ == 3:
        return ref_one_occ * 0.9333  # 64 --> 57 Liters
    if nb_occ == 2:
        return ref_one_occ * 0.9375  # 64 --> 60 Liters
    if nb_occ == 1:
        return ref_one_occ + 0.0  # default mean value, unscaled
def calc_sampling_dhw_per_apartment(nb_samples, nb_persons,
                                    method='stromspiegel_2017', pdf='equal',
                                    equal_diff=34, mean=64, std=10,
                                    b_type='sfh', delta_t=35, c_p_water=4182,
                                    rho_water=995):
    """
    Perform domestic hot water sampling (hot water volume in liters per
    apartment and day; temperature split of 35 Kelvin, according to
    Annex 42 results).

    Parameters
    ----------
    nb_samples : int
        Number of samples
    nb_persons : int
        Number of persons
    method : str, optional
        Method to sample dhw volumina per person
        (default: 'stromspiegel_2017')
        Options:
        'nb_occ_dep' : Dependend on number of occupants (reduced
        demand per person, if more persons are present)
        'indep' : Independent from total number of occupants
        'stromspiegel_2017' : Based on hot water consumption data of
        Stromspiegel 2017.
    pdf : str, optional
        Probability density function (default: 'equal')
        Options:
        'equal' : Equal distribution
        'gaussian' : Gaussian distribution
        Only used for methods 'nb_occ_dep' and 'indep'.
    mean : float, optional
        Mean domestic hot water volume per person and day in liter
        (default: 64)
    equal_diff : float, optional
        Difference from mean within equal distribution (default: 34)
    std : float, optional
        Standard deviation of domestic hot water volume per person and day
        in liter for gaussian distribution (default: 10)
    b_type : str, optional
        Building type (default: 'sfh')
        Options:
        - 'sfh' : Apartment is within single family house
        - 'mfh' : Apartment is within multi-family house
        Only used for method 'stromspiegel_2017'.
    delta_t : float, optional
        Temperature split of heated up water in Kelvin (default: 35)
    c_p_water : float, optional
        Specific heat capacity of water in J/kgK (default: 4182)
    rho_water : float, optional
        Density of water in kg/m3 (default: 995)

    Returns
    -------
    array_dhw_vol : np.array (of floats)
        Numpy array of hot water volumes per apartment and day in liters
    """
    assert method in ['nb_occ_dep', 'indep', 'stromspiegel_2017']
    assert pdf in ['equal', 'gaussian']
    assert b_type in ['sfh', 'mfh']

    # list_dhw_vol = []
    array_dhw_vol = np.zeros(nb_samples)

    if method == 'nb_occ_dep':
        # Dhw consumption per occupants depends on total number of occupants
        # Calculate new reference value for dhw volume per person and day
        # depending on total number of occupants
        new_mean = calc_dhw_ref_volume_for_multiple_occ(nb_occ=nb_persons,
                                                        ref_one_occ=mean)

        # Each apartment sample is the sum of per-person draws
        for i in range(nb_samples):
            dhw_value = 0
            for p in range(nb_persons):
                dhw_value += \
                    calc_sampling_dhw_per_person(nb_samples=1,
                                                 mean=new_mean,
                                                 pdf=pdf,
                                                 equal_diff=equal_diff,
                                                 std=std)[0]
            array_dhw_vol[i] = dhw_value
    elif method == 'indep':
        # Dhw consumpton per occupants is independend from total number of
        # occupants
        for i in range(nb_samples):
            dhw_value = 0
            for p in range(nb_persons):
                dhw_value += \
                    calc_sampling_dhw_per_person(nb_samples=1,
                                                 mean=mean,
                                                 pdf=pdf,
                                                 equal_diff=equal_diff,
                                                 std=std)[0]
            array_dhw_vol[i] = dhw_value
    elif method == 'stromspiegel_2017':
        # Statistics only cover up to 5-person households; clamp larger ones
        if nb_persons > 5:
            nb_persons = 5

        # Dictionaries holding min/max dhw energy demand values in kWh per
        # capital and year (for apartments)
        dict_sfh = {1: [200, 1000],
                    2: [400, 1400],
                    3: [400, 2100],
                    4: [600, 2100],
                    5: [700, 3400]}
        dict_mfh = {1: [400, 800],
                    2: [700, 1100],
                    3: [900, 1700],
                    4: [900, 2000],
                    5: [1300, 3300]}

        for i in range(nb_samples):
            if b_type == 'sfh':
                dhw_range = dict_sfh[nb_persons]
                dhw_energy = rd.randrange(start=dhw_range[0],
                                          stop=dhw_range[1])
            elif b_type == 'mfh':
                dhw_range = dict_mfh[nb_persons]
                dhw_energy = rd.randrange(start=dhw_range[0],
                                          stop=dhw_range[1])

            # DHW volume in liter per apartment and day
            # (invert Q = m * c_p * delta_t; kWh -> J, m3 -> liter, per year)
            dhw_value = dhw_energy * 3600 * 1000 * 1000 \
                        / (rho_water * c_p_water * delta_t * 365)

            array_dhw_vol[i] = dhw_value

    return array_dhw_vol
def recalc_dhw_vol_to_energy(vol, delta_t=35, c_p_water=4182, rho_water=995):
    """Convert a daily hot water volume into annual thermal energy.

    Parameters
    ----------
    vol : float
        Input hot water volume in liters/apartment*day
    delta_t : float, optional
        Temperature split of heated up water in Kelvin (default: 35)
    c_p_water : float, optional
        Specific heat capacity of water in J/kgK (default: 4182)
    rho_water : float, optional
        Density of water in kg/m3 (default: 995)

    Returns
    -------
    float
        Annual hot water energy demand in kWh/a
    """
    # Daily energy: Q = m * c_p * delta_T, converted from J to kWh
    kwh_per_day = vol / 1000 * rho_water * c_p_water * delta_t \
        / (3600 * 1000)
    # Scale up to a full year
    return kwh_per_day * 365
if __name__ == '__main__':
    # Run the interactive sampling/plotting demo
    main()
| [
"numpy.random.normal",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.random.triangular",
"random.randrange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.zeros",
"scipy.stats.nakagami.rvs",
"matplotlib.pyplot.title",
"random.randint",... | [((495, 507), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (505, 507), True, 'import matplotlib.pyplot as plt\n'), ((544, 580), 'matplotlib.pyplot.hist', 'plt.hist', (['list_set_temp'], {'bins': '"""auto"""'}), "(list_set_temp, bins='auto')\n", (552, 580), True, 'import matplotlib.pyplot as plt\n'), ((585, 633), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Set temperatures in degree Celsius"""'], {}), "('Set temperatures in degree Celsius')\n", (595, 633), True, 'import matplotlib.pyplot as plt\n'), ((638, 674), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of temperatures"""'], {}), "('Number of temperatures')\n", (648, 674), True, 'import matplotlib.pyplot as plt\n'), ((679, 689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (687, 689), True, 'import matplotlib.pyplot as plt\n'), ((694, 705), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((911, 923), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (921, 923), True, 'import matplotlib.pyplot as plt\n'), ((960, 996), 'matplotlib.pyplot.hist', 'plt.hist', (['list_usr_airx'], {'bins': '"""auto"""'}), "(list_usr_airx, bins='auto')\n", (968, 996), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1045), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""User air exchange rates in 1/h"""'], {}), "('User air exchange rates in 1/h')\n", (1011, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1080), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (1060, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1093, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1111), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1109, 1111), True, 'import matplotlib.pyplot as plt\n'), ((1355, 1367), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{}), '()\n', (1365, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1404, 1432), 'matplotlib.pyplot.hist', 'plt.hist', (['list_occ_in_app', '(5)'], {}), '(list_occ_in_app, 5)\n', (1412, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of occupants per apartment"""'], {}), "('Number of occupants per apartment')\n", (1447, 1484), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (1499, 1519), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1532, 1534), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1550), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1548, 1550), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1860, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1933), 'matplotlib.pyplot.hist', 'plt.hist', (['list_el_dem'], {'bins': '"""auto"""'}), "(list_el_dem, bins='auto')\n", (1907, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1992), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of electric energy demands in kWh"""'], {}), "('Number of electric energy demands in kWh')\n", (1948, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2027), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (2007, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2102), 'matplotlib.pyplot.title', 'plt.title', (['"""Electric energy demand for\napartment with 3 occupants"""'], {}), '("""Electric energy demand for\napartment with 3 occupants""")\n', (2041, 2102), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2129, 2131), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2147), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2145, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2478, 2490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2488, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2563), 'matplotlib.pyplot.hist', 'plt.hist', (['list_el_dem_2'], {'bins': '"""auto"""'}), "(list_el_dem_2, bins='auto')\n", (2535, 2563), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of electric energy demands in kWh"""'], {}), "('Number of electric energy demands in kWh')\n", (2578, 2622), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2657), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (2637, 2657), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2760), 'matplotlib.pyplot.title', 'plt.title', (['"""Electric energy demand for\napartment with different number of occupants"""'], {}), '(\n """Electric energy demand for\napartment with different number of occupants"""\n )\n', (2671, 2760), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2779), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2777, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2795), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2793, 2795), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3307), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3305, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3387), 'matplotlib.pyplot.hist', 'plt.hist', (['list_dhw_vol_per_app'], {'bins': '"""auto"""'}), "(list_dhw_vol_per_app, bins='auto')\n", (3352, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3455), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hot water volumes per apartment and day in liters"""'], {}), "('Hot water volumes per apartment and day in liters')\n", (3402, 3455), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3490), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (3470, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3618), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3616, 3618), True, 'import matplotlib.pyplot as plt\n'), ((3623, 3634), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3632, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3898, 3910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3908, 3910), True, 'import matplotlib.pyplot as plt\n'), ((3947, 3988), 'matplotlib.pyplot.hist', 'plt.hist', (['list_dhw_per_app_2'], {'bins': '"""auto"""'}), "(list_dhw_per_app_2, bins='auto')\n", (3955, 3988), True, 'import matplotlib.pyplot as plt\n'), ((3993, 4056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hot water volumes per apartment and day in liters"""'], {}), "('Hot water volumes per apartment and day in liters')\n", (4003, 4056), True, 'import matplotlib.pyplot as plt\n'), ((4061, 4091), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of values"""'], {}), "('Number of values')\n", (4071, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4096, 4208), 'matplotlib.pyplot.title', 'plt.title', (['"""Hot water volumes per person and day for\napartment with different number of occupants"""'], {}), '(\n """Hot water volumes per person and day for\napartment with different number of occupants"""\n )\n', (4105, 4208), True, 'import matplotlib.pyplot as plt\n'), ((4217, 4227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4225, 4227), True, 'import matplotlib.pyplot as plt\n'), ((4232, 4243), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4241, 4243), True, 'import matplotlib.pyplot as plt\n'), ((7412, 7467), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'sdev', 'size': 'nb_samples'}), '(loc=mean, scale=sdev, size=nb_samples)\n', (7428, 7467), True, 'import numpy as np\n'), ((8474, 8494), 'numpy.zeros', 
'np.zeros', (['nb_samples'], {}), '(nb_samples)\n', (8482, 8494), True, 'import numpy as np\n'), ((12368, 12388), 'numpy.zeros', 'np.zeros', (['nb_samples'], {}), '(nb_samples)\n', (12376, 12388), True, 'import numpy as np\n'), ((14240, 14260), 'numpy.zeros', 'np.zeros', (['nb_samples'], {}), '(nb_samples)\n', (14248, 14260), True, 'import numpy as np\n'), ((16238, 16258), 'numpy.zeros', 'np.zeros', (['nb_samples'], {}), '(nb_samples)\n', (16246, 16258), True, 'import numpy as np\n'), ((20126, 20146), 'numpy.zeros', 'np.zeros', (['nb_samples'], {}), '(nb_samples)\n', (20134, 20146), True, 'import numpy as np\n'), ((16309, 16363), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'std', 'size': 'nb_samples'}), '(loc=mean, scale=std, size=nb_samples)\n', (16325, 16363), True, 'import numpy as np\n'), ((8639, 8671), 'random.randint', 'rd.randint', (['min_value', 'max_value'], {}), '(min_value, max_value)\n', (8649, 8671), True, 'import random as rd\n'), ((15060, 15082), 'random.randint', 'rd.randint', (['minv', 'maxv'], {}), '(minv, maxv)\n', (15070, 15082), True, 'import random as rd\n'), ((8895, 8959), 'numpy.random.triangular', 'np.random.triangular', ([], {'left': 'min_value', 'right': 'max_value', 'mode': 'mode'}), '(left=min_value, right=max_value, mode=mode)\n', (8915, 8959), True, 'import numpy as np\n'), ((9126, 9171), 'scipy.stats.nakagami.rvs', 'nakagami.rvs', (['(0.6)'], {'scale': '(0.4)', 'size': 'nb_samples'}), '(0.6, scale=0.4, size=nb_samples)\n', (9138, 9171), False, 'from scipy.stats import nakagami\n'), ((12643, 12661), 'random.randint', 'rd.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (12653, 12661), True, 'import random as rd\n'), ((22366, 22417), 'random.randrange', 'rd.randrange', ([], {'start': 'dhw_range[0]', 'stop': 'dhw_range[1]'}), '(start=dhw_range[0], stop=dhw_range[1])\n', (22378, 22417), True, 'import random as rd\n'), ((22572, 22623), 'random.randrange', 'rd.randrange', ([], {'start': 'dhw_range[0]', 'stop': 
'dhw_range[1]'}), '(start=dhw_range[0], stop=dhw_range[1])\n', (22584, 22623), True, 'import random as rd\n')] |
import numpy as np
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
from time import time
from athena import ndarray
from athena import stream
from athena import cpu_links as cpu_op
from athena import gpu_links as gpu_op
from athena import gpu_ops as ad
def test_normal(size, mean=0, std=1):
    """Benchmark athena's GPU normal initializer against numpy host sampling.

    Runs 10 rounds of each path, prints both wall-clock timings, and saves an
    overlaid histogram ('normal_<mean>_<std>.png') comparing one draw from
    each source.
    """
    ctx = ndarray.gpu(0)
    dev_buf = ndarray.empty(size, ctx=ctx)
    handle = stream.create_stream_handle(ctx)
    # numpy path: sample on the host, then copy into the device buffer.
    t0 = time()
    for _ in range(10):
        host_x = np.random.normal(loc=mean, scale=std, size=size).astype(np.float32)
        dev_buf[:] = host_x
    t1 = time()
    print('numpy time: ', t1 - t0)
    # cuda path: generate directly on the device, then synchronize the stream.
    t2 = time()
    for _ in range(10):
        gpu_op.normal_init(dev_buf, mean, std, 123, handle)
    handle.sync()
    t3 = time()
    print('cuda time: ', t3 - t2)
    fig, ax = plt.subplots(1, 1)
    dev_buf = dev_buf.asnumpy()
    assert (dev_buf.shape == host_x.shape)
    ax.hist(host_x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(dev_buf.flatten(), histtype='step', alpha=0.2, bins=50, label='cuda')
    ax.legend(loc='best', frameon=False)
    plt.savefig('normal_%f_%f.png' % (mean, std))
    plt.close()
# Exercise the GPU normal initializer on 2-D and 3-D shapes with assorted
# mean/std settings; histogram PNGs are written to the working directory.
test_normal((1024, 128), 0, 1)
test_normal((1024, 128), 4.5, 2.6)
test_normal((1024, 128), -2.6, 4.5)
test_normal((1024, 128, 128), -10, 9)
def test_uniform(size, lb=-1, ub=1):
    """Benchmark athena's GPU uniform initializer against numpy host sampling.

    Runs 10 rounds of each path, prints both wall-clock timings, and saves an
    overlaid histogram ('uniform_<lb>_<ub>.png') comparing one draw from each
    source.
    """
    ctx = ndarray.gpu(0)
    dev_buf = ndarray.empty(size, ctx=ctx)
    handle = stream.create_stream_handle(ctx)
    # numpy path: sample on the host, then copy into the device buffer.
    t0 = time()
    for _ in range(10):
        host_x = np.random.uniform(low=lb, high=ub, size=size).astype(np.float32)
        dev_buf[:] = host_x
    t1 = time()
    print('numpy time: ', t1 - t0)
    # cuda path: generate directly on the device, then synchronize the stream.
    t2 = time()
    for _ in range(10):
        gpu_op.uniform_init(dev_buf, lb, ub, 123, handle)
    handle.sync()
    t3 = time()
    print('cuda time: ', t3 - t2)
    fig, ax = plt.subplots(1, 1)
    dev_buf = dev_buf.asnumpy()
    assert (dev_buf.shape == host_x.shape)
    ax.hist(host_x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(dev_buf.flatten(), histtype='step', alpha=0.2, bins=50, label='cuda')
    ax.legend(loc='best', frameon=False)
    plt.savefig('uniform_%f_%f.png' % (lb, ub))
    plt.close()
# Exercise the GPU uniform initializer on 2-D and 3-D shapes, including a
# very narrow range (-4.5, -4.4); histogram PNGs land in the working directory.
test_uniform((1024, 128), 0, 1)
test_uniform((1024, 128), -100, 100)
test_uniform((1024, 128), -4.5, -4.4)
test_uniform((1024, 128, 128), -10, 9)
def test_truncated_normal(size, mean=0, std=1):
    """Benchmark athena's GPU truncated-normal initializer against scipy's
    truncnorm host sampling (truncated at +/- 2 standard deviations).

    Runs 10 rounds of each path, prints both wall-clock timings, and saves an
    overlaid histogram ('truncated_normal_<mean>_<std>.png').
    """
    ctx = ndarray.gpu(0)
    dev_buf = ndarray.empty(size, ctx=ctx)
    handle = stream.create_stream_handle(ctx)
    # host path: scipy truncnorm sample, then copy into the device buffer.
    t0 = time()
    for _ in range(10):
        host_x = truncnorm.rvs(-2.0, 2.0, loc=mean, scale=std, size=size).astype(np.float32)
        dev_buf[:] = host_x
    t1 = time()
    print('numpy time: ', t1 - t0)
    # cuda path: generate directly on the device, then synchronize the stream.
    t2 = time()
    for _ in range(10):
        gpu_op.truncated_normal_init(dev_buf, mean, std, 123, handle)
    handle.sync()
    t3 = time()
    print('cuda time: ', t3 - t2)
    fig, ax = plt.subplots(1, 1)
    dev_buf = dev_buf.asnumpy()
    assert (dev_buf.shape == host_x.shape)
    ax.hist(host_x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(dev_buf.flatten(), histtype='step', alpha=0.2, bins=50, label='cuda')
    ax.legend(loc='best', frameon=False)
    plt.savefig('truncated_normal_%f_%f.png' % (mean, std))
    plt.close()
# Exercise the GPU truncated-normal initializer on 2-D and 3-D shapes with
# assorted mean/std settings.
test_truncated_normal((1024, 128), 0, 1)
test_truncated_normal((1024, 128), 4.5, 2.6)
test_truncated_normal((1024, 128), -2.6, 4.5)
test_truncated_normal((1024, 128, 128), -10, 9)
def test_cpu_normal(size, mean=0, std=1):
    """Benchmark athena's CPU normal initializer against numpy sampling.

    Runs 10 rounds of each path (no stream/sync needed on CPU), prints both
    timings, and saves an overlaid histogram ('normal_<mean>_<std>_cpu.png').
    """
    host_buf = ndarray.empty(size, ctx=ndarray.cpu(0))
    # numpy path: sample, then copy into the athena CPU buffer.
    t0 = time()
    for _ in range(10):
        ref_x = np.random.normal(loc=mean, scale=std, size=size).astype(np.float32)
        host_buf[:] = ref_x
    t1 = time()
    print('numpy time: ', t1 - t0)
    # athena CPU-op path: fill the buffer in place.
    t2 = time()
    for _ in range(10):
        cpu_op.normal_init(host_buf, mean, std, 123)
    t3 = time()
    print('cpu time: ', t3 - t2)
    fig, ax = plt.subplots(1, 1)
    host_buf = host_buf.asnumpy()
    assert (host_buf.shape == ref_x.shape)
    ax.hist(ref_x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(host_buf.flatten(), histtype='step', alpha=0.2, bins=50, label='cpu')
    ax.legend(loc='best', frameon=False)
    plt.savefig('normal_%f_%f_cpu.png' % (mean, std))
    plt.close()
# Exercise the CPU normal initializer with the same shapes/settings as the
# GPU variant above.
test_cpu_normal((1024, 128), 0, 1)
test_cpu_normal((1024, 128), 4.5, 2.6)
test_cpu_normal((1024, 128), -2.6, 4.5)
test_cpu_normal((1024, 128, 128), -10, 9)
def test_cpu_uniform(size, lb=-1, ub=1):
    """Benchmark athena's CPU uniform initializer against numpy sampling.

    Runs 10 rounds of each path, prints both timings, and saves an overlaid
    histogram ('uniform_<lb>_<ub>_cpu.png').
    """
    host_buf = ndarray.empty(size, ctx=ndarray.cpu(0))
    # numpy path: sample, then copy into the athena CPU buffer.
    t0 = time()
    for _ in range(10):
        ref_x = np.random.uniform(low=lb, high=ub, size=size).astype(np.float32)
        host_buf[:] = ref_x
    t1 = time()
    print('numpy time: ', t1 - t0)
    # athena CPU-op path: fill the buffer in place.
    t2 = time()
    for _ in range(10):
        cpu_op.uniform_init(host_buf, lb, ub, 123)
    t3 = time()
    print('cpu time: ', t3 - t2)
    fig, ax = plt.subplots(1, 1)
    host_buf = host_buf.asnumpy()
    assert (host_buf.shape == ref_x.shape)
    ax.hist(ref_x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(host_buf.flatten(), histtype='step', alpha=0.2, bins=50, label='cpu')
    ax.legend(loc='best', frameon=False)
    plt.savefig('uniform_%f_%f_cpu.png' % (lb, ub))
    plt.close()
# Exercise the CPU uniform initializer with the same shapes/ranges as the
# GPU variant above.
test_cpu_uniform((1024, 128), 0, 1)
test_cpu_uniform((1024, 128), -100, 100)
test_cpu_uniform((1024, 128), -4.5, -4.4)
test_cpu_uniform((1024, 128, 128), -10, 9)
def test_cpu_truncated_normal(size, mean=0, std=1):
    """Benchmark athena's CPU truncated-normal initializer against scipy's
    truncnorm sampling (truncated at +/- 2 standard deviations).

    Runs 10 rounds of each path, prints both timings, and saves an overlaid
    histogram ('truncated_normal_<mean>_<std>_cpu.png').
    """
    cpu_x = ndarray.empty(size, ctx=ndarray.cpu(0))
    # host path: scipy truncnorm sample, then copy into the athena buffer.
    np_st = time()
    for i in range(10):
        x = truncnorm.rvs(-2.0, 2.0, loc=mean, scale=std, size=size).astype(np.float32)
        cpu_x[:] = x
    np_en = time()
    print('numpy time: ', np_en - np_st)
    # athena CPU-op path: fill the buffer in place.
    cpu_st = time()
    for i in range(10):
        cpu_op.truncated_normal_init(cpu_x, mean, std, 123)
    cpu_en = time()
    print('cpu time: ', cpu_en - cpu_st)
    fig, ax = plt.subplots(1, 1)
    cpu_x = cpu_x.asnumpy()
    assert (cpu_x.shape == x.shape)
    ax.hist(x.flatten(), histtype='stepfilled', alpha=0.2, bins=50, label='numpy')
    ax.hist(cpu_x.flatten(), histtype='step', alpha=0.2, bins=50, label='cpu')
    ax.legend(loc='best', frameon=False)
    # Bug fix: the original used 'truncated_normal_%f_%f.png', which clobbered
    # the GPU test's output file; all other CPU tests append the '_cpu' suffix.
    file_name = 'truncated_normal_%f_%f_cpu.png' % (mean, std)
    plt.savefig(file_name)
    plt.close()
# Exercise the CPU truncated-normal initializer with the same shapes and
# mean/std settings as the GPU variant above.
test_cpu_truncated_normal((1024, 128), 0, 1)
test_cpu_truncated_normal((1024, 128), 4.5, 2.6)
test_cpu_truncated_normal((1024, 128), -2.6, 4.5)
test_cpu_truncated_normal((1024, 128, 128), -10, 9)
| [
"numpy.random.normal",
"athena.cpu_links.normal_init",
"athena.gpu_links.normal_init",
"athena.ndarray.gpu",
"matplotlib.pyplot.savefig",
"athena.cpu_links.truncated_normal_init",
"scipy.stats.truncnorm.rvs",
"athena.ndarray.empty",
"matplotlib.pyplot.close",
"athena.cpu_links.uniform_init",
"nu... | [((321, 335), 'athena.ndarray.gpu', 'ndarray.gpu', (['(0)'], {}), '(0)\n', (332, 335), False, 'from athena import ndarray\n'), ((349, 377), 'athena.ndarray.empty', 'ndarray.empty', (['size'], {'ctx': 'ctx'}), '(size, ctx=ctx)\n', (362, 377), False, 'from athena import ndarray\n'), ((389, 421), 'athena.stream.create_stream_handle', 'stream.create_stream_handle', (['ctx'], {}), '(ctx)\n', (416, 421), False, 'from athena import stream\n'), ((434, 440), 'time.time', 'time', ([], {}), '()\n', (438, 440), False, 'from time import time\n'), ((579, 585), 'time.time', 'time', ([], {}), '()\n', (583, 585), False, 'from time import time\n'), ((639, 645), 'time.time', 'time', ([], {}), '()\n', (643, 645), False, 'from time import time\n'), ((755, 761), 'time.time', 'time', ([], {}), '()\n', (759, 761), False, 'from time import time\n'), ((816, 834), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (828, 834), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1226), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (1215, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1242), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1240, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1446), 'athena.ndarray.gpu', 'ndarray.gpu', (['(0)'], {}), '(0)\n', (1443, 1446), False, 'from athena import ndarray\n'), ((1460, 1488), 'athena.ndarray.empty', 'ndarray.empty', (['size'], {'ctx': 'ctx'}), '(size, ctx=ctx)\n', (1473, 1488), False, 'from athena import ndarray\n'), ((1500, 1532), 'athena.stream.create_stream_handle', 'stream.create_stream_handle', (['ctx'], {}), '(ctx)\n', (1527, 1532), False, 'from athena import stream\n'), ((1545, 1551), 'time.time', 'time', ([], {}), '()\n', (1549, 1551), False, 'from time import time\n'), ((1687, 1693), 'time.time', 'time', ([], {}), '()\n', (1691, 1693), False, 'from time import time\n'), ((1747, 1753), 'time.time', 'time', ([], {}), '()\n', (1751, 
1753), False, 'from time import time\n'), ((1861, 1867), 'time.time', 'time', ([], {}), '()\n', (1865, 1867), False, 'from time import time\n'), ((1922, 1940), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1934, 1940), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2330), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (2319, 2330), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2346), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2344, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2567), 'athena.ndarray.gpu', 'ndarray.gpu', (['(0)'], {}), '(0)\n', (2564, 2567), False, 'from athena import ndarray\n'), ((2581, 2609), 'athena.ndarray.empty', 'ndarray.empty', (['size'], {'ctx': 'ctx'}), '(size, ctx=ctx)\n', (2594, 2609), False, 'from athena import ndarray\n'), ((2621, 2653), 'athena.stream.create_stream_handle', 'stream.create_stream_handle', (['ctx'], {}), '(ctx)\n', (2648, 2653), False, 'from athena import stream\n'), ((2666, 2672), 'time.time', 'time', ([], {}), '()\n', (2670, 2672), False, 'from time import time\n'), ((2819, 2825), 'time.time', 'time', ([], {}), '()\n', (2823, 2825), False, 'from time import time\n'), ((2879, 2885), 'time.time', 'time', ([], {}), '()\n', (2883, 2885), False, 'from time import time\n'), ((3005, 3011), 'time.time', 'time', ([], {}), '()\n', (3009, 3011), False, 'from time import time\n'), ((3066, 3084), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3078, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3464, 3486), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (3475, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3502), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3500, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3792, 3798), 'time.time', 'time', ([], {}), '()\n', (3796, 3798), False, 'from time import time\n'), ((3936, 
3942), 'time.time', 'time', ([], {}), '()\n', (3940, 3942), False, 'from time import time\n'), ((3997, 4003), 'time.time', 'time', ([], {}), '()\n', (4001, 4003), False, 'from time import time\n'), ((4091, 4097), 'time.time', 'time', ([], {}), '()\n', (4095, 4097), False, 'from time import time\n'), ((4153, 4171), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4165, 4171), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4562), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (4551, 4562), True, 'import matplotlib.pyplot as plt\n'), ((4567, 4578), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4576, 4578), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4848), 'time.time', 'time', ([], {}), '()\n', (4846, 4848), False, 'from time import time\n'), ((4983, 4989), 'time.time', 'time', ([], {}), '()\n', (4987, 4989), False, 'from time import time\n'), ((5044, 5050), 'time.time', 'time', ([], {}), '()\n', (5048, 5050), False, 'from time import time\n'), ((5136, 5142), 'time.time', 'time', ([], {}), '()\n', (5140, 5142), False, 'from time import time\n'), ((5198, 5216), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (5210, 5216), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (5594, 5605), True, 'import matplotlib.pyplot as plt\n'), ((5610, 5621), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5619, 5621), True, 'import matplotlib.pyplot as plt\n'), ((5902, 5908), 'time.time', 'time', ([], {}), '()\n', (5906, 5908), False, 'from time import time\n'), ((6054, 6060), 'time.time', 'time', ([], {}), '()\n', (6058, 6060), False, 'from time import time\n'), ((6115, 6121), 'time.time', 'time', ([], {}), '()\n', (6119, 6121), False, 'from time import time\n'), ((6219, 6225), 'time.time', 'time', ([], {}), '()\n', (6223, 6225), False, 'from time import 
time\n'), ((6281, 6299), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6293, 6299), True, 'import matplotlib.pyplot as plt\n'), ((6674, 6696), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (6685, 6696), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6712), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6710, 6712), True, 'import matplotlib.pyplot as plt\n'), ((678, 726), 'athena.gpu_links.normal_init', 'gpu_op.normal_init', (['cuda_x', 'mean', 'std', '(123)', 'stre'], {}), '(cuda_x, mean, std, 123, stre)\n', (696, 726), True, 'from athena import gpu_links as gpu_op\n'), ((1786, 1832), 'athena.gpu_links.uniform_init', 'gpu_op.uniform_init', (['cuda_x', 'lb', 'ub', '(123)', 'stre'], {}), '(cuda_x, lb, ub, 123, stre)\n', (1805, 1832), True, 'from athena import gpu_links as gpu_op\n'), ((2918, 2976), 'athena.gpu_links.truncated_normal_init', 'gpu_op.truncated_normal_init', (['cuda_x', 'mean', 'std', '(123)', 'stre'], {}), '(cuda_x, mean, std, 123, stre)\n', (2946, 2976), True, 'from athena import gpu_links as gpu_op\n'), ((4036, 4077), 'athena.cpu_links.normal_init', 'cpu_op.normal_init', (['cpu_x', 'mean', 'std', '(123)'], {}), '(cpu_x, mean, std, 123)\n', (4054, 4077), True, 'from athena import cpu_links as cpu_op\n'), ((5083, 5122), 'athena.cpu_links.uniform_init', 'cpu_op.uniform_init', (['cpu_x', 'lb', 'ub', '(123)'], {}), '(cpu_x, lb, ub, 123)\n', (5102, 5122), True, 'from athena import cpu_links as cpu_op\n'), ((6154, 6205), 'athena.cpu_links.truncated_normal_init', 'cpu_op.truncated_normal_init', (['cpu_x', 'mean', 'std', '(123)'], {}), '(cpu_x, mean, std, 123)\n', (6182, 6205), True, 'from athena import cpu_links as cpu_op\n'), ((3764, 3778), 'athena.ndarray.cpu', 'ndarray.cpu', (['(0)'], {}), '(0)\n', (3775, 3778), False, 'from athena import ndarray\n'), ((4814, 4828), 'athena.ndarray.cpu', 'ndarray.cpu', (['(0)'], {}), '(0)\n', (4825, 4828), False, 'from athena import 
ndarray\n'), ((5874, 5888), 'athena.ndarray.cpu', 'ndarray.cpu', (['(0)'], {}), '(0)\n', (5885, 5888), False, 'from athena import ndarray\n'), ((477, 525), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'std', 'size': 'size'}), '(loc=mean, scale=std, size=size)\n', (493, 525), True, 'import numpy as np\n'), ((1588, 1633), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'lb', 'high': 'ub', 'size': 'size'}), '(low=lb, high=ub, size=size)\n', (1605, 1633), True, 'import numpy as np\n'), ((2709, 2765), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-2.0)', '(2.0)'], {'loc': 'mean', 'scale': 'std', 'size': 'size'}), '(-2.0, 2.0, loc=mean, scale=std, size=size)\n', (2722, 2765), False, 'from scipy.stats import truncnorm\n'), ((3835, 3883), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'std', 'size': 'size'}), '(loc=mean, scale=std, size=size)\n', (3851, 3883), True, 'import numpy as np\n'), ((4885, 4930), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'lb', 'high': 'ub', 'size': 'size'}), '(low=lb, high=ub, size=size)\n', (4902, 4930), True, 'import numpy as np\n'), ((5945, 6001), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-2.0)', '(2.0)'], {'loc': 'mean', 'scale': 'std', 'size': 'size'}), '(-2.0, 2.0, loc=mean, scale=std, size=size)\n', (5958, 6001), False, 'from scipy.stats import truncnorm\n')] |
import numpy
from PIL import Image
pascal_colormap = [
0 , 0, 0,
0.5020, 0, 0,
0, 0.5020, 0,
0.5020, 0.5020, 0,
0, 0, 0.5020,
0.5020, 0, 0.5020,
0, 0.5020, 0.5020,
0.5020, 0.5020, 0.5020,
0.2510, 0, 0,
0.7529, 0, 0,
0.2510, 0.5020, 0,
0.7529, 0.5020, 0,
0.2510, 0, 0.5020,
0.7529, 0, 0.5020,
0.2510, 0.5020, 0.5020,
0.7529, 0.5020, 0.5020,
0, 0.2510, 0,
0.5020, 0.2510, 0,
0, 0.7529, 0,
0.5020, 0.7529, 0,
0, 0.2510, 0.5020,
0.5020, 0.2510, 0.5020,
0, 0.7529, 0.5020,
0.5020, 0.7529, 0.5020,
0.2510, 0.2510, 0,
0.7529, 0.2510, 0,
0.2510, 0.7529, 0,
0.7529, 0.7529, 0,
0.2510, 0.2510, 0.5020,
0.7529, 0.2510, 0.5020,
0.2510, 0.7529, 0.5020,
0.7529, 0.7529, 0.5020,
0, 0, 0.2510,
0.5020, 0, 0.2510,
0, 0.5020, 0.2510,
0.5020, 0.5020, 0.2510,
0, 0, 0.7529,
0.5020, 0, 0.7529,
0, 0.5020, 0.7529,
0.5020, 0.5020, 0.7529,
0.2510, 0, 0.2510,
0.7529, 0, 0.2510,
0.2510, 0.5020, 0.2510,
0.7529, 0.5020, 0.2510,
0.2510, 0, 0.7529,
0.7529, 0, 0.7529,
0.2510, 0.5020, 0.7529,
0.7529, 0.5020, 0.7529,
0, 0.2510, 0.2510,
0.5020, 0.2510, 0.2510,
0, 0.7529, 0.2510,
0.5020, 0.7529, 0.2510,
0, 0.2510, 0.7529,
0.5020, 0.2510, 0.7529,
0, 0.7529, 0.7529,
0.5020, 0.7529, 0.7529,
0.2510, 0.2510, 0.2510,
0.7529, 0.2510, 0.2510,
0.2510, 0.7529, 0.2510,
0.7529, 0.7529, 0.2510,
0.2510, 0.2510, 0.7529,
0.7529, 0.2510, 0.7529,
0.2510, 0.7529, 0.7529,
0.7529, 0.7529, 0.7529,
0.1255, 0, 0,
0.6275, 0, 0,
0.1255, 0.5020, 0,
0.6275, 0.5020, 0,
0.1255, 0, 0.5020,
0.6275, 0, 0.5020,
0.1255, 0.5020, 0.5020,
0.6275, 0.5020, 0.5020,
0.3765, 0, 0,
0.8784, 0, 0,
0.3765, 0.5020, 0,
0.8784, 0.5020, 0,
0.3765, 0, 0.5020,
0.8784, 0, 0.5020,
0.3765, 0.5020, 0.5020,
0.8784, 0.5020, 0.5020,
0.1255, 0.2510, 0,
0.6275, 0.2510, 0,
0.1255, 0.7529, 0,
0.6275, 0.7529, 0,
0.1255, 0.2510, 0.5020,
0.6275, 0.2510, 0.5020,
0.1255, 0.7529, 0.5020,
0.6275, 0.7529, 0.5020,
0.3765, 0.2510, 0,
0.8784, 0.2510, 0,
0.3765, 0.7529, 0,
0.8784, 0.7529, 0,
0.3765, 0.2510, 0.5020,
0.8784, 0.2510, 0.5020,
0.3765, 0.7529, 0.5020,
0.8784, 0.7529, 0.5020,
0.1255, 0, 0.2510,
0.6275, 0, 0.2510,
0.1255, 0.5020, 0.2510,
0.6275, 0.5020, 0.2510,
0.1255, 0, 0.7529,
0.6275, 0, 0.7529,
0.1255, 0.5020, 0.7529,
0.6275, 0.5020, 0.7529,
0.3765, 0, 0.2510,
0.8784, 0, 0.2510,
0.3765, 0.5020, 0.2510,
0.8784, 0.5020, 0.2510,
0.3765, 0, 0.7529,
0.8784, 0, 0.7529,
0.3765, 0.5020, 0.7529,
0.8784, 0.5020, 0.7529,
0.1255, 0.2510, 0.2510,
0.6275, 0.2510, 0.2510,
0.1255, 0.7529, 0.2510,
0.6275, 0.7529, 0.2510,
0.1255, 0.2510, 0.7529,
0.6275, 0.2510, 0.7529,
0.1255, 0.7529, 0.7529,
0.6275, 0.7529, 0.7529,
0.3765, 0.2510, 0.2510,
0.8784, 0.2510, 0.2510,
0.3765, 0.7529, 0.2510,
0.8784, 0.7529, 0.2510,
0.3765, 0.2510, 0.7529,
0.8784, 0.2510, 0.7529,
0.3765, 0.7529, 0.7529,
0.8784, 0.7529, 0.7529,
0, 0.1255, 0,
0.5020, 0.1255, 0,
0, 0.6275, 0,
0.5020, 0.6275, 0,
0, 0.1255, 0.5020,
0.5020, 0.1255, 0.5020,
0, 0.6275, 0.5020,
0.5020, 0.6275, 0.5020,
0.2510, 0.1255, 0,
0.7529, 0.1255, 0,
0.2510, 0.6275, 0,
0.7529, 0.6275, 0,
0.2510, 0.1255, 0.5020,
0.7529, 0.1255, 0.5020,
0.2510, 0.6275, 0.5020,
0.7529, 0.6275, 0.5020,
0, 0.3765, 0,
0.5020, 0.3765, 0,
0, 0.8784, 0,
0.5020, 0.8784, 0,
0, 0.3765, 0.5020,
0.5020, 0.3765, 0.5020,
0, 0.8784, 0.5020,
0.5020, 0.8784, 0.5020,
0.2510, 0.3765, 0,
0.7529, 0.3765, 0,
0.2510, 0.8784, 0,
0.7529, 0.8784, 0,
0.2510, 0.3765, 0.5020,
0.7529, 0.3765, 0.5020,
0.2510, 0.8784, 0.5020,
0.7529, 0.8784, 0.5020,
0, 0.1255, 0.2510,
0.5020, 0.1255, 0.2510,
0, 0.6275, 0.2510,
0.5020, 0.6275, 0.2510,
0, 0.1255, 0.7529,
0.5020, 0.1255, 0.7529,
0, 0.6275, 0.7529,
0.5020, 0.6275, 0.7529,
0.2510, 0.1255, 0.2510,
0.7529, 0.1255, 0.2510,
0.2510, 0.6275, 0.2510,
0.7529, 0.6275, 0.2510,
0.2510, 0.1255, 0.7529,
0.7529, 0.1255, 0.7529,
0.2510, 0.6275, 0.7529,
0.7529, 0.6275, 0.7529,
0, 0.3765, 0.2510,
0.5020, 0.3765, 0.2510,
0, 0.8784, 0.2510,
0.5020, 0.8784, 0.2510,
0, 0.3765, 0.7529,
0.5020, 0.3765, 0.7529,
0, 0.8784, 0.7529,
0.5020, 0.8784, 0.7529,
0.2510, 0.3765, 0.2510,
0.7529, 0.3765, 0.2510,
0.2510, 0.8784, 0.2510,
0.7529, 0.8784, 0.2510,
0.2510, 0.3765, 0.7529,
0.7529, 0.3765, 0.7529,
0.2510, 0.8784, 0.7529,
0.7529, 0.8784, 0.7529,
0.1255, 0.1255, 0,
0.6275, 0.1255, 0,
0.1255, 0.6275, 0,
0.6275, 0.6275, 0,
0.1255, 0.1255, 0.5020,
0.6275, 0.1255, 0.5020,
0.1255, 0.6275, 0.5020,
0.6275, 0.6275, 0.5020,
0.3765, 0.1255, 0,
0.8784, 0.1255, 0,
0.3765, 0.6275, 0,
0.8784, 0.6275, 0,
0.3765, 0.1255, 0.5020,
0.8784, 0.1255, 0.5020,
0.3765, 0.6275, 0.5020,
0.8784, 0.6275, 0.5020,
0.1255, 0.3765, 0,
0.6275, 0.3765, 0,
0.1255, 0.8784, 0,
0.6275, 0.8784, 0,
0.1255, 0.3765, 0.5020,
0.6275, 0.3765, 0.5020,
0.1255, 0.8784, 0.5020,
0.6275, 0.8784, 0.5020,
0.3765, 0.3765, 0,
0.8784, 0.3765, 0,
0.3765, 0.8784, 0,
0.8784, 0.8784, 0,
0.3765, 0.3765, 0.5020,
0.8784, 0.3765, 0.5020,
0.3765, 0.8784, 0.5020,
0.8784, 0.8784, 0.5020,
0.1255, 0.1255, 0.2510,
0.6275, 0.1255, 0.2510,
0.1255, 0.6275, 0.2510,
0.6275, 0.6275, 0.2510,
0.1255, 0.1255, 0.7529,
0.6275, 0.1255, 0.7529,
0.1255, 0.6275, 0.7529,
0.6275, 0.6275, 0.7529,
0.3765, 0.1255, 0.2510,
0.8784, 0.1255, 0.2510,
0.3765, 0.6275, 0.2510,
0.8784, 0.6275, 0.2510,
0.3765, 0.1255, 0.7529,
0.8784, 0.1255, 0.7529,
0.3765, 0.6275, 0.7529,
0.8784, 0.6275, 0.7529,
0.1255, 0.3765, 0.2510,
0.6275, 0.3765, 0.2510,
0.1255, 0.8784, 0.2510,
0.6275, 0.8784, 0.2510,
0.1255, 0.3765, 0.7529,
0.6275, 0.3765, 0.7529,
0.1255, 0.8784, 0.7529,
0.6275, 0.8784, 0.7529,
0.3765, 0.3765, 0.2510,
0.8784, 0.3765, 0.2510,
0.3765, 0.8784, 0.2510,
0.8784, 0.8784, 0.2510,
0.3765, 0.3765, 0.7529,
0.8784, 0.3765, 0.7529,
0.3765, 0.8784, 0.7529,
0.8784, 0.8784, 0.7529]
def save_with_pascal_colormap(filename, arr):
    """Save *arr* (an integer label image) to *filename* as a palettised
    image using the PASCAL VOC colormap."""
    # Scale the normalized 0..1 colormap triples to 0..255 palette bytes.
    palette_bytes = (numpy.array(pascal_colormap) * 255).round().astype("uint8")
    # A tiny throwaway 'P'-mode image acts purely as a palette carrier
    # for quantize().
    carrier = Image.new('P', (16, 16))
    carrier.putpalette(palette_bytes)
    label_img = Image.fromarray(numpy.squeeze(arr.astype("uint8")))
    label_img.quantize(palette=carrier).save(filename)
| [
"PIL.Image.new",
"numpy.array"
] | [((8893, 8917), 'PIL.Image.new', 'Image.new', (['"""P"""', '(16, 16)'], {}), "('P', (16, 16))\n", (8902, 8917), False, 'from PIL import Image\n'), ((8820, 8848), 'numpy.array', 'numpy.array', (['pascal_colormap'], {}), '(pascal_colormap)\n', (8831, 8848), False, 'import numpy\n')] |
# base tree class
import crast.node
import csv
import numpy as np
import random
import statistics
class Tree:
    """Flat (array-of-nodes) decision-tree wrapper.

    Besides plain prediction, it offers several strategies for predicting
    when only a subset of features is available: early ejection, a
    TreeSHAP-style recursive marginalisation, covariance-based Monte-Carlo
    imputation, and interventional (background-data) averaging.
    """
    def __init__(self) -> None:
        # Nodes indexed by their id; children reference indices in this list.
        self.nodes = []
    # constructor from shap tree
    @classmethod
    def from_shap_tree(cls, in_tree):
        """Build a Tree from a shap tree object whose node values are scalars."""
        out = cls()
        # import shap
        for inode in range(len(in_tree.features)):
            this_node = crast.node.CrastNode()
            this_node.id = inode
            this_node.feature = in_tree.features[inode]
            this_node.threshold = in_tree.thresholds[inode]
            this_node.child_left = in_tree.children_left[inode]
            this_node.child_right = in_tree.children_right[inode]
            this_node.value = float(in_tree.values[inode])
            this_node.sample_weight = in_tree.node_sample_weight[inode]
            out.nodes.append(this_node)
        return out
    # constructor from sklearn shap tree
    @classmethod
    def from_sk_shap_tree(cls, in_tree):
        """Build a Tree from an sklearn-backed shap tree.

        Identical to from_shap_tree except that values arrive as 1-element
        arrays per node, hence the extra [0] index.
        """
        out = cls()
        # import shap
        for inode in range(len(in_tree.features)):
            this_node = crast.node.CrastNode()
            this_node.id = inode
            this_node.feature = in_tree.features[inode]
            this_node.threshold = in_tree.thresholds[inode]
            this_node.child_left = in_tree.children_left[inode]
            this_node.child_right = in_tree.children_right[inode]
            this_node.value = float(in_tree.values[inode][0])
            this_node.sample_weight = in_tree.node_sample_weight[inode]
            out.nodes.append(this_node)
        return out
    def print_tree(self) -> None:
        """Dump every node's fields to stdout for debugging."""
        print('Printing Tree Info')
        print('-----------------------')
        for nn in self.nodes:
            print('Node: %i'%(nn.id))
            print('Feature: %i'%(nn.feature))
            print('Threshold: %f'%(nn.threshold))
            print('Child Left: %i'%(nn.child_left))
            print('Child Right: %i'%(nn.child_right))
            print('Value: %i'%(nn.value))
            print('-----------------------')
    def read_tree(self, in_path):
        """Load nodes from a CSV file at *in_path*.

        The first row must be a header containing all keys of header_idxs;
        each subsequent row becomes one CrastNode (values set via
        set_value). Exits the process if a required column is missing.
        """
        # Map of required column name -> column index (-1 until located).
        header_idxs = {'child_left':-1, 'child_right':-1, 'feature':-1, 'threshold':-1, 'value':-1, 'sample_weight':-1, 'depth':-1}
        with open(in_path, 'r', newline='') as ifh:
            reader = csv.reader(ifh, delimiter=',', quotechar='|')
            for irow, row in enumerate(reader):
                if irow == 0:
                    # Locate each required column in the header row.
                    for hidx in header_idxs:
                        for ii in range(len(row)):
                            if hidx == row[ii]:
                                header_idxs[hidx] = ii
                                break
                        if header_idxs[hidx] < 0:
                            print('could not find %s'%(hidx))
                            exit()
                else:
                    this_node = crast.node.CrastNode()
                    for hh in header_idxs:
                        this_node.set_value(hh, row[header_idxs[hh]])
                    self.nodes.append(this_node)
    def predict_tree(self, X):
        """Walk the tree from the root for sample X and return the leaf value.

        Relies on node.predict returning a negative index at a leaf.
        """
        next_idx = 0
        while next_idx >= 0:
            this_idx = next_idx
            next_idx = self.nodes[next_idx].predict(X)
        return self.nodes[this_idx].value
    # early ejection prediction given a set of non-missing features indexed by ftr_idxs
    def predict_tree_eject(self, X, ftr_idxs):
        """Predict, but stop ('eject') at the first node that splits on a
        missing feature and return that internal node's value."""
        next_idx = 0
        while next_idx >= 0:
            this_idx = next_idx
            if not self.nodes[next_idx].feature in ftr_idxs:
                return self.nodes[next_idx].value
            next_idx = self.nodes[next_idx].predict(X)
        return self.nodes[this_idx].value
    # treeshap alg1 recursive method
    def ts_alg1_recursive(self, X, ftr_idxs, start_idx):
        """TreeSHAP Algorithm-1 style expectation: follow the split when the
        feature is present; otherwise recurse into both children weighted by
        their training-sample fractions.

        NOTE(review): weights use node.sample_idxs, which is not set by the
        from_*_shap_tree constructors (they set sample_weight) — presumably
        populated elsewhere; confirm before relying on this path.
        """
        if self.nodes[start_idx].feature < 0:
            return self.nodes[start_idx].value
        if self.nodes[start_idx].feature in ftr_idxs:
            next_idx = self.nodes[start_idx].predict(X)
            return self.ts_alg1_recursive(X, ftr_idxs, next_idx)
        else:
            left_idx = self.nodes[start_idx].child_left
            left_wt = len(self.nodes[left_idx].sample_idxs)/len(self.nodes[start_idx].sample_idxs)
            right_idx = self.nodes[start_idx].child_right
            right_wt = len(self.nodes[right_idx].sample_idxs)/len(self.nodes[start_idx].sample_idxs)
            return (left_wt*self.ts_alg1_recursive(X, ftr_idxs, left_idx) + right_wt*self.ts_alg1_recursive(X, ftr_idxs, right_idx))
    # treeshap alg1 prediction given a set of non-missing features indexed by ftr_idxs
    def predict_tree_ts_alg1(self, X, ftr_idxs):
        """Entry point for the recursive TreeSHAP-style expectation (from root)."""
        return self.ts_alg1_recursive(X, ftr_idxs, 0)
    # calc covariance matrix and anything else needed
    def init_covar_imputer(self, data) -> None:
        """Precompute statistics used by predict_tree_covar_impute.

        *data* is a sequence of samples, each a dict with a 'features' list.
        Stores a mean-normalised covariance matrix, its Cholesky factor,
        per-feature sorted value lists, and per-feature mean/stdev.
        """
        nsamples = len(data)
        nftrs = len(data[0]['features'])
        ftr_means = [0.0 for ii in range(nftrs)]
        for isample in range(nsamples):
            for iftr in range(nftrs):
                ftr_means[iftr] += data[isample]['features'][iftr]
        for iftr in range(nftrs):
            ftr_means[iftr] = ftr_means[iftr]/nsamples
        # Covariance normalised by the product of means (dimensionless);
        # NOTE(review): divides by ftr_means, so a zero-mean feature breaks this.
        self.cov_mat = [[0.0 for jj in range(nftrs)] for ii in range(nftrs)]
        for isample in range(nsamples):
            for iftr in range(nftrs):
                for jftr in range(nftrs):
                    self.cov_mat[iftr][jftr] += (data[isample]['features'][iftr] - ftr_means[iftr])*(data[isample]['features'][jftr] - ftr_means[jftr])/(ftr_means[iftr]*ftr_means[jftr])
        for iftr in range(nftrs):
            for jftr in range(nftrs):
                self.cov_mat[iftr][jftr] = self.cov_mat[iftr][jftr]/nsamples
        # Cholesky factor used to correlate independent gaussian throws.
        self.chol = np.linalg.cholesky(np.array(self.cov_mat))
        # Sorted per-feature training values (kept for empirical sampling).
        self.ftr_lists = [[] for ii in range(nftrs)]
        for isample in range(nsamples):
            for iftr in range(nftrs):
                self.ftr_lists[iftr].append(data[isample]['features'][iftr])
        for iftr in range(nftrs):
            self.ftr_lists[iftr] = np.sort(self.ftr_lists[iftr])
        self.ftr_means = [0.0 for ii in range(nftrs)]
        self.ftr_stds = [0.0 for ii in range(nftrs)]
        for iftr in range(nftrs):
            self.ftr_means[iftr] = statistics.mean(self.ftr_lists[iftr])
            self.ftr_stds[iftr] = statistics.stdev(self.ftr_lists[iftr])
    def get_sig_val_from_feature(self, ftr_idx, value):
        """Standardise a raw feature value: (value - mean) / std."""
        return (value-self.ftr_means[ftr_idx])/self.ftr_stds[ftr_idx]
    def get_feature_from_sig_val(self, ftr_idx, value):
        """Invert standardisation: mean + value * std."""
        return self.ftr_means[ftr_idx] + value*self.ftr_stds[ftr_idx]
    def predict_tree_covar_impute(self, X, ftr_idxs, nthrows):
        """Monte-Carlo prediction: impute missing features by drawing
        correlated gaussians (via the Cholesky factor), keep known features
        fixed, and average the tree predictions over *nthrows* throws.
        Requires init_covar_imputer to have been called first.
        """
        if len(ftr_idxs) == 0:
            return 0
        nftrs = len(X)
        nsamples = len(self.ftr_lists[0])
        throw_vec = [0.0 for ii in range(nftrs)]
        # Known features enter the throw vector as standardised values.
        for idx in ftr_idxs:
            throw_vec[idx] = self.get_sig_val_from_feature(idx, X[idx])
        throw_idxs = []
        for iftr in range(nftrs):
            if not iftr in ftr_idxs:
                throw_idxs.append(iftr)
        pred_res = 0.0
        for ithrow in range(nthrows):
            ftr_res = [0.0 for ii in range(nftrs)]
            for idx in throw_idxs:
                # throw_vec[idx] = self.ftr_lists[idx][random.randint(0,nsamples-1)]
                throw_vec[idx] = random.gauss(0,1)
            # Correlate the throws: ftr_res = chol @ throw_vec.
            for iftr in range(nftrs):
                for jftr in range(nftrs):
                    ftr_res[iftr] += throw_vec[jftr]*self.chol[iftr][jftr]
            for iftr in range(nftrs):
                ftr_res[iftr] = self.get_feature_from_sig_val(iftr, ftr_res[iftr])
            # Restore the actually-known feature values unchanged.
            for iftr in ftr_idxs:
                ftr_res[iftr] = X[iftr]
            pred_res += self.predict_tree(ftr_res)
        return pred_res/nthrows
    def predict_tree_covar_impute_old(self, X, ftr_idxs, nthrows):
        """Older variant: averages the *imputed feature vectors* over throws
        and predicts once on the averaged vector (rather than averaging the
        per-throw predictions). Kept for comparison.
        """
        if len(ftr_idxs) == 0:
            return 0
        nftrs = len(X)
        nsamples = len(self.ftr_lists[0])
        throw_vec = [0.0 for ii in range(nftrs)]
        for idx in ftr_idxs:
            throw_vec[idx] = self.get_sig_val_from_feature(idx, X[idx])
        throw_idxs = []
        for iftr in range(nftrs):
            if not iftr in ftr_idxs:
                throw_idxs.append(iftr)
        ftr_res = [0.0 for ii in range(nftrs)]
        for ithrow in range(nthrows):
            for idx in throw_idxs:
                # throw_vec[idx] = self.ftr_lists[idx][random.randint(0,nsamples-1)]
                throw_vec[idx] = random.gauss(0,1)
            for iftr in range(nftrs):
                for jftr in range(nftrs):
                    ftr_res[iftr] += throw_vec[jftr]*self.chol[iftr][jftr]
        for iftr in range(nftrs):
            ftr_res[iftr] = self.get_feature_from_sig_val(iftr, ftr_res[iftr]/nthrows)
        for iftr in ftr_idxs:
            ftr_res[iftr] = X[iftr]
        # maybe actually want average of predictions over throws rather than prediction of averaged throws
        return self.predict_tree(ftr_res)
    def init_ts_intervent(self, data):
        """Cache the raw feature matrix of *data* as background/reference
        data for interventional prediction."""
        nsamples = len(data)
        nftrs = len(data[0]['features'])
        self.ref_data = [[0.0 for jj in range(nftrs)] for ii in range(nsamples)]
        for isample in range(nsamples):
            for iftr in range(nftrs):
                self.ref_data[isample][iftr] = data[isample]['features'][iftr]
    def predict_tree_ts_intervent(self, X, ftr_idxs):
        # expectation value of predicting with tree with missing features replaced by those in the training set
        """For each background sample, substitute its values into X's missing
        feature slots and predict; return the mean prediction.
        Requires init_ts_intervent to have been called first.
        """
        fill_idxs = []
        nsamples = len(self.ref_data)
        nftrs = len(self.ref_data[0])
        for iftr in range(nftrs):
            if not iftr in ftr_idxs:
                fill_idxs.append(iftr)
        this_X = [X[ii] for ii in range(nftrs)]
        ave_pred = 0.0
        for isample in range(nsamples):
            for iftr in fill_idxs:
                this_X[iftr] = self.ref_data[isample][iftr]
            ave_pred += self.predict_tree(this_X)
        return ave_pred/nsamples
"statistics.mean",
"statistics.stdev",
"numpy.sort",
"numpy.array",
"csv.reader",
"random.gauss"
] | [((2286, 2331), 'csv.reader', 'csv.reader', (['ifh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(ifh, delimiter=',', quotechar='|')\n", (2296, 2331), False, 'import csv\n'), ((5714, 5736), 'numpy.array', 'np.array', (['self.cov_mat'], {}), '(self.cov_mat)\n', (5722, 5736), True, 'import numpy as np\n'), ((6015, 6044), 'numpy.sort', 'np.sort', (['self.ftr_lists[iftr]'], {}), '(self.ftr_lists[iftr])\n', (6022, 6044), True, 'import numpy as np\n'), ((6221, 6258), 'statistics.mean', 'statistics.mean', (['self.ftr_lists[iftr]'], {}), '(self.ftr_lists[iftr])\n', (6236, 6258), False, 'import statistics\n'), ((6293, 6331), 'statistics.stdev', 'statistics.stdev', (['self.ftr_lists[iftr]'], {}), '(self.ftr_lists[iftr])\n', (6309, 6331), False, 'import statistics\n'), ((7317, 7335), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (7329, 7335), False, 'import random\n'), ((8476, 8494), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (8488, 8494), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn import preprocessing, cross_validation
import csv
import string
from collections import Counter
from tqdm import tqdm
import collections, re
import random
from random import randint
from sklearn.metrics import average_precision_score
import pandas as pd
from scipy import misc as cv2
import glob
import tensorflow as tf
from PIL import Image
from skimage import transform
import copy
from random import shuffle
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data,dropout,fully_connected
from tflearn.layers.estimator import regression
import os
import time
import imageio
import plotly.plotly as py
import plotly.graph_objs as go
path="/home/asim/Desktop/Screenshots/model.tflearn.meta"
class PlottingCallback(tflearn.callbacks.Callback):
    """tflearn training callback that, at the end of each epoch, runs the
    observed layers on the held input batch `x` and saves a grid of kernel
    activation images per input sample.
    """
    def __init__(self, model, x,
                 layers_to_observe=(),
                 kernels=10,
                 inputs=1):
        # model: the DNN being trained; x: batch of inputs to visualise.
        self.model = model
        self.x = x
        self.kernels = kernels   # feature maps shown per observed layer
        self.inputs = inputs     # number of input samples to render
        # One lightweight DNN wrapper per layer so each can be .predict()-ed.
        self.observers = [tflearn.DNN(l) for l in layers_to_observe]

    def on_epoch_end(self, training_state):
        """Render one figure per input sample and save it as a PNG."""
        outputs = [o.predict(self.x) for o in self.observers]
        for i in range(self.inputs):
            fig = plt.figure(frameon=False)
            plt.subplots_adjust(wspace=0.1, hspace=0.1)
            ix = 1
            for o in outputs:
                for kernel in range(self.kernels):
                    plt.subplot(len(outputs), self.kernels, ix)
                    plt.imshow(o[i, :, :, kernel])
                    plt.axis('off')
                    ix += 1
            plt.savefig('outputs-for-image:%i-at-epoch:%i.png'
                        % (i, training_state.epoch))
            # Bug fix: close the figure after saving; the original leaked one
            # matplotlib figure per input per epoch for the whole training run.
            plt.close(fig)
def generate_training_data(folder):
    """Load every PNG in *folder* and pair it with a one-hot class label
    inferred from its filename.

    Filenames are matched against class keywords in priority order
    (fb, yt, stack, gmail, code, others) -- the first keyword found in
    the filename wins, exactly as the original if/elif chain did.  A file
    matching no keyword gets the sentinel label ``[0]``.

    Returns:
        list of ``[image_array, one_hot_label]`` pairs.
    """
    print("Getting images for training..")
    # Keyword -> one-hot vector, checked in this order (first match wins).
    label_map = [
        ("fb",     [1, 0, 0, 0, 0, 0]),
        ("yt",     [0, 1, 0, 0, 0, 0]),
        ("stack",  [0, 0, 1, 0, 0, 0]),
        ("gmail",  [0, 0, 0, 1, 0, 0]),
        ("code",   [0, 0, 0, 0, 1, 0]),
        ("others", [0, 0, 0, 0, 0, 1]),
    ]
    bag = []
    files = glob.glob(folder + "/*.png")
    with tqdm(total=len(files)) as pbar:
        for img in files:
            # First keyword contained in the path decides the class;
            # [0] is the "unlabelled" fallback of the original code.
            tr = next((vec for key, vec in label_map if key in img), [0])
            n = cv2.imread(img)
            bag.append([n, tr])
            pbar.update(1)
    return bag
def remove_files(imgpath):
    """Delete every PNG file directly inside *imgpath*, with a progress bar."""
    pngs = glob.glob(imgpath + "/*.png")
    print("Removing files from ", imgpath, " ...")
    with tqdm(total=len(pngs)) as bar:
        for png in pngs:
            os.remove(png)
            bar.update(1)
# Build the CNN (conv32 -> pool -> conv64 -> pool -> conv32 -> pool ->
# fc128 -> dropout -> fc6 softmax).  If a saved checkpoint exists at
# `path`, rebuild the graph and load the weights; otherwise train from
# the screenshots on disk and save the model.
# NOTE(review): the two branches bind the max_* handles to *different*
# layers (max_0 is a conv layer here but a pooling layer in the else
# branch) -- confirm which layers are meant to be observed/visualised.
if os.path.exists(path):
    print("Loading the model..")
    tf.reset_default_graph()
    convnet=input_data(shape=[None,50,50,3],name='input')
    convnet=conv_2d(convnet,32,5,activation='relu')
    convnet=max_pool_2d(convnet,5)
    max_0=conv_2d(convnet,64,5,activation='relu')
    max_1=max_pool_2d(max_0,5)
    convnet=conv_2d(max_1,32,5,activation='relu')
    convnet=max_pool_2d(convnet,5)
    max_2=fully_connected(convnet,128,activation='relu')
    convnet=dropout(max_2,0.4)
    max_3=fully_connected(convnet,6,activation='softmax')
    convnet=regression(max_3,optimizer='adam',learning_rate=0.005,loss='categorical_crossentropy',name='ScreenshotClassifier')
    model=tflearn.DNN(convnet,tensorboard_dir='log',tensorboard_verbose=3)
    model.load('./model.tflearn')
else:
    # No checkpoint: gather labelled screenshots and train from scratch.
    bag=generate_training_data("Screenshots")
    random.shuffle(bag)
    i=0
    data=[]
    labels=[]
    for i in range(len(bag)): # separating features and labels
        data.append(bag[i][0])
        labels.append(bag[i][1])
    del bag
    i=0
    X=[]
    print("Resizing images")
    # Normalise every screenshot to the 50x50x3 input expected by the net.
    with tqdm(total=len(data)) as p1bar:
        for i in range(len(data)):
            x=np.array(transform.resize(data[i],[50,50,3]),dtype='float32')
            X.append(x)
            p1bar.update(1)
    del data
    data=X
    # 90/10 train/validation split.
    X_train, X_test, y_train, y_test=cross_validation.train_test_split(data,labels,test_size=0.1)
    tf.reset_default_graph()
    convnet=input_data(shape=[None,50,50,3],name='input')
    convnet=conv_2d(convnet,32,5,activation='relu')
    max_1=max_pool_2d(convnet,5)
    convnet=conv_2d(max_1,64,5,activation='relu')
    convnet=max_pool_2d(convnet,5)
    convnet=conv_2d(convnet,32,5,activation='relu')
    max_0=max_pool_2d(convnet,5)
    convnet=fully_connected(max_0,128,activation='relu')
    convnet=dropout(convnet,0.4)
    convnet=fully_connected(convnet,6,activation='softmax')
    convnet=regression(convnet,optimizer='adam',learning_rate=0.005,loss='categorical_crossentropy',name='ScreenshotClassifier')
    model=tflearn.DNN(convnet,tensorboard_dir='log',tensorboard_verbose=3)
    # max_0 (the last pooling layer) is handed to PlottingCallback so its
    # activations are dumped to PNG after every epoch.
    model.fit(X_train,y_train, n_epoch=20,validation_set=(X_test,y_test), snapshot_step=20,show_metric=True,
          run_id='ScreenshotClassifier',callbacks=[PlottingCallback(model, X_test, (max_0))])
    print("Saving the model")
    model.save('model.tflearn')
    # Free the training arrays before the inference phase below.
    del X_train
    del y_train
    del X_test
    del y_test
#testing here
# ---- Inference phase: classify fresh screenshots from ./test2 ----------
bag=generate_training_data("test2")
random.shuffle(bag)
i=0
data=[]
labels=[]
print("Getting test data..")
for i in range(len(bag)): # separating features and labels
    data.append(bag[i][0]) #just images for test data, no labels
del bag
i=0
X=[]
print("Resizing images")
# Resize to the 50x50x3 network input; `data` keeps the full-size images
# for display/saving.
with tqdm(total=len(data)) as p1bar:
    for i in range(len(data)):
        #if i>=90:
        #    break
        x=np.array(transform.resize(data[i],[50,50,3]),dtype='float32')
        X.append(x)
        p1bar.update(1)
X_test=X #for feeding to NN for predicting label
real_data=copy.deepcopy(X_test) #for displaying images in testing
# Bookkeeping for the prediction loop below: j counts remaining images,
# m is the total, s indexes `data`, the rest are per-class counters for
# the final pie chart.
j=len(X_test)
m=j
s=0
ot=0
fb=0
st=0
cod=0
gm=0
yt=0
imgpath="/home/asim/Desktop/Screenshots" #base path of all classes' folders
print("Predicting on test set..")
'''
observed = [max_0,max_1,max_2,max_3]
observers = [tflearn.DNN(v, session=model.session) for v in observed]
outputs = [m.predict(X_test) for m in observers]
print([d.shape for d in outputs])
kernel=1
for i in [5,55,92,149,240]:
    #i+30
    plt.imshow(outputs[0][i, :, :, kernel])
    plt.show()
'''
'''
observed = [max_0,max_1,max_2]
observers = [tflearn.DNN(v, session=model.session) for v in observed]
outputs = [m.predict(X_test) for m in observers]
kernels=10
for i in range(1):
    plt.figure(frameon=False)
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    ix = 1
    for o in outputs:
        for kernel in range(kernels):
            plt.subplot(len(outputs),kernels, ix)
            plt.imshow(o[0, :, :, kernel])
            plt.axis('off')
            ix += 1
    plt.savefig('outputs-for-image:%i-at-epoch:%i.png'
                % (i, training_state.epoch))
'''
#plt.savefig('output.png')
'''
for o in outputs:
    for kernel in range(kernels):
        plt.imshow(o[i, :, :, kernel])
        plt.axis('off')
        ix += 1
'''
# Clear out last run's classification results before re-filing below.
remove_files(imgpath+"/fb")
remove_files(imgpath+"/yt")
remove_files(imgpath+"/gmail")
remove_files(imgpath+"/stack")
remove_files(imgpath+"/code")
remove_files(imgpath+"/others")
# Map predicted class index -> (sub-folder, display label).  Any index
# outside the table (the network's 6th output) files under "others".
_CLASS_DIRS = {
    0: ("fb", "FB"),
    1: ("yt", "YT"),
    2: ("stack", "Stack"),
    3: ("gmail", "Gmail"),
    4: ("code", "Code"),
}

def _save_prediction(class_idx, timestamp, image):
    """Write *image* into the folder for *class_idx* (created on demand),
    named after *timestamp*; return the human-readable class label.

    Bug fixes vs. the original if/elif pyramid:
    - the YT branch built the filename with "/yt/".format(ts), which
      discards the timestamp so every YT image was saved as "/yt/.png"
      (each overwriting the last);
    - the "code" branch's makedirs path was a copy-paste of the gmail
      branch, so code screenshots landed in /gmail labelled 'Gmail'
      whenever /code did not exist yet.
    """
    folder, str_label = _CLASS_DIRS.get(class_idx, ("others", "Others"))
    dest = imgpath + "/" + folder
    if not os.path.exists(dest):
        os.makedirs(dest)
    stamp = str(timestamp).replace('.', '_')
    imageio.imwrite(dest + "/" + stamp + ".png", image)
    return str_label

with tqdm(total=m) as p1bar:
    # Page through X_test, 15 thumbnails per 4x4 matplotlib figure.
    while j >= 0:
        num_of_pics_in_graph = 16
        fig = plt.figure(figsize=(num_of_pics_in_graph, 12))
        i = 0
        for r in X_test:
            ts = time.time()
            y = fig.add_subplot(4, 4, i + 1)
            # The original called model.predict twice per image; once
            # is enough.
            model_out = model.predict([r])[0]
            cls = np.argmax(model_out)
            str_label = _save_prediction(cls, ts, data[s])
            # Per-class counters consumed by the pie chart further down.
            if cls == 0:
                fb += 1
            elif cls == 1:
                yt += 1
            elif cls == 2:
                st += 1
            elif cls == 3:
                gm += 1
            elif cls == 4:
                cod += 1
            else:
                ot += 1
            y.imshow(data[s])  # show the original-resolution image
            plt.title(str_label)
            y.axes.get_xaxis().set_visible(False)
            y.axes.get_yaxis().set_visible(False)
            i += 1
            j -= 1
            s += 1
            p1bar.update(1)
            if (j <= 0):
                j = -2
                break
            if (i >= 15):  # figure is full, start a new page
                break
        if (j >= 0):
            X_test = copy.deepcopy(X_test[15:])
#plt.show()
# Clean the staging folder, then plot this run's class distribution
# as a pie chart (label order matches the counter order in `sizes`).
remove_files(imgpath+"/test")
labels = 'FB', 'YT', 'Code', 'Stack','Gmail','Others'
sizes = [fb,yt,cod,st,gm,ot]
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','brown','red']
explode = (0,0,0,0,0,0) # all zeros: no slice is pulled out of the pie
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
#plt.show()
| [
"tflearn.DNN",
"tflearn.layers.core.input_data",
"copy.deepcopy",
"os.remove",
"matplotlib.pyplot.imshow",
"os.path.exists",
"tflearn.layers.core.dropout",
"tflearn.layers.conv.max_pool_2d",
"scipy.misc.imread",
"matplotlib.pyplot.axis",
"tflearn.layers.estimator.regression",
"glob.glob",
"m... | [((3496, 3516), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3510, 3516), False, 'import os\n'), ((5971, 5990), 'random.shuffle', 'random.shuffle', (['bag'], {}), '(bag)\n', (5985, 5990), False, 'import random\n'), ((6521, 6542), 'copy.deepcopy', 'copy.deepcopy', (['X_test'], {}), '(X_test)\n', (6534, 6542), False, 'import copy\n'), ((12736, 12850), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'explode': 'explode', 'labels': 'labels', 'colors': 'colors', 'autopct': '"""%1.1f%%"""', 'shadow': '(True)', 'startangle': '(140)'}), "(sizes, explode=explode, labels=labels, colors=colors, autopct=\n '%1.1f%%', shadow=True, startangle=140)\n", (12743, 12850), True, 'import matplotlib.pyplot as plt\n'), ((12855, 12872), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (12863, 12872), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3320), 'glob.glob', 'glob.glob', (["(toremove + '/*.png')"], {}), "(toremove + '/*.png')\n", (3299, 3320), False, 'import glob\n'), ((3555, 3579), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3577, 3579), True, 'import tensorflow as tf\n'), ((3592, 3641), 'tflearn.layers.core.input_data', 'input_data', ([], {'shape': '[None, 50, 50, 3]', 'name': '"""input"""'}), "(shape=[None, 50, 50, 3], name='input')\n", (3602, 3641), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((3650, 3692), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['convnet', '(32)', '(5)'], {'activation': '"""relu"""'}), "(convnet, 32, 5, activation='relu')\n", (3657, 3692), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3702, 3725), 'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['convnet', '(5)'], {}), '(convnet, 5)\n', (3713, 3725), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3735, 3777), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['convnet', '(64)', '(5)'], {'activation': '"""relu"""'}), "(convnet, 64, 5, 
activation='relu')\n", (3742, 3777), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3785, 3806), 'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['max_0', '(5)'], {}), '(max_0, 5)\n', (3796, 3806), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3818, 3858), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['max_1', '(32)', '(5)'], {'activation': '"""relu"""'}), "(max_1, 32, 5, activation='relu')\n", (3825, 3858), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3868, 3891), 'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['convnet', '(5)'], {}), '(convnet, 5)\n', (3879, 3891), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((3901, 3949), 'tflearn.layers.core.fully_connected', 'fully_connected', (['convnet', '(128)'], {'activation': '"""relu"""'}), "(convnet, 128, activation='relu')\n", (3916, 3949), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((3960, 3979), 'tflearn.layers.core.dropout', 'dropout', (['max_2', '(0.4)'], {}), '(max_2, 0.4)\n', (3967, 3979), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((3989, 4038), 'tflearn.layers.core.fully_connected', 'fully_connected', (['convnet', '(6)'], {'activation': '"""softmax"""'}), "(convnet, 6, activation='softmax')\n", (4004, 4038), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((4049, 4172), 'tflearn.layers.estimator.regression', 'regression', (['max_3'], {'optimizer': '"""adam"""', 'learning_rate': '(0.005)', 'loss': '"""categorical_crossentropy"""', 'name': '"""ScreenshotClassifier"""'}), "(max_3, optimizer='adam', learning_rate=0.005, loss=\n 'categorical_crossentropy', name='ScreenshotClassifier')\n", (4059, 4172), False, 'from tflearn.layers.estimator import regression\n'), ((4174, 4240), 'tflearn.DNN', 'tflearn.DNN', (['convnet'], {'tensorboard_dir': '"""log"""', 'tensorboard_verbose': '(3)'}), "(convnet, 
tensorboard_dir='log', tensorboard_verbose=3)\n", (4185, 4240), False, 'import tflearn\n'), ((4329, 4348), 'random.shuffle', 'random.shuffle', (['bag'], {}), '(bag)\n', (4343, 4348), False, 'import random\n'), ((4839, 4901), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['data', 'labels'], {'test_size': '(0.1)'}), '(data, labels, test_size=0.1)\n', (4872, 4901), False, 'from sklearn import preprocessing, cross_validation\n'), ((4905, 4929), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4927, 4929), True, 'import tensorflow as tf\n'), ((4942, 4991), 'tflearn.layers.core.input_data', 'input_data', ([], {'shape': '[None, 50, 50, 3]', 'name': '"""input"""'}), "(shape=[None, 50, 50, 3], name='input')\n", (4952, 4991), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((5000, 5042), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['convnet', '(32)', '(5)'], {'activation': '"""relu"""'}), "(convnet, 32, 5, activation='relu')\n", (5007, 5042), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5050, 5073), 'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['convnet', '(5)'], {}), '(convnet, 5)\n', (5061, 5073), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5085, 5125), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['max_1', '(64)', '(5)'], {'activation': '"""relu"""'}), "(max_1, 64, 5, activation='relu')\n", (5092, 5125), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5135, 5158), 'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['convnet', '(5)'], {}), '(convnet, 5)\n', (5146, 5158), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5171, 5213), 'tflearn.layers.conv.conv_2d', 'conv_2d', (['convnet', '(32)', '(5)'], {'activation': '"""relu"""'}), "(convnet, 32, 5, activation='relu')\n", (5178, 5213), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5221, 5244), 
'tflearn.layers.conv.max_pool_2d', 'max_pool_2d', (['convnet', '(5)'], {}), '(convnet, 5)\n', (5232, 5244), False, 'from tflearn.layers.conv import conv_2d, max_pool_2d\n'), ((5257, 5303), 'tflearn.layers.core.fully_connected', 'fully_connected', (['max_0', '(128)'], {'activation': '"""relu"""'}), "(max_0, 128, activation='relu')\n", (5272, 5303), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((5314, 5335), 'tflearn.layers.core.dropout', 'dropout', (['convnet', '(0.4)'], {}), '(convnet, 0.4)\n', (5321, 5335), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((5347, 5396), 'tflearn.layers.core.fully_connected', 'fully_connected', (['convnet', '(6)'], {'activation': '"""softmax"""'}), "(convnet, 6, activation='softmax')\n", (5362, 5396), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((5407, 5532), 'tflearn.layers.estimator.regression', 'regression', (['convnet'], {'optimizer': '"""adam"""', 'learning_rate': '(0.005)', 'loss': '"""categorical_crossentropy"""', 'name': '"""ScreenshotClassifier"""'}), "(convnet, optimizer='adam', learning_rate=0.005, loss=\n 'categorical_crossentropy', name='ScreenshotClassifier')\n", (5417, 5532), False, 'from tflearn.layers.estimator import regression\n'), ((5534, 5600), 'tflearn.DNN', 'tflearn.DNN', (['convnet'], {'tensorboard_dir': '"""log"""', 'tensorboard_verbose': '(3)'}), "(convnet, tensorboard_dir='log', tensorboard_verbose=3)\n", (5545, 5600), False, 'import tflearn\n'), ((7991, 8004), 'tqdm.tqdm', 'tqdm', ([], {'total': 'm'}), '(total=m)\n', (7995, 8004), False, 'from tqdm import tqdm\n'), ((2118, 2146), 'glob.glob', 'glob.glob', (["(folder + '/*.png')"], {}), "(folder + '/*.png')\n", (2127, 2146), False, 'import glob\n'), ((8075, 8121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(num_of_pics_in_graph, 12)'}), '(figsize=(num_of_pics_in_graph, 12))\n', (8085, 8121), True, 'import matplotlib.pyplot as 
plt\n'), ((1154, 1168), 'tflearn.DNN', 'tflearn.DNN', (['l'], {}), '(l)\n', (1165, 1168), False, 'import tflearn\n'), ((1354, 1379), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (1364, 1379), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1435), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.1)', 'hspace': '(0.1)'}), '(wspace=0.1, hspace=0.1)\n', (1411, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1806), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('outputs-for-image:%i-at-epoch:%i.png' % (i, training_state.epoch))"], {}), "('outputs-for-image:%i-at-epoch:%i.png' % (i, training_state.epoch))\n", (1738, 1806), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3463), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (3460, 3463), False, 'import os\n'), ((6345, 6383), 'skimage.transform.resize', 'transform.resize', (['data[i]', '[50, 50, 3]'], {}), '(data[i], [50, 50, 3])\n', (6361, 6383), False, 'from skimage import transform\n'), ((8177, 8188), 'time.time', 'time.time', ([], {}), '()\n', (8186, 8188), False, 'import time\n'), ((12066, 12086), 'matplotlib.pyplot.title', 'plt.title', (['str_label'], {}), '(str_label)\n', (12075, 12086), True, 'import matplotlib.pyplot as plt\n'), ((12443, 12469), 'copy.deepcopy', 'copy.deepcopy', (['X_test[15:]'], {}), '(X_test[15:])\n', (12456, 12469), False, 'import copy\n'), ((2314, 2329), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2324, 2329), True, 'from scipy import misc as cv2\n'), ((4672, 4710), 'skimage.transform.resize', 'transform.resize', (['data[i]', '[50, 50, 3]'], {}), '(data[i], [50, 50, 3])\n', (4688, 4710), False, 'from skimage import transform\n'), ((8391, 8411), 'numpy.argmax', 'np.argmax', (['model_out'], {}), '(model_out)\n', (8400, 8411), True, 'import numpy as np\n'), ((8436, 8467), 'os.path.exists', 'os.path.exists', (["(imgpath + '/fb')"], {}), "(imgpath + '/fb')\n", (8450, 8467), False, 
'import os\n'), ((1620, 1650), 'matplotlib.pyplot.imshow', 'plt.imshow', (['o[i, :, :, kernel]'], {}), '(o[i, :, :, kernel])\n', (1630, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1686), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1679, 1686), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2089), 'glob.glob', 'glob.glob', (["(folder + '/*.png')"], {}), "(folder + '/*.png')\n", (2070, 2089), False, 'import glob\n'), ((2434, 2449), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2444, 2449), True, 'from scipy import misc as cv2\n'), ((8635, 8672), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (8650, 8672), False, 'import imageio\n'), ((8713, 8741), 'os.makedirs', 'os.makedirs', (["(imgpath + '/fb')"], {}), "(imgpath + '/fb')\n", (8724, 8741), False, 'import os\n'), ((8908, 8945), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (8923, 8945), False, 'import imageio\n'), ((8983, 9003), 'numpy.argmax', 'np.argmax', (['model_out'], {}), '(model_out)\n', (8992, 9003), True, 'import numpy as np\n'), ((9028, 9059), 'os.path.exists', 'os.path.exists', (["(imgpath + '/yt')"], {}), "(imgpath + '/yt')\n", (9042, 9059), False, 'import os\n'), ((2557, 2572), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2567, 2572), True, 'from scipy import misc as cv2\n'), ((9227, 9264), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (9242, 9264), False, 'import imageio\n'), ((9305, 9333), 'os.makedirs', 'os.makedirs', (["(imgpath + '/yt')"], {}), "(imgpath + '/yt')\n", (9316, 9333), False, 'import os\n'), ((9508, 9545), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (9523, 9545), False, 'import imageio\n'), ((9583, 9603), 'numpy.argmax', 'np.argmax', (['model_out'], {}), '(model_out)\n', (9592, 9603), 
True, 'import numpy as np\n'), ((9628, 9662), 'os.path.exists', 'os.path.exists', (["(imgpath + '/stack')"], {}), "(imgpath + '/stack')\n", (9642, 9662), False, 'import os\n'), ((2680, 2695), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2690, 2695), True, 'from scipy import misc as cv2\n'), ((9836, 9873), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (9851, 9873), False, 'import imageio\n'), ((9914, 9945), 'os.makedirs', 'os.makedirs', (["(imgpath + '/stack')"], {}), "(imgpath + '/stack')\n", (9925, 9945), False, 'import os\n'), ((10118, 10155), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (10133, 10155), False, 'import imageio\n'), ((10193, 10213), 'numpy.argmax', 'np.argmax', (['model_out'], {}), '(model_out)\n', (10202, 10213), True, 'import numpy as np\n'), ((10238, 10272), 'os.path.exists', 'os.path.exists', (["(imgpath + '/gmail')"], {}), "(imgpath + '/gmail')\n", (10252, 10272), False, 'import os\n'), ((2802, 2817), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2812, 2817), True, 'from scipy import misc as cv2\n'), ((10446, 10483), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (10461, 10483), False, 'import imageio\n'), ((10524, 10555), 'os.makedirs', 'os.makedirs', (["(imgpath + '/gmail')"], {}), "(imgpath + '/gmail')\n", (10535, 10555), False, 'import os\n'), ((10728, 10765), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (10743, 10765), False, 'import imageio\n'), ((10803, 10823), 'numpy.argmax', 'np.argmax', (['model_out'], {}), '(model_out)\n', (10812, 10823), True, 'import numpy as np\n'), ((10848, 10881), 'os.path.exists', 'os.path.exists', (["(imgpath + '/code')"], {}), "(imgpath + '/code')\n", (10862, 10881), False, 'import os\n'), ((11431, 11466), 'os.path.exists', 'os.path.exists', (["(imgpath + 
'/others')"], {}), "(imgpath + '/others')\n", (11445, 11466), False, 'import os\n'), ((2926, 2941), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2936, 2941), True, 'from scipy import misc as cv2\n'), ((2992, 3007), 'scipy.misc.imread', 'cv2.imread', (['img'], {}), '(img)\n', (3002, 3007), True, 'from scipy import misc as cv2\n'), ((11053, 11090), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (11068, 11090), False, 'import imageio\n'), ((11131, 11162), 'os.makedirs', 'os.makedirs', (["(imgpath + '/gmail')"], {}), "(imgpath + '/gmail')\n", (11142, 11162), False, 'import os\n'), ((11335, 11372), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (11350, 11372), False, 'import imageio\n'), ((11642, 11679), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (11657, 11679), False, 'import imageio\n'), ((11720, 11752), 'os.makedirs', 'os.makedirs', (["(imgpath + '/others')"], {}), "(imgpath + '/others')\n", (11731, 11752), False, 'import os\n'), ((11927, 11964), 'imageio.imwrite', 'imageio.imwrite', (['(imgpath + p)', 'data[s]'], {}), '(imgpath + p, data[s])\n', (11942, 11964), False, 'import imageio\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import argparse
import os
import sys
import torch
import tqdm
import cv2
import numpy as np
sys.path.append('detectron2')
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from utils.utils import mkdir, save_features
from utils.extract_utils import get_image_blob
from models import add_config
from models.bua.layers.nms import nms
def setup(args):
    """
    Create configs and perform basic setups.

    Args:
        args: parsed CLI namespace; ``args.config_file`` points at the
            YAML config and ``args.opts`` holds KEY VALUE overrides.

    Returns:
        The frozen detectron2 config node.
    """
    cfg = get_cfg()
    add_config(args, cfg)           # register model-specific config keys
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)  # command-line overrides win last
    cfg.freeze()
    default_setup(cfg, args)        # logging, seed, environment setup
    return cfg
def main():
    """Parse CLI arguments, build the detection model, and dump per-image
    bottom-up-attention features as compressed ``.npz`` files.

    For every image in ``--image-dir`` the model runs once; detections
    are filtered with per-class NMS (IoU 0.3), thresholded at
    CONF_THRESH, clamped to [MIN_BOXES, MAX_BOXES] boxes by score, and
    the pooled features plus box/class metadata are written to
    ``--out-dir/<image_id>.npz``.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection2 Inference")
    parser.add_argument(
        "--config-file",
        default="configs/bua-caffe/extract-bua-caffe-r101.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--mode", default="caffe", type=str, help="bua_caffe, ...")
    parser.add_argument('--out-dir', dest='output_dir',
                        help='output directory for features',
                        default="features")
    parser.add_argument('--image-dir', dest='image_dir',
                        help='directory with images',
                        default="image")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    cfg = setup(args)

    # Feature-selection limits: keep between MIN_BOXES and MAX_BOXES
    # regions, preferring those whose best class score >= CONF_THRESH.
    MIN_BOXES = 10
    MAX_BOXES = 100
    CONF_THRESH = 0.2

    model = DefaultTrainer.build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume
    )

    # Bug fix: make sure the output directory exists before the first
    # np.savez_compressed call (the original crashed when --out-dir was
    # missing; `mkdir` was imported but never used).
    os.makedirs(args.output_dir, exist_ok=True)

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))
    model.eval()
    for im_file in tqdm.tqdm(imglist):
        im = cv2.imread(os.path.join(args.image_dir, im_file))
        dataset_dict = get_image_blob(im)
        with torch.set_grad_enabled(False):
            if cfg.MODEL.BUA.ATTRIBUTE_ON:
                boxes, scores, features_pooled, attr_scores = model([dataset_dict])
            else:
                boxes, scores, features_pooled = model([dataset_dict])
            # Rescale boxes back to the original image coordinates.
            dets = boxes[0].tensor.cpu() / dataset_dict['im_scale']
            scores = scores[0].cpu()
            feats = features_pooled[0].cpu()

            # Per-class NMS: for each box keep the best class score that
            # survives NMS in any (non-background) class.
            max_conf = torch.zeros(scores.shape[0]).to(scores.device)
            for cls_ind in range(1, scores.shape[1]):
                cls_scores = scores[:, cls_ind]
                keep = nms(dets, cls_scores, 0.3)
                max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep],
                                             cls_scores[keep],
                                             max_conf[keep])

            keep_boxes = torch.nonzero(max_conf >= CONF_THRESH).flatten()
            if len(keep_boxes) < MIN_BOXES:
                keep_boxes = torch.argsort(max_conf, descending=True)[:MIN_BOXES]
            elif len(keep_boxes) > MAX_BOXES:
                keep_boxes = torch.argsort(max_conf, descending=True)[:MAX_BOXES]

            image_feat = feats[keep_boxes]
            image_bboxes = dets[keep_boxes]
            image_objects_conf = np.max(scores[keep_boxes].numpy(), axis=1)
            image_objects = np.argmax(scores[keep_boxes].numpy(), axis=1)
            # Build the metadata dict once; attribute fields are appended
            # only when the attribute head is enabled (same keys as before).
            info = {
                'image_id': im_file.split('.')[0],
                'image_h': np.size(im, 0),
                'image_w': np.size(im, 1),
                'num_boxes': len(keep_boxes),
                'objects_id': image_objects,
                'objects_conf': image_objects_conf,
            }
            if cfg.MODEL.BUA.ATTRIBUTE_ON:
                attr_scores = attr_scores[0].cpu()
                info['attrs_id'] = np.argmax(attr_scores[keep_boxes].numpy(), axis=1)
                info['attrs_conf'] = np.max(attr_scores[keep_boxes].numpy(), axis=1)

            output_file = os.path.join(args.output_dir, im_file.split('.')[0])
            np.savez_compressed(output_file, x=image_feat, bbox=image_bboxes,
                                num_bbox=len(keep_boxes),
                                image_h=np.size(im, 0), image_w=np.size(im, 1),
                                info=info)


if __name__ == "__main__":
    main()
| [
"utils.extract_utils.get_image_blob",
"os.listdir",
"detectron2.config.get_cfg",
"argparse.ArgumentParser",
"models.add_config",
"tqdm.tqdm",
"detectron2.engine.DefaultTrainer.build_model",
"os.path.join",
"numpy.size",
"detectron2.engine.default_setup",
"torch.nonzero",
"torch.argsort",
"de... | [((280, 309), 'sys.path.append', 'sys.path.append', (['"""detectron2"""'], {}), "('detectron2')\n", (295, 309), False, 'import sys\n'), ((910, 919), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (917, 919), False, 'from detectron2.config import get_cfg\n'), ((924, 945), 'models.add_config', 'add_config', (['args', 'cfg'], {}), '(args, cfg)\n', (934, 945), False, 'from models import add_config\n'), ((1044, 1068), 'detectron2.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (1057, 1068), False, 'from detectron2.engine import DefaultTrainer, default_setup, launch\n'), ((1110, 1184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Object Detection2 Inference"""'}), "(description='PyTorch Object Detection2 Inference')\n", (1133, 1184), False, 'import argparse\n'), ((2215, 2246), 'detectron2.engine.DefaultTrainer.build_model', 'DefaultTrainer.build_model', (['cfg'], {}), '(cfg)\n', (2241, 2246), False, 'from detectron2.engine import DefaultTrainer, default_setup, launch\n'), ((2411, 2437), 'os.listdir', 'os.listdir', (['args.image_dir'], {}), '(args.image_dir)\n', (2421, 2437), False, 'import os\n'), ((2559, 2577), 'tqdm.tqdm', 'tqdm.tqdm', (['imglist'], {}), '(imglist)\n', (2568, 2577), False, 'import tqdm\n'), ((2665, 2683), 'utils.extract_utils.get_image_blob', 'get_image_blob', (['im'], {}), '(im)\n', (2679, 2683), False, 'from utils.extract_utils import get_image_blob\n'), ((2251, 2304), 'detectron2.checkpoint.DetectionCheckpointer', 'DetectionCheckpointer', (['model'], {'save_dir': 'cfg.OUTPUT_DIR'}), '(model, save_dir=cfg.OUTPUT_DIR)\n', (2272, 2304), False, 'from detectron2.checkpoint import DetectionCheckpointer\n'), ((2603, 2640), 'os.path.join', 'os.path.join', (['args.image_dir', 'im_file'], {}), '(args.image_dir, im_file)\n', (2615, 2640), False, 'import os\n'), ((2698, 2727), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2720, 
2727), False, 'import torch\n'), ((3344, 3370), 'models.bua.layers.nms.nms', 'nms', (['dets', 'cls_scores', '(0.3)'], {}), '(dets, cls_scores, 0.3)\n', (3347, 3370), False, 'from models.bua.layers.nms import nms\n'), ((3404, 3489), 'torch.where', 'torch.where', (['(cls_scores[keep] > max_conf[keep])', 'cls_scores[keep]', 'max_conf[keep]'], {}), '(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep]\n )\n', (3415, 3489), False, 'import torch\n'), ((3174, 3202), 'torch.zeros', 'torch.zeros', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (3185, 3202), False, 'import torch\n'), ((3609, 3647), 'torch.nonzero', 'torch.nonzero', (['(max_conf >= CONF_THRESH)'], {}), '(max_conf >= CONF_THRESH)\n', (3622, 3647), False, 'import torch\n'), ((3723, 3763), 'torch.argsort', 'torch.argsort', (['max_conf'], {'descending': '(True)'}), '(max_conf, descending=True)\n', (3736, 3763), False, 'import torch\n'), ((4450, 4464), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (4457, 4464), True, 'import numpy as np\n'), ((4489, 4503), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (4496, 4503), True, 'import numpy as np\n'), ((4836, 4850), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (4843, 4850), True, 'import numpy as np\n'), ((4875, 4889), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (4882, 4889), True, 'import numpy as np\n'), ((5219, 5233), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (5226, 5233), True, 'import numpy as np\n'), ((5243, 5257), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (5250, 5257), True, 'import numpy as np\n'), ((3843, 3883), 'torch.argsort', 'torch.argsort', (['max_conf'], {'descending': '(True)'}), '(max_conf, descending=True)\n', (3856, 3883), False, 'import torch\n')] |
#!/usr/local/bin/python3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 05:32:46 2022
@author: josephDuque
"""
import sys, getopt
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from scipy import ndimage
# ----------------------------------------------------------
plt.rcParams.update({
    "text.usetex": True,
    "font.size": 12,
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica"]})
# for Palatino and other serif fonts use:
plt.rcParams.update({
    "text.usetex": True,
    "font.size": 12,
    "font.family": "serif",
    "font.serif": ["Palatino"],
})
# ----------------------------------------------------------
print("backend", plt.rcParams["backend"])
# Lower precision limit: 100 machine epsilons.
# NOTE: `np.float` was removed in NumPy 1.24 (it was only an alias for the
# builtin `float`), so plain float arithmetic is used here instead.
lowEps = np.finfo(float).eps * 100.0
#print("lower precision limit=",lowEps)
# ----------------------------------------------------------
def main(argv):
    """Parse CLI options, read a trajectory file, and show a 3-D line plot.

    Usage: plotTrajectory.py -i <dataToPlotDile> -o <hardCopyFileName>

    argv: command-line arguments (sys.argv[1:])
    """
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('plotTrajectory.py -i <dataToPlotDile> -o <hardCopyFileName>');
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('plotTrajectory.py -i <dataToPlotDile> -o <hardCopyFileName>');
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    print('File with data to plot <Input file> is "', inputfile)
    print('Filename of hard copy file <Output file> is "', outputfile)
    # --- Reading data
    # Use a context manager so the file handle is closed (the original leaked it).
    # The last line is dropped ([:-1]) -- presumably a trailing footer row; TODO confirm.
    with open(inputfile, 'rt') as fh:
        t0, posx, posy, posz, ux, uy, uz = np.loadtxt(
            fh.readlines()[:-1], delimiter='\t', skiprows=12, unpack=True)
    # --- Creating plot
    fig = plt.figure(figsize=(6, 4.5), dpi=144)
    # Axes3D(fig) stopped auto-registering with the figure in matplotlib >= 3.4;
    # add_subplot(projection='3d') is the supported replacement.
    ax = fig.add_subplot(projection='3d')
    line = plt.plot(posx, posy, posz, lw=0.2, c='k')[0]
    ax.view_init(azim=44.5, elev=15.)
    ax.grid(True)
    ax.set_xlabel(r'$x-$Position', fontsize=12)
    ax.set_ylabel(r'$y-$Position', fontsize=12)
    ax.set_zlabel(r'$z-$Position', fontsize=12)
    plt.show()
# -----------------------------------------------------------------
# Main function call
if __name__ == "__main__":
    # Require at least one CLI argument; otherwise print usage help.
    if len(sys.argv) > 1:
        main(sys.argv[1:])
    else:
        print('Please provide input file to plot.')
        print('plotTrajectory.py -i <dataToPlotDile> -o <hardCopyFileName>')
# End of main function call
# -----------------------------------------------------------------
| [
"getopt.getopt",
"numpy.float",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"sys.exit",
"numpy.finfo",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((436, 562), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.size': 12, 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica']}"], {}), "({'text.usetex': True, 'font.size': 12, 'font.family':\n 'sans-serif', 'font.sans-serif': ['Helvetica']})\n", (455, 562), True, 'import matplotlib.pyplot as plt\n'), ((620, 735), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.size': 12, 'font.family': 'serif', 'font.serif':\n ['Palatino']}"], {}), "({'text.usetex': True, 'font.size': 12, 'font.family':\n 'serif', 'font.serif': ['Palatino']})\n", (639, 735), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1152), 'numpy.float', 'np.float', (['(100.0)'], {}), '(100.0)\n', (1145, 1152), True, 'import numpy as np\n'), ((2457, 2494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)', 'dpi': '(144)'}), '(figsize=(6, 4.5), dpi=144)\n', (2467, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2503, 2514), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2509, 2514), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3271, 3281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3279, 3281), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1132), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1125, 1132), True, 'import numpy as np\n'), ((1339, 1389), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hi:o:"""', "['ifile=', 'ofile=']"], {}), "(argv, 'hi:o:', ['ifile=', 'ofile='])\n", (1352, 1389), False, 'import sys, getopt\n'), ((2527, 2568), 'matplotlib.pyplot.plot', 'plt.plot', (['posx', 'posy', 'posz'], {'lw': '(0.2)', 'c': '"""k"""'}), "(posx, posy, posz, lw=0.2, c='k')\n", (2535, 2568), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1513), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1510, 1513), False, 'import sys, getopt\n'), ((1653, 1663), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1661, 1663), False, 
'import sys, getopt\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_inclinometers.ipynb (unless otherwise specified).
# Public names exported by this nbdev-generated module (kept in sync with the notebook).
__all__ = ['construct_df_csv', 'drop_columns', 'update_type_date', 'rename_columns', 'add_column_up', 'df_multiindex',
           'compute_average_A_B', 'compute_rotations', '__deformee__', 'column_integration', 'get_zero_measure_date',
           'column_deformee_relative', 'run_computation_inklino', 'extract_df_datetube', 'build_fig_defo']
# Cell
import pandas as pd
import numpy
import io
import csv
from datetime import datetime, date
from google.colab import files
from copy import copy
from datetime import timedelta
from datetime import datetime
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
# Cell
def construct_df_csv(file_name):
    """Load a raw inclinometer export (semicolon-separated CSV) as a DataFrame."""
    return pd.read_csv(file_name, sep=';')
# Cell
def drop_columns(df):
    """Drop the metadata columns that are irrelevant for the deformation analysis.

    Columns deliberately kept (commented out of the drop list in the original
    notebook): 'casing', 'runs', 'date', 'type7', 'depth', 'A', 'B9', 'T10'.

    Returns a new DataFrame; `df` itself is not modified.
    """
    to_drop = ['format_version',
               'site',
               # 'casing',
               'latitude', 'longitude', 'type',
               'direction', 'mode',
               # 'runs',
               'unit', 'step', 'length', 'azimuth',
               'site_description', 'tube_description', 'os', 'manufacturer', 'model',
               'type2', 'version', 'type3', 'serial', 'firmware', 'T', 'H', 'B', 'V',
               'P', 'serial4', 'firmware5', 'hardware', 'unit6', 'factor',
               'calibration', 'number', 'reference',
               # 'date',
               'delay', 'notes',
               # 'type7',
               'steps', 'number8',
               # 'depth',
               # 'A', 'B9',
               # 'T10',
               'H11', 'V12', 'S']
    # `df.drop(to_drop, 1)` used the positional `axis` argument, which pandas
    # deprecated in 1.0 and removed in 2.0 -- use the explicit keyword instead.
    return df.drop(columns=to_drop)
# Cell
def update_type_date(df):
    """Convert the 'date' column from ISO timestamp strings to datetimes.

    Only the calendar day (the part before 'T') is kept; the time of day
    is discarded before parsing.
    """
    day_format = "%Y-%m-%d"
    df['date'] = [
        datetime.strptime(stamp.split('T')[0], day_format)
        for stamp in df['date']
    ]
    return df
# Cell
def rename_columns(df):
    """Give the raw measurement columns their analysis names:
    B9 -> B, T10 -> temp, casing -> tube."""
    mapping = {'B9': 'B',
               'T10': 'temp',
               'casing': 'tube'}
    return df.rename(columns=mapping)
# Cell
def add_column_up(df):
    """Encode the traverse direction as a sign column 'info value'.

    Rows measured in the 'A1B1' orientation get '+', all others '-'.  The now
    redundant 'type7' and 'runs' columns are dropped afterwards.
    """
    # '+' value corresponds to the A1B1 orientation
    df['info value'] = numpy.where(df['type7'] == 'A1B1', '+', '-')
    # Positional `axis` was removed in pandas 2.0 -- use the keyword form.
    return df.drop(columns=['type7', 'runs'])
# Cell
def df_multiindex(df):
    """Pivot to a (date, tube, depth)-indexed frame with (A/B x sign) columns."""
    return df.pivot(
        index=['date', 'tube', 'depth'],
        columns=['info value'],
        values=['A', 'B'],
    )
# Cell
def compute_average_A_B(df):
    """Add ('A','average') and ('B','average'): half the (+) minus (-) reading."""
    for axis_name in ('A', 'B'):
        df[(axis_name, 'average')] = (df[axis_name]['+'] - df[axis_name]['-']) / 2.0
    return df.sort_index(axis=1)
# Cell
def compute_rotations(df):
    """Convert averaged readings into rotations using the instrument constant 20000."""
    K_INKLINO = 20000.0
    for axis_name in ('A', 'B'):
        df[(axis_name, 'rotation')] = df[(axis_name, 'average')] / K_INKLINO
    return df.sort_index(axis=1)
# Cell
def __deformee__(list_incl):
    """
    Integrate inclinations into deflections, returned in (depth=0, depth=11.5) order.

    Walks the inclinations from the bottom of the tube upwards, accumulating
    0.5 * sin(theta) per 0.5 m step, then flips the result back to top-down order.
    """
    cumul = 0
    deflections = []
    # Positional slice [::-1] (not reversed()) so pandas Series inputs work too.
    for theta in list_incl[::-1]:
        cumul += .5 * np.sin(theta)
        deflections.append(cumul)
    deflections.reverse()
    return deflections
# Cell
def column_integration(df):
    """Integrate the rotation columns of each (date, tube) group into
    deflection ('defo') columns via __deformee__.

    Expects a frame indexed by (date, tube, depth) with ('A'/'B', 'rotation')
    columns; returns the frame with ('A'/'B', 'defo') added.
    """
    # Sort the row index so depths line up with the appended defo values.
    df = df.sort_index(axis=0)
    idx = pd.IndexSlice
    index_levels = df.index.levels
    all_dates, all_tubes = index_levels[0], index_levels[1]
    # One integration per (date, tube) pair and per measurement axis.
    for measure_date in all_dates:
        for tube_name in all_tubes:
            for axis_name in ('A', 'B'):
                rotations = df.loc[idx[measure_date, tube_name], (axis_name, 'rotation')]
                df.loc[idx[measure_date, tube_name], (axis_name, 'defo')] = __deformee__(rotations)
    return df.sort_index(axis=1)
# Cell
def get_zero_measure_date(dates, start_date):
    """
    Given a start date, return the date of the measurement closest in the past.

    Falls back to the earliest date when no measurement precedes `start_date`.
    Unlike the original linear scan, this version does not require `dates` to
    be sorted (a backward-compatible generalization: sorted input gives the
    same result).

    dates: iterable of comparable dates (e.g. datetime.date)
    start_date: the reference date
    """
    dates = list(dates)
    eligible = [d for d in dates if d <= start_date]
    return max(eligible) if eligible else min(dates)
# Cell
def column_deformee_relative(df, start_date):
    """
    Add relative-deformation columns ('defo relat') for axes A and B.

    The reference ("null") measurement is the one closest in the past to
    `start_date`; every measurement before that reference date is dropped.

    df: frame indexed by (date, tube, depth) with ('A'/'B', 'defo') columns
    start_date: reference date for the null measurement
    """
    # Sort so depths line up with the appended defo values
    # and to avoid pandas performance warnings on .loc MultiIndex slicing.
    df = df.sort_index(axis=0)
    idx = pd.IndexSlice
    elts_index = df.index.levels
    dates, tubes = elts_index[0], elts_index[1]
    # getting null measure date
    date_0 = get_zero_measure_date(dates, start_date)
    nullmessungen_A = {}  # per-tube A-axis null measurement ("Nullmessung")
    nullmessungen_B = {}  # per-tube B-axis null measurement
    # setting relative displacements columns
    # A priori the frame always holds the two tubes I1 and I2, but looping over
    # the index level guards against copy/paste variants.
    for tube in tubes:
        nullmessungen_A[tube] = numpy.array(df.loc[idx[date_0, tube], ('A','defo')])
        nullmessungen_B[tube] = numpy.array(df.loc[idx[date_0, tube], ('B','defo')])
    for date in dates:
        for tube in tubes:
            df.loc[idx[date, tube], ('A','defo relat')] = numpy.array(df.loc[idx[date, tube], ('A','defo')]) - nullmessungen_A[tube]
            df.loc[idx[date, tube], ('B','defo relat')] = numpy.array(df.loc[idx[date, tube], ('B','defo')]) - nullmessungen_B[tube]
    # Remove measures before null measure
    df = df.truncate(before=date_0)
    return df
# Cell
def run_computation_inklino(inklino_file_name, start_date):
    """
    Full inclinometer pipeline: load the raw CSV export, clean it, pivot it,
    and derive rotations and (relative) deformations.

    inklino_file_name: path to the ';'-separated export
    start_date: reference date for the null measurement
    """
    # Load the raw export as a DataFrame.
    frame = construct_df_csv(inklino_file_name)
    # Cleaning steps, applied in order.
    for cleaning_step in (drop_columns, update_type_date, rename_columns, add_column_up):
        frame = cleaning_step(frame)
    # Pivot into the (date, tube, depth) multi-index layout.
    frame = df_multiindex(frame)
    # Rotations and integrated deformations.
    frame = compute_average_A_B(frame)
    frame = compute_rotations(frame)
    frame = column_integration(frame)
    # Deformations relative to the null measurement at/just before start_date.
    frame = column_deformee_relative(frame, start_date)
    return frame
# Cell
def extract_df_datetube(df, date, tube):
    """Slice out the sub-frame for one measurement date and one tube."""
    return df.loc[pd.IndexSlice[date, tube], :]
# Cell
def build_fig_defo(df, lettre='A'):
    """Build an interactive Plotly figure of relative deformations vs depth.

    One slider step per measurement date; each step shows two traces, one per
    tube: 'I1' ("Unverklebt", red) and 'I2' ("Vorveklebt", cyan), plotting the
    (lettre, 'defo relat') column against depth.

    df: (date, tube, depth)-indexed frame with ('A'/'B', 'defo relat') columns
    lettre: measurement axis to plot, 'A' or 'B'
    """
    fig = go.Figure()
    elts_index = df.index.levels
    dates, _ = elts_index[0], elts_index[1]
    x_min, x_max = 0, 0
    # Add traces, one pair (I1 + I2) for each slider step
    for date in dates:
        df_1 = extract_df_datetube(df, date, 'I1')
        df_2 = extract_df_datetube(df, date, 'I2')
        display_date = date.strftime("%d/%m/%Y")
        fig.add_trace(
            go.Scatter(
                visible=False,
                mode='lines+markers',
                line=dict(color="#D32F2F", width=1),
                name="Unverklebt",
                x=df_1[(lettre,'defo relat')],
                y=df_1.index # depth
            ))
        fig.add_trace(
            go.Scatter(
                visible=False,
                mode='lines+markers',
                line=dict(color="#4DD0E1", width=1),
                name="Vorveklebt",
                x=df_2[(lettre,'defo relat')],
                y=df_2.index # depth
            ))
        # Display limits: track the global x-range over all dates so every
        # slider step shares the same symmetric axis range.
        x_min = min(min(df_1[(lettre,'defo relat')]),
                    min(df_2[(lettre,'defo relat')]),
                    x_min
                    )
        x_max = max(max(df_1[(lettre,'defo relat')]),
                    max(df_2[(lettre,'defo relat')]),
                    x_max
                    )
    x_max = max(x_max, abs(x_min))
    # Make 1st trace visible
    fig.data[0].visible = True
    fig.data[1].visible = True
    # fig.data is a flat list of traces; traces are toggled two at a time
    # (the I1/I2 pair belonging to one date).
    # Create and add slider
    steps = []
    for i in range(len(fig.data)//2):
        date_essai = dates[i]
        display_date = date_essai.strftime("%d/%m/%Y")
        step = dict(
            method="update",
            args=[{"visible": [False] * len(fig.data)},
                  {"title": "Datum: " + display_date}], # layout attribute
        )
        step["args"][0]["visible"][2*i] = True # Toggle i'th trace to "visible"
        step["args"][0]["visible"][2*i+1] = True
        steps.append(step)
    # step = {'method': 'update', 'args': [{'visible': [True, False, False, False]},
    #                                      {'title': 'Slider switched to date: 0'}]}
    sliders = [dict(
        # NOTE(review): active=10 assumes at least 11 slider steps; verify
        # behavior for short date ranges.
        active=10,
        currentvalue={"prefix": "Step: "},
        pad={"t": 50}, # space between slider and graph
        steps=steps
    )]
    # Layout
    fig.update_layout(
        width=600,
        height=700,
        sliders=sliders,
        margin=dict(l=40, r=30, t=30, b=30),
        template='none',
        title_font=dict(family="Rockwell", size=18),
        legend=dict(
            orientation="h",
            yanchor="top",
            y=0.93,
            xanchor="center",
            x=0.5
        ),
    )
    fig.update_xaxes(
        domain=(0.2, 0.8),
        range=[-x_max*1.1, x_max*1.1],
        title='Bewegung ' + lettre + ' [m]'
    )
    fig.update_yaxes(
        domain=(0, 0.8),
        range=[12, 0],
        title='Tiefe [m]'
    )
    return fig
"pandas.read_csv",
"numpy.where",
"datetime.datetime.strptime",
"datetime.date.split",
"plotly.graph_objects.Figure",
"numpy.array",
"numpy.sin",
"datetime.date.strftime"
] | [((790, 827), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'delimiter': '""";"""'}), "(file_name, delimiter=';')\n", (801, 827), True, 'import pandas as pd\n'), ((2134, 2178), 'numpy.where', 'numpy.where', (["(df['type7'] == 'A1B1')", '"""+"""', '"""-"""'], {}), "(df['type7'] == 'A1B1', '+', '-')\n", (2145, 2178), False, 'import numpy\n'), ((6207, 6218), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (6216, 6218), True, 'import plotly.graph_objects as go\n'), ((1821, 1858), 'datetime.datetime.strptime', 'datetime.strptime', (['day', 'dateFormatter'], {}), '(day, dateFormatter)\n', (1838, 1858), False, 'from datetime import datetime\n'), ((4760, 4813), 'numpy.array', 'numpy.array', (["df.loc[idx[date_0, tube], ('A', 'defo')]"], {}), "(df.loc[idx[date_0, tube], ('A', 'defo')])\n", (4771, 4813), False, 'import numpy\n'), ((4841, 4894), 'numpy.array', 'numpy.array', (["df.loc[idx[date_0, tube], ('B', 'defo')]"], {}), "(df.loc[idx[date_0, tube], ('B', 'defo')])\n", (4852, 4894), False, 'import numpy\n'), ((6492, 6517), 'datetime.date.strftime', 'date.strftime', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (6505, 6517), False, 'from datetime import datetime, date\n'), ((1698, 1713), 'datetime.date.split', 'date.split', (['"""T"""'], {}), "('T')\n", (1708, 1713), False, 'from datetime import datetime, date\n'), ((3030, 3043), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3036, 3043), True, 'import numpy as np\n'), ((4991, 5042), 'numpy.array', 'numpy.array', (["df.loc[idx[date, tube], ('A', 'defo')]"], {}), "(df.loc[idx[date, tube], ('A', 'defo')])\n", (5002, 5042), False, 'import numpy\n'), ((5118, 5169), 'numpy.array', 'numpy.array', (["df.loc[idx[date, tube], ('B', 'defo')]"], {}), "(df.loc[idx[date, tube], ('B', 'defo')])\n", (5129, 5169), False, 'import numpy\n')] |
import unittest
import numpy as np
import SimpleITK as sitk
import pymia.filtering.filter as pymia_fltr
import pymia.filtering.preprocessing as pymia_fltr_prep
class TestNormalizeZScore(unittest.TestCase):
    """Unit tests for pymia's z-score normalization filter."""

    def setUp(self):
        # Build a tiny 4x1 test image with intensities 1..4.
        img = sitk.Image((4, 1), sitk.sitkUInt8)
        for col, value in enumerate((1, 2, 3, 4)):
            img.SetPixel((col, 0), value)
        self.image = img
        # Expected z-scores of [1, 2, 3, 4], computed (outside of R) as
        # (x - mean(x)) / sqrt(var(x) * 3/4).
        self.desired = np.array(
            [[-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]],
            np.float64)

    def test_normalization(self):
        out = pymia_fltr_prep.NormalizeZScore().execute(self.image)
        np.testing.assert_array_almost_equal(
            self.desired, sitk.GetArrayFromImage(out), decimal=12)

    def test_normalization_with_param(self):
        out = pymia_fltr_prep.NormalizeZScore().execute(
            self.image, pymia_fltr.FilterParams())
        np.testing.assert_array_almost_equal(
            self.desired, sitk.GetArrayFromImage(out), decimal=12)

    def test_image_properties(self):
        out = pymia_fltr_prep.NormalizeZScore().execute(self.image)
        # Spatial metadata must be carried over from the input image.
        for prop in ('GetSize', 'GetOrigin', 'GetSpacing', 'GetDirection',
                     'GetDimension', 'GetNumberOfComponentsPerPixel'):
            self.assertEqual(getattr(self.image, prop)(), getattr(out, prop)())
        self.assertEqual(sitk.sitkFloat64, out.GetPixelID())
| [
"numpy.testing.assert_array_almost_equal",
"SimpleITK.Image",
"SimpleITK.GetArrayFromImage",
"numpy.array",
"pymia.filtering.preprocessing.NormalizeZScore",
"pymia.filtering.filter.FilterParams"
] | [((271, 305), 'SimpleITK.Image', 'sitk.Image', (['(4, 1)', 'sitk.sitkUInt8'], {}), '((4, 1), sitk.sitkUInt8)\n', (281, 305), True, 'import SimpleITK as sitk\n'), ((652, 753), 'numpy.array', 'np.array', (['[[-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]]', 'np.float64'], {}), '([[-1.3416407864999, -0.44721359549996, 0.44721359549996, \n 1.3416407864999]], np.float64)\n', (660, 753), True, 'import numpy as np\n'), ((798, 831), 'pymia.filtering.preprocessing.NormalizeZScore', 'pymia_fltr_prep.NormalizeZScore', ([], {}), '()\n', (829, 831), True, 'import pymia.filtering.preprocessing as pymia_fltr_prep\n'), ((888, 915), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['out'], {}), '(out)\n', (910, 915), True, 'import SimpleITK as sitk\n'), ((925, 996), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['self.desired', 'out_arr'], {'decimal': '(12)'}), '(self.desired, out_arr, decimal=12)\n', (961, 996), True, 'import numpy as np\n'), ((1057, 1090), 'pymia.filtering.preprocessing.NormalizeZScore', 'pymia_fltr_prep.NormalizeZScore', ([], {}), '()\n', (1088, 1090), True, 'import pymia.filtering.preprocessing as pymia_fltr_prep\n'), ((1174, 1201), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['out'], {}), '(out)\n', (1196, 1201), True, 'import SimpleITK as sitk\n'), ((1211, 1282), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['self.desired', 'out_arr'], {'decimal': '(12)'}), '(self.desired, out_arr, decimal=12)\n', (1247, 1282), True, 'import numpy as np\n'), ((1335, 1368), 'pymia.filtering.preprocessing.NormalizeZScore', 'pymia_fltr_prep.NormalizeZScore', ([], {}), '()\n', (1366, 1368), True, 'import pymia.filtering.preprocessing as pymia_fltr_prep\n'), ((1129, 1154), 'pymia.filtering.filter.FilterParams', 'pymia_fltr.FilterParams', ([], {}), '()\n', (1152, 1154), True, 'import pymia.filtering.filter as pymia_fltr\n')] |
import pickle
import os
import numpy as np
class Params(object):
    """ A simple dictionary that has its keys as attributes available. """

    def __init__(self):
        pass

    def __str__(self):
        # One "name: value" line per attribute; names sorted and left-padded
        # to 18 characters for column alignment.
        lines = []
        for key in sorted(self.__dict__):
            lines.append("%-18s %s\n" % (key + ":", self.__dict__[key]))
        return "".join(lines)

    def __repr__(self):
        return self.__str__()
def save(path, var):
    """
    Saves the variable ``var`` to the given path. The file format depends on the file extension.

    List of supported file types:

    - .pkl: pickle
    - .npy: numpy
    - .txt: text file, one element per line. ``var`` must be a string or list of strings.

    Raises NotImplementedError for any other extension.
    """
    if path.endswith(".pkl"):
        with open(path, 'wb') as f:
            pickle.dump(var, f, 2)
    elif path.endswith(".npy"):
        np.save(path, var)
    elif path.endswith(".txt"):
        with open(path, 'w') as f:
            # `basestring` is Python 2 only and raises NameError on Python 3;
            # `str` is the correct check here.
            if isinstance(var, str):
                f.write(var)
            else:
                for i in var:
                    f.write(i)
                    f.write('\n')
    else:
        raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def load(path):
    """
    Loads the content of a file. It is mainly a convenience function to
    avoid adding the ``open()`` contexts. File type detection is based on extensions.

    Can handle the following types:

    - .pkl: pickles
    - .txt: text files, result is a list of strings ending whitespace removed

    :param path: path to the file
    """
    if path.endswith('.pkl'):
        with open(path, 'rb') as f:
            return pickle.load(f)
    if path.endswith('.txt'):
        with open(path, 'r') as f:
            # Strip trailing newline / carriage-return from each line.
            return [line.rstrip('\n\r') for line in f]
    raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def ensuredir(path):
    """
    Creates a folder (and any missing parents) if it doesn't exist.

    :param path: path to the folder to create
    """
    # exist_ok avoids the check-then-create race of the original
    # `os.path.exists` guard when several processes create the same folder.
    os.makedirs(path, exist_ok=True)
| [
"os.path.exists",
"pickle.dump",
"os.makedirs",
"pickle.load",
"os.path.splitext",
"numpy.save"
] | [((2105, 2125), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2119, 2125), False, 'import os\n'), ((2136, 2153), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2147, 2153), False, 'import os\n'), ((815, 837), 'pickle.dump', 'pickle.dump', (['var', 'f', '(2)'], {}), '(var, f, 2)\n', (826, 837), False, 'import pickle\n'), ((880, 898), 'numpy.save', 'np.save', (['path', 'var'], {}), '(path, var)\n', (887, 898), True, 'import numpy as np\n'), ((1720, 1734), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1731, 1734), False, 'import pickle\n'), ((1930, 1952), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1946, 1952), False, 'import os\n'), ((1230, 1252), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1246, 1252), False, 'import os\n')] |
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial, wraps
import os
import signal
import subprocess
import sys
import types
from typing import Callable, Iterable, Iterator, List, Mapping, Optional, Tuple, TypeVar, Union
import humanize
from more_itertools import flatten, one, unique_everseen, windowed
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from potoo import debug_print
import potoo.numpy
from potoo.util import get_cols, get_rows, or_else
# Convenient shorthands for interactive use -- not recommended for durable code that needs to be read and maintained
DF = pd.DataFrame
S = pd.Series
X = TypeVar('X')
#
# Global options
#
# Mutate these for manual control
# - https://pandas.pydata.org/pandas-docs/stable/options.html
# - TODO In ipykernel you have to manually set_display() after changing any of these
# - Workaround: use pd.set_option for the display_* settings
# Fallback sizes used when no tty is available (read by set_display below).
ipykernel_display_max_rows = 1000 # For pd df output
ipykernel_display_width = 10000 # For pd df output
ipykernel_lines = 75 # Does this affect anything?
ipykernel_columns = 120 # For ipython pretty printing (not dfs)
# Primary knobs; 0 means "derive from terminal size", ''/None means unlimited.
display_width = 0 # Default: 80; 0 means use get_terminal_size, ''/None means unlimited
display_max_rows = 0 # Default: 60; 0 means use get_terminal_size, ''/None means unlimited
display_max_columns = 250 # Default: 20
display_max_colwidth = lambda cols: 200 # Default: 50; go big for dense bq cells
display_precision = 3 # Default: 6; better magic than _float_format
def set_display_max_colwidth(x=display_max_colwidth):
    """Set the global max-colwidth policy and return it.

    x may be:
    - a callable (cols -> width), used as-is;
    - a float, interpreted as a fraction of the terminal column count;
    - an int, a fixed width.
    """
    global display_max_colwidth
    if callable(x):
        # `callable` generalizes the original types.FunctionType check to also
        # accept builtins, functools.partial objects, and bound methods.
        display_max_colwidth = x
    elif isinstance(x, float):
        display_max_colwidth = lambda cols: int(cols * x)
    elif isinstance(x, int):
        display_max_colwidth = lambda cols: x
    return display_max_colwidth
def set_display_precision(x=display_precision):
    """Set the module-global pandas display precision and return it."""
    global display_precision
    display_precision = x
    return x
def set_display(**kwargs):
    """Configure terminal/ipykernel-aware display settings for numpy + pandas.

    Detects whether stdout is a real terminal (sizes taken from the tty) or
    ipykernel (fixed ipykernel_* fallbacks), then applies the module-level
    display_* options via pd.set_option.  Any inferred parameter ('lines',
    'columns', '_display_width', '_display_max_rows') can be overridden via
    kwargs.
    """
    # XXX I couldn't find a way to make auto-detect work with both ipython (terminal) + ipykernel (atom)
    # # Unset $LINES + $COLUMNS so pandas will detect changes in terminal size after process start
    # # - https://github.com/pandas-dev/pandas/blob/473a7f3/pandas/io/formats/terminal.py#L32-L33
    # # - https://github.com/python/cpython/blob/7028e59/Lib/shutil.py#L1071-L1079
    # # - TODO These used to be '' instead of del. Revert back if this change causes problems.
    # os.environ.pop('LINES', None)
    # os.environ.pop('COLUMNS', None)
    # HACK This is all horrible and I hate it. After much trial and error I settled on this as a way to make both
    # ipython (terminal) and ipykernel (atom) work.
    try:
        size = os.get_terminal_size(sys.__stdout__.fileno()) # [TODO Why didn't I use shutil.get_terminal_size here?]
    except OSError:
        # If ipykernel
        lines = ipykernel_lines
        columns = ipykernel_columns
        _display_width = display_width or ipykernel_display_width or columns
        _display_max_rows = display_max_rows or ipykernel_display_max_rows or lines
    else:
        # If terminal
        lines = size.lines - 8
        columns = size.columns
        _display_width = display_width or columns
        _display_max_rows = display_max_rows or lines
    # Let kwargs override any of these params that we just inferred
    lines = kwargs.get('lines', lines)
    columns = kwargs.get('columns', columns)
    _display_width = kwargs.get('_display_width', _display_width)
    _display_max_rows = kwargs.get('_display_max_rows', _display_max_rows)
    # For ipython pretty printing (not dfs)
    os.environ['LINES'] = str(lines)
    os.environ['COLUMNS'] = str(columns)
    potoo.numpy.set_display()
    # http://pandas.pydata.org/pandas-docs/stable/generated/pandas.set_option.html
    # - TODO Any good way to %page by default?
    # - here: pd.set_option('display.width', 10000)
    # - repl: pd.DataFrame({i:range(100) for i in range(100)})
    pd.set_option('display.width', _display_width)
    pd.set_option('display.max_rows', _display_max_rows)
    pd.set_option('display.max_columns', display_max_columns)
    pd.set_option('display.max_colwidth', display_max_colwidth(get_cols()))
    pd.set_option('display.precision', display_precision) # Default: 6; better magic than _float_format
    # pd.set_option('display._float_format', _float_format(10, 3)) # Default: magic in pandas.formats.format
# pd.set_option('display._float_format', _float_format(10, 3)) # Default: magic in pandas.formats.format
def set_display_on_sigwinch():
    """Re-run set_display whenever the terminal window is resized (SIGWINCH)."""
    handler = lambda sig, frame: set_display()
    signal.signal(signal.SIGWINCH, handler)
    set_display()  # And ensure it's set to begin with
# TODO Check out `with pd.option_context`
@contextmanager
def with_options(options):
    """Temporarily apply pandas options, restoring previous values on exit.

    Like `pd.option_context`, but takes a dict of {option_name: value}.

    Fix over the original: the get/set loop now runs inside the try block, so
    if setting one option raises partway through, the options already changed
    are still restored (previously only failures inside the `with` body
    triggered restoration).
    """
    saved = {}
    try:
        for k, v in options.items():
            saved[k] = pd.get_option(k)
            pd.set_option(k, v)
        yield
    finally:
        for k, v in saved.items():
            pd.set_option(k, v)
#
# Utils
#
# display() within a df pipeline, e.g.
#
# df2 = (df
# .pipe(df_display, ...)
# ...
# )
#
def df_display(df, *xs: any):
    """display() within a df pipeline: each x may be a callable applied to df,
    a string, a displayable object, or a tuple of display() arguments.
    Returns df unchanged so it can be used with .pipe()."""
    items = xs if xs else [lambda frame: frame]
    for item in items:
        if callable(item):
            item = item(df)
        if isinstance(item, str):
            # display() instead of print() to match flush behavior
            display({'text/plain': item}, raw=True)
        else:
            args = item if isinstance(item, tuple) else (item,)
            # Less reliable flush, e.g. for single-line strs (which don't make
            # it here), and maybe others...
            display(*args)
    return df
def quantiles(x, q=4, **kwargs):
    """Return the bin edges pd.qcut computes for `x` with `q` quantile bins."""
    options = {
        # Return shorter list (e.g. [0]) i/o throwing when bin edges aren't unique
        'duplicates': 'drop',
        'labels': False,
    }
    options.update(kwargs)  # caller kwargs win over the defaults
    _labeled, edges = pd.qcut(x, q=q, retbins=True, **options)
    return edges
def df_rows(df) -> Iterator['Row']:
    """Iterate over the rows of df (shorthand for a very common idiom)."""
    for _index, row in df.iterrows():
        yield row
def df_map_rows(df, f: Callable[['Row'], 'Row'], *args, **kwargs) -> pd.DataFrame:
    """Apply f to every row of df (shorthand for a very common idiom)."""
    return df.apply(func=f, axis=1, *args, **kwargs)
def series_assign(s: pd.Series, **kwargs) -> pd.Series:
    """Like df.assign but for Series: return a copy with keys set/updated.

    A callable value is applied to the current value at that key.
    """
    out = s.copy()
    for key, value in kwargs.items():
        out.at[key] = value(out.at[key]) if callable(value) else value
    return out
def df_assign_first(df, **kwargs) -> pd.DataFrame:
    """Like df.assign but also reposition the assigned cols to be first."""
    assigned = df.assign(**kwargs)
    return df_reorder_cols(assigned, first=kwargs.keys())
def df_map_col(df, **kwargs) -> pd.DataFrame:
    """
    Map col values by the given function
    - A shorthand for a very common usage of df.assign / df.col.map
    """
    mapped = {name: df[name].map(fn) for name, fn in kwargs.items()}
    return df.assign(**mapped)
# XXX Deprecated: remove after updating callers
def df_col_map(*args, **kwargs) -> pd.DataFrame:
    """Deprecated alias for df_map_col; forwards all arguments unchanged."""
    return df_map_col(*args, **kwargs)
# Based on https://github.com/pandas-dev/pandas/issues/8517#issuecomment-247785821
# - Not very performant, use sparingly...
def df_flatmap(df: pd.DataFrame, f: Callable[['Row'], Union[pd.DataFrame, Iterable['Row']]]) -> pd.DataFrame:
    """Map each row of df to zero or more rows (a DataFrame or an iterable of
    row Series) and concatenate the results into a new DataFrame.

    Based on https://github.com/pandas-dev/pandas/issues/8517#issuecomment-247785821
    - Not very performant, use sparingly...
    """
    def _expanded():
        for _, row_in in df.iterrows():
            produced = f(row_in)
            if isinstance(produced, pd.DataFrame):
                produced = (r for _, r in produced.iterrows())
            for row_out in produced:
                yield OrderedDict(row_out)
    return pd.DataFrame(_expanded())
def df_summary(
    df: Union[pd.DataFrame, pd.Series], # A df, or a series that will be coerced into a 1-col df
    n=10, # Show first n rows of df (0 for none, None for all)
    k=10, # Show random k rows of df (0 for none, None for all)
    random_state=None, # For df.sample
    # Summaries that might have a different dtype than the column they summarize (e.g. count, mean)
    # NOTE: these list defaults are mutable but only rebound (never mutated) below, so sharing is safe.
    stats=[
        # Use dtype.name (str) instead of dtype (complicated object that causes trouble)
        ('dtype', lambda df: [dtype.name for dtype in df.dtypes]),
        # ('sizeof', lambda df: _sizeof_df_cols(df).map(partial(humanize.naturalsize, binary=True))),
        ('sizeof', lambda df: _sizeof_df_cols(df)),
        ('len', lambda df: len(df)),
        # 'count', # Prefer isnull (= len - count)
        ('isnull', lambda df: df.isnull().sum()),
        # df.apply + or_else to handle unhashable types
        ('nunique', lambda df: df.apply(lambda c: or_else(np.nan, lambda: c.nunique()))),
        # df.apply + or_else these else they subset the cols to just the numerics, which quietly messes up col ordering
        # - dtype.base else 'category' dtypes break np.issubdtype [https://github.com/pandas-dev/pandas/issues/9581]
        ('mean', lambda df: df.apply(func=lambda c: c.mean() if np.issubdtype(c.dtype.base, np.number) else np.nan)),
        ('std', lambda df: df.apply(func=lambda c: c.std() if np.issubdtype(c.dtype.base, np.number) else np.nan)),
    ],
    # Summaries that have the same dtype as the column they summarize (e.g. quantile values)
    prototypes=[
        ('min', lambda df: _df_quantile(df, 0, interpolation='lower')),
        ('25%', lambda df: _df_quantile(df, .25, interpolation='lower')),
        ('50%', lambda df: _df_quantile(df, .5, interpolation='lower')),
        ('75%', lambda df: _df_quantile(df, .75, interpolation='lower')),
        ('max', lambda df: _df_quantile(df, 1, interpolation='higher')),
    ],
):
    """A more flexible version of df.describe, with more information by default.

    Returns a frame whose leading rows are the stat/prototype summaries
    (pulled into the column index) followed by the first n rows and a random
    sample of k of the remaining rows.
    """
    # Coerce series to df
    if isinstance(df, pd.Series):
        df = pd.DataFrame(df)
    # Surface non-default indexes as cols in stats
    if not df.index.identical(pd.RangeIndex(len(df))):
        try:
            df = df.reset_index() # Surface indexes as cols in stats
        except:
            # Oops, index is already a col [`drop=df.index.name in df.columns` is unreliable b/c df.index.names ...]
            df = df.reset_index(drop=True)
    # A bare string stat name means "call that method on df" (e.g. 'count').
    stats = [(f, lambda df, f=f: getattr(df, f)()) if isinstance(f, str) else f for f in stats]
    prototypes = [(f, lambda df, f=f: getattr(df, f)()) if isinstance(f, str) else f for f in prototypes]
    return (
        # Make a df from: stats + prototypes + first n rows + random k rows
        pd.concat([
            pd.DataFrame(OrderedDict({k: f(df) for k, f in stats + prototypes})).T,
            df[:n],
            df[n:].sample(
                n=min(k, len(df[n:])),
                replace=False,
                random_state=random_state,
            ).sort_index(),
        ])
        # Reorder cols to match input (some aggs like mean/std throw out non-numeric cols, which messes up col ordering)
        [df.columns]
        # Pull stats up into col index, so that our col dtypes can match the input col dtypes
        # - [Added later] Also prototypes, to separate from df[:n] rows
        # - FIXME dtypes get mixed up (e.g. False/0) in the transpose
        # - WARNING Seems deeply rooted -- coercing each index value wasn't sufficient to fix, via:
        # MultiIndex.map(lambda (k, *xs): (k, *tuple(df[k].dtype.type(x) for x in xs)))
        .T.set_index([k for k, f in stats + prototypes], append=True).T
        # Transpose for fixed width (stats) and variable height (input cols)
        # - [Nope: transposing cols mixes dtypes such that mixed str/int/float undermines display.precision smarts]
        # .T
    )
def _df_quantile(df, q=.5, interpolation='linear'):
    """Column-wise quantile that, unlike pd.DataFrame.quantile, also handles ordered categoricals"""
    quantile_of = lambda col: _series_quantile(col, q=q, interpolation=interpolation)
    return df.apply(quantile_of)
def _series_quantile(s, *args, **kwargs):
"""Like pd.Series.quantile but handles ordered categoricals"""
if s.dtype.name == 'category':
cat_code = s.cat.codes.quantile(*args, **kwargs)
return s.dtype.categories[cat_code] if cat_code != -1 else None
else:
try:
return s.quantile(*args, **kwargs)
except:
# e.g. a column of non-uniform np.array's will fail like:
# ValueError: operands could not be broadcast together with shapes (6599624,) (459648,)
return np.nan
def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
    """Per-column memory usage in bytes (deep=True so the contents of object columns are counted)"""
    return df.memory_usage(index=False, deep=True)
# XXX Looks like df.memory_usage(deep=True) is more accurate (previous attempts were missing deep=True)
# def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
# """
# sizeof is hard, but make our best effort:
# - Use dask.sizeof.sizeof instead of sys.getsizeof, since the latter is unreliable for pandas/numpy objects
# - Use df.applymap, since dask.sizeof.sizeof appears to not do this right [why? seems wrong...]
# """
# try:
# import dask.sizeof
# except:
# return df.apply(lambda c: None)
# else:
# return df.applymap(dask.sizeof.sizeof).sum()
def df_value_counts(
    df: pd.DataFrame,
    exprs=None,  # Cols to surface, as expressions understood by df.eval(expr) (default: df.columns)
    limit=10,  # Limit rows
    exclude_max_n=1,  # Exclude cols where max n ≤ exclude_max_n
    fillna='',  # Fill na cells (for seeing); pass None to leave na cols as NaN (for processing)
    unique_names=False,  # Give all cols unique names (for processing) instead of reusing 'n' (for seeing)
    **kwargs,  # kwargs for .value_counts (e.g. dropna)
) -> pd.DataFrame:
    """Series.value_counts() extended over a whole DataFrame (with a few compromises in hygiene)"""
    exprs = exprs if exprs is not None else df.columns
    return (df
        .pipe(df_remove_unused_categories)
        .pipe(df_cat_to_str)
        # Build one small (value, count) frame per expr and glue them side by side
        .pipe(lambda df: (pd.concat(axis=1, objs=[
            ns
            for expr_opts in exprs
            # Each element of exprs is either a bare expr or an (expr, opts) tuple
            for expr, opts in [expr_opts if isinstance(expr_opts, tuple) else (expr_opts, dict())]
            for ns in [(df
                .eval(expr)
                .value_counts(**kwargs)
            )]
            # value_counts sorts descending, so .iloc[0] is the max count for this expr
            if ns.iloc[0] > exclude_max_n
            for ns in [(ns
                .pipe(lambda s: (
                    # NOTE We "sort_index" when "sort_values=True" because the "values" are in the index, as opposed to
                    # the "counts", which are the default sort
                    s.sort_values(ascending=opts.get('ascending', False)) if not opts.get('sort_values') else
                    s.sort_index(ascending=opts.get('ascending', True))
                ))
                .iloc[:limit]
                .to_frame()
                .rename(columns=lambda x: f'n_{expr}' if unique_names else 'n')
                .reset_index()
                .rename(columns={'index': expr})
            )]
        ])))
        .fillna(fillna)
    )
def df_reorder_cols(df: pd.DataFrame, first: List[str] = [], last: List[str] = []) -> pd.DataFrame:
    """Reorder columns: `first` up front, `last` at the end, everything else in between (original order)"""
    pinned = set(first) | set(last)
    middle = [c for c in df.columns if c not in pinned]
    return df.reindex(columns=[*first, *middle, *last])
def df_transform_columns(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
    """Return a copy of df whose columns are f(columns); the input df is left untouched"""
    out = df.copy()
    out.columns = f(out.columns)
    return out
def df_transform_column_names(df: pd.DataFrame, f: Callable[[str], str]) -> pd.DataFrame:
    """Return a copy of df with each column name c replaced by f(c)"""
    # Use the columns the callback receives; the original lambda ignored `cs` and
    # re-read df.columns from the closure (same result, but misleading and fragile
    # if df_transform_columns ever passed transformed columns)
    return df_transform_columns(df, lambda cs: [f(c) for c in cs])
def df_transform_index(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
    """Return a copy of df whose index is f(index); the input df is left untouched"""
    out = df.copy()
    out.index = f(out.index)
    return out
def df_set_index_name(df: pd.DataFrame, name: str) -> pd.DataFrame:
    """Return a copy of df with its index renamed to `name`"""
    rename_index = lambda index: index.rename(name)
    return df_transform_index(df, rename_index)
def df_remove_unused_categories(df: pd.DataFrame) -> pd.DataFrame:
    """
    Do col.remove_unused_categories() for all categorical columns
    """
    cat_cols = [k for k in df.columns if df[k].dtype.name == 'category']
    return df.assign(**{k: df[k].cat.remove_unused_categories() for k in cat_cols})
def df_ordered_cats_like(df: pd.DataFrame, **col_names_to_cats) -> pd.DataFrame:
    """
    More flexible than df.astype({'foo': cat_dtype, ...}) / df_ordered_cat(df, ...)
    - In addition to cat dtypes, allows cols with cat dtype, lists of cat values, and functions that return any of those
    - Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
    """
    replaced = {
        col_name: as_ordered_cat_like(df[col_name], cats)
        for col_name, cats in col_names_to_cats.items()
    }
    return df.assign(**replaced)
def as_ordered_cat_like(s: pd.Series, cats) -> pd.Series:
    """
    More flexible than s.astype(cat_dtype) / as_ordered_cat(s, cat_values)
    - In addition to cat dtypes, allows cols with cat dtype, lists of cat values, and functions that return any of those
    - Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
    """
    # Normalize `cats` step by step until it's an iterable of cat values
    if callable(cats):
        # A function of the input col
        cats = cats(s)
    if isinstance(cats, pd.Series):
        # A col with categorical dtype (fails on cols with non-categorical dtype)
        cats = cats.dtypes.categories
    if isinstance(cats, pd.api.types.CategoricalDtype):
        # A categorical dtype
        # - TODO Is there a robust way to isinstance(cats, [np.dtype, pd.dtype]) so we can fail on non-categorical dtypes?
        cats = cats.categories
    # At this point cats is an iter of cat values
    # - Dedupe (order-preserving) for the user, since CategoricalDtype rejects duplicate cat values
    deduped = list(unique_everseen(cats))
    return as_ordered_cat(s, ordered_cats=deduped)
# XXX Try migrating callers to df_ordered_cats_like to see if we can kill this less-usable one
# FIXME Multiple *args appears broken: `.pipe(df_ordered_cat, 'x', 'y')`
# - Workaround: `.pipe(df_ordered_cat, 'x').pipe(df_ordered_cat, 'y')`
def df_ordered_cat(df: pd.DataFrame, *args, transform=lambda x: x, **kwargs) -> pd.DataFrame:
    """
    Map many str series to ordered category series
    - args: col names whose cat values are taken from df[k].unique()
    - kwargs: col name -> cat values, or a function of df returning them
    - transform: applied to each col's cat values before use
    """
    cats = dict(
        # k=k binds each col name at definition time; the previous late-binding
        # closure made every *args lambda read the last k (the FIXME'd multi-args bug)
        **{k: lambda df, k=k: df[k].unique() for k in args},
        **kwargs,
    )
    return df.assign(**{
        k: as_ordered_cat(df[k], list(transform(
            x(df) if isinstance(x, types.FunctionType) else x
        )))
        for k, x in cats.items()
    })
def as_ordered_cat(s: pd.Series, ordered_cats: List[str] = None) -> pd.Series:
    """
    Map a str series to an ordered category series
    - If ordered_cats isn't given, s.unique() is used
    """
    cats = ordered_cats or list(s.unique())
    return s.astype(CategoricalDtype(cats, ordered=True))
def df_cat_to_str(df: pd.DataFrame) -> pd.DataFrame:
    """
    Map any categorical columns to str columns (see cat_to_str for details)
    """
    return df.apply(lambda col: cat_to_str(col), axis=0)
def cat_to_str(s: pd.Series) -> pd.Series:
    """
    If s is a category dtype, map it to a str. This is useful when you want to avoid bottlenecks on large cats:
    - s.apply(f) will apply f to each value in s _and_ each value in the category, to make the new output category dtype
    - cat_to_str(s).apply(f) will apply f only to each value in s, since there's no output category dtype to compute
    """
    if s.dtype.name == 'category':
        return s.astype('str')
    return s
# XXX after migrating callers to new name
def df_reverse_cat(*args, **kwargs):
    """Deprecated alias for df_reverse_cats (kept until callers migrate to the new name)"""
    return df_reverse_cats(*args, **kwargs)
def df_reverse_cats(df: pd.DataFrame, *col_names) -> pd.DataFrame:
    """
    Reverse the cat.categories values of each (ordered) category column given in col_names
    - Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
    """
    reversals = {col_name: reversed for col_name in col_names}
    return df_transform_cats(df, **reversals)
def df_transform_cats(
    df: pd.DataFrame,
    **col_name_to_f,
) -> pd.DataFrame:
    """
    Transform the cat.categories values to f(cat.categories) for each category column given in col_names
    """
    transformed = {
        col_name: transform_cat(df[col_name], f=f)
        for col_name, f in col_name_to_f.items()
    }
    return df.assign(**transformed)
def transform_cat(
    s: pd.Series,
    f: Callable[[List[str]], Iterable[str]] = lambda xs: xs,
    ordered: bool = None,
) -> pd.Series:
    """
    Transform the category values of a categorical series
    - ordered=None preserves the input's orderedness
    """
    new_ordered = s.dtype.ordered if ordered is None else ordered
    new_dtype = CategoricalDtype(
        categories=list(f(s.dtype.categories)),
        ordered=new_ordered,
    )
    # Round-trip through str so values re-bind to the transformed categories
    return s.astype('str').astype(new_dtype)
def reverse_cat(s: pd.Series) -> pd.Series:
    """
    Reverse the category values of a categorical series
    - Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
    """
    return transform_cat(s, f=reversed)
def df_ensure(df, **kwargs):
    """
    df.assign only the columns that aren't already present
    """
    # (An unreachable trailing `return df` after the first return was removed)
    missing = {k: v for k, v in kwargs.items() if k not in df}
    return df.assign(**missing)
def df_require_nonempty(df, e: Union[str, Exception]) -> pd.DataFrame:
    """
    Raise if df is empty, else return df. Useful in pipelines, e.g.
        (df
        ...
        .pipe(df_require_nonempty, f'No requested things found: x[{x}], y[{y}]')  # -> ValueError
        ...
        .pipe(df_require_nonempty, AssertionError(f'Oops, my fault'))
        ...
        )
    """
    if not df.empty:
        return df
    # str messages become ValueError; Exception instances are raised as given
    raise ValueError(e) if isinstance(e, str) else e
# XXX Obviated by df_ensure?
# def produces_cols(*cols):
# cols = [c for c in cols if c != ...]
# def decorator(f):
# @wraps(f)
# def g(*args, **kwargs) -> pd.DataFrame:
# df = _find_df_in_args(*args, **kwargs)
# _refresh = kwargs.pop('_refresh', False)
# if _refresh or not cols or any(c not in df for c in cols):
# df = f(*args, **kwargs)
# return df
# return g
# return decorator
def requires_cols(*required_cols):
    """Decorator: raise ValueError unless the first df/series arg has all of required_cols (... acts as a no-op)"""
    required_cols = [c for c in required_cols if c != ...]
    def decorator(f):
        @wraps(f)
        def g(*args, **kwargs) -> any:
            target = _find_first_df_or_series_in_args(*args, **kwargs)
            # df.columns or series.index
            input_cols = target.columns if isinstance(target, pd.DataFrame) else target.index
            missing = set(required_cols) - set(input_cols)
            if missing:
                raise ValueError(f'requires_col: required_cols[{required_cols}] not all in input_cols[{input_cols}]')
            return f(*args, **kwargs)
        return g
    return decorator
def _find_first_df_or_series_in_args(*args, **kwargs):
for x in [*args, *kwargs.values()]:
if isinstance(x, (pd.DataFrame, pd.Series)):
return x
else:
raise ValueError('No df or series found in args')
def requires_nonempty_rows(f):
    """Decorator: raise ValueError if the first df/series arg has no rows"""
    @wraps(f)
    def g(*args, **kwargs) -> any:
        input = _find_first_df_or_series_in_args(*args, **kwargs)
        # (Removed an unused `input_cols` local, copy-pasted from requires_cols)
        if input.empty:
            raise ValueError(f'requires_nonempty_rows: rows are empty ({input})')
        return f(*args, **kwargs)
    return g
def df_require_index_is_trivial(df: pd.DataFrame) -> pd.DataFrame:
    """Assert that df has a trivial RangeIndex (0..n-1); returns df so it composes in .pipe chains"""
    require_index_is_trivial(df.index)
    return df
def require_index_is_trivial(index: pd.Index) -> pd.Index:
    """Assert that index is exactly RangeIndex(len(index)); returns the index unchanged"""
    expected = pd.RangeIndex(len(index))
    pd.testing.assert_index_equal(index, expected)
    return index
def df_style_cell(*styles: Union[
    Tuple[Callable[['cell'], bool], 'style'],
    Tuple['cell', 'style'],
    Callable[['cell'], Optional['style']],
]) -> Callable[['cell'], 'style']:
    """
    Shorthand for df.style.applymap(...). Example usage:
        df.style.applymap(df_style_cell(
            (lambda x: 0 < x < 1, 'color: red'),
            (0, 'color: green'),
            lambda x: 'background: %s' % to_rgb_hex(x),
        ))
    """
    def f(x):
        # First style that matches the cell value wins (styles are tried in order)
        y = None
        for style in styles:
            # (predicate, style) pair: apply style if predicate(x) is truthy
            # NOTE(review): FunctionType matches plain functions/lambdas but not builtins
            # or functools.partial — presumably intentional; confirm before widening to callable()
            if isinstance(style, tuple) and isinstance(style[0], types.FunctionType) and style[0](x):
                y = style[1]
            # (value, style) pair: apply style if x equals the value
            elif isinstance(style, tuple) and x == style[0]:
                y = style[1]
            # Bare function: it returns the style itself (falsy means "no match")
            elif isinstance(style, types.FunctionType):
                y = style(x)
            if y:
                break
        return y or ''
    return f
#
# io
#
def pd_read_fwf(
    filepath_or_buffer,
    widths: Optional[Union[List[int], 'infer']] = None,
    unused_char='\a',  # For widths='infer': any char not present in the file, used to initially parse raw lines
    **kwargs,
) -> pd.DataFrame:
    """
    Like pd.read_fwf, except:
    - Add support for widths='infer', which infers col widths from the header row (assuming no spaces in header names)
    """
    if widths == 'infer':
        # Read raw lines
        # - Use pd.read_* (with kwargs) so we get all the file/str/compression/encoding goodies
        [header_line, *body_lines] = (
            pd.read_csv(filepath_or_buffer, **kwargs, header=None, sep=unused_char)
            .pipe(lambda df: map(one, df.to_records(index=False)))
        )
        # (Removed a leftover no-op statement `[header_line, *body_lines]` that rebuilt and discarded the list)
        # Compute col widths
        # - header_line determines widths for all but the last col, which might have values that extend past the header
        # - Incorporate body_lines to determine the width of the last col
        widths_cum = [
            i + 1
            for (i, c), (_, d) in windowed(enumerate(header_line), 2)
            if c != ' ' and d == ' '  # a col boundary is where a non-space is followed by a space
        ]
        widths_cum = [
            0,
            *widths_cum,
            max(len(line) for line in [header_line, *body_lines]),
        ]
        widths = [
            y - x
            for x, y in windowed(widths_cum, 2)
        ]
    return pd.read_fwf(filepath_or_buffer,
        widths=widths,
        **kwargs,
    )
def df_to_fwf_df(df: pd.DataFrame, reset_index=True, fresh_col_name='_index') -> pd.DataFrame:
    """
    Transform a df so that its .to_string() is copy/pastable to a .fwf format
    - HACK This function returns a new df that ipython/jupyter will display in a copy/paste-able form
    - TODO Make a function df_to_fwf that actually does the real df->str/file
    """
    assert fresh_col_name not in df.columns, f"Refusing to overwrite your col named {fresh_col_name!r}"
    if reset_index and df.index.name is not None:
        df = df.reset_index()
    out = (df
        .assign(**{fresh_col_name: ''})
        .set_index(fresh_col_name)
        .pipe(df_set_index_name, None)
    )
    # Cosmetic: ensure 2 spaces between columns in .to_string()
    # - The behavior of .to_string() is 2 spaces of margin around cells, but only 1 space of margin around headers
    # - Padding each header string with 1 (leading) space effects 2 spaces of margin around both cells and headers
    # - This isn't necessary for pd.read_fwf to work, but it's helpful to the human interacting with the file
    return out.rename(columns=lambda c: ' ' + c)
#
# "Plotting", i.e. styling df html via mpl/plotnine color palettes
#
# TODO Add df_color_col for continuous values (this one is just for discrete values)
def df_col_color_d(
    df,
    _join=',',  # Separator used when a cell holds multiple values (ignored if _stack)
    _stack=False,  # Stack multiple values via df_cell_stack instead of joining html
    _extend_cmap=False,  # Repeat the cmap when there are more distinct values than colors
    _html_color_kwargs=dict(),  # Passed through to _html_color (e.g. elem=, attr=)
    **col_cmaps,  # col name -> cmap name
) -> pd.DataFrame:
    """Color the (discrete) values in a df column (like plotnine.scale_color_cmap_d for tables)"""
    # Lazy imports so we don't hard-require these heavy-ish libs
    from IPython.display import HTML
    from mizani.palettes import cmap_d_pal
    from potoo.plot import mpl_cmap_repeat
    # Break cyclic import
    from potoo.ipython import df_cell_display, df_cell_stack
    def iter_or_singleton(x: Union[Iterable[X], X]) -> Iterable[X]:
        # Treat strings (and anything without __len__) as scalars, not iterables
        return [x] if not hasattr(x, '__len__') or isinstance(x, str) else x
    def color_col(s: pd.Series, cmap):
        s = cat_to_str(s)  # Else iter_or_singleton tries to make a category of lists, which barfs when it tries to hash
        # Distinct non-null values across the col (cells may hold single values or lists)
        vs = list(unique_everseen(
            v
            for v in flatten(s.map(iter_or_singleton))
            if pd.notnull(v)
        ))
        # TODO Allow user to control this ordering, like plotnine allows with category dtypes
        vs = sorted(vs)
        if _extend_cmap:
            cmap = mpl_cmap_repeat(len(vs), cmap)
        # Assign each distinct value a color from the palette
        colors = dict(zip(vs, cmap_d_pal(cmap)(len(vs))))
        # FIXME 'text/plain' gets '' from HTML(...) [repro-able? why does this happen?]
        join = lambda xs: df_cell_stack(xs) if _stack else df_cell_display(HTML(_join.join(xs)))
        return s.apply(lambda v: join(
            _html_color(v, colors, **_html_color_kwargs)
            for v in iter_or_singleton(v)
        ))
    return df.assign(**{
        col: color_col(df[col], cmap)
        for col, cmap in col_cmaps.items()
    })
def df_cell_color(
    x_html: any,
    colors: Mapping[any, str],
    **kwargs,
) -> 'df_cell':
    """Display x_html wrapped in the html color chosen by colors.get(x_html) (see _html_color)"""
    # Lazy import matching df_col_color_d: HTML was referenced here without a visible import
    # in this scope, which would NameError unless it happens to be imported at module top
    from IPython.display import HTML
    from potoo.ipython import df_cell_display  # Break cyclic import
    return df_cell_display(HTML(_html_color(x_html, colors, **kwargs)))
def _html_color(
x_html: any,
colors: Mapping[any, str],
elem='span',
attr='color',
) -> str:
color = colors.get(x_html, 'inherit')
return '<%(elem)s style="%(attr)s: %(color)s">%(x_html)s</span>' % locals()
#
# sql/bq
#
# TODO What's the right way to manage sessions and txns?
def pd_read_sql(session, sql):
    """Run sql through the (sqlalchemy-style) session and return a df, rolling back before and after
    so no transaction is left open"""
    session.rollback()
    try:
        df = pd.read_sql(sql, session.connection())
        return df
    finally:
        session.rollback()
# TODO -> potoo.sqlalchemy
def raw_sql(session, sql):
    """Execute sql on the session and lazily yield each result row as a plain dict"""
    rows = session.execute(sql)
    return (dict(row.items()) for row in rows)
def pd_read_bq(
    query,
    project_id=None,  # Defaults to the gcloud-configured project (see bq_default_project)
    dialect='standard',
    # read_gbq=pd.io.gbq.read_gbq, # Sequential IO, slow
    # read_gbq=potoo.pandas_io_gbq_par_io.read_gbq, # Parallel IO (via dask), ballpark ~4x faster than sequential IO
    read_gbq=None,  # Lazily loads potoo.pandas_io_gbq_par_io.read_gbq
    **kwargs
):
    """
    Example usage:
        df = pd_read_bq('''
            select ...
            from ...
        ''')
    Docs:
    - http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.gbq.read_gbq.html
    - https://cloud.google.com/bigquery/docs/
    """
    if read_gbq is None:
        # Lazy import keeps module import cheap when BQ isn't used
        import potoo.pandas_io_gbq_par_io
        read_gbq = potoo.pandas_io_gbq_par_io.read_gbq
    return read_gbq(
        query=query,
        dialect=dialect,
        project_id=project_id or bq_default_project(),
        **kwargs
    )
def bq_default_project():
    """Current default GCP project, as configured in gcloud (shells out to `gcloud config`)"""
    raw = subprocess.check_output(
        'gcloud config get-value project 2>/dev/null',
        shell=True,
    )
    return raw.decode('utf8').strip()
| [
"subprocess.check_output",
"collections.OrderedDict",
"pandas.get_option",
"more_itertools.unique_everseen",
"potoo.util.get_cols",
"pandas.qcut",
"pandas.read_csv",
"functools.wraps",
"pandas.set_option",
"potoo.ipython.df_cell_stack",
"mizani.palettes.cmap_d_pal",
"numpy.issubdtype",
"more... | [((698, 710), 'typing.TypeVar', 'TypeVar', (['"""X"""'], {}), "('X')\n", (705, 710), False, 'from typing import Callable, Iterable, Iterator, List, Mapping, Optional, Tuple, TypeVar, Union\n'), ((4243, 4289), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '_display_width'], {}), "('display.width', _display_width)\n", (4256, 4289), True, 'import pandas as pd\n'), ((4301, 4353), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '_display_max_rows'], {}), "('display.max_rows', _display_max_rows)\n", (4314, 4353), True, 'import pandas as pd\n'), ((4362, 4419), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'display_max_columns'], {}), "('display.max_columns', display_max_columns)\n", (4375, 4419), True, 'import pandas as pd\n'), ((4501, 4554), 'pandas.set_option', 'pd.set_option', (['"""display.precision"""', 'display_precision'], {}), "('display.precision', display_precision)\n", (4514, 4554), True, 'import pandas as pd\n'), ((6024, 6111), 'pandas.qcut', 'pd.qcut', (['x'], {'q': 'q', 'retbins': '(True)'}), "(x, q=q, retbins=True, **{'duplicates': 'drop', 'labels': False, **\n kwargs})\n", (6031, 6111), True, 'import pandas as pd\n'), ((23503, 23511), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (23508, 23511), False, 'from functools import partial, wraps\n'), ((26481, 26537), 'pandas.read_fwf', 'pd.read_fwf', (['filepath_or_buffer'], {'widths': 'widths'}), '(filepath_or_buffer, widths=widths, **kwargs)\n', (26492, 26537), True, 'import pandas as pd\n'), ((5072, 5088), 'pandas.get_option', 'pd.get_option', (['k'], {}), '(k)\n', (5085, 5088), True, 'import pandas as pd\n'), ((5097, 5116), 'pandas.set_option', 'pd.set_option', (['k', 'v'], {}), '(k, v)\n', (5110, 5116), True, 'import pandas as pd\n'), ((10173, 10189), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (10185, 10189), True, 'import pandas as pd\n'), ((22735, 22743), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (22740, 
22743), False, 'from functools import partial, wraps\n'), ((2978, 3001), 'sys.__stdout__.fileno', 'sys.__stdout__.fileno', ([], {}), '()\n', (2999, 3001), False, 'import sys\n'), ((4484, 4494), 'potoo.util.get_cols', 'get_cols', ([], {}), '()\n', (4492, 4494), False, 'from potoo.util import get_cols, get_rows, or_else\n'), ((5200, 5219), 'pandas.set_option', 'pd.set_option', (['k', 'v'], {}), '(k, v)\n', (5213, 5219), True, 'import pandas as pd\n'), ((7729, 7749), 'collections.OrderedDict', 'OrderedDict', (['row_out'], {}), '(row_out)\n', (7740, 7749), False, 'from collections import OrderedDict\n'), ((18217, 18238), 'more_itertools.unique_everseen', 'unique_everseen', (['cats'], {}), '(cats)\n', (18232, 18238), False, 'from more_itertools import flatten, one, unique_everseen, windowed\n'), ((25665, 25736), 'pandas.read_csv', 'pd.read_csv', (['filepath_or_buffer'], {'header': 'None', 'sep': 'unused_char'}), '(filepath_or_buffer, **kwargs, header=None, sep=unused_char)\n', (25676, 25736), True, 'import pandas as pd\n'), ((26435, 26458), 'more_itertools.windowed', 'windowed', (['widths_cum', '(2)'], {}), '(widths_cum, 2)\n', (26443, 26458), False, 'from more_itertools import flatten, one, unique_everseen, windowed\n'), ((29222, 29239), 'potoo.ipython.df_cell_stack', 'df_cell_stack', (['xs'], {}), '(xs)\n', (29235, 29239), False, 'from potoo.ipython import df_cell_display, df_cell_stack\n'), ((29080, 29096), 'mizani.palettes.cmap_d_pal', 'cmap_d_pal', (['cmap'], {}), '(cmap)\n', (29090, 29096), False, 'from mizani.palettes import cmap_d_pal\n'), ((31335, 31421), 'subprocess.check_output', 'subprocess.check_output', (['"""gcloud config get-value project 2>/dev/null"""'], {'shell': '(True)'}), "('gcloud config get-value project 2>/dev/null',\n shell=True)\n", (31358, 31421), False, 'import subprocess\n'), ((28832, 28845), 'pandas.notnull', 'pd.notnull', (['v'], {}), '(v)\n', (28842, 28845), True, 'import pandas as pd\n'), ((9346, 9384), 'numpy.issubdtype', 
'np.issubdtype', (['c.dtype.base', 'np.number'], {}), '(c.dtype.base, np.number)\n', (9359, 9384), True, 'import numpy as np\n'), ((9464, 9502), 'numpy.issubdtype', 'np.issubdtype', (['c.dtype.base', 'np.number'], {}), '(c.dtype.base, np.number)\n', (9477, 9502), True, 'import numpy as np\n')] |
"""Script defining SMALMesh, an object capable of rendering a mesh version of the SMAL Model, for optimising the fit to other, existing meshes.
With modifications now to work with:
- newest SMAL Model
- Newly define scale factor parameters"""
from absl import flags
from pytorch3d.structures import Meshes
import sys, os
sys.path.append(os.path.dirname(sys.path[0]))
from smbld_model.smal_model.smal_torch import SMAL
import torch
from smbld_model.smal_model.smal_torch import batch_rodrigues
import numpy as np
import pickle
from vis import stack_as_batch, try_mkdir
from pytorch_arap.pytorch_arap.arap import ARAPMeshes
from smbld_model.config import SMPL_MODEL_PATH, SMPL_DATA_PATH
nn = torch.nn
opts = flags.FLAGS  # absl flags, passed through to the SMAL model
# SMAL skeleton joint index for each named body part
# NOTE(review): not referenced elsewhere in this file — presumably consumed by callers; confirm
kappa_map = {
    "front_left_leg": 7,
    "front_right_leg" : 11,
    "rear_left_leg": 17,
    "rear_right_leg": 21,
    "tail": 25,
    "core": 1, # NOTE: this is linked to head/front legs, will have to reduce them by an equal amount
    "neck": 15, # Head is connected to this
    "head": 16,
    "left_ear": 33,
    "right_ear": 34,
}
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base = False, betas_extra=None, device="cuda"):
    """
    Computes absolute joint locations given pose.
    rotate_base: if True, rotates the global rotation by 90 deg in x axis.
    if False, this is the original SMPL coordinate.
    Args:
      Rs: N x 24 x 3 x 3 rotation vector of K joints
      Js: N x 24 x 3, joint locations before posing
      parent: 24 holding the parent id for each index
      betas_extra: optional per-joint (x, y, z) scale factors, reshaped to (N, 35, 3)
    Returns
      new_J : `Tensor`: N x 24 x 3 location of absolute joints
      A     : `Tensor`: N x 24 4 x 4 relative joint transformations for LBS.
    """
    # Now Js is N x 24 x 3 x 1
    Js = Js.unsqueeze(-1)
    N = Rs.shape[0]
    if rotate_base:
        print('Flipping the SMPL coordinate frame!!!!')
        rot_x = torch.Tensor([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
        # NOTE(review): torch has no top-level torch.repeat — this branch would raise if
        # rotate_base=True (likely meant rot_x.repeat(...)); confirm before relying on it
        rot_x = torch.reshape(torch.repeat(rot_x, [N, 1]), [N, 3, 3]) # In tf it was tile
        root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)
    else:
        root_rotation = Rs[:, 0, :, :]
    Js_orig = Js.clone()
    # Per-joint anisotropic scale factors; identity scale unless betas_extra is given
    scaling_factors = torch.ones(N, parent.shape[0], 3).to(device)
    if betas_extra is not None:
        scaling_factors = betas_extra.reshape(-1, 35, 3)
    # debug_only
    # scaling_factors[:, 25:32, 0] = 0.2
    # scaling_factors[:, 7, 2] = 2.0
    # (N, 35, 3) -> (N, 35, 3, 3) diagonal scale matrices
    scale_factors_3x3 = torch.diag_embed(scaling_factors, dim1=-2, dim2=-1)
    def make_A(R, t):
        # Rs is N x 3 x 3, ts is N x 3 x 1; build the homogeneous (N, 4, 4) transform
        R_homo = torch.nn.functional.pad(R, (0,0,0,1,0,0))
        t_homo = torch.cat([t, torch.ones([N, 1, 1]).to(device)], 1)
        return torch.cat([R_homo, t_homo], 2)
    A0 = make_A(root_rotation, Js[:, 0])
    results = [A0]
    # Walk the kinematic chain: each joint's world transform composes its parent's
    for i in range(1, parent.shape[0]):
        j_here = Js[:, i] - Js[:, parent[i]]  # bone vector relative to the parent joint
        s_par_inv = torch.inverse(scale_factors_3x3[:, parent[i]])
        rot = Rs[:, i]
        s = scale_factors_3x3[:, i]
        # Undo the parent's scale, apply the local rotation, then this joint's scale
        rot_new = s_par_inv @ rot @ s
        A_here = make_A(rot_new, j_here)
        res_here = torch.matmul(
            results[parent[i]], A_here)
        results.append(res_here)
    # 10 x 24 x 4 x 4
    results = torch.stack(results, dim=1)
    # scale updates
    new_J = results[:, :, :3, 3]
    # --- Compute relative A: Skinning is based on
    # how much the bone moved (not the final location of the bone)
    # but (final_bone - init_bone)
    # ---
    Js_w0 = torch.cat([Js_orig, torch.zeros([N, 35, 1, 1]).to(device)], 2)
    init_bone = torch.matmul(results, Js_w0)
    # Append empty 4 x 3:
    init_bone = torch.nn.functional.pad(init_bone, (3,0,0,0,0,0,0,0))
    A = results - init_bone
    return new_J, A
class SMBLDMesh(SMAL, nn.Module):
"""SMAL Model, with addition of scale factors to individual body parts"""
    def __init__(self, n_batch = 1, fixed_betas = False, device="cuda", shape_family_id = 1,
                 model_path = SMPL_MODEL_PATH, data_path = SMPL_DATA_PATH, num_betas=20, **kwargs):
        """
        Args:
            n_batch: number of meshes optimised in parallel
            fixed_betas: if True, share a single set of betas across the batch
            device: torch device string
            shape_family_id: SMAL shape family (1 = canine); -1 -> zero mean betas
            model_path / data_path: SMAL model and data pickle locations
            num_betas: number of shape betas actually used
        """
        SMAL.__init__(self, model_path=model_path, data_path=data_path, opts = opts, shape_family_id=shape_family_id,
                      align = False)
        nn.Module.__init__(self)
        self.use_smal_betas = True
        self.n_batch = n_batch
        self.device = device
        self.v_template= self.v_template.to(device)
        self.faces = self.f
        faces_single = torch.from_numpy(self.faces.astype(np.float32)).to(device)
        self.faces_batch = stack_as_batch(faces_single, n_batch)
        self.n_verts = self.v_template.shape[0]
        # parameters (all optimised by gradient descent)
        self.global_rot = nn.Parameter(torch.full((n_batch, 3), 0.0, device = device, requires_grad=True))
        self.joint_rot = nn.Parameter(torch.full((n_batch, 34, 3), 0.0, device = device, requires_grad=True))
        self.trans = nn.Parameter(torch.full((n_batch, 3,), 0.0, device = device, requires_grad=True))
        self.scale_factors = torch.nn.Parameter(torch.ones((self.parents.shape[0])),
                                                requires_grad = True)
        # This sets up a new set of betas that define the scale factor parameters
        self.num_beta_shape = self.n_betas = 20
        self.num_betascale = 7
        leg_joints = list(range(7,11)) + list(range(11,15)) + list(range(17,21)) + list(range(21,25))
        tail_joints = list(range(25, 32))
        ear_joints = [33, 34]
        # Mask mapping the 7 scale betas onto per-joint (x, y, z) log-scales
        beta_scale_mask = torch.zeros(35, 3, 7).to(device)
        beta_scale_mask[leg_joints, [2], [0]] = 1.0  # Leg lengthening
        beta_scale_mask[leg_joints, [0], [1]] = 1.0  # Leg fatness
        beta_scale_mask[leg_joints, [1], [1]] = 1.0  # Leg fatness
        beta_scale_mask[tail_joints, [0], [2]] = 1.0  # Tail lengthening
        beta_scale_mask[tail_joints, [1], [3]] = 1.0  # Tail fatness
        beta_scale_mask[tail_joints, [2], [3]] = 1.0  # Tail fatness
        beta_scale_mask[ear_joints, [1], [4]] = 1.0  # Ear y
        beta_scale_mask[ear_joints, [2], [5]] = 1.0  # Ear z
        self.beta_scale_mask = torch.transpose(beta_scale_mask.reshape(35*3, self.num_betascale), 0, 1)
        self.fixed_betas = fixed_betas
        self.num_betas = num_betas  # number of used betas
        max_betas = self.shapedirs.shape[0]
        assert max_betas >= self.num_betas, f"Insufficient number of betas in shapedir (Requested {self.num_betas}, shapedir has {max_betas})"
        # Load mean betas from SMAL model
        with open(data_path, "rb") as f:
            u = pickle._Unpickler(f)
            u.encoding = 'latin1'  # py2-pickled data compatibility
            smal_data = u.load()
        shape_family = self.shape_family_id  # Canine is family=1
        if shape_family == -1:
            self.mean_betas = torch.zeros((41)).to(device)
        else:
            loaded_betas = smal_data['cluster_means'][shape_family]
            if len(loaded_betas) < max_betas:
                loaded_betas = np.pad(loaded_betas, (0, self.num_betas-len(loaded_betas)))  # pad with 0s to max shape
            self.mean_betas = torch.FloatTensor(loaded_betas).to(device)
        multi_betas = self.mean_betas[:self.num_betas]
        multi_betas_scale = torch.zeros(self.num_betascale).float().to(device)
        multi_betas = torch.cat([multi_betas, multi_betas_scale], dim = 0)
        if self.fixed_betas:
            self.multi_betas = nn.Parameter(multi_betas.repeat(1, 1))
        else:
            self.multi_betas = nn.Parameter(multi_betas.repeat(self.n_batch, 1))
        self.deform_verts = nn.Parameter(torch.zeros((n_batch, self.n_verts, 3), device=device, requires_grad=True))
        # Convenience parameter groups for optimisers
        self.smbld_shape = [self.global_rot, self.trans, self.multi_betas]
        self.smbld_params = [self.global_rot, self.joint_rot, self.trans, self.multi_betas]  # params of SMBDL model
        self.deform_params = [self.deform_verts]
        self.meshes = self.get_meshes()
    def get_verts(self, return_joints=False):
        """Returns vertices and faces of SMAL Model (and optionally 3D joints)"""
        # For reference on running the forward() method of SMAL model, see smal3d_renderer.py
        smal_params = self.parameters()  # NOTE(review): unused here — presumably leftover; confirm
        # Split betas by standard betas, and scale factor betas
        all_betas = self.multi_betas
        betas_pred = all_betas[:, :self.num_betas]  # usual betas
        betas_logscale = all_betas[:, self.num_betas:]  # Scale factor betas
        betas_scale_pred = torch.exp(betas_logscale @ self.beta_scale_mask)  # Scale SF betas correctly
        #betas = betas_pred.repeat(self.n_batch, 1) # Stack Betas correctly if fixed across batch
        #sf = self.scale_factors.repeat(self.n_batch, 1) # Stack Betas correctly if fixed across batch
        # Forward pass through __call__ with global + joint rotations concatenated
        verts, joints_3d, R = self(betas_pred,
            torch.cat((self.global_rot, self.joint_rot.view(self.n_batch, -1)), dim = 1),
            betas_scale_pred.to(self.device), trans=self.trans, deform_verts=self.deform_verts)
        if return_joints:
            return verts, self.faces_batch, joints_3d
        return verts, self.faces_batch  # each of these have shape (n_batch, n_vert/faces, 3)
    def get_meshes(self):
        """Returns Meshes object of all SMAL meshes (also cached on self.meshes)."""
        self.meshes = ARAPMeshes(*self.get_verts(), device=self.device)
        return self.meshes
    def __call__(self, beta, theta, betas_extra, deform_verts=None, trans=None, get_skin=True):
        """SMAL forward pass: shape betas + pose thetas -> posed vertices (LBS skinning).

        Args:
            beta: shape betas (batch, nBetas)
            theta: concatenated global + joint rotations, reshaped to (n_batch * 35, 3)
            betas_extra: per-joint scale factors, passed to batch_global_rigid_transformation
            deform_verts: optional free-form per-vertex offsets added to the template
            trans: optional (batch, 3) translation (defaults to zero)
            get_skin: if True return (verts, joints, Rs), else joints only
        """
        if self.use_smal_betas:  # Always use smal betas
            nBetas = beta.shape[1]
        else:
            nBetas = 0
        # 1. Add shape blend shapes
        if nBetas > 0:
            if deform_verts is None:
                v_shaped = self.v_template.to(self.device) + torch.reshape(torch.matmul(beta.cpu(), self.shapedirs[:nBetas, :]),
                    [-1, self.size[0], self.size[1]]).to(self.device)
            else:
                v_shaped = self.v_template + deform_verts + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas, :].to(self.device)),
                    [-1, self.size[0], self.size[1]]).to(self.device)
        else:
            if deform_verts is None:
                v_shaped = self.v_template.unsqueeze(0)
            else:
                v_shaped = self.v_template + deform_verts
        # 2. Infer shape-dependent joint locations.
        Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor.to(self.device))
        Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor.to(self.device))
        Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor.to(self.device))
        J = torch.stack([Jx, Jy, Jz], dim=2)
        # 3. Add pose blend shapes
        # N x 24 x 3 x 3 (axis-angle -> rotation matrices via batch_rodrigues)
        Rs = torch.reshape(batch_rodrigues(torch.reshape(theta, [self.n_batch * 35, 3]).cpu()), [-1, 35, 3, 3]).to(self.device)
        # Ignore global rotation.
        pose_feature = torch.reshape(Rs[:, 1:, :, :].to(self.device) - torch.eye(3).to(self.device), [-1, 306]) # torch.eye(3).cuda(device=self.opts.gpu_id)
        v_posed = torch.reshape(
            torch.matmul(pose_feature, self.posedirs.to(self.device)),
            [-1, self.size[0], self.size[1]]) + v_shaped.to(self.device)
        # 4. Get the global joint location
        self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents,
            betas_extra=betas_extra, device=self.device)
        # 5. Do skinning:
        num_batch = theta.shape[0]
        weights_t = self.weights.repeat([num_batch, 1]).to(self.device)
        W = torch.reshape(weights_t, [num_batch, -1, 35])
        # Per-vertex blended transform: weighted sum of the 35 joint transforms
        T = torch.reshape(
            torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])),
            [num_batch, -1, 4, 4])
        v_posed_homo = torch.cat(
            [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(self.device)], 2) #.cuda(device=self.opts.gpu_id)
        v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))
        verts = v_homo[:, :, :3, 0]
        if trans is None:
            trans = torch.zeros((num_batch, 3)).to(self.device)#.cuda(device=self.opts.gpu_id)
        verts = verts + trans[:, None, :]
        # Get joints:
        self.J_regressor = self.J_regressor.to(self.device)
        joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
        joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
        joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
        joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
        if get_skin:
            return verts, joints, Rs
        else:
            return joints
    def save_npz(self, out_dir, title="", labels=None):
        """Save all optimisable parameters plus the current mesh to a .npz file.

        Args:
            out_dir: directory to write into (created via try_mkdir if missing).
            title: optional suffix inserted into the file name, e.g.
                title="final" -> "smbld_params_final.npz".
            labels: optional list of size n_batch, stored verbatim under the
                "labels" key for all entries.
        """
        out = {}
        # Detach each optimisable tensor to a plain numpy array for storage.
        for param in ["global_rot", "joint_rot", "multi_betas", "trans", "deform_verts"]:
            out[param] = getattr(self, param).cpu().detach().numpy()
        # Also snapshot the posed mesh (vertices + faces) at save time.
        v, f = self.get_verts()
        out["verts"] = v.cpu().detach().numpy()
        out["faces"] = f.cpu().detach().numpy()
        out["labels"] = labels
        out_title = "smbld_params.npz"
        if title != "":
            out_title = out_title.replace(".npz", f"_{title}.npz")
        try_mkdir(out_dir)  # ensure the output directory exists
        np.savez(os.path.join(out_dir, out_title), **out)
def load_from_npz(self, loc):
"""Given the location of a .npz file, load previous model"""
data = np.load(loc)
for param in ["global_rot", "joint_rot", "multi_betas", "trans"]:
tensor = torch.from_numpy(data[param]).to(self.device)
getattr(self, param).data = tensor | [
"torch.exp",
"torch.from_numpy",
"smbld_model.smal_model.smal_torch.SMAL.__init__",
"torch.nn.functional.pad",
"numpy.load",
"torch.eye",
"torch.matmul",
"torch.diag_embed",
"torch.Tensor",
"vis.try_mkdir",
"torch.repeat",
"os.path.dirname",
"torch.reshape",
"torch.cat",
"torch.full",
... | [((340, 368), 'os.path.dirname', 'os.path.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (355, 368), False, 'import sys, os\n'), ((2420, 2471), 'torch.diag_embed', 'torch.diag_embed', (['scaling_factors'], {'dim1': '(-2)', 'dim2': '(-1)'}), '(scaling_factors, dim1=-2, dim2=-1)\n', (2436, 2471), False, 'import torch\n'), ((3230, 3257), 'torch.stack', 'torch.stack', (['results'], {'dim': '(1)'}), '(results, dim=1)\n', (3241, 3257), False, 'import torch\n'), ((3567, 3595), 'torch.matmul', 'torch.matmul', (['results', 'Js_w0'], {}), '(results, Js_w0)\n', (3579, 3595), False, 'import torch\n'), ((3638, 3698), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['init_bone', '(3, 0, 0, 0, 0, 0, 0, 0)'], {}), '(init_bone, (3, 0, 0, 0, 0, 0, 0, 0))\n', (3661, 3698), False, 'import torch\n'), ((1856, 1905), 'torch.Tensor', 'torch.Tensor', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n', (1868, 1905), False, 'import torch\n'), ((2020, 2055), 'torch.matmul', 'torch.matmul', (['Rs[:, 0, :, :]', 'rot_x'], {}), '(Rs[:, 0, :, :], rot_x)\n', (2032, 2055), False, 'import torch\n'), ((2555, 2601), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['R', '(0, 0, 0, 1, 0, 0)'], {}), '(R, (0, 0, 0, 1, 0, 0))\n', (2578, 2601), False, 'import torch\n'), ((2681, 2711), 'torch.cat', 'torch.cat', (['[R_homo, t_homo]', '(2)'], {}), '([R_homo, t_homo], 2)\n', (2690, 2711), False, 'import torch\n'), ((2883, 2929), 'torch.inverse', 'torch.inverse', (['scale_factors_3x3[:, parent[i]]'], {}), '(scale_factors_3x3[:, parent[i]])\n', (2896, 2929), False, 'import torch\n'), ((3097, 3137), 'torch.matmul', 'torch.matmul', (['results[parent[i]]', 'A_here'], {}), '(results[parent[i]], A_here)\n', (3109, 3137), False, 'import torch\n'), ((4045, 4169), 'smbld_model.smal_model.smal_torch.SMAL.__init__', 'SMAL.__init__', (['self'], {'model_path': 'model_path', 'data_path': 'data_path', 'opts': 'opts', 'shape_family_id': 'shape_family_id', 'align': 
'(False)'}), '(self, model_path=model_path, data_path=data_path, opts=opts,\n shape_family_id=shape_family_id, align=False)\n', (4058, 4169), False, 'from smbld_model.smal_model.smal_torch import SMAL\n'), ((4516, 4553), 'vis.stack_as_batch', 'stack_as_batch', (['faces_single', 'n_batch'], {}), '(faces_single, n_batch)\n', (4530, 4553), False, 'from vis import stack_as_batch, try_mkdir\n'), ((7289, 7339), 'torch.cat', 'torch.cat', (['[multi_betas, multi_betas_scale]'], {'dim': '(0)'}), '([multi_betas, multi_betas_scale], dim=0)\n', (7298, 7339), False, 'import torch\n'), ((8443, 8491), 'torch.exp', 'torch.exp', (['(betas_logscale @ self.beta_scale_mask)'], {}), '(betas_logscale @ self.beta_scale_mask)\n', (8452, 8491), False, 'import torch\n'), ((10639, 10671), 'torch.stack', 'torch.stack', (['[Jx, Jy, Jz]'], {'dim': '(2)'}), '([Jx, Jy, Jz], dim=2)\n', (10650, 10671), False, 'import torch\n'), ((11616, 11661), 'torch.reshape', 'torch.reshape', (['weights_t', '[num_batch, -1, 35]'], {}), '(weights_t, [num_batch, -1, 35])\n', (11629, 11661), False, 'import torch\n'), ((12307, 12353), 'torch.matmul', 'torch.matmul', (['verts[:, :, 0]', 'self.J_regressor'], {}), '(verts[:, :, 0], self.J_regressor)\n', (12319, 12353), False, 'import torch\n'), ((12372, 12418), 'torch.matmul', 'torch.matmul', (['verts[:, :, 1]', 'self.J_regressor'], {}), '(verts[:, :, 1], self.J_regressor)\n', (12384, 12418), False, 'import torch\n'), ((12437, 12483), 'torch.matmul', 'torch.matmul', (['verts[:, :, 2]', 'self.J_regressor'], {}), '(verts[:, :, 2], self.J_regressor)\n', (12449, 12483), False, 'import torch\n'), ((12501, 12548), 'torch.stack', 'torch.stack', (['[joint_x, joint_y, joint_z]'], {'dim': '(2)'}), '([joint_x, joint_y, joint_z], dim=2)\n', (12512, 12548), False, 'import torch\n'), ((13337, 13355), 'vis.try_mkdir', 'try_mkdir', (['out_dir'], {}), '(out_dir)\n', (13346, 13355), False, 'from vis import stack_as_batch, try_mkdir\n'), ((13534, 13546), 'numpy.load', 'np.load', (['loc'], 
{}), '(loc)\n', (13541, 13546), True, 'import numpy as np\n'), ((1936, 1963), 'torch.repeat', 'torch.repeat', (['rot_x', '[N, 1]'], {}), '(rot_x, [N, 1])\n', (1948, 1963), False, 'import torch\n'), ((2154, 2187), 'torch.ones', 'torch.ones', (['N', 'parent.shape[0]', '(3)'], {}), '(N, parent.shape[0], 3)\n', (2164, 2187), False, 'import torch\n'), ((4663, 4727), 'torch.full', 'torch.full', (['(n_batch, 3)', '(0.0)'], {'device': 'device', 'requires_grad': '(True)'}), '((n_batch, 3), 0.0, device=device, requires_grad=True)\n', (4673, 4727), False, 'import torch\n'), ((4769, 4837), 'torch.full', 'torch.full', (['(n_batch, 34, 3)', '(0.0)'], {'device': 'device', 'requires_grad': '(True)'}), '((n_batch, 34, 3), 0.0, device=device, requires_grad=True)\n', (4779, 4837), False, 'import torch\n'), ((4875, 4939), 'torch.full', 'torch.full', (['(n_batch, 3)', '(0.0)'], {'device': 'device', 'requires_grad': '(True)'}), '((n_batch, 3), 0.0, device=device, requires_grad=True)\n', (4885, 4939), False, 'import torch\n'), ((4994, 5027), 'torch.ones', 'torch.ones', (['self.parents.shape[0]'], {}), '(self.parents.shape[0])\n', (5004, 5027), False, 'import torch\n'), ((6537, 6557), 'pickle._Unpickler', 'pickle._Unpickler', (['f'], {}), '(f)\n', (6554, 6557), False, 'import pickle\n'), ((7579, 7653), 'torch.zeros', 'torch.zeros', (['(n_batch, self.n_verts, 3)'], {'device': 'device', 'requires_grad': '(True)'}), '((n_batch, self.n_verts, 3), device=device, requires_grad=True)\n', (7590, 7653), False, 'import torch\n'), ((13373, 13405), 'os.path.join', 'os.path.join', (['out_dir', 'out_title'], {}), '(out_dir, out_title)\n', (13385, 13405), False, 'import sys, os\n'), ((5464, 5485), 'torch.zeros', 'torch.zeros', (['(35)', '(3)', '(7)'], {}), '(35, 3, 7)\n', (5475, 5485), False, 'import torch\n'), ((11717, 11754), 'torch.reshape', 'torch.reshape', (['A', '[num_batch, 35, 16]'], {}), '(A, [num_batch, 35, 16])\n', (11730, 11754), False, 'import torch\n'), ((3508, 3534), 'torch.zeros', 
'torch.zeros', (['[N, 35, 1, 1]'], {}), '([N, 35, 1, 1])\n', (3519, 3534), False, 'import torch\n'), ((12089, 12116), 'torch.zeros', 'torch.zeros', (['(num_batch, 3)'], {}), '((num_batch, 3))\n', (12100, 12116), False, 'import torch\n'), ((13643, 13672), 'torch.from_numpy', 'torch.from_numpy', (['data[param]'], {}), '(data[param])\n', (13659, 13672), False, 'import torch\n'), ((2628, 2649), 'torch.ones', 'torch.ones', (['[N, 1, 1]'], {}), '([N, 1, 1])\n', (2638, 2649), False, 'import torch\n'), ((6763, 6778), 'torch.zeros', 'torch.zeros', (['(41)'], {}), '(41)\n', (6774, 6778), False, 'import torch\n'), ((7088, 7119), 'torch.FloatTensor', 'torch.FloatTensor', (['loaded_betas'], {}), '(loaded_betas)\n', (7105, 7119), False, 'import torch\n'), ((7215, 7246), 'torch.zeros', 'torch.zeros', (['self.num_betascale'], {}), '(self.num_betascale)\n', (7226, 7246), False, 'import torch\n'), ((10966, 10978), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (10975, 10978), False, 'import torch\n'), ((11848, 11892), 'torch.ones', 'torch.ones', (['[num_batch, v_posed.shape[1], 1]'], {}), '([num_batch, v_posed.shape[1], 1])\n', (11858, 11892), False, 'import torch\n'), ((10775, 10819), 'torch.reshape', 'torch.reshape', (['theta', '[self.n_batch * 35, 3]'], {}), '(theta, [self.n_batch * 35, 3])\n', (10788, 10819), False, 'import torch\n')] |
import logging
import os
import sys
import numpy as np
from hexrd import config
from hexrd import constants as cnst
from hexrd import instrument
from hexrd.fitgrains import fit_grains
from hexrd.transforms import xfcapi
descr = 'Extracts G vectors, grain position and strain'
example = """
examples:
hexrd fit-grains configuration.yml
"""
def configure_parser(sub_parsers):
    """Register the 'fit-grains' sub-command and its CLI options.

    Args:
        sub_parsers: the argparse sub-parsers object to attach to.
    """
    parser = sub_parsers.add_parser('fit-grains', description=descr, help=descr)
    # Positional: the YAML configuration driving the analysis.
    parser.add_argument('yml', type=str,
                        help='YAML configuration file')
    # Optional flags, in help-display order.
    parser.add_argument('-g', '--grains', type=str, default=None,
                        help="comma-separated list of IDs to refine, defaults to all")
    parser.add_argument('-q', '--quiet', action='store_true',
                        help="don't report progress in terminal")
    parser.add_argument('-c', '--clean', action='store_true',
                        help='overwrites existing analysis, uses initial orientations')
    parser.add_argument('-f', '--force', action='store_true',
                        help='overwrites existing analysis')
    parser.add_argument('-p', '--profile', action='store_true',
                        help='runs the analysis with cProfile enabled')
    # Dispatch to execute() when this sub-command is selected.
    parser.set_defaults(func=execute)
def write_results(fit_results, cfg, grains_filename='grains.out'):
    """Write per-grain fit results to <analysis_dir>/<grains_filename>.

    Ensures the analysis directory and one sub-directory per detector panel
    exist, then dumps each fit result through a GrainDataWriter.

    Args:
        fit_results: iterable of per-grain result tuples, each unpacked
            into GrainDataWriter.dump_grain.
        cfg: hexrd config object (provides instrument.hedm and analysis_dir).
        grains_filename: output file name inside the analysis directory.
    """
    instr = cfg.instrument.hedm
    analysis_dir = cfg.analysis_dir
    if not os.path.exists(analysis_dir):
        # Fresh analysis: create the root and every panel directory.
        os.mkdir(analysis_dir)
        for det_key in instr.detectors:
            os.mkdir(os.path.join(analysis_dir, det_key))
    else:
        # Existing analysis: only create panel directories that are missing.
        for det_key in instr.detectors:
            panel_dir = os.path.join(analysis_dir, det_key)
            if not os.path.exists(panel_dir):
                os.mkdir(panel_dir)
    writer = instrument.GrainDataWriter(
        os.path.join(analysis_dir, grains_filename)
    )
    for result in fit_results:
        writer.dump_grain(*result)
    writer.close()
def execute(args, parser):
    """Entry point for the ``fit-grains`` sub-command.

    Loads the YAML config(s), ensures indexing (find-orientations) results
    exist, then runs grain fitting for each config and writes results.

    Args:
        args: parsed CLI namespace (yml, grains, quiet, clean, force,
            profile, plus the top-level debug flag).
        parser: the argparse parser (unused here; part of the CLI dispatch
            signature).

    Raises:
        RuntimeError: if indexing results cannot be loaded or found, or if a
            previous fit exists and neither --clean nor --force was given.
    """
    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(
        cfgs[0].working_dir,
        'accepted_orientations_%s.dat' % cfgs[0].analysis_id
    )
    if os.path.exists(quats_f):
        try:
            qbar = np.loadtxt(quats_f).T
        except IOError:
            # BUGFIX: `raise(RuntimeError, msg)` raised a tuple -- a
            # TypeError under Python 3; raise a real exception instance.
            raise RuntimeError(
                "error loading indexing results '%s'" % quats_f)
    else:
        logger.info("Missing %s, running find-orientations", quats_f)
        logger.removeHandler(ch)
        from hexrd.findorientations import find_orientations
        results = find_orientations(cfgs[0])
        qbar = results['qbar']
        logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    clobber = args.force or args.clean
    for cfg in cfgs:
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not clobber:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"',
                cfg.analysis_name, cfg.analysis_dir
            )
            sys.exit()

        # make output directories
        instr = cfg.instrument.hedm
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)
            for det_key in instr.detectors:
                os.mkdir(os.path.join(cfg.analysis_dir, det_key))
        else:
            # make sure panel dirs exist under analysis dir
            for det_key in instr.detectors:
                if not os.path.exists(os.path.join(cfg.analysis_dir, det_key)):
                    os.mkdir(os.path.join(cfg.analysis_dir, det_key))

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(
            cfg.working_dir,
            cfg.analysis_name,
            'fit-grains.log'
        )
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter(
            '%(asctime)s - %(name)s - %(message)s',
            '%m-%d %H:%M:%S'
        )
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        if args.profile:
            import cProfile as profile
            import pstats
            from io import StringIO
            pr = profile.Profile()
            pr.enable()

        grains_filename = os.path.join(
            cfg.analysis_dir, 'grains.out'
        )

        # some conditions for arg handling
        existing_analysis = os.path.exists(grains_filename)
        new_with_estimate = not existing_analysis \
            and cfg.fit_grains.estimate is not None
        new_without_estimate = not existing_analysis \
            and cfg.fit_grains.estimate is None
        force_with_estimate = args.force \
            and cfg.fit_grains.estimate is not None
        force_without_estimate = args.force and cfg.fit_grains.estimate is None

        # handle args
        if args.clean or force_without_estimate or new_without_estimate:
            # need accepted orientations from indexing in this case
            if args.clean:
                logger.info(
                    "'clean' specified; ignoring estimate and using default"
                )
            elif force_without_estimate:
                logger.info(
                    "'force' option specified, but no initial estimate; "
                    + "using default"
                )
            try:
                gw = instrument.GrainDataWriter(grains_filename)
                for i_g, q in enumerate(qbar.T):
                    phi = 2*np.arccos(q[0])
                    n = xfcapi.unitRowVector(q[1:])
                    grain_params = np.hstack(
                        [phi*n, cnst.zeros_3, cnst.identity_6x1]
                    )
                    gw.dump_grain(int(i_g), 1., 0., grain_params)
                gw.close()
            except IOError:
                # BUGFIX: proper raise, and fixed operator precedence -- the
                # original applied "%s" % 'accepted_orientations_' BEFORE the
                # + concatenation, producing a garbled message.
                raise RuntimeError(
                    "indexing results 'accepted_orientations_%s.dat'"
                    " not found!" % cfg.analysis_id)
        elif force_with_estimate or new_with_estimate:
            grains_filename = cfg.fit_grains.estimate
        elif existing_analysis and not clobber:
            # BUGFIX: original tested undefined names `clean`/`force`
            # (NameError when reached); `clobber` already holds
            # args.force or args.clean.
            raise RuntimeError(
                "fit results '%s' exist, " % grains_filename
                + "but --clean or --force options not specified")

        grains_table = np.loadtxt(grains_filename, ndmin=2)
        # process the data
        gid_list = None
        if args.grains is not None:
            gid_list = [int(i) for i in args.grains.split(',')]
        cfg.fit_grains.qbar = qbar
        fit_results = fit_grains(
            cfg,
            grains_table,
            show_progress=not args.quiet,
            ids_to_refine=gid_list,
        )

        if args.profile:
            pr.disable()
            # BUGFIX: `from io import StringIO` imports the class itself, so
            # `StringIO.StringIO()` was an AttributeError.
            s = StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats(50)
            logger.info('%s', s.getvalue())

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)
        logger.info('*** end analysis "%s" ***', cfg.analysis_name)
        write_results(fit_results, cfg, grains_filename)

    logger.info('=== end fit-grains ===')
    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
| [
"logging.getLogger",
"logging.StreamHandler",
"numpy.arccos",
"numpy.hstack",
"hexrd.transforms.xfcapi.unitRowVector",
"sys.exit",
"hexrd.findorientations.find_orientations",
"os.path.exists",
"logging.FileHandler",
"os.mkdir",
"hexrd.instrument.GrainDataWriter",
"cProfile.Profile",
"hexrd.f... | [((2088, 2109), 'hexrd.config.open', 'config.open', (['args.yml'], {}), '(args.yml)\n', (2099, 2109), False, 'from hexrd import config\n'), ((2279, 2305), 'logging.getLogger', 'logging.getLogger', (['"""hexrd"""'], {}), "('hexrd')\n", (2296, 2305), False, 'import logging\n'), ((2346, 2369), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2367, 2369), False, 'import logging\n'), ((2442, 2509), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""', '"""%y-%m-%d %H:%M:%S"""'], {}), "('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')\n", (2459, 2509), False, 'import logging\n'), ((2635, 2727), 'os.path.join', 'os.path.join', (['cfgs[0].working_dir', "('accepted_orientations_%s.dat' % cfgs[0].analysis_id)"], {}), "(cfgs[0].working_dir, 'accepted_orientations_%s.dat' % cfgs[0].\n analysis_id)\n", (2647, 2727), False, 'import os\n'), ((2756, 2779), 'os.path.exists', 'os.path.exists', (['quats_f'], {}), '(quats_f)\n', (2770, 2779), False, 'import os\n'), ((1406, 1438), 'os.path.exists', 'os.path.exists', (['cfg.analysis_dir'], {}), '(cfg.analysis_dir)\n', (1420, 1438), False, 'import os\n'), ((1448, 1474), 'os.mkdir', 'os.mkdir', (['cfg.analysis_dir'], {}), '(cfg.analysis_dir)\n', (1456, 1474), False, 'import os\n'), ((1871, 1918), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'grains_filename'], {}), '(cfg.analysis_dir, grains_filename)\n', (1883, 1918), False, 'import os\n'), ((3151, 3177), 'hexrd.findorientations.find_orientations', 'find_orientations', (['cfgs[0]'], {}), '(cfgs[0])\n', (3168, 3177), False, 'from hexrd.findorientations import find_orientations\n'), ((4370, 4436), 'os.path.join', 'os.path.join', (['cfg.working_dir', 'cfg.analysis_name', '"""fit-grains.log"""'], {}), "(cfg.working_dir, cfg.analysis_name, 'fit-grains.log')\n", (4382, 4436), False, 'import os\n'), ((4500, 4538), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {'mode': '"""w"""'}), "(logfile, mode='w')\n", (4519, 
4538), False, 'import logging\n'), ((4583, 4658), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(message)s"""', '"""%m-%d %H:%M:%S"""'], {}), "('%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S')\n", (4600, 4658), False, 'import logging\n'), ((5027, 5071), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', '"""grains.out"""'], {}), "(cfg.analysis_dir, 'grains.out')\n", (5039, 5071), False, 'import os\n'), ((5166, 5197), 'os.path.exists', 'os.path.exists', (['grains_filename'], {}), '(grains_filename)\n', (5180, 5197), False, 'import os\n'), ((7100, 7136), 'numpy.loadtxt', 'np.loadtxt', (['grains_filename'], {'ndmin': '(2)'}), '(grains_filename, ndmin=2)\n', (7110, 7136), True, 'import numpy as np\n'), ((7364, 7452), 'hexrd.fitgrains.fit_grains', 'fit_grains', (['cfg', 'grains_table'], {'show_progress': '(not args.quiet)', 'ids_to_refine': 'gid_list'}), '(cfg, grains_table, show_progress=not args.quiet, ids_to_refine=\n gid_list)\n', (7374, 7452), False, 'from hexrd.fitgrains import fit_grains\n'), ((3397, 3429), 'os.path.exists', 'os.path.exists', (['cfg.analysis_dir'], {}), '(cfg.analysis_dir)\n', (3411, 3429), False, 'import os\n'), ((3664, 3674), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3672, 3674), False, 'import sys\n'), ((3761, 3793), 'os.path.exists', 'os.path.exists', (['cfg.analysis_dir'], {}), '(cfg.analysis_dir)\n', (3775, 3793), False, 'import os\n'), ((3807, 3836), 'os.makedirs', 'os.makedirs', (['cfg.analysis_dir'], {}), '(cfg.analysis_dir)\n', (3818, 3836), False, 'import os\n'), ((4958, 4975), 'cProfile.Profile', 'profile.Profile', ([], {}), '()\n', (4973, 4975), True, 'import cProfile as profile\n'), ((7578, 7597), 'io.StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (7595, 7597), False, 'from io import StringIO\n'), ((1536, 1575), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (1548, 1575), False, 'import os\n'), ((2813, 2832), 
'numpy.loadtxt', 'np.loadtxt', (['quats_f'], {}), '(quats_f)\n', (2823, 2832), True, 'import numpy as np\n'), ((6133, 6176), 'hexrd.instrument.GrainDataWriter', 'instrument.GrainDataWriter', (['grains_filename'], {}), '(grains_filename)\n', (6159, 6176), False, 'from hexrd import instrument\n'), ((1717, 1756), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (1729, 1756), False, 'import os\n'), ((1784, 1823), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (1796, 1823), False, 'import os\n'), ((3906, 3945), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (3918, 3945), False, 'import os\n'), ((6294, 6321), 'hexrd.transforms.xfcapi.unitRowVector', 'xfcapi.unitRowVector', (['q[1:]'], {}), '(q[1:])\n', (6314, 6321), False, 'from hexrd.transforms import xfcapi\n'), ((6357, 6410), 'numpy.hstack', 'np.hstack', (['[phi * n, cnst.zeros_3, cnst.identity_6x1]'], {}), '([phi * n, cnst.zeros_3, cnst.identity_6x1])\n', (6366, 6410), True, 'import numpy as np\n'), ((7615, 7641), 'pstats.Stats', 'pstats.Stats', (['pr'], {'stream': 's'}), '(pr, stream=s)\n', (7627, 7641), False, 'import pstats\n'), ((4103, 4142), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (4115, 4142), False, 'import os\n'), ((4174, 4213), 'os.path.join', 'os.path.join', (['cfg.analysis_dir', 'det_key'], {}), '(cfg.analysis_dir, det_key)\n', (4186, 4213), False, 'import os\n'), ((6254, 6269), 'numpy.arccos', 'np.arccos', (['q[0]'], {}), '(q[0])\n', (6263, 6269), True, 'import numpy as np\n')] |
import cv2
import numpy
import pyopencl
from proc_tex.OpenCLCellNoise2D import OpenCLCellNoise2D
import proc_tex.texture_transforms
from proc_tex.texture_transforms import tex_scale_to_region, tex_to_dtype
if __name__ == '__main__':
  # Build an OpenCL context (device selection may depend on environment).
  cl_context = pyopencl.create_some_context()
  # GPU-evaluated 2D cell noise; the meaning of the (4, 1) arguments is not
  # visible here -- presumably grid size / points per cell, confirm against
  # OpenCLCellNoise2D's signature.
  texture = OpenCLCellNoise2D(cl_context, 4, 1)
  # Rescale values into a fixed region, then quantize to 16-bit unsigned.
  texture = tex_to_dtype(tex_scale_to_region(texture), numpy.uint16,
    scale=65535)
  # Sample the texture on a 1024x1024 grid over the unit square [0,1]^2.
  eval_pts = texture.gen_eval_pts((1024, 1024), numpy.array([[0,1], [0,1]]))
  image = texture.to_image(None, None, eval_pts=eval_pts)
  # Preview the rendered frame until a key is pressed.
  cv2.imshow('image', image)
  cv2.waitKey(0)
  cv2.destroyAllWindows()
  # Render an animation to 16-bit grayscale WebM; the 120/30 arguments are
  # presumably frame count and fps -- confirm against to_video's signature.
  texture.to_video(None, None, 120, 30, './example.webm', pix_fmt='gray16le',
    codec_params=['-lossless', '0'], eval_pts=eval_pts)
| [
"proc_tex.OpenCLCellNoise2D.OpenCLCellNoise2D",
"pyopencl.create_some_context",
"proc_tex.texture_transforms.tex_scale_to_region",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey"
] | [((250, 280), 'pyopencl.create_some_context', 'pyopencl.create_some_context', ([], {}), '()\n', (278, 280), False, 'import pyopencl\n'), ((293, 328), 'proc_tex.OpenCLCellNoise2D.OpenCLCellNoise2D', 'OpenCLCellNoise2D', (['cl_context', '(4)', '(1)'], {}), '(cl_context, 4, 1)\n', (310, 328), False, 'from proc_tex.OpenCLCellNoise2D import OpenCLCellNoise2D\n'), ((552, 578), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (562, 578), False, 'import cv2\n'), ((581, 595), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (592, 595), False, 'import cv2\n'), ((601, 624), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (622, 624), False, 'import cv2\n'), ((354, 382), 'proc_tex.texture_transforms.tex_scale_to_region', 'tex_scale_to_region', (['texture'], {}), '(texture)\n', (373, 382), False, 'from proc_tex.texture_transforms import tex_scale_to_region, tex_to_dtype\n'), ((463, 492), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1]])\n', (474, 492), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.misc import imresize
from pycocotools import mask as COCOmask
def segmToMask(segm, h, w):
  """Decode a COCO-style segmentation annotation into a binary mask.

  segm : COCO annotation -- either a polygon list or an RLE dict
  h, w : target mask height and width
  output: mask ndarray uint8 (h, w), values in {0, 1}
  """
  if type(segm) == list:
    # A polygon annotation may hold several parts; convert each part to RLE
    # and merge them into a single code.
    rle = COCOmask.merge(COCOmask.frPyObjects(segm, h, w))
  elif type(segm['counts']) == list:
    # Uncompressed RLE: compress it first.
    rle = COCOmask.frPyObjects(segm, h, w)
  else:
    raise NotImplementedError
  # Decode the RLE into a 2D binary array.
  return COCOmask.decode(rle)
def clip_np_boxes(boxes, im_shape):
  """Clip box coordinates in place to the image bounds.

  boxes    : ndarray float32 (n, 4) in [x1, y1, x2, y2] order
  im_shape : (height, width)
  Returns the same array with x-coordinates clipped to [0, width - 1]
  and y-coordinates clipped to [0, height - 1].
  """
  max_x = im_shape[1] - 1
  max_y = im_shape[0] - 1
  # Columns alternate x, y, x, y; the ::4 stride matches the original layout.
  for col, limit in ((0, max_x), (1, max_y), (2, max_x), (3, max_y)):
    boxes[:, col::4] = np.maximum(np.minimum(boxes[:, col::4], limit), 0)
  return boxes
def recover_masks(masks, rois, ih, iw, interp='bilinear'):
  """Decode 14x14 masks into final masks
  Params
  - masks : of shape (N, 14, 14) float32, ranging [0, 1]
  - rois  : of shape (N, 4) [x1, y1, x2, y2] float32. Note there is no batch_ids in rois!
  - ih : image height
  - iw : image width
  - interp: bilinear or nearest
  Returns
  - recovered_masks : of shape (N, ih, iw) uint8, range [0, 255]
  """
  assert rois.shape[0] == masks.shape[0], '%s rois vs %d masks'%(rois.shape[0], masks.shape[0])
  num_rois = rois.shape[0]
  recovered_masks = np.zeros((num_rois, ih, iw), dtype=np.uint8)  # (num_rois, ih, iw)
  # BUGFIX: clip a copy -- clip_np_boxes works in place and the original
  # silently modified the caller's rois array.
  rois = clip_np_boxes(rois.copy(), (ih, iw))
  for i in np.arange(num_rois):
    # BUGFIX: scale a copy; the original did `mask *= 255.` on a view,
    # mutating the caller's masks array as a side effect.
    mask = masks[i, :, :] * 255.
    # resize will convert it to uint8 [0, 255]
    h, w = int(rois[i, 3] - rois[i, 1] + 1), int(rois[i, 2] - rois[i, 0] + 1)
    x, y = int(rois[i, 0]), int(rois[i, 1])
    mask = imresize(mask, (h, w), interp=interp)  # (roi_h, roi_w) uint8
    # paint the resized patch into the full-size canvas
    recovered_masks[i, y:y+h, x:x+w] = mask
  return recovered_masks
def recover_cls_masks(masks, rois, ih, iw, interp='bilinear'):
  """Decode per-class 14x14 masks into final masks
  Arguments
  - masks : (N, C, 14, 14) float32, ranging [0,1]
  - rois  : (N, 4) [xyxy] float32
  - ih : image height
  - iw : image width
  - interp: bilinear or nearest
  Returns
  - recovered_masks : (N, C, ih, iw) uint8, range [0, 255]
  """
  assert rois.shape[0] == masks.shape[0], '%s rois vs %d masks'%(rois.shape[0], masks.shape[0])
  num_rois = rois.shape[0]
  num_classes = masks.shape[1]
  recovered_masks = np.zeros((num_rois, num_classes, ih, iw), dtype=np.uint8)
  # BUGFIX: clip a copy -- clip_np_boxes works in place and the original
  # silently modified the caller's rois array.
  rois = clip_np_boxes(rois.copy(), (ih, iw))
  for i in np.arange(num_rois):
    # BUGFIX: scale a copy; the original did `mask *= 255.` on a view,
    # mutating the caller's masks array as a side effect.
    mask = masks[i, :, :, :] * 255.
    h, w = int(rois[i, 3] - rois[i, 1] + 1), int(rois[i, 2] - rois[i, 0] + 1)
    x, y = int(rois[i, 0]), int(rois[i, 1])
    for c in range(num_classes):
      m = imresize(mask[c], (h, w), interp=interp)  # (roi_h, roi_w) uint8
      recovered_masks[i, c, y:y+h, x:x+w] = m
  return recovered_masks
| [
"numpy.minimum",
"pycocotools.mask.decode",
"pycocotools.mask.frPyObjects",
"numpy.zeros",
"pycocotools.mask.merge",
"scipy.misc.imresize",
"numpy.arange"
] | [((706, 726), 'pycocotools.mask.decode', 'COCOmask.decode', (['rle'], {}), '(rle)\n', (721, 726), True, 'from pycocotools import mask as COCOmask\n'), ((1849, 1893), 'numpy.zeros', 'np.zeros', (['(num_rois, ih, iw)'], {'dtype': 'np.uint8'}), '((num_rois, ih, iw), dtype=np.uint8)\n', (1857, 1893), True, 'import numpy as np\n'), ((1965, 1984), 'numpy.arange', 'np.arange', (['num_rois'], {}), '(num_rois)\n', (1974, 1984), True, 'import numpy as np\n'), ((2947, 3004), 'numpy.zeros', 'np.zeros', (['(num_rois, num_classes, ih, iw)'], {'dtype': 'np.uint8'}), '((num_rois, num_classes, ih, iw), dtype=np.uint8)\n', (2955, 3004), True, 'import numpy as np\n'), ((3076, 3095), 'numpy.arange', 'np.arange', (['num_rois'], {}), '(num_rois)\n', (3085, 3095), True, 'import numpy as np\n'), ((494, 526), 'pycocotools.mask.frPyObjects', 'COCOmask.frPyObjects', (['segm', 'h', 'w'], {}), '(segm, h, w)\n', (514, 526), True, 'from pycocotools import mask as COCOmask\n'), ((537, 557), 'pycocotools.mask.merge', 'COCOmask.merge', (['rles'], {}), '(rles)\n', (551, 557), True, 'from pycocotools import mask as COCOmask\n'), ((934, 977), 'numpy.minimum', 'np.minimum', (['boxes[:, 0::4]', '(im_shape[1] - 1)'], {}), '(boxes[:, 0::4], im_shape[1] - 1)\n', (944, 977), True, 'import numpy as np\n'), ((1024, 1067), 'numpy.minimum', 'np.minimum', (['boxes[:, 1::4]', '(im_shape[0] - 1)'], {}), '(boxes[:, 1::4], im_shape[0] - 1)\n', (1034, 1067), True, 'import numpy as np\n'), ((1123, 1166), 'numpy.minimum', 'np.minimum', (['boxes[:, 2::4]', '(im_shape[1] - 1)'], {}), '(boxes[:, 2::4], im_shape[1] - 1)\n', (1133, 1166), True, 'import numpy as np\n'), ((1222, 1265), 'numpy.minimum', 'np.minimum', (['boxes[:, 3::4]', '(im_shape[0] - 1)'], {}), '(boxes[:, 3::4], im_shape[0] - 1)\n', (1232, 1265), True, 'import numpy as np\n'), ((2274, 2311), 'scipy.misc.imresize', 'imresize', (['mask', '(h, w)'], {'interp': 'interp'}), '(mask, (h, w), interp=interp)\n', (2282, 2311), False, 'from scipy.misc import 
imresize\n'), ((628, 660), 'pycocotools.mask.frPyObjects', 'COCOmask.frPyObjects', (['segm', 'h', 'w'], {}), '(segm, h, w)\n', (648, 660), True, 'from pycocotools import mask as COCOmask\n'), ((3418, 3452), 'scipy.misc.imresize', 'imresize', (['m', '(h, w)'], {'interp': 'interp'}), '(m, (h, w), interp=interp)\n', (3426, 3452), False, 'from scipy.misc import imresize\n')] |
import sys,os
import argparse
import numpy as np
import json
import heapq
import random
import numbers
# utils
def flatten(l):
  """Concatenate the sub-lists of l into one flat list."""
  merged = []
  for sub in l:
    merged.extend(sub)
  return merged
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or
None to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
if not ((isinstance(top_n, int) and top_n >= 0) or top_n is None):
raise ValueError("top_n must be a positive integer or None.")
self._top_n = top_n # average precision at n
self._total_positives = 0 # total number of positives have seen
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
  def accumulate(self, predictions, actuals, num_positives=None):
    """Accumulate the predictions and their ground truth labels.

    After the function call, we may call peek_ap_at_n to actually calculate
    the average precision.
    Note predictions and actuals must have the same shape.

    Args:
      predictions: a list storing the prediction scores.
      actuals: a list storing the ground truth labels. Any value
      larger than 0 will be treated as positives, otherwise as negatives.
      num_positives = If the 'predictions' and 'actuals' inputs aren't complete,
      then it's possible some true positives were missed in them. In that case,
      you can provide 'num_positives' in order to accurately track recall.

    Raises:
      ValueError: An error occurred when the format of the input is not the
      numpy 1-D array or the shape of predictions and actuals does not match.
    """
    if len(predictions) != len(actuals):
      raise ValueError("the shape of predictions and actuals does not match.")
    # NOTE(review): the error message below contains the typo "wan't"; it is a
    # runtime string and is deliberately left unchanged here.
    if not num_positives is None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError("'num_positives' was provided but it wan't a nonzero number.")
    # Track recall denominator: trust the caller's count when supplied,
    # otherwise count positives directly from the labels.
    if not num_positives is None:
      self._total_positives += num_positives
    else:
      self._total_positives += np.size(np.where(actuals > 0))
    topk = self._top_n
    heap = self._heap
    # Maintain the top-k (by prediction score) samples in a min-heap: heap[0]
    # is the smallest retained score, so anything larger displaces it. Ties on
    # the score fall back to comparing the actual labels (tuple ordering).
    for i in range(np.size(predictions)):
      if topk is None or len(heap) < topk:
        heapq.heappush(heap, (predictions[i], actuals[i]))
      else:
        if predictions[i] > heap[0][0]:  # heap[0] is the smallest
          heapq.heappop(heap)
          heapq.heappush(heap, (predictions[i], actuals[i]))
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
The non-interpolated average precision at n (default 0).
If n is larger than the length of the ranked list,
the average precision will be returned.
"""
if self.heap_size <= 0:
return 0
predlists = np.array(list(zip(*self._heap)))
ap = self.ap_at_n(predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
    """Calculate the non-interpolated average precision over the full list.

    Args:
      predictions: a numpy 1-D array storing the sparse prediction scores.
      actuals: a numpy 1-D array storing the ground truth labels. Any value
        larger than 0 will be treated as positives, otherwise as negatives.

    Returns:
      The non-interpolated average precision.

    Raises:
      ValueError: if the inputs are not numpy 1-D arrays or the shapes of
        predictions and actuals do not match.
    """
    # Delegate to ap_at_n with n=None, i.e. no top-n cutoff.
    return AveragePrecisionCalculator.ap_at_n(predictions, actuals, n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
    """Calculate the non-interpolated average precision at n.

    Args:
      predictions: a numpy 1-D array storing the sparse prediction scores.
      actuals: a numpy 1-D array storing the ground truth labels. Any value
        larger than 0 will be treated as positives, otherwise as negatives.
      n: the top n items to be considered in ap@n.
      total_num_positives: (optionally) the number of total positives in the
        list. If specified, it is used instead of counting `actuals`.

    Returns:
      The non-interpolated average precision at n. If n is larger than the
      length of the ranked list, the average precision is returned.

    Raises:
      ValueError: when 1) the input is not a numpy 1-D array; 2) the shapes
        of predictions and actuals do not match; 3) n is not None and not a
        positive integer.
    """
    if len(predictions) != len(actuals):
        raise ValueError("the shape of predictions and actuals does not match.")
    if n is not None:
        if not isinstance(n, int) or n <= 0:
            raise ValueError("n must be 'None' or a positive integer."
                             " It was '%s'." % n)
    predictions = np.array(predictions)
    actuals = np.array(actuals)
    # Shuffle first so ties are broken randomly rather than by input order,
    # which would overestimate the ap.
    predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
                                                           actuals)
    ranked = sorted(range(len(predictions)),
                    key=lambda idx: predictions[idx],
                    reverse=True)
    if total_num_positives is not None:
        numpos = total_num_positives
    else:
        numpos = np.size(np.where(actuals > 0))
    if numpos == 0:
        return 0
    if n is not None:
        numpos = min(numpos, n)
    delta_recall = 1.0 / numpos
    # Only the first n ranked items contribute to ap@n.
    depth = len(ranked) if n is None else min(len(ranked), n)
    ap = 0.0
    poscount = 0.0
    for rank, idx in enumerate(ranked[:depth]):
        if actuals[idx] > 0:
            poscount += 1
            ap += poscount / (rank + 1) * delta_recall
    return ap
@staticmethod
def _shuffle(predictions, actuals):
    """Apply one fixed (seed 0) random permutation to both arrays in lockstep."""
    random.seed(0)
    order = random.sample(range(len(predictions)), len(predictions))
    return predictions[order], actuals[order]
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
    """Normalize the predictions to the range between 0.0 and 1.0.

    For some predictions like SVM predictions, we need to normalize them
    before calculating the interpolated average precision. The normalization
    does not change the rank in the original list and thus does not change
    the average precision.

    Args:
      predictions: a numpy 1-D array storing the sparse prediction scores.
      epsilon: a small constant to avoid denominator being zero.

    Returns:
      The normalized prediction.
    """
    denominator = np.max(predictions) - np.min(predictions)
    # BUGFIX: the original called `np.max(denominator, epsilon)`, which passes
    # `epsilon` as the `axis` argument instead of taking an element-wise
    # maximum. Use the builtin max() on the scalar range to floor it at epsilon.
    ret = (predictions - np.min(predictions)) / max(denominator, epsilon)
    return ret
def calculate_gap(predictions, actuals, top_k=6):
    """Compute the Global Average Precision (GAP) over top_k entries per row."""
    sparse_preds, sparse_labels, positives = top_k_by_class(predictions, actuals, top_k)
    calculator = AveragePrecisionCalculator()
    calculator.accumulate(flatten(sparse_preds), flatten(sparse_labels), sum(positives))
    return calculator.peek_ap_at_n()
def top_k_by_class(predictions, labels, k=20):
    """Group the per-video top-k (score, label) pairs by class.

    Args:
      predictions: (num_videos, num_classes) prediction score matrix.
      labels: (num_videos, num_classes) binary ground-truth matrix.
      k: number of top-scored classes kept per video.

    Returns:
      A (out_predictions, out_labels, out_true_positives) triple where the
      first two are per-class lists and the last is the per-class positive
      count.

    Raises:
      ValueError: if k is not a positive integer.
    """
    if k <= 0:
        raise ValueError("k must be a positive integer.")
    num_classes = predictions.shape[1]
    k = min(k, num_classes)
    triplets = []
    for video_preds, video_labels in zip(predictions, labels):
        triplets.extend(top_k_triplets(video_preds, video_labels, k))
    out_predictions = [[] for _ in range(num_classes)]
    out_labels = [[] for _ in range(num_classes)]
    for cls, score, label in triplets:
        out_predictions[cls].append(score)
        out_labels[cls].append(label)
    out_true_positives = [np.sum(labels[:, i]) for i in range(num_classes)]
    return out_predictions, out_labels, out_true_positives
def top_k_triplets(predictions, labels, k=20):
    """Get the top_k of a 1-d numpy array as sparse (index, prediction, label) tuples."""
    k = min(k, len(predictions))
    # argpartition places the k largest scores in the last k slots (unordered).
    top = np.argpartition(predictions, -k)[-k:]
    return [(i, predictions[i], labels[i]) for i in top]
def get_tag_id_dict(tag_id_file):
    """Load a tag->id mapping from a tab-separated file with one "tag<TAB>id" per line."""
    mapping = {}
    with open(tag_id_file, 'r') as fh:
        for raw_line in fh:
            tag, idx = raw_line.strip().split('\t')
            mapping[tag] = int(idx)
    return mapping
def convert_to_hot(tag_list, scores, tag_dict):
    """Build a dense score vector of length len(tag_dict) from sparse tags/scores."""
    hot = np.zeros(len(tag_dict))
    for tag, score in zip(tag_list, scores):
        hot[int(tag_dict[tag])] = float(score)
    return hot
def parse_gt_json(gt_json, tag_dict):
    """Read a ground-truth json file and return {video_key: one-hot label vector}."""
    gt_dict = {}
    with open(gt_json, "r", encoding='utf-8') as fh:
        gts = json.load(fh)
    for key, record in gts.items():
        collected = []
        for ann in record["annotations"]:
            collected.extend(ann['labels'])
        unique_labels = list(set(collected))
        gt_dict[key] = convert_to_hot(unique_labels, np.ones(len(unique_labels)), tag_dict)
    return gt_dict
def parse_input_json(input_json, tag_dict):
    """Read a prediction json file and return {video_key: dense score vector}.

    Only the first `result` entry per video is used. The `videos_list`
    accumulator present in the original was dead code (built, never read)
    and has been removed.
    """
    pred_dict = {}
    with open(input_json, "r", encoding='utf-8') as fh:
        pred_result = json.load(fh)
    for video in pred_result:
        top = pred_result[video]["result"][0]
        pred_dict[video] = convert_to_hot(top["labels"], top["scores"], tag_dict)
    return pred_dict
if __name__ == "__main__":
    # CLI: compare a prediction json against a ground-truth json and report GAP.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pred_json', type=str, default="test100_pred.json")
    parser.add_argument('--tag_id_file', type=str, default="tag-id-tagging.txt")
    parser.add_argument('--gt_json', type=str, default="test100.json")
    parser.add_argument('--top_k', type=int, default=20)
    args = parser.parse_args()

    assert os.path.exists(args.tag_id_file), "dict file {} not found".format(args.tag_id_file)
    tag_dict = get_tag_id_dict(args.tag_id_file)
    pred_dict = parse_input_json(args.pred_json, tag_dict)
    gt_dict = parse_gt_json(args.gt_json, tag_dict)
    assert pred_dict.keys() == gt_dict.keys()

    # Stack per-video score/label vectors into matrices in matching order.
    preds, labels = [], []
    for key in pred_dict:
        preds.append(pred_dict[key])
        labels.append(gt_dict[key])
    gap = calculate_gap(np.stack(preds), np.stack(labels), top_k=args.top_k)
    print("The GAP result is {:.3f}".format(gap))
| [
"os.path.exists",
"argparse.ArgumentParser",
"numpy.argpartition",
"numpy.where",
"numpy.size",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.stack",
"numpy.sum",
"heapq.heappop",
"numpy.min",
"json.load",
"heapq.heappush"
] | [((10535, 10560), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10558, 10560), False, 'import argparse\n'), ((10896, 10928), 'os.path.exists', 'os.path.exists', (['args.tag_id_file'], {}), '(args.tag_id_file)\n', (10910, 10928), False, 'import sys, os\n'), ((11330, 11345), 'numpy.stack', 'np.stack', (['preds'], {}), '(preds)\n', (11338, 11345), True, 'import numpy as np\n'), ((11359, 11375), 'numpy.stack', 'np.stack', (['labels'], {}), '(labels)\n', (11367, 11375), True, 'import numpy as np\n'), ((5947, 5968), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (5955, 5968), True, 'import numpy as np\n'), ((5983, 6000), 'numpy.array', 'np.array', (['actuals'], {}), '(actuals)\n', (5991, 6000), True, 'import numpy as np\n'), ((6872, 6886), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (6883, 6886), False, 'import random\n'), ((8821, 8841), 'numpy.sum', 'np.sum', (['labels[:, i]'], {}), '(labels[:, i])\n', (8827, 8841), True, 'import numpy as np\n'), ((9136, 9168), 'numpy.argpartition', 'np.argpartition', (['predictions', '(-k)'], {}), '(predictions, -k)\n', (9151, 9168), True, 'import numpy as np\n'), ((9810, 9822), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9819, 9822), False, 'import json\n'), ((10218, 10230), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10227, 10230), False, 'import json\n'), ((2809, 2829), 'numpy.size', 'np.size', (['predictions'], {}), '(predictions)\n', (2816, 2829), True, 'import numpy as np\n'), ((7670, 7689), 'numpy.max', 'np.max', (['predictions'], {}), '(predictions)\n', (7676, 7689), True, 'import numpy as np\n'), ((7692, 7711), 'numpy.min', 'np.min', (['predictions'], {}), '(predictions)\n', (7698, 7711), True, 'import numpy as np\n'), ((7760, 7788), 'numpy.max', 'np.max', (['denominator', 'epsilon'], {}), '(denominator, epsilon)\n', (7766, 7788), True, 'import numpy as np\n'), ((2721, 2742), 'numpy.where', 'np.where', (['(actuals > 0)'], {}), '(actuals > 0)\n', (2729, 
2742), True, 'import numpy as np\n'), ((2883, 2933), 'heapq.heappush', 'heapq.heappush', (['heap', '(predictions[i], actuals[i])'], {}), '(heap, (predictions[i], actuals[i]))\n', (2897, 2933), False, 'import heapq\n'), ((6377, 6398), 'numpy.where', 'np.where', (['(actuals > 0)'], {}), '(actuals > 0)\n', (6385, 6398), True, 'import numpy as np\n'), ((7737, 7756), 'numpy.min', 'np.min', (['predictions'], {}), '(predictions)\n', (7743, 7756), True, 'import numpy as np\n'), ((3023, 3042), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (3036, 3042), False, 'import heapq\n'), ((3053, 3103), 'heapq.heappush', 'heapq.heappush', (['heap', '(predictions[i], actuals[i])'], {}), '(heap, (predictions[i], actuals[i]))\n', (3067, 3103), False, 'import heapq\n')] |
import satlas.profiles
import numpy as np
def test_gaussian():
    """Check a non-area-normalised Gaussian peaks at `amp` at its centre.

    The original computed `sigma` from the FWHM but never used it; the dead
    computation has been removed.
    """
    mu = 0
    fwhm = 1
    amp = 1
    prof = satlas.profiles.Gaussian(mu=mu, fwhm=fwhm, amp=amp, ampIsArea=False)
    return np.isclose(prof(mu), amp)
def test_lorentzian():
    """Check a non-area-normalised Lorentzian peaks at `amp` at its centre.

    The original computed `gamma` from the FWHM but never used it; the dead
    computation has been removed.
    """
    mu = 0
    fwhm = 1
    amp = 1
    prof = satlas.profiles.Lorentzian(mu=mu, fwhm=fwhm, amp=amp, ampIsArea=False)
    return np.isclose(prof(mu), amp)
def test_voigt():
    """Check a non-area-normalised Voigt profile peaks at `amp` at its centre.

    The original computed `gamma` from the FWHM but never used it; the dead
    computation has been removed.
    """
    mu = 0
    fwhm = 1
    amp = 1
    prof = satlas.profiles.Voigt(mu=mu, fwhm=fwhm, amp=amp, ampIsArea=False)
    return np.isclose(prof(mu), amp)
# Ad-hoc test runner: each check prints True when the profile's value at its
# centre matches the requested amplitude.
print(test_gaussian())
print(test_lorentzian())
print(test_voigt())
"numpy.log"
] | [((120, 129), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (126, 129), True, 'import numpy as np\n')] |
"""
File: show_results.py
Author: <NAME>
TFG
"""
import argparse
import os
import keras
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import roc_curve, auc
from vis.utils import utils
from vis.visualization import visualize_saliency
from show_results_binary import get_class
def plot_roc_curve(y_score, y, fname):
    """Plot a specificity/sensitivity ROC curve and save it to `fname`.

    Parameters
    ----------
    y_score: Predicted class scores
    y: True class labels
    fname: File name where the ROC curve will be stored
    """
    fpr, tpr, _ = roc_curve(y, y_score)
    roc_auc = auc(fpr, tpr)
    plt.figure(1)
    # Diagonal reference line plus reversed x-axis so specificity reads 1 -> 0.
    plt.plot([1.02, -0.02], [-0.02, 1.02], 'k--', lw=2)
    plt.xlim([1.02, -0.02])
    plt.ylim([-0.02, 1.02])
    plt.plot(1 - fpr, tpr, label='AUC = {0:0.2f}'.format(roc_auc))
    plt.xlabel('Specificity')
    plt.ylabel('Sensitivity')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.savefig(fname)
def plot_saliency_map(model, x, fname):
    """Plot the model's average saliency map over the first 100 test images.

    Parameters
    ----------
    model: Deep-learning binary model
    x: Test images
    fname: File name to store the saliency map
    """
    # Locate the layer to visualise and swap its softmax for a linear
    # activation, which yields cleaner gradients.
    layer_index = utils.find_layer_idx(model, 'dense_3')
    model.layers[layer_index].activation = keras.activations.linear
    model = utils.apply_modifications(model)
    # Accumulate saliency over the first n_samples inputs, then average.
    n_samples = 100
    saliency = np.zeros((512, 512))
    for idx in range(n_samples):
        sample = x[idx]
        print(idx)
        saliency += visualize_saliency(model, layer_index, filter_indices=0, seed_input=sample)
    saliency /= n_samples
    fig = plt.figure()
    scaled = ((saliency - saliency.min()) / (saliency.max() - saliency.min()) * 255).astype(np.uint8)
    cax = plt.imshow(scaled, cmap='jet')
    cbar = fig.colorbar(cax, ticks=[0, 110, 220])
    cbar.ax.set_yticklabels(['Low', 'Medium', 'High'])  # horizontal colorbar
    plt.savefig(fname)
def plot_tsne(model, x, y, fname):
    """Plot a 2-D t-SNE embedding of the 'dense_1' features of the train set.

    Parameters
    ----------
    model: deep-learning binary model
    x: train images
    y: train labels
    fname: file name where the t-SNE plot will be saved
    """
    # Build a sub-model that outputs the 1024-feature 'dense_1' activations.
    feature_output = model.get_layer('dense_1').output
    feature_model = keras.Model(inputs=model.input, outputs=feature_output)
    feature_model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
                          loss='binary_crossentropy',
                          metrics=['acc'])
    features = feature_model.predict(x)
    # Reduce to 30 dims with PCA first, then to 2 dims with t-SNE
    # (1000 iterations, learning rate 200).
    reduced = PCA(n_components=30).fit_transform(features)
    embedded = TSNE(n_components=2, n_iter=1000, learning_rate=200).fit_transform(reduced)
    plot_data = {
        'tsne-2d-one': embedded[:, 0],
        'tsne-2d-two': embedded[:, 1],
        'y': get_class(y),
    }
    plt.figure(figsize=(16, 10))
    sns.scatterplot(
        x="tsne-2d-one", y="tsne-2d-two",
        hue="y",
        palette=sns.hls_palette(2, l=.6, s=.7),
        data=plot_data,
        legend="full",
        alpha=0.3
    )
    plt.savefig(fname)
def plot_cm(y_test, y_pred):
    """Print specificity, sensitivity, precision, F1 and TP/TN/FP/FN counts.

    Parameters
    ----------
    y_test: True class labels
    y_pred: Predicted scores (thresholded at 0.5)
    """
    y_binary = np.array([score > 0.5 for score in y_pred])
    tn, fp, fn, tp = metrics.confusion_matrix(y_test, y_binary).ravel()
    sensitivity = metrics.recall_score(y_test, y_binary)  # tp / (tp + fn)
    specificity = tn / (tn + fp)
    precision = metrics.precision_score(y_test, y_binary)
    f1_score = metrics.f1_score(y_test, y_binary)
    print('############################################')
    print('Sensitivity: ', sensitivity)
    print('Specificity: ', specificity)
    print('Precision: ', precision)
    print('F1-Score: ', f1_score)
    print('TP: ', tp)
    print('TN: ', tn)
    print('FP: ', fp)
    print('FN: ', fn)
    print()
def main():
    """Evaluate a saved binary classifier and produce diagnostic plots.

    Command-line arguments:
      -d/--directory: directory holding 'train' and 'test' image subfolders.
      -m/--model: path to the saved Keras model (architecture + weights).
      -o/--output: suffix used for every generated figure filename.

    Saves 'ROC_curve-<output>', 't_SNE-<output>' and 'saliency_map-<output>'.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--directory", default=None, help="path to the directory where the images are stored")
    ap.add_argument("-m", "--model", default=None, help="path to the file where the model is stored")
    ap.add_argument("-o", "--output", default='ad_vs_mci_vs_cn2.png',
                    help="output filename where the images will be stored")
    args = ap.parse_args()
    base_dir = None
    model_file = None
    output = args.output
    # Validate the two mandatory path arguments before doing any work.
    if args.directory is not None:
        if not os.path.isdir(args.directory):
            print("Directory \'%s\' does not exist" % args.directory)
            return
        base_dir = args.directory
    else:
        print("You must specify the directory where the images are stored (see help).")
        return
    if args.model is not None:
        if not os.path.isfile(args.model):
            print("File \'%s\' does not exist" % args.model)
            return
        model_file = args.model
    else:
        print("You must specify the file where the model is stored (see help).")
        return
    # Load the model architecture and its weights
    model = load_model(model_file)
    print(model.summary())
    # Augmentation is applied to the train split only.
    train_datagen = ImageDataGenerator(
        rotation_range=8,
        shear_range=np.pi / 16,
        width_shift_range=0.10,
        height_shift_range=0.10,
        zoom_range=0.08,
        horizontal_flip=False,
        vertical_flip=False,
    )
    test_datagen = ImageDataGenerator()
    # Set the batch size and calculate the number of steps per epoch
    input_size = 512
    batch_size = 8
    train_dir = os.path.join(base_dir, 'train')
    test_dir = os.path.join(base_dir, 'test')
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(input_size, input_size),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=True
    )
    test_generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=(input_size, input_size),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=True
    )
    print(test_generator.class_indices)
    nb_train_samples = len(train_generator.filenames)
    nb_test_samples = len(test_generator.filenames)
    # Materialise both splits into plain lists by draining the generators.
    x_train = []
    y_train = []
    x_test = []
    y_test = []
    batches = 0
    for x_batch, y_batch in train_generator:
        for i in range(len(y_batch)):  # Get input
            x_train.append(x_batch[i])
            y_train.append(y_batch[i])
        batches += 1
        if batches >= nb_train_samples / batch_size:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
    batches = 0
    for x_batch, y_batch in test_generator:
        for i in range(len(y_batch)):  # Get input
            x_test.append(x_batch[i])
            y_test.append(y_batch[i])
        batches += 1
        if batches >= nb_test_samples / batch_size:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
    print(test_generator.classes)
    x_train = np.array(x_train)
    x_test = np.array(x_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    # Get the score of the model with test dataset
    _, train_accuracy = model.evaluate(x_train, y_train, batch_size=batch_size)
    _, test_accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
    print('Train accuracy: %.3f, Test accuracy: %.3f' % (train_accuracy, test_accuracy))
    print(output)
    y_pred = model.predict(x_test)
    # Invert scores and labels so class 0 (presumably AD, given the `idx_ad`
    # name) becomes the positive class — TODO confirm the class mapping.
    y_pred = 1 - y_pred
    y_test2 = np.zeros(y_test.shape)
    idx_ad = np.where(y_test == 0)[0]
    y_test2[idx_ad] = 1
    y_test = y_test2
    plot_cm(y_test, y_pred)
    print('Plotting ROC curve...')
    plot_roc_curve(y_pred, y_test, fname='ROC_curve-' + output)
    print('Plotting t-SNE...')
    plot_tsne(model, x_train, y_train, fname='t_SNE-' + output)
    print('Plotting saliency map...')
    plot_saliency_map(model, x_test, fname='saliency_map-' + output)
if __name__ == '__main__':
    # Match input image or current life video feed with the selected template.
    # Restrict TensorFlow to GPU 0 and enable on-demand GPU memory growth.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    session = tf.Session(config=tf_config)
    # Make this session the default one used by Keras.
    set_session(session)
    main()
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"keras.preprocessing.image.ImageDataGenerator",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"vis.visualization.visualize_saliency",
"sklearn.metrics.roc_curve",
"argparse.ArgumentParser",
"keras.Model",
"skl... | [((929, 950), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'y_score'], {}), '(y, y_score)\n', (938, 950), False, 'from sklearn.metrics import roc_curve, auc\n'), ((967, 992), 'sklearn.metrics.auc', 'auc', (['fpr_keras', 'tpr_keras'], {}), '(fpr_keras, tpr_keras)\n', (970, 992), False, 'from sklearn.metrics import roc_curve, auc\n'), ((998, 1011), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1008, 1011), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1067), 'matplotlib.pyplot.plot', 'plt.plot', (['[1.02, -0.02]', '[-0.02, 1.02]', '"""k--"""'], {'lw': '(2)'}), "([1.02, -0.02], [-0.02, 1.02], 'k--', lw=2)\n", (1024, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1095), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[1.02, -0.02]'], {}), '([1.02, -0.02])\n', (1080, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1123), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.02, 1.02]'], {}), '([-0.02, 1.02])\n', (1108, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Specificity"""'], {}), "('Specificity')\n", (1219, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1264), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sensitivity"""'], {}), "('Sensitivity')\n", (1249, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1291), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve"""'], {}), "('ROC curve')\n", (1278, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1318), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1306, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1341), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (1334, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1768), 'vis.utils.utils.find_layer_idx', 'utils.find_layer_idx', (['model', '"""dense_3"""'], {}), "(model, 'dense_3')\n", (1750, 1768), False, 'from vis.utils import 
utils\n'), ((1903, 1935), 'vis.utils.utils.apply_modifications', 'utils.apply_modifications', (['model'], {}), '(model)\n', (1928, 1935), False, 'from vis.utils import utils\n'), ((1998, 2018), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {}), '((512, 512))\n', (2006, 2018), True, 'import numpy as np\n'), ((2243, 2255), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2253, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2550), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (2543, 2550), True, 'import matplotlib.pyplot as plt\n'), ((2928, 2948), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(30)'}), '(n_components=30)\n', (2931, 2948), False, 'from sklearn.decomposition import PCA\n'), ((3043, 3095), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'n_iter': '(1000)', 'learning_rate': '(200)'}), '(n_components=2, n_iter=1000, learning_rate=200)\n', (3047, 3095), False, 'from sklearn.manifold import TSNE\n'), ((3306, 3366), 'keras.Model', 'keras.Model', ([], {'inputs': 'model.input', 'outputs': 'intermediate_output'}), '(inputs=model.input, outputs=intermediate_output)\n', (3317, 3366), False, 'import keras\n'), ((3947, 3959), 'show_results_binary.get_class', 'get_class', (['y'], {}), '(y)\n', (3956, 3959), False, 'from show_results_binary import get_class\n'), ((4014, 4042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (4024, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (4257, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4611), 'numpy.array', 'np.array', (['y_prd'], {}), '(y_prd)\n', (4604, 4611), True, 'import numpy as np\n'), ((4733, 4768), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_test', 'y_prd'], {}), '(y_test, y_prd)\n', (4753, 4768), False, 'from sklearn import metrics\n'), ((4803, 4841), 
'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_test', 'y_prd'], {}), '(y_test, y_prd)\n', (4826, 4841), False, 'from sklearn import metrics\n'), ((4857, 4888), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_test', 'y_prd'], {}), '(y_test, y_prd)\n', (4873, 4888), False, 'from sklearn import metrics\n'), ((5220, 5245), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5243, 5245), False, 'import argparse\n'), ((6376, 6398), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (6386, 6398), False, 'from keras.models import load_model\n'), ((6446, 6622), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(8)', 'shear_range': '(np.pi / 16)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.08)', 'horizontal_flip': '(False)', 'vertical_flip': '(False)'}), '(rotation_range=8, shear_range=np.pi / 16,\n width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.08,\n horizontal_flip=False, vertical_flip=False)\n', (6464, 6622), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6700, 6720), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (6718, 6720), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6848, 6879), 'os.path.join', 'os.path.join', (['base_dir', '"""train"""'], {}), "(base_dir, 'train')\n", (6860, 6879), False, 'import os\n'), ((6895, 6925), 'os.path.join', 'os.path.join', (['base_dir', '"""test"""'], {}), "(base_dir, 'test')\n", (6907, 6925), False, 'import os\n'), ((8371, 8388), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (8379, 8388), True, 'import numpy as np\n'), ((8402, 8418), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (8410, 8418), True, 'import numpy as np\n'), ((8433, 8450), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (8441, 8450), True, 'import numpy as np\n'), ((8464, 
8480), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (8472, 8480), True, 'import numpy as np\n'), ((8870, 8892), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (8878, 8892), True, 'import numpy as np\n'), ((9553, 9569), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (9567, 9569), True, 'import tensorflow as tf\n'), ((9624, 9649), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9634, 9649), True, 'import tensorflow as tf\n'), ((9654, 9671), 'keras.backend.tensorflow_backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (9665, 9671), False, 'from keras.backend.tensorflow_backend import set_session\n'), ((2132, 2217), 'vis.visualization.visualize_saliency', 'visualize_saliency', (['model', 'layer_index'], {'filter_indices': '(0)', 'seed_input': 'input_image'}), '(model, layer_index, filter_indices=0, seed_input=input_image\n )\n', (2150, 2217), False, 'from vis.visualization import visualize_saliency\n'), ((8906, 8927), 'numpy.where', 'np.where', (['(y_test == 0)'], {}), '(y_test == 0)\n', (8914, 8927), True, 'import numpy as np\n'), ((3408, 3440), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (3429, 3440), False, 'import keras\n'), ((4139, 4171), 'seaborn.hls_palette', 'sns.hls_palette', (['(2)'], {'l': '(0.6)', 's': '(0.7)'}), '(2, l=0.6, s=0.7)\n', (4154, 4171), True, 'import seaborn as sns\n'), ((4634, 4673), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'y_prd'], {}), '(y_test, y_prd)\n', (4658, 4673), False, 'from sklearn import metrics\n'), ((5753, 5782), 'os.path.isdir', 'os.path.isdir', (['args.directory'], {}), '(args.directory)\n', (5766, 5782), False, 'import os\n'), ((6067, 6093), 'os.path.isfile', 'os.path.isfile', (['args.model'], {}), '(args.model)\n', (6081, 6093), False, 'import os\n')] |
from numpy import array
from sympy import sin, cos, pi, exp
def flux(u, q, w, v, x, t, mu, eta):
    """Viscous flux mu * r * q, where r = x[1] is the radial coordinate.

    The original also bound z = x[0] without ever using it; the dead
    assignment has been removed.
    """
    r = x[1]
    return mu * r * q
def source(u, q, w, v, x, t, mu, eta):
    """Source term sin(r) / exp(z) as a one-element array, with z = x[0], r = x[1]."""
    z = x[0]
    r = x[1]
    return array([sin(r) / exp(z)])
def fbou(u, q, w, v, x, t, mu, eta, uhat, n, tau):
    """Boundary flux: normal flux plus a penalty term, repeated in three slots."""
    f = flux(u, q, w, v, x, t, mu, eta)
    penalized = f[0] * n[0] + f[1] * n[1] + tau[0] * (u[0] - uhat[0])
    return array([0.0, penalized, penalized, penalized])
def ubou(u, q, w, v, x, t, mu, eta, uhat, n, tau):
    """Boundary state: the exact solution exp(-z) * cos(r) in the last three slots."""
    z, r = x[0], x[1]
    uexact = exp(-z) * cos(r)
    return array([u, uexact, uexact, uexact])
def initu(x, mu, eta):
    """Zero initial condition as a one-element array."""
    return array([0.0])
| [
"sympy.exp",
"numpy.array",
"sympy.cos",
"sympy.sin"
] | [((428, 452), 'numpy.array', 'array', (['[0.0, tm, tm, tm]'], {}), '([0.0, tm, tm, tm])\n', (433, 452), False, 'from numpy import array\n'), ((587, 621), 'numpy.array', 'array', (['[u, uexact, uexact, uexact]'], {}), '([u, uexact, uexact, uexact])\n', (592, 621), False, 'from numpy import array\n'), ((676, 688), 'numpy.array', 'array', (['[0.0]'], {}), '([0.0])\n', (681, 688), False, 'from numpy import array\n'), ((562, 569), 'sympy.exp', 'exp', (['(-z)'], {}), '(-z)\n', (565, 569), False, 'from sympy import sin, cos, pi, exp\n'), ((570, 576), 'sympy.cos', 'cos', (['r'], {}), '(r)\n', (573, 576), False, 'from sympy import sin, cos, pi, exp\n'), ((239, 245), 'sympy.sin', 'sin', (['r'], {}), '(r)\n', (242, 245), False, 'from sympy import sin, cos, pi, exp\n'), ((246, 252), 'sympy.exp', 'exp', (['z'], {}), '(z)\n', (249, 252), False, 'from sympy import sin, cos, pi, exp\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import Angle
from ...utils.testing import requires_data, requires_dependency
from ...datasets import gammapy_extra
from ..obs_table import ObservationTable
from ..obs_group import ObservationGroups, ObservationGroupAxis
def make_test_obs_groups():
    """Build a small test grouping on a binned ZENITH axis and a discrete N_TELS axis."""
    zenith_edges = Angle([0, 30, 60, 90], 'deg')
    n_tels_values = [3, 4]
    return ObservationGroups([
        ObservationGroupAxis('ZENITH', zenith_edges, fmt='edges'),
        ObservationGroupAxis('N_TELS', n_tels_values, fmt='values'),
    ])
def make_test_obs_table():
    """Load the test observation table and derive wrapped AZ and ZENITH columns."""
    filename = gammapy_extra.filename('test_datasets/obs/test_observation_table.fits')
    obs_table = ObservationTable.read(filename)
    # Wrap azimuth angles to [-90, 270) deg to match the definition of the
    # azimuth grouping axis.
    obs_table['AZ'] = Angle(obs_table['AZ']).wrap_at(Angle(270., 'deg'))
    obs_table['ZENITH'] = Angle(90, 'deg') - obs_table['ALT']
    return obs_table
@requires_data('gammapy-extra')
def test_obsgroup():
    """Check observation grouping: valid group ids, per-group selection and its inverse."""
    obs_groups = make_test_obs_groups()
    obs_table = make_test_obs_table()
    # Every defined group id must lie in [0, n_groups).
    assert ((0 <= obs_groups.obs_groups_table['GROUP_ID']) &
            (obs_groups.obs_groups_table['GROUP_ID'] < obs_groups.n_groups)).all()
    # group obs list
    obs_table_grouped = obs_groups.apply(obs_table)
    # assert consistency of the grouping
    assert len(obs_table) == len(obs_table_grouped)
    assert ((0 <= obs_table_grouped['GROUP_ID']) &
            (obs_table_grouped['GROUP_ID'] < obs_groups.n_groups)).all()
    # check grouping for one group
    group_id = 5
    obs_table_group_5 = obs_groups.get_group_of_observations(obs_table_grouped, group_id)
    zenith_min = obs_groups.obs_groups_table['ZENITH_MIN'][group_id]
    zenith_max = obs_groups.obs_groups_table['ZENITH_MAX'][group_id]
    n_tels = obs_groups.obs_groups_table['N_TELS'][group_id]
    # All selected observations must fall inside the group's axis bins.
    assert ((zenith_min <= obs_table_group_5['ZENITH']) &
            (obs_table_group_5['ZENITH'] < zenith_max)).all()
    assert (n_tels == obs_table_group_5['N_TELS']).all()
    # check on inverse mask (i.e. all other groups)
    obs_table_grouped_not5 = obs_groups.get_group_of_observations(obs_table_grouped,
                                                                 group_id,
                                                                 inverted=True)
    assert (((zenith_min > obs_table_grouped_not5['ZENITH']) |
             (obs_table_grouped_not5['ZENITH'] >= zenith_max)) |
            (n_tels != obs_table_grouped_not5['N_TELS'])).all()
    # check sum of selections
    assert len(obs_table_group_5) + len(obs_table_grouped_not5) == len(obs_table_grouped)
@requires_dependency('pyyaml')
@requires_data('gammapy-extra')
def test_obsgroup_io():
    """Round-trip an ObservationGroups definition through an ECSV file."""
    obs_groups = make_test_obs_groups()
    filename = 'obs_groups.ecsv'
    obs_groups.write(filename)
    read_back = ObservationGroups.read(filename)
    # Groups read from file must match the ones originally defined.
    assert obs_groups.n_groups == read_back.n_groups
    assert obs_groups.axes[1].name == read_back.axes[1].name
    assert_allclose(obs_groups.obs_groups_table['ZENITH_MAX'], read_back.obs_groups_table['ZENITH_MAX'])
def test_obsgroup_axis():
    """Create a few observation group axis objects and check their bin counts."""
    # Edge-style axis: n_bins is one less than the number of edges.
    alt_edges = Angle([0, 30, 60, 90], 'deg')
    alt_axis = ObservationGroupAxis('ALT', alt_edges, fmt='edges')
    assert alt_axis.n_bins == len(alt_edges) - 1
    az_edges = Angle([-90, 90, 270], 'deg')
    az_axis = ObservationGroupAxis('AZ', az_edges, fmt='edges')
    assert az_axis.n_bins == len(az_edges) - 1
    # Value-style axis: one bin per listed value.
    ntels_values = np.array([3, 4])
    ntels_axis = ObservationGroupAxis('N_TELS', ntels_values, fmt='values')
    assert ntels_axis.n_bins == len(ntels_values)
| [
"numpy.array",
"numpy.testing.assert_allclose",
"astropy.coordinates.Angle"
] | [((496, 525), 'astropy.coordinates.Angle', 'Angle', (['[0, 30, 60, 90]', '"""deg"""'], {}), "([0, 30, 60, 90], 'deg')\n", (501, 525), False, 'from astropy.coordinates import Angle\n'), ((3301, 3408), 'numpy.testing.assert_allclose', 'assert_allclose', (["obs_groups.obs_groups_table['ZENITH_MAX']", "obs_groups2.obs_groups_table['ZENITH_MAX']"], {}), "(obs_groups.obs_groups_table['ZENITH_MAX'], obs_groups2.\n obs_groups_table['ZENITH_MAX'])\n", (3316, 3408), False, 'from numpy.testing import assert_allclose\n'), ((3494, 3523), 'astropy.coordinates.Angle', 'Angle', (['[0, 30, 60, 90]', '"""deg"""'], {}), "([0, 30, 60, 90], 'deg')\n", (3499, 3523), False, 'from astropy.coordinates import Angle\n'), ((3658, 3686), 'astropy.coordinates.Angle', 'Angle', (['[-90, 90, 270]', '"""deg"""'], {}), "([-90, 90, 270], 'deg')\n", (3663, 3686), False, 'from astropy.coordinates import Angle\n'), ((3819, 3835), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (3827, 3835), True, 'import numpy as np\n'), ((1068, 1087), 'astropy.coordinates.Angle', 'Angle', (['(270.0)', '"""deg"""'], {}), "(270.0, 'deg')\n", (1073, 1087), False, 'from astropy.coordinates import Angle\n'), ((1114, 1130), 'astropy.coordinates.Angle', 'Angle', (['(90)', '"""deg"""'], {}), "(90, 'deg')\n", (1119, 1130), False, 'from astropy.coordinates import Angle\n'), ((1037, 1059), 'astropy.coordinates.Angle', 'Angle', (["obs_table['AZ']"], {}), "(obs_table['AZ'])\n", (1042, 1059), False, 'from astropy.coordinates import Angle\n')] |
# coding: UTF-8
# Command-line entry point: trains (or runs prediction with) a Chinese NER model.
import time
import torch
import numpy as np
from train_eval import train
from importlib import import_module
import argparse
from utils import built_train_dataset, built_dev_dataset, get_time_dif
import os
# Restrict all CUDA work to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
parser = argparse.ArgumentParser(description='Chinese Ner Pytorch')
parser.add_argument('--doing', type=str, required=True, help='choose a action: train,test,predict')
parser.add_argument('--model', type=str, required=True, help='choose a model: Bert,Albert,Xlnet,Gpt-2')
args = parser.parse_args()
if __name__ == '__main__':
    # The model name selects a module under Models/ that must expose Config and Model.
    model_name = args.model
    x = import_module('Models.' + model_name)
    config = x.Config()
    # Seed NumPy and every torch RNG so runs are repeatable.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True # ensure identical results on every run
    start_time = time.time()
    print("Loading Datas...")
    train_dataset = built_train_dataset(config)
    dev_dataset = built_dev_dataset(config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
    if args.doing=='train':
        model = x.Model(config).to(config.device)
        train(config, model, train_dataset, dev_dataset)
    if args.doing=='predict':
        model = x.Model(config).to(config.device)
        # NOTE(review): predict is never imported or defined in this module, so
        # this branch raises NameError — confirm where predict should come from.
        predict(config,model,)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"utils.get_time_dif",
"importlib.import_module",
"argparse.ArgumentParser",
"utils.built_train_dataset",
"train_eval.train",
"numpy.random.seed",
"time.time",
"utils.built_dev_dataset"
] | [((274, 332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chinese Ner Pytorch"""'}), "(description='Chinese Ner Pytorch')\n", (297, 332), False, 'import argparse\n'), ((630, 667), 'importlib.import_module', 'import_module', (["('Models.' + model_name)"], {}), "('Models.' + model_name)\n", (643, 667), False, 'from importlib import import_module\n'), ((696, 713), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (710, 713), True, 'import numpy as np\n'), ((718, 738), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (735, 738), False, 'import torch\n'), ((743, 772), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1)'], {}), '(1)\n', (769, 772), False, 'import torch\n'), ((849, 860), 'time.time', 'time.time', ([], {}), '()\n', (858, 860), False, 'import time\n'), ((911, 938), 'utils.built_train_dataset', 'built_train_dataset', (['config'], {}), '(config)\n', (930, 938), False, 'from utils import built_train_dataset, built_dev_dataset, get_time_dif\n'), ((957, 982), 'utils.built_dev_dataset', 'built_dev_dataset', (['config'], {}), '(config)\n', (974, 982), False, 'from utils import built_train_dataset, built_dev_dataset, get_time_dif\n'), ((998, 1022), 'utils.get_time_dif', 'get_time_dif', (['start_time'], {}), '(start_time)\n', (1010, 1022), False, 'from utils import built_train_dataset, built_dev_dataset, get_time_dif\n'), ((1145, 1193), 'train_eval.train', 'train', (['config', 'model', 'train_dataset', 'dev_dataset'], {}), '(config, model, train_dataset, dev_dataset)\n', (1150, 1193), False, 'from train_eval import train\n')] |
from __future__ import division
import numpy as np
from numpy.linalg import solve
def cov_mat(x1, x2, a, b):
    """Squared-exponential covariance matrix between point sets *x1* and *x2*.

    Entry (i, j) is a * exp(-b * (x1[i] - x2[j])**2).
    """
    diffs = x1[:, np.newaxis] - x2
    return a * np.exp(-b * diffs ** 2)
def reg_cov_mat(x, a, b, c):
    """Self-covariance of *x* with white-noise variance *c* added on the diagonal."""
    jitter = c * np.eye(x.shape[0])
    return cov_mat(x, x, a, b) + jitter
def compute_means_covs(ts, t_ref, gp_parms, winsize=0, mean_shift=True):
    """
    Compute the posterior GP means and covariance matrices.

    ts: iterable of (t, y) pairs, one per time series
    t_ref: reference time points the posterior GP is marginalized over
    gp_parms: (a, b, c) — GP amplitude, length-scale factor, and noise term
    winsize: window size, 0 for using the full Gaussian (over t_ref)
    mean_shift: if True, condition on mean-centred observations and add the
        series mean back to the posterior mean

    Returns (post_means, post_covs); with winsize == 0 these have shapes
    (n_ts, n_sample) and (n_ts, n_sample, n_sample), otherwise sliding
    windows of shape (n_ts, n_kernel, winsize[, winsize]).
    """
    a, b, c = gp_parms
    # Prior covariance over the reference grid, shared by every series.
    K_test = cov_mat(t_ref, t_ref, a, b)
    n_ts = len(ts)
    n_sample = len(t_ref)
    if winsize == 0:
        post_means = np.empty((n_ts, n_sample))
        post_covs = np.empty((n_ts, n_sample, n_sample))
    else:
        n_kernel = n_sample - winsize + 1
        post_means = np.empty((n_ts, n_kernel, winsize))
        post_covs = np.empty((n_ts, n_kernel, winsize, winsize))
    for idx, (t, y) in enumerate(ts):
        K_train = reg_cov_mat(t, a, b, c)
        K_train_test = cov_mat(t, t_ref, a, b)
        # Solve K_train^-1 K_train_test without forming the inverse.
        Ktr_inv_Ktt = solve(K_train, K_train_test)
        if mean_shift:
            mu = np.mean(y)
            mean_test = mu + Ktr_inv_Ktt.T.dot(y - mu)
        else:
            mean_test = Ktr_inv_Ktt.T.dot(y)
        full_cov = K_test - K_train_test.T.dot(Ktr_inv_Ktt)
        if winsize == 0:
            post_means[idx] = mean_test
            post_covs[idx] = full_cov
        else:
            # range() replaces the Python-2-only xrange so this runs on Python 3.
            for i in range(n_sample - winsize + 1):
                post_means[idx, i] = mean_test[i:(i + winsize)]
                post_covs[idx, i] = full_cov[i:(i + winsize), i:(i + winsize)]
    return post_means, post_covs
| [
"numpy.mean",
"numpy.eye",
"numpy.linalg.solve",
"numpy.exp",
"numpy.empty"
] | [((126, 168), 'numpy.exp', 'np.exp', (['(-b * (x1[:, np.newaxis] - x2) ** 2)'], {}), '(-b * (x1[:, np.newaxis] - x2) ** 2)\n', (132, 168), True, 'import numpy as np\n'), ((770, 796), 'numpy.empty', 'np.empty', (['(n_ts, n_sample)'], {}), '((n_ts, n_sample))\n', (778, 796), True, 'import numpy as np\n'), ((817, 853), 'numpy.empty', 'np.empty', (['(n_ts, n_sample, n_sample)'], {}), '((n_ts, n_sample, n_sample))\n', (825, 853), True, 'import numpy as np\n'), ((927, 962), 'numpy.empty', 'np.empty', (['(n_ts, n_kernel, winsize)'], {}), '((n_ts, n_kernel, winsize))\n', (935, 962), True, 'import numpy as np\n'), ((983, 1027), 'numpy.empty', 'np.empty', (['(n_ts, n_kernel, winsize, winsize)'], {}), '((n_ts, n_kernel, winsize, winsize))\n', (991, 1027), True, 'import numpy as np\n'), ((1215, 1243), 'numpy.linalg.solve', 'solve', (['K_train', 'K_train_test'], {}), '(K_train, K_train_test)\n', (1220, 1243), False, 'from numpy.linalg import solve\n'), ((235, 253), 'numpy.eye', 'np.eye', (['x.shape[0]'], {}), '(x.shape[0])\n', (241, 253), True, 'import numpy as np\n'), ((1284, 1294), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1291, 1294), True, 'import numpy as np\n')] |
# Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py
# TODO: Add GPL-3.0 License
import numpy as np
"""
Create Your Own N-body Simulation (With Python)
<NAME> (2020) Princeton Univeristy, @PMocz
Simulate orbits of stars interacting due to gravity
Code calculates pairwise forces according to Newton's Law of Gravity
"""
def getAcc(pos, mass, G, softening):
    """
    Newtonian gravitational acceleration on every particle.

    pos: N x 3 matrix of positions
    mass: N x 1 vector of masses
    G: gravitational constant
    softening: softening length added to pairwise distances
    Returns an N x 3 matrix of accelerations.
    """
    # Keep coordinates as N x 1 columns so broadcasting builds N x N grids.
    xs, ys, zs = pos[:, 0:1], pos[:, 1:2], pos[:, 2:3]
    # Pairwise separations r_j - r_i along each axis.
    sep_x = xs.T - xs
    sep_y = ys.T - ys
    sep_z = zs.T - zs
    # Softened 1/r^3; entries with zero distance (self-pairs) stay zero.
    inv_r3 = sep_x**2 + sep_y**2 + sep_z**2 + softening**2
    positive = inv_r3 > 0
    inv_r3[positive] = inv_r3[positive] ** (-1.5)
    # One mass-weighted matrix product per axis, stacked into N x 3.
    accel_cols = [G * (sep * inv_r3) @ mass for sep in (sep_x, sep_y, sep_z)]
    return np.hstack(accel_cols)
def getEnergy(pos, vel, mass, G):
    """
    Kinetic and potential energy of the system.

    pos: N x 3 matrix of positions
    vel: N x 3 matrix of velocities
    mass: N x 1 vector of masses
    G: gravitational constant
    Returns (KE, PE).
    """
    # Kinetic energy: 0.5 * sum(m v^2) over all particles and components.
    kinetic = 0.5 * np.sum(mass * vel ** 2)
    # Pairwise separations along each axis (N x N grids via broadcasting).
    xs, ys, zs = pos[:, 0:1], pos[:, 1:2], pos[:, 2:3]
    sep_x = xs.T - xs
    sep_y = ys.T - ys
    sep_z = zs.T - zs
    # Invert the distances in place, leaving self-pair zeros untouched.
    inv_r = np.sqrt(sep_x ** 2 + sep_y ** 2 + sep_z ** 2)
    nonzero = inv_r > 0
    inv_r[nonzero] = 1.0 / inv_r[nonzero]
    # Strict upper triangle counts each interacting pair exactly once.
    potential = G * np.sum(np.triu(-(mass * mass.T) * inv_r, 1))
    return kinetic, potential
def nbody(mass, pos, vel, N, Nt, dt, G, softening):
    """Run a leapfrog (kick-drift-kick) N-body integration for Nt steps of size dt.

    mass: N x 1 masses; pos, vel: N x 3 state arrays, MUTATED IN PLACE.
    N is accepted but never used in this body. Returns (KE, PE) arrays of
    length Nt + 1 with the energy after each step (index 0 = initial state).
    """
    # Convert to Center-of-Mass frame (mean(m*v)/mean(m) == total momentum / total mass)
    vel -= np.mean(mass * vel, axis=0) / np.mean(mass)
    # calculate initial gravitational accelerations
    acc = getAcc(pos, mass, G, softening)
    # calculate initial energy of system
    # np.ndarray allocates uninitialized storage; every slot is filled below.
    KE = np.ndarray(Nt + 1, dtype=np.float64)
    PE = np.ndarray(Nt + 1, dtype=np.float64)
    KE[0], PE[0] = getEnergy(pos, vel, mass, G)
    t = 0.0
    # Simulation Main Loop — the kick/drift/kick ordering is what makes the
    # scheme symplectic; do not reorder these statements.
    for i in range(Nt):
        # (1/2) kick
        vel += acc * dt / 2.0
        # drift
        pos += vel * dt
        # update accelerations
        acc = getAcc(pos, mass, G, softening)
        # (1/2) kick
        vel += acc * dt / 2.0
        # update time
        t += dt
        # get energy of system
        KE[i + 1], PE[i + 1] = getEnergy(pos, vel, mass, G)
    return KE, PE
| [
"numpy.mean",
"numpy.sqrt",
"numpy.hstack",
"numpy.sum",
"numpy.ndarray",
"numpy.triu"
] | [((1234, 1257), 'numpy.hstack', 'np.hstack', (['(ax, ay, az)'], {}), '((ax, ay, az))\n', (1243, 1257), True, 'import numpy as np\n'), ((2085, 2121), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (2092, 2121), True, 'import numpy as np\n'), ((2664, 2700), 'numpy.ndarray', 'np.ndarray', (['(Nt + 1)'], {'dtype': 'np.float64'}), '(Nt + 1, dtype=np.float64)\n', (2674, 2700), True, 'import numpy as np\n'), ((2710, 2746), 'numpy.ndarray', 'np.ndarray', (['(Nt + 1)'], {'dtype': 'np.float64'}), '(Nt + 1, dtype=np.float64)\n', (2720, 2746), True, 'import numpy as np\n'), ((1720, 1743), 'numpy.sum', 'np.sum', (['(mass * vel ** 2)'], {}), '(mass * vel ** 2)\n', (1726, 1743), True, 'import numpy as np\n'), ((2474, 2501), 'numpy.mean', 'np.mean', (['(mass * vel)'], {'axis': '(0)'}), '(mass * vel, axis=0)\n', (2481, 2501), True, 'import numpy as np\n'), ((2504, 2517), 'numpy.mean', 'np.mean', (['mass'], {}), '(mass)\n', (2511, 2517), True, 'import numpy as np\n'), ((2313, 2349), 'numpy.triu', 'np.triu', (['(-(mass * mass.T) * inv_r)', '(1)'], {}), '(-(mass * mass.T) * inv_r, 1)\n', (2320, 2349), True, 'import numpy as np\n')] |
"""
SGA.html
========
Code to generate HTML output for the various stages of the SGA analysis.
"""
import os
import numpy as np
def get_layer(onegal):
    """Map a galaxy's data release (its 'DR' key) to a Legacy Survey viewer layer.

    Parameters
    ----------
    onegal : mapping with a 'DR' key ('dr6' or 'dr7')

    Returns
    -------
    str : the viewer layer name.

    Raises
    ------
    ValueError : for an unrecognized data release (the message now carries
        the offending value instead of being printed separately).
    """
    layers = {'dr6': 'mzls+bass-dr6', 'dr7': 'decals-dr5'}
    dr = onegal['DR']
    try:
        return layers[dr]
    except KeyError:
        raise ValueError('Unrecognized data release {}!'.format(dr))
def _get_cutouts_one(args):
    """Wrapper function for the multiprocessing: unpack an argument tuple for Pool.map."""
    return get_cutouts_one(*args)
def get_cutouts_one(group, clobber=False):
    """Get viewer cutouts for a single galaxy.

    Downloads a JPEG cutout via wget unless it already exists (clobber=True
    forces a re-download). Relies on the module-level names cutouturl, jpgdir,
    PIXSCALE, get_groupname and group_diameter, which are not defined in this
    chunk — confirm they exist at module scope.
    """
    layer = get_layer(group)
    groupname = get_groupname(group)
    diam = group_diameter(group) # [arcmin]
    size = np.ceil(diam * 60 / PIXSCALE).astype('int') # [pixels]
    imageurl = '{}/?ra={:.8f}&dec={:.8f}&pixscale={:.3f}&size={:g}&layer={}'.format(
        cutouturl, group['ra'], group['dec'], PIXSCALE, size, layer)
    jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
    cmd = 'wget --continue -O {:s} "{:s}"' .format(jpgfile, imageurl)
    if os.path.isfile(jpgfile) and not clobber:
        print('File {} exists...skipping.'.format(jpgfile))
    else:
        # Remove any stale file first so --continue cannot resume a partial download.
        if os.path.isfile(jpgfile):
            os.remove(jpgfile)
        print(cmd)
        os.system(cmd)
def get_cutouts(groupsample, use_nproc=nproc, clobber=False):
    """Get viewer cutouts of the whole sample.

    Fetches one cutout per group via get_cutouts_one, in parallel when
    use_nproc > 1. clobber is forwarded to get_cutouts_one.
    """
    cutoutargs = list()
    for gg in groupsample:
        cutoutargs.append( (gg, clobber) )
    if use_nproc > 1:
        # NOTE(review): the pool is sized with the module-level nproc rather
        # than the use_nproc argument, and multiprocessing is not imported in
        # this chunk — verify both against the full module.
        p = multiprocessing.Pool(nproc)
        p.map(_get_cutouts_one, cutoutargs)
        p.close()
    else:
        for args in cutoutargs:
            _get_cutouts_one(args)
    return
def _add_labels_one(args):
    """Wrapper function for the multiprocessing: unpack an argument tuple for Pool.map."""
    return add_labels_one(*args)
def add_labels_one(group, sample, clobber=False, nothumb=False):
    """Annotate one group's JPEG cutout with its name and a scale bar, saving a PNG.

    Also writes a 300x300 thumbnail via ImageMagick's convert unless
    nothumb=True. Uses the module-level names SGAdir, fonttype, barlen and the
    PIL classes Image/ImageFont/ImageDraw, none of which are imported in this
    chunk — confirm they exist at module scope.
    """
    jpgdir = os.path.join(SGAdir, 'cutouts', 'jpg')
    pngdir = os.path.join(SGAdir, 'cutouts', 'png')
    if not os.path.isdir(pngdir):
        os.mkdir(pngdir)
    groupname = get_groupname(group)
    galaxy = get_galaxy(group, sample, html=True)
    jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
    pngfile = os.path.join(pngdir, '{}.png'.format(groupname))
    thumbfile = os.path.join(pngdir, 'thumb-{}.png'.format(groupname))
    if os.path.isfile(jpgfile):
        if os.path.isfile(pngfile) and not clobber:
            print('File {} exists...skipping.'.format(pngfile))
        else:
            im = Image.open(jpgfile)
            sz = im.size
            # Font size and bar width scale with the image dimension.
            fntsize = np.round(sz[0]/28).astype('int')
            width = np.round(sz[0]/175).astype('int')
            font = ImageFont.truetype(fonttype, size=fntsize)
            draw = ImageDraw.Draw(im)
            # Label the group--
            draw.text((0+fntsize*2, 0+fntsize*2), galaxy, font=font)
            # Add a scale bar--
            # NOTE(review): PIL's im.size is (width, height); this uses sz[1]
            # for x and sz[0] for y, which only matches for square cutouts —
            # verify the intended corner placement.
            x0, x1, yy = sz[1]-fntsize*2-barlen, sz[1]-fntsize*2, sz[0]-fntsize*2
            draw.line((x0, yy, x1, yy), fill='white', width=width)
            im.save(pngfile)
            # Generate a thumbnail
            if not nothumb:
                cmd = 'convert -thumbnail 300x300 {} {}'.format(pngfile, thumbfile)
                os.system(cmd)
def add_labels(groupsample, sample, clobber=False):
    """Annotate every group's JPEG cutout (label + scale bar) via add_labels_one."""
    labelargs = list()
    for group in groupsample:
        labelargs.append((group, sample, clobber))
    # NOTE(review): nproc and multiprocessing are module-level names not
    # visible in this chunk — confirm they exist.
    if nproc > 1:
        p = multiprocessing.Pool(nproc)
        res = p.map(_add_labels_one, labelargs)
        p.close()
    else:
        for args in labelargs:
            res = _add_labels_one(args)
def html_rows(_groupkeep, sample, nperrow=4):
    """Write a gallery table (nperrow mosaics per row) for groups that have a PNG.

    NOTE(review): this writes to a name `html` that is not a parameter — in
    make_html the open file handle is a local inside a `with` block, so this
    only works if `html` is somehow visible here; verify against the full module.
    """
    # Not all objects may have been analyzed.
    these = [os.path.isfile(os.path.join(SGAdir, 'cutouts', 'png', '{}.png'.format(
        get_groupname(gg)))) for gg in _groupkeep]
    groupkeep = _groupkeep[these]
    nrow = np.ceil(len(groupkeep) / nperrow).astype('int')
    # Chunk the kept groups into rows of at most nperrow entries.
    groupsplit = list()
    for ii in range(nrow):
        i1 = nperrow*ii
        i2 = nperrow*(ii+1)
        if i2 > len(groupkeep):
            i2 = len(groupkeep)
        groupsplit.append(groupkeep[i1:i2])
    print('Splitting the sample into {} rows with {} mosaics per row.'.format(nrow, nperrow))
    html.write('<table class="ls-gallery">\n')
    html.write('<tbody>\n')
    for grouprow in groupsplit:
        # First <tr>: thumbnail images linking to the full-size PNGs.
        html.write('<tr>\n')
        for group in grouprow:
            groupname = get_groupname(group)
            galaxy = get_galaxy(group, sample, html=True)
            pngfile = os.path.join('cutouts', 'png', '{}.png'.format(groupname))
            thumbfile = os.path.join('cutouts', 'png', 'thumb-{}.png'.format(groupname))
            img = 'src="{}" alt="{}"'.format(thumbfile, galaxy)
            #img = 'class="ls-gallery" src="{}" alt="{}"'.format(thumbfile, nicename)
            html.write('<td><a href="{}"><img {}></a></td>\n'.format(pngfile, img))
        html.write('</tr>\n')
        # Second <tr>: captions linking to the Legacy Survey sky viewer.
        html.write('<tr>\n')
        for group in grouprow:
            groupname = get_groupname(group)
            galaxy = '{}: {}'.format(groupname.upper(), get_galaxy(group, sample, html=True))
            layer = get_layer(group)
            href = '{}/?layer={}&ra={:.8f}&dec={:.8f}&zoom=12'.format(viewerurl, layer, group['ra'], group['dec'])
            html.write('<td><a href="{}" target="_blank">{}</a></td>\n'.format(href, galaxy))
        html.write('</tr>\n')
    html.write('</tbody>\n')
    html.write('</table>\n')
def make_plots(sample, analysisdir=None, htmldir='.', refband='r',
               band=('g', 'r', 'z'), clobber=False, verbose=True):
    """Make QA plots.

    Builds per-object ellipse, Sersic and montage-coadd plots into
    htmldir/<objid>/. The helpers sample_trends, get_objid and the qa_*
    functions are not defined in this chunk — confirm they exist at module
    scope. refband is currently unused in this body.
    """
    sample_trends(sample, htmldir, analysisdir=analysisdir, verbose=verbose)
    for gal in sample:
        objid, objdir = get_objid(gal, analysisdir=analysisdir)
        htmlobjdir = os.path.join(htmldir, '{}'.format(objid))
        if not os.path.isdir(htmlobjdir):
            os.makedirs(htmlobjdir, exist_ok=True)
        # Build the ellipse plots.
        qa_ellipse_results(objid, objdir, htmlobjdir, band=band,
                           clobber=clobber, verbose=verbose)
        qa_sersic_results(objid, objdir, htmlobjdir, band=band,
                          clobber=clobber, verbose=verbose)
        # Build the montage coadds.
        qa_montage_coadds(objid, objdir, htmlobjdir, clobber=clobber, verbose=verbose)
        # Build the MGE plots.
        #qa_mge_results(objid, objdir, htmlobjdir, refband='r', band=band,
        #               clobber=clobber, verbose=verbose)
def _javastring():
    """Return a string that embeds a date in a webpage.

    The snippet renders the page's last-modified date client-side via
    document.lastModified; the pre-2000 adjustment handles two-digit years
    from old getYear() implementations.
    """
    import textwrap
    js = textwrap.dedent("""
    <SCRIPT LANGUAGE="JavaScript">
    var months = new Array(13);
    months[1] = "January";
    months[2] = "February";
    months[3] = "March";
    months[4] = "April";
    months[5] = "May";
    months[6] = "June";
    months[7] = "July";
    months[8] = "August";
    months[9] = "September";
    months[10] = "October";
    months[11] = "November";
    months[12] = "December";
    var dateObj = new Date(document.lastModified)
    var lmonth = months[dateObj.getMonth() + 1]
    var date = dateObj.getDate()
    var fyear = dateObj.getYear()
    if (fyear < 2000)
    fyear = fyear + 1900
    document.write(" " + fyear + " " + lmonth + " " + date)
    </SCRIPT>
    """)
    return js
def make_html(sample=None, htmldir=None, dr='dr6-dr7', makeplots=True, clobber=False,
              verbose=True):
    """Make the HTML pages.

    Builds the gallery index.html in htmldir and, when makeplots=True, the
    per-object QA pages. NOTE(review): several names used below (legacyhalos,
    groupsample, analysisdir, refband, band) are not defined in this function
    or visible at module scope in this chunk — verify against the full module.
    """
    import SGA.io
    if htmldir is None:
        htmldir = SGA.io.html_dir()
    sample = SGA.io.read_parent(dr=dr)
    objid, objdir = legacyhalos.io.get_objid(sample)
    # With an empty reject list the loop below never sets toss; presumably a
    # hook for hand-curated group rejections.
    reject = []
    toss = np.zeros(len(groupsample), dtype=bool)
    for ii, gg in enumerate(groupsample['groupid']):
        for rej in np.atleast_1d(reject):
            toss[ii] = rej in gg.lower()
            if toss[ii]:
                break
    print('Rejecting {} groups.'.format(np.sum(toss)))
    groupkeep = groupsample[~toss]
    if np.sum(toss) > 0:
        grouprej = groupsample[toss]
    else:
        grouprej = []
    # Write the last-updated date to a webpage.
    js = _javastring()
    # Get the viewer link
    def _viewer_link(gal, dr):
        # Zoom out for galaxies whose cutout would exceed 400 pixels.
        baseurl = 'http://legacysurvey.org/viewer/'
        width = 2 * cutout_radius_150kpc(redshift=gal['z'], pixscale=0.262) # [pixels]
        if width > 400:
            zoom = 14
        else:
            zoom = 15
        viewer = '{}?ra={:.6f}&dec={:.6f}&zoom={:g}&layer=decals-{}'.format(
            baseurl, gal['ra'], gal['dec'], zoom, dr)
        return viewer
    homehtml = 'index.html'
    # Build the home (index.html) page--
    if not os.path.exists(htmldir):
        os.makedirs(htmldir)
    htmlfile = os.path.join(htmldir, homehtml)
    with open(htmlfile, 'w') as html:
        html.write('<html><head>\n')
        html.write('<style type="text/css">\n')
        html.write('table.ls-gallery {width: 90%;}\n')
        html.write('p.ls-gallery {width: 80%;}\n')
        html.write('</style>\n')
        html.write('</head><body>\n')
        html.write('<h1>Siena Galaxy Atlas 2020 (SGA-2020)</h1>\n')
        html.write("""<p class="ls-gallery">Each thumbnail links to a larger image while the galaxy
        name below each thumbnail links to the <a href="http://legacysurvey.org/viewer">Sky Viewer</a>.
        For reference, the horizontal white bar in the lower-right corner of each image represents
        one arcminute.</p>\n""")
        html_rows(groupkeep, sample)
        html.write('<br /><br />\n')
        html.write('<b><i>Last updated {}</b></i>\n'.format(js))
        html.write('</body></html>\n')
    if makeplots:
        make_plots(sample, analysisdir=analysisdir, htmldir=htmldir, refband=refband,
                   band=band, clobber=clobber, verbose=verbose)
| [
"textwrap.dedent",
"os.path.exists",
"numpy.ceil",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"numpy.sum",
"os.remove",
"os.path.isdir",
"os.mkdir",
"os.system",
"numpy.round",
"numpy.atleast_1d"
] | [((1931, 1969), 'os.path.join', 'os.path.join', (['SGAdir', '"""cutouts"""', '"""jpg"""'], {}), "(SGAdir, 'cutouts', 'jpg')\n", (1943, 1969), False, 'import os\n'), ((1983, 2021), 'os.path.join', 'os.path.join', (['SGAdir', '"""cutouts"""', '"""png"""'], {}), "(SGAdir, 'cutouts', 'png')\n", (1995, 2021), False, 'import os\n'), ((2379, 2402), 'os.path.isfile', 'os.path.isfile', (['jpgfile'], {}), '(jpgfile)\n', (2393, 2402), False, 'import os\n'), ((6735, 7443), 'textwrap.dedent', 'textwrap.dedent', (['"""\n <SCRIPT LANGUAGE="JavaScript">\n var months = new Array(13);\n months[1] = "January";\n months[2] = "February";\n months[3] = "March";\n months[4] = "April";\n months[5] = "May";\n months[6] = "June";\n months[7] = "July";\n months[8] = "August";\n months[9] = "September";\n months[10] = "October";\n months[11] = "November";\n months[12] = "December";\n var dateObj = new Date(document.lastModified)\n var lmonth = months[dateObj.getMonth() + 1]\n var date = dateObj.getDate()\n var fyear = dateObj.getYear()\n if (fyear < 2000)\n fyear = fyear + 1900\n document.write(" " + fyear + " " + lmonth + " " + date)\n </SCRIPT>\n """'], {}), '(\n """\n <SCRIPT LANGUAGE="JavaScript">\n var months = new Array(13);\n months[1] = "January";\n months[2] = "February";\n months[3] = "March";\n months[4] = "April";\n months[5] = "May";\n months[6] = "June";\n months[7] = "July";\n months[8] = "August";\n months[9] = "September";\n months[10] = "October";\n months[11] = "November";\n months[12] = "December";\n var dateObj = new Date(document.lastModified)\n var lmonth = months[dateObj.getMonth() + 1]\n var date = dateObj.getDate()\n var fyear = dateObj.getYear()\n if (fyear < 2000)\n fyear = fyear + 1900\n document.write(" " + fyear + " " + lmonth + " " + date)\n </SCRIPT>\n """\n )\n', (6750, 7443), False, 'import textwrap\n'), ((8878, 8909), 'os.path.join', 'os.path.join', (['htmldir', 'homehtml'], {}), '(htmldir, homehtml)\n', (8890, 8909), False, 'import os\n'), ((1096, 
1119), 'os.path.isfile', 'os.path.isfile', (['jpgfile'], {}), '(jpgfile)\n', (1110, 1119), False, 'import os\n'), ((1218, 1241), 'os.path.isfile', 'os.path.isfile', (['jpgfile'], {}), '(jpgfile)\n', (1232, 1241), False, 'import os\n'), ((1301, 1315), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1310, 1315), False, 'import os\n'), ((2033, 2054), 'os.path.isdir', 'os.path.isdir', (['pngdir'], {}), '(pngdir)\n', (2046, 2054), False, 'import os\n'), ((2064, 2080), 'os.mkdir', 'os.mkdir', (['pngdir'], {}), '(pngdir)\n', (2072, 2080), False, 'import os\n'), ((7921, 7942), 'numpy.atleast_1d', 'np.atleast_1d', (['reject'], {}), '(reject)\n', (7934, 7942), True, 'import numpy as np\n'), ((8129, 8141), 'numpy.sum', 'np.sum', (['toss'], {}), '(toss)\n', (8135, 8141), True, 'import numpy as np\n'), ((8809, 8832), 'os.path.exists', 'os.path.exists', (['htmldir'], {}), '(htmldir)\n', (8823, 8832), False, 'import os\n'), ((8842, 8862), 'os.makedirs', 'os.makedirs', (['htmldir'], {}), '(htmldir)\n', (8853, 8862), False, 'import os\n'), ((737, 766), 'numpy.ceil', 'np.ceil', (['(diam * 60 / PIXSCALE)'], {}), '(diam * 60 / PIXSCALE)\n', (744, 766), True, 'import numpy as np\n'), ((1255, 1273), 'os.remove', 'os.remove', (['jpgfile'], {}), '(jpgfile)\n', (1264, 1273), False, 'import os\n'), ((2415, 2438), 'os.path.isfile', 'os.path.isfile', (['pngfile'], {}), '(pngfile)\n', (2429, 2438), False, 'import os\n'), ((5972, 5997), 'os.path.isdir', 'os.path.isdir', (['htmlobjdir'], {}), '(htmlobjdir)\n', (5985, 5997), False, 'import os\n'), ((6011, 6049), 'os.makedirs', 'os.makedirs', (['htmlobjdir'], {'exist_ok': '(True)'}), '(htmlobjdir, exist_ok=True)\n', (6022, 6049), False, 'import os\n'), ((8072, 8084), 'numpy.sum', 'np.sum', (['toss'], {}), '(toss)\n', (8078, 8084), True, 'import numpy as np\n'), ((3298, 3312), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3307, 3312), False, 'import os\n'), ((2618, 2638), 'numpy.round', 'np.round', (['(sz[0] / 28)'], {}), '(sz[0] / 
28)\n', (2626, 2638), True, 'import numpy as np\n'), ((2671, 2692), 'numpy.round', 'np.round', (['(sz[0] / 175)'], {}), '(sz[0] / 175)\n', (2679, 2692), True, 'import numpy as np\n')] |
import numpy as np
import numpy.linalg as la
import scipy
import skimage
import PIL
from PIL import Image as PILImage
import TimestampedPacketMotionData_pb2
import argparse
import os
import google.protobuf.json_format
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import TimestampedImage_pb2
import Pose3d_pb2
import cv2
import PoseSequenceLabel_pb2
import bisect
import FrameId_pb2
import Vector3dStamped_pb2
import scipy.interpolate
import deepracing.protobuf_utils as protobuf_utils
import deepracing.pose_utils as pose_utils
import deepracing.arma_utils
import deepracing
def sortKey(packet):
    # Sort key for motion packets: the UDP header's session timestamp.
    return packet.udp_packet.m_header.m_sessionTime
parser = argparse.ArgumentParser()
parser.add_argument("motion_data_dir", help="Path to motion data to generate trackfile from", type=str)
parser.add_argument("--trackfileout", help="Path to an ARMA format matrix file", type=str, default="track.arma")
parser.add_argument("--json", action="store_true", help="Look for json files in motion_data_dir instead of binary .pb files")
args = parser.parse_args()
motion_data_dir = args.motion_data_dir
# NOTE(review): use_json is assigned but args.json is passed directly below,
# leaving this variable unused.
use_json = args.json
trackfileout = args.trackfileout
# Load every motion packet and order them by session time.
motion_packets = sorted(protobuf_utils.getAllMotionPackets(motion_data_dir, args.json), key=sortKey)
#print(motion_packets)
car_index = 0
poses = [ protobuf_utils.extractPose(p.udp_packet, car_index = car_index) for p in motion_packets]
# t: timestamps; X: positions; Xdot: velocities, all aligned with motion_packets.
t = np.array([sortKey(p) for p in motion_packets])
X = np.array([ pose[0] for pose in poses])
Xdot = np.array([protobuf_utils.extractVelocity(p.udp_packet, car_index = car_index) for p in motion_packets])
# Drop samples with duplicate timestamps, keeping the first occurrence.
_,unique_indices = np.unique(t,return_index=True)
t = t[unique_indices]
X = X[unique_indices]
Xdot = Xdot[unique_indices]
# Unit velocity vectors for the quiver plot below.
Xdotnorm = Xdot.copy()
for i in range(Xdotnorm.shape[0]):
    Xdotnorm[i,:] = Xdotnorm[i,:]/la.norm(Xdotnorm[i,:])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], c='r', marker='o', s = np.ones_like(X[:,0]))
ax.quiver(X[:,0], X[:,1], X[:,2], Xdotnorm[:,0], Xdotnorm[:,1], Xdotnorm[:,2])
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
# NOTE(review): for a bare filename (e.g. the default "track.arma"),
# os.path.dirname returns '' and os.makedirs('') raises — confirm intended usage.
if not os.path.isdir(os.path.dirname(trackfileout)):
    os.makedirs(os.path.dirname(trackfileout))
deepracing.arma_utils.writeArmaFile(trackfileout,t,X,Xdot) | [
"numpy.ones_like",
"deepracing.protobuf_utils.extractVelocity",
"numpy.unique",
"argparse.ArgumentParser",
"deepracing.arma_utils.writeArmaFile",
"numpy.array",
"matplotlib.pyplot.figure",
"deepracing.protobuf_utils.getAllMotionPackets",
"os.path.dirname",
"deepracing.protobuf_utils.extractPose",
... | [((684, 709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (707, 709), False, 'import argparse\n'), ((1471, 1508), 'numpy.array', 'np.array', (['[pose[0] for pose in poses]'], {}), '([pose[0] for pose in poses])\n', (1479, 1508), True, 'import numpy as np\n'), ((1640, 1671), 'numpy.unique', 'np.unique', (['t'], {'return_index': '(True)'}), '(t, return_index=True)\n', (1649, 1671), True, 'import numpy as np\n'), ((1867, 1879), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1877, 1879), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2330), 'deepracing.arma_utils.writeArmaFile', 'deepracing.arma_utils.writeArmaFile', (['trackfileout', 't', 'X', 'Xdot'], {}), '(trackfileout, t, X, Xdot)\n', (2304, 2330), False, 'import deepracing\n'), ((1201, 1263), 'deepracing.protobuf_utils.getAllMotionPackets', 'protobuf_utils.getAllMotionPackets', (['motion_data_dir', 'args.json'], {}), '(motion_data_dir, args.json)\n', (1235, 1263), True, 'import deepracing.protobuf_utils as protobuf_utils\n'), ((1326, 1387), 'deepracing.protobuf_utils.extractPose', 'protobuf_utils.extractPose', (['p.udp_packet'], {'car_index': 'car_index'}), '(p.udp_packet, car_index=car_index)\n', (1352, 1387), True, 'import deepracing.protobuf_utils as protobuf_utils\n'), ((1527, 1592), 'deepracing.protobuf_utils.extractVelocity', 'protobuf_utils.extractVelocity', (['p.udp_packet'], {'car_index': 'car_index'}), '(p.udp_packet, car_index=car_index)\n', (1557, 1592), True, 'import deepracing.protobuf_utils as protobuf_utils\n'), ((1836, 1859), 'numpy.linalg.norm', 'la.norm', (['Xdotnorm[i, :]'], {}), '(Xdotnorm[i, :])\n', (1843, 1859), True, 'import numpy.linalg as la\n'), ((1981, 2002), 'numpy.ones_like', 'np.ones_like', (['X[:, 0]'], {}), '(X[:, 0])\n', (1993, 2002), True, 'import numpy as np\n'), ((2190, 2219), 'os.path.dirname', 
'os.path.dirname', (['trackfileout'], {}), '(trackfileout)\n', (2205, 2219), False, 'import os\n'), ((2238, 2267), 'os.path.dirname', 'os.path.dirname', (['trackfileout'], {}), '(trackfileout)\n', (2253, 2267), False, 'import os\n')] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from skimage.draw import line_aa
import matplotlib.pyplot as plt
def draw_line(image, y1, x1, y2, x2, line_type):
    """Paint a line of zeros (black) onto *image* between (y1, x1) and (y2, x2).

    A "dotted" line_type skips every fifth pixel along the rasterized path;
    anything else yields a solid line. Mutates *image* in place and returns it.
    """
    rows, cols, _ = line_aa(y1, x1, y2, x2)
    if line_type == "dotted":
        keep = np.ones(rows.size, dtype=bool)
        keep[::5] = False
        rows, cols = rows[keep], cols[keep]
    image[rows, cols] = 0
    return image
def draw_box(bounding_box, image, line_type, is_xywh=True):
    """Draw a rectangle on *image* with four draw_line calls.

    bounding_box is (x, y, w, h) when is_xywh is True, otherwise
    (x1, y1, x2, y2). All corners are clamped into the image bounds.
    Mutates and returns *image*.
    """
    image_h, image_w = image.shape[-2:]
    if is_xywh:
        x, y, w, h = bounding_box
        x1, y1, x2, y2 = x, y, x + w, y + h
    else:
        x1, y1, x2, y2 = bounding_box
    # Clamp each coordinate into [0, dim - 1] after truncating to int.
    x1 = max(0, min(int(x1), image_w - 1))
    x2 = max(0, min(int(x2), image_w - 1))
    y1 = max(0, min(int(y1), image_h - 1))
    y2 = max(0, min(int(y2), image_h - 1))
    # Edges drawn in the original order: left, bottom, right, top.
    for (ra, ca, rb, cb) in ((y1, x1, y2, x1), (y2, x1, y2, x2),
                             (y2, x2, y1, x2), (y1, x2, y1, x1)):
        image = draw_line(image, ra, ca, rb, cb, line_type)
    return image
def draw_boxes_on_image(pred, label, images):
    ''' Function to draw multiple bounding boxes on the images. Predicted bounding boxes will be
    presented with a dotted line and actual boxes are presented with a solid line.
    Note: pred, label and images are all modified in place (the box
    coordinates are rescaled from fractions to pixels).
    Parameters
    ----------
    pred: [n x [x, y, w, h]]
        The predicted bounding boxes in percentages.
        n is the number of bounding boxes predicted on an image
    label: [n x [x, y, w, h]]
        The actual bounding boxes in percentages
        n is the number of bounding boxes predicted on an image
    images: [[np.array]]
        The corresponding images.
    Returns
    -------
    images: [[np.array]]
        Images with bounding boxes printed on them.
    '''
    image_h, image_w = images.shape[-2:]
    # Convert fractional label coordinates to pixels once, for all images.
    label[:, :, 0], label[:, :, 1] = label[:, :, 0] * image_w, label[:, :, 1] * image_h
    label[:, :, 2], label[:, :, 3] = label[:, :, 2] * image_w, label[:, :, 3] * image_h
    for i in range(len(pred)):
        # Convert this image's predicted boxes to pixel coordinates.
        pred_b = pred[i]
        pred_b[:, 0], pred_b[:, 1] = pred_b[:, 0] * image_w, pred_b[:, 1] * image_h
        pred_b[:, 2], pred_b[:, 3] = pred_b[:, 2] * image_w, pred_b[:, 3] * image_h
        image = images[i, 0]
        for j in range(pred_b.shape[0]):
            image = draw_box(pred_b[j, :], image, line_type="dotted")
        for k in range(label.shape[1]):
            image = draw_box(label[i, k, :], image, line_type="solid")
        images[i, 0, :, :] = image
    return images
def draw_box_on_image(pred, label, images):
    ''' Function to draw bounding boxes on the images. Predicted bounding boxes will be
    presented with a dotted line and actual boxes are presented with a solid line.
    Note: pred, label and images are all modified in place (the box
    coordinates are rescaled from fractions to pixels).
    Parameters
    ----------
    pred: [[x, y, w, h]]
        The predicted bounding boxes in percentages
    label: [[x, y, w, h]]
        The actual bounding boxes in percentages
    images: [[np.array]]
        The corresponding images.
    Returns
    -------
    images: [[np.array]]
        Images with bounding boxes printed on them.
    '''
    image_h, image_w = images.shape[-2:]
    # Convert fractional coordinates to pixels (one box per image).
    pred[:, 0], pred[:, 1] = pred[:, 0] * image_w, pred[:, 1] * image_h
    pred[:, 2], pred[:, 3] = pred[:, 2] * image_w, pred[:, 3] * image_h
    label[:, 0], label[:, 1] = label[:, 0] * image_w, label[:, 1] * image_h
    label[:, 2], label[:, 3] = label[:, 2] * image_w, label[:, 3] * image_h
    for i in range(images.shape[0]):
        image = images[i, 0]
        image = draw_box(pred[i, :], image, line_type="dotted")
        image = draw_box(label[i, :], image, line_type="solid")
        images[i, 0, :, :] = image
    return images
| [
"skimage.draw.line_aa",
"numpy.arange"
] | [((265, 288), 'skimage.draw.line_aa', 'line_aa', (['y1', 'x1', 'y2', 'x2'], {}), '(y1, x1, y2, x2)\n', (272, 288), False, 'from skimage.draw import line_aa\n'), ((346, 370), 'numpy.arange', 'np.arange', (['(0)', 'rr.size', '(5)'], {}), '(0, rr.size, 5)\n', (355, 370), True, 'import numpy as np\n'), ((399, 423), 'numpy.arange', 'np.arange', (['(0)', 'cc.size', '(5)'], {}), '(0, cc.size, 5)\n', (408, 423), True, 'import numpy as np\n')] |
import time
import nltk
import numpy as np
import pandas as pd
from textblob.classifiers import DecisionTreeClassifier
from textblob.classifiers import NaiveBayesClassifier
nltk.download('stopwords')
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))
ignoreDT = True
def remove_prefix(theList, prefix):
    """Return a copy of *theList* with *prefix* stripped from each entry that starts with it."""
    plen = len(prefix)
    stripped = []
    for text in theList:
        if text.startswith(prefix):
            stripped.append(text[plen:])
        else:
            stripped.append(text)
    return stripped
def remove_stopwords(theList):
    """Drop English stopwords (module-level ``stopset``) from every text in *theList*."""
    cleaned = []
    for text in theList:
        kept = [word for word in text.split() if word not in stopset]
        cleaned.append(' '.join(kept))
    return cleaned
if __name__ == '__main__':
    # Load the labelled ticket data; Title is the text, Team is the target.
    train = pd.read_csv('train.csv')
    test = pd.read_csv('test.csv')
    # Pre-process: remove stopwords from titles, strip the 'UCM ' team prefix.
    train_data = remove_stopwords(train.Title)
    train_target = remove_prefix(train.Team, 'UCM ')
    test_data = remove_stopwords(test.Title)
    test_id = test.Id
    test_target = remove_prefix(test.Team, 'UCM ')
    # TextBlob classifiers expect (text, label) pairs.
    train = list(zip(train_data, train_target))
    test = list(zip(test_data, test_target))
    # --- Baseline 1: TextBlob NaiveBayes ---
    start_time = time.time()
    cl = NaiveBayesClassifier(train)
    # Compute accuracy
    print("NaiveBayes Accuracy: {0}".format(cl.accuracy(test)))
    # Show 10 most informative features
    cl.show_informative_features(10)
    print(cl.informative_features(10))
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    # --- Baseline 2 (optional): TextBlob DecisionTree, slow, off by default ---
    if (not ignoreDT):
        start_time = time.time()
        cl = DecisionTreeClassifier(train)
        print("DecisionTree Accuracy: {0}".format(cl.accuracy(test)))
        print(cl.pseudocode())
        elapsed_time = time.time() - start_time
        print(elapsed_time)
    # --- Baseline 3: sklearn pipeline (stemmed bag-of-words -> tf-idf -> MultinomialNB) ---
    start_time = time.time()
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.feature_extraction.text import TfidfTransformer
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.pipeline import Pipeline
    class StemmedCountVectorizer(CountVectorizer):
        # CountVectorizer whose analyzer additionally stems every token.
        def build_analyzer(self):
            analyzer = super(StemmedCountVectorizer, self).build_analyzer()
            return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
    from nltk.stem.snowball import SnowballStemmer
    stemmer = SnowballStemmer("english", ignore_stopwords=True)
    stemmed_count_vect = StemmedCountVectorizer(stop_words='english')
    text_clf = Pipeline([('vect', stemmed_count_vect),
                         ('tfidf', TfidfTransformer()),
                         ('clf', MultinomialNB()),
                         ])
    text_clf.fit(train_data, train_target)
    predicted = text_clf.predict(test_data)
    print("MultinomialNB Accuracy: {0}".format(np.mean(predicted == test_target)))
    # Persist per-sample predictions for inspection.
    df = pd.DataFrame(list(zip(test_data, predicted, test_target)))
    df.to_csv('MB_list.csv', index=False)
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    # --- Baseline 4: same features with a linear SVM (SGD, hinge loss) ---
    start_time = time.time()
    from sklearn.linear_model import SGDClassifier
    text_clf = Pipeline([('vect', stemmed_count_vect),
                         ('tfidf', TfidfTransformer()),
                         ('clf', SGDClassifier(loss='hinge', penalty='l2',
                                               alpha=1e-3, random_state=42)),
                         ])
    text_clf.fit(train_data, train_target)
    predicted = text_clf.predict(test_data)
    print("SGD Accuracy: {0}".format(np.mean(predicted == test_target)))
    df = pd.DataFrame(list(zip(test_id, test_data, predicted, test_target)))
    df.to_csv('SGD_list.csv', index=False)
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    # Per-class precision/recall/F1 for the last (SGD) model.
    from sklearn import metrics
    print(metrics.classification_report(test_target, predicted))
| [
"numpy.mean",
"sklearn.feature_extraction.text.TfidfTransformer",
"textblob.classifiers.NaiveBayesClassifier",
"sklearn.linear_model.SGDClassifier",
"nltk.corpus.stopwords.words",
"nltk.download",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"nltk.stem.snowball.SnowballStemmer",
"te... | [((175, 201), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (188, 201), False, 'import nltk\n'), ((251, 277), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (266, 277), False, 'from nltk.corpus import stopwords\n'), ((594, 618), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (605, 618), True, 'import pandas as pd\n'), ((630, 653), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (641, 653), True, 'import pandas as pd\n'), ((983, 994), 'time.time', 'time.time', ([], {}), '()\n', (992, 994), False, 'import time\n'), ((1004, 1031), 'textblob.classifiers.NaiveBayesClassifier', 'NaiveBayesClassifier', (['train'], {}), '(train)\n', (1024, 1031), False, 'from textblob.classifiers import NaiveBayesClassifier\n'), ((1599, 1610), 'time.time', 'time.time', ([], {}), '()\n', (1608, 1610), False, 'import time\n'), ((2137, 2186), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {'ignore_stopwords': '(True)'}), "('english', ignore_stopwords=True)\n", (2152, 2186), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((2815, 2826), 'time.time', 'time.time', ([], {}), '()\n', (2824, 2826), False, 'import time\n'), ((1255, 1266), 'time.time', 'time.time', ([], {}), '()\n', (1264, 1266), False, 'import time\n'), ((1349, 1360), 'time.time', 'time.time', ([], {}), '()\n', (1358, 1360), False, 'import time\n'), ((1374, 1403), 'textblob.classifiers.DecisionTreeClassifier', 'DecisionTreeClassifier', (['train'], {}), '(train)\n', (1396, 1403), False, 'from textblob.classifiers import DecisionTreeClassifier\n'), ((2748, 2759), 'time.time', 'time.time', ([], {}), '()\n', (2757, 2759), False, 'import time\n'), ((3472, 3483), 'time.time', 'time.time', ([], {}), '()\n', (3481, 3483), False, 'import time\n'), ((3565, 3618), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_target', 
'predicted'], {}), '(test_target, predicted)\n', (3594, 3618), False, 'from sklearn import metrics\n'), ((1528, 1539), 'time.time', 'time.time', ([], {}), '()\n', (1537, 1539), False, 'import time\n'), ((2583, 2616), 'numpy.mean', 'np.mean', (['(predicted == test_target)'], {}), '(predicted == test_target)\n', (2590, 2616), True, 'import numpy as np\n'), ((3296, 3329), 'numpy.mean', 'np.mean', (['(predicted == test_target)'], {}), '(predicted == test_target)\n', (3303, 3329), True, 'import numpy as np\n'), ((2348, 2366), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2364, 2366), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((2402, 2417), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2415, 2417), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((2969, 2987), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2985, 2987), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((3023, 3094), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""', 'penalty': '"""l2"""', 'alpha': '(0.001)', 'random_state': '(42)'}), "(loss='hinge', penalty='l2', alpha=0.001, random_state=42)\n", (3036, 3094), False, 'from sklearn.linear_model import SGDClassifier\n')] |
# legacy app: mic pos calibration with a 40kHz ultrasonic distance sensor
import numpy as np
import math
import time
import cv2
import copy
import nidaqmx
import nidaqmx.stream_readers
import nidaqmx.constants
import pyrealsense2 as rs
from cv2 import aruco
import h5py
import platform
if platform.system() == "Windows":
import winsound
import scipy.signal
import scipy.optimize
import datetime
import yaml
import usvcam.tool as tool
import sys
# Packaged usvcam configuration file (installed under sys.prefix/etc).
config_path = sys.prefix + '/etc/usvcam/config.yaml'
def rs_start():
    """Start the RealSense pipeline (640x480 color + depth at 30 fps).

    Returns
    -------
    pipe : rs.pipeline        started pipeline
    pc : rs.pointcloud        point-cloud helper bound to nothing yet
    depth_intrin, color_intrin : stream intrinsics
    depth_to_color_extrin : extrinsics mapping depth frame -> color frame
    """
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30) # color
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30) # depth
    profile = pipe.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_sensor.set_option(rs.option.visual_preset, 5) # short range
    pc = rs.pointcloud()
    ### get camera parameters from the first frame pair
    frames = pipe.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
    color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
    depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
    ######
    return pipe, pc, depth_intrin, color_intrin, depth_to_color_extrin
def update_markerpos(color_image, dict_aruco, markerpos):
    """Detect the two speaker ArUco markers and measure their movement.

    Parameters
    ----------
    color_image : current camera frame.
    dict_aruco : ArUco dictionary used for detection.
    markerpos : np.ndarray, shape (2, 2)
        Previous (x, y) pixel centers of markers 0 and 1.

    Returns
    -------
    markerpos : updated (2, 2) centers; NaN rows where a marker was not seen.
    d_markerpos : float
        Largest center displacement since the previous frame, or inf when
        any marker is currently missing.
    corners, ids : raw detection results, passed through for the callers.
    """
    markerpos_pre = copy.deepcopy(markerpos)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(color_image, dict_aruco)
    markerpos = np.zeros([2, 2])
    markerpos[:,:] = np.nan
    if ids is not None:
        for i in range(len(ids)):
            ii = ids[i][0]
            # Only markers 0 and 1 are tracked (markerpos has two rows).
            # BUGFIX: the original guard was `ii > 2`, so a detected marker
            # with id 2 raised an IndexError on markerpos[2, :].
            if ii > 1:
                continue
            # Marker center = mean of its four corner points.
            x = np.mean(corners[i][0,:,:], axis=0)
            markerpos[ii,:] = x
    if np.sum(np.isnan(markerpos)) > 0:
        # At least one marker is missing -> treat as "still moving".
        d_markerpos = float('inf')
    else:
        d_markerpos = np.max(np.linalg.norm(markerpos - markerpos_pre, axis=1))
    return markerpos, d_markerpos, corners, ids
def draw_labels(color_image, ids, corners, px_checked, i_mode):
    """Draw tracked-marker outlines (color-coded by app mode) and the
    previously calibrated pixel positions onto *color_image* in place."""
    # clr[i_mode][marker_id]: one BGR pair (marker 0 / marker 1) per mode.
    clr = [[[0, 0, 255], [0, 0, 200]], [[0, 255, 255], [0, 200, 200]], [[0, 255, 0], [0, 200, 0]]]
    if ids is not None:
        for i in range(len(ids)):
            ii = ids[i][0]
            # Only markers 0 and 1 have a color entry (inner lists have 2
            # items); the original `ii > 2` guard let id 2 index out of range.
            if ii > 1:
                continue
            # np.int was removed in NumPy 1.24; cv2.polylines needs int32 points.
            cv2.polylines(color_image, corners[i].astype(np.int32), True, clr[i_mode][ii], thickness=5)
    for px in px_checked:
        cv2.circle(color_image, px, 8, [0, 255, 0], thickness=1)
def draw_snd(x, w, v_dispmax):
    """Render the multi-channel waveform snippet *x* as a 200 x w strip image.

    Each of the daq_n_ch channels is drawn as a colored polyline, scaled so
    that +/- v_dispmax spans the full strip height.
    """
    img_snd = np.zeros([200, w, 3], np.uint8)
    clr = [[0, 0, 255], [255, 255, 0], [255, 0, 255], [0, 255, 255]]
    for ch in range(daq_n_ch):
        trace = x[ch, :]
        n = len(trace)
        # Horizontal: sample index mapped onto the strip width.
        xs = np.arange(n) / n * img_snd.shape[1]
        # Vertical: amplitude centered on the strip's middle row.
        ys = trace / v_dispmax * img_snd.shape[0] / 2 + img_snd.shape[0] / 2
        pts = np.vstack([xs, ys]).T
        cv2.polylines(img_snd, [pts.astype(np.int32)], False, clr[ch])
    return img_snd
def get_speaker_pos(ids, corners, pc, color_image, color_frame, depth_frame):
    """Estimate the 3-D speaker position (midpoint of the two markers).

    Returns
    -------
    p : 3-D point in the color camera frame (meters).
    px : the corresponding pixel in the color image.
    """
    # Label each marker's pixels: class = marker id + 1 (0 stays background).
    mask_image = np.zeros((color_image.shape[0],color_image.shape[1],1), np.uint8)
    if ids is not None:
        for i in range(len(ids)):
            c = int(ids[i][0])+1
            # np.int was removed in NumPy 1.24; use the concrete int32 dtype.
            cv2.fillPoly(mask_image, corners[i].astype(np.int32), c)
    pc.map_to(color_frame)
    points = pc.calculate(depth_frame)
    v, t = points.get_vertices(), points.get_texture_coordinates()
    verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
    texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2)  # uv
    # Convert unit of texcoord map (0 to 1) to pixels.
    cw, ch = color_image.shape[:2][::-1]
    v, u = (texcoords * (cw, ch) + 0.5).astype(np.uint32).T
    np.clip(u, 0, ch-1, out=u)
    np.clip(v, 0, cw-1, out=v)
    # Get the point cloud belonging to each ArUco marker.
    I = mask_image[u, v, 0]
    v_marker1 = verts[I==1,:]
    v_marker2 = verts[I==2,:]
    # Speaker sits midway between the median 3-D positions of the markers.
    p = (np.median(v_marker1, axis=0) + np.median(v_marker2, axis=0))/2
    p[2] -= 0.0095 # speaker = 9.5 mm height
    # Map into the color frame, then project onto the image plane.
    p = rs.rs2_transform_point_to_point(depth_to_color_extrin, p.tolist())
    px = rs.rs2_project_point_to_pixel(color_intrin, p)
    return p, px
# Output HDF5 file named with a timestamp, e.g. ./data/20210101_120000.calib.h5
dtime_str = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
out_filename = './data/'+ dtime_str + '.calib.h5'
# Load DAQ device name and mic #0 position (config stores it in millimeters).
with open(config_path, 'r') as f:
    usvcam_cfg = yaml.safe_load(f)
devname = usvcam_cfg['daq_dev_name']
mic0pos = np.asarray(usvcam_cfg['mic0pos'])/1000.0
##### (1) initialize analog data acquisition
daq_fs = 3500000 # 3.5M Hz
daq_nsample = int(daq_fs*0.001) # 0.001 sec
daq_n_ch = 4
daq_dev_list = devname + '/ai0, ' + devname + '/ai1, ' + devname + '/ai2, ' + devname + '/ai3'
daq_vmax = 10.0
daq_buf_max = daq_fs # buffer holds up to 1 s of samples per channel
daq_buf = np.zeros([daq_n_ch, daq_buf_max])
daq_buf_n = 0
daq_task = nidaqmx.Task()
daq_task.ai_channels.add_ai_voltage_chan(daq_dev_list, min_val=-daq_vmax, max_val=daq_vmax)
daq_task.timing.cfg_samp_clk_timing(rate = daq_fs,
                                    sample_mode = nidaqmx.constants.AcquisitionType.CONTINUOUS,
                                    active_edge = nidaqmx.constants.Edge.RISING,
                                    samps_per_chan = daq_nsample)
daq_reader = nidaqmx.stream_readers.AnalogMultiChannelReader(daq_task.in_stream)
daq_total_sample = 0
daq_running = False
def daq_callback(task_handle, every_n_samples_event_type, number_of_samples, callback_data):
    """NI-DAQmx every-N-samples callback: append the new chunk to daq_buf.

    Must return 0 (required by the nidaqmx callback protocol). Samples are
    silently dropped once the buffer is full; the main loop resets daq_buf_n
    after consuming the data.
    """
    if not daq_running:
        return 0
    global daq_total_sample
    daq_total_sample += number_of_samples
    # Use the configured channel count rather than the former hard-coded 4,
    # keeping this consistent with daq_buf's shape.
    buf = np.zeros([daq_n_ch, number_of_samples])
    daq_reader.read_many_sample(data=buf, number_of_samples_per_channel = number_of_samples)
    global daq_buf_n
    if daq_buf_n < daq_buf_max - number_of_samples:
        daq_buf[:,daq_buf_n:(daq_buf_n+number_of_samples)] = buf
        daq_buf_n += number_of_samples
    return 0
daq_task.register_every_n_samples_acquired_into_buffer_event(daq_nsample, daq_callback)
##### end of (1)
# initialize camera
pipe, pc, depth_intrin, color_intrin, depth_to_color_extrin = rs_start()
# save camera parameters
with h5py.File(out_filename, mode='w') as f:
    tool.save_intrinsics(f, '/camera_param/depth_intrin', depth_intrin)
    tool.save_intrinsics(f, '/camera_param/color_intrin', color_intrin)
    tool.save_extrinsics(f, '/camera_param/depth_to_color_extrin', depth_to_color_extrin)
    f.create_dataset('/daq_param/fs', data = daq_fs)
    f.create_dataset('/daq_param/n_ch', data = daq_n_ch)
# initialize speaker pos detection
d_markerpos_thr = 1.0  # max pixel movement per frame to count as "stationary"
dict_aruco = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
markerpos = np.zeros([2, 2])
px_checked = list()  # pixels where a calibration signal has been recorded
cnt_stay = 0         # consecutive frames the speaker has been stationary
# start daq
daq_task.start()
daq_running = True
daq_t0 = time.time()
# open monitor windows
app_winname = 'Calibrator'
cv2.namedWindow(app_winname, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(app_winname, 80, 10)
wsize = int(2048) #int(daq_fs*0.0005)
v_thr = 0.5     # voltage threshold to accept a test pulse
v_dispmax = 3   # full-scale voltage of the waveform display
img_snd = np.zeros([200, color_intrin.width, 3], np.uint8)
cnt = 0
# i_mode: 0 = speaker moving, 1 = stationary & waiting for pulse, 2 = recorded
i_mode = 0
while True:
    # get new frame
    frames = pipe.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    color_image = np.asanyarray(color_frame.get_data())
    color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
    # check speaker position
    markerpos, d_markerpos, corners, ids = update_markerpos(color_image, dict_aruco, markerpos)
    if d_markerpos > d_markerpos_thr:
        cnt_stay = 0
    else:
        cnt_stay += 1
    # require ~15 stationary frames before arming a recording
    if cnt_stay < 15:
        i_mode = 0
    elif i_mode == 0:
        i_mode = 1
    if daq_buf_n > 0:
        # consume (and reset) the audio accumulated by daq_callback
        X = copy.deepcopy(daq_buf[:, 0:daq_buf_n])
        daq_buf_n = 0
        if i_mode == 1:
            x = X[0,:]
            i_max = np.argmax(x)
            # accept the pulse only if a full window fits around its peak
            if x[i_max] > v_thr and i_max - wsize > 0 and i_max + wsize < len(x):
                cnt += 1
                x = X[:, i_max-wsize:i_max+wsize]
                img_snd = draw_snd(x, color_intrin.width, v_dispmax)
                p, px = get_speaker_pos(ids, corners, pc, color_image, color_frame, depth_frame)
                px_checked.append((int(px[0]), int(px[1])))
                # append this calibration point (image, sound, 3-D position)
                with h5py.File(out_filename, mode='a') as f:
                    group_name = '/sig_{:05}'.format(cnt)
                    f.create_dataset(group_name + '/color_image', data = color_image)
                    f.create_dataset(group_name + '/snd', data = x)
                    f.create_dataset(group_name + '/position', data = p)
                if platform.system() == "Windows":
                    winsound.Beep(1000,100)
                i_mode = 2
    draw_labels(color_image, ids, corners, px_checked, i_mode)
    img_disp = np.concatenate((color_image, img_snd))
    cv2.imshow(app_winname, img_disp)
    # exit with ESC key
    k = cv2.waitKey(1)
    if k== 27:
        break
daq_running = False
daq_task.stop()
daq_task.close()
pipe.stop()
# estimate the mic array geometry from the recorded 40 kHz test pulses
f_target = 40000
tool.calc_micpos(out_filename, f_target, mic0pos, False, False)
| [
"numpy.clip",
"pyrealsense2.rs2_project_point_to_pixel",
"cv2.imshow",
"numpy.asanyarray",
"copy.deepcopy",
"numpy.linalg.norm",
"cv2.moveWindow",
"usvcam.tool.save_intrinsics",
"numpy.mean",
"nidaqmx.Task",
"numpy.asarray",
"platform.system",
"numpy.concatenate",
"pyrealsense2.config",
... | [((4895, 4928), 'numpy.zeros', 'np.zeros', (['[daq_n_ch, daq_buf_max]'], {}), '([daq_n_ch, daq_buf_max])\n', (4903, 4928), True, 'import numpy as np\n'), ((4955, 4969), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (4967, 4969), False, 'import nidaqmx\n'), ((5369, 5436), 'nidaqmx.stream_readers.AnalogMultiChannelReader', 'nidaqmx.stream_readers.AnalogMultiChannelReader', (['daq_task.in_stream'], {}), '(daq_task.in_stream)\n', (5416, 5436), False, 'import nidaqmx\n'), ((6762, 6810), 'cv2.aruco.getPredefinedDictionary', 'aruco.getPredefinedDictionary', (['aruco.DICT_4X4_50'], {}), '(aruco.DICT_4X4_50)\n', (6791, 6810), False, 'from cv2 import aruco\n'), ((6823, 6839), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (6831, 6839), True, 'import numpy as np\n'), ((6931, 6942), 'time.time', 'time.time', ([], {}), '()\n', (6940, 6942), False, 'import time\n'), ((6994, 7043), 'cv2.namedWindow', 'cv2.namedWindow', (['app_winname', 'cv2.WINDOW_AUTOSIZE'], {}), '(app_winname, cv2.WINDOW_AUTOSIZE)\n', (7009, 7043), False, 'import cv2\n'), ((7044, 7079), 'cv2.moveWindow', 'cv2.moveWindow', (['app_winname', '(80)', '(10)'], {}), '(app_winname, 80, 10)\n', (7058, 7079), False, 'import cv2\n'), ((7156, 7204), 'numpy.zeros', 'np.zeros', (['[200, color_intrin.width, 3]', 'np.uint8'], {}), '([200, color_intrin.width, 3], np.uint8)\n', (7164, 7204), True, 'import numpy as np\n'), ((9203, 9266), 'usvcam.tool.calc_micpos', 'tool.calc_micpos', (['out_filename', 'f_target', 'mic0pos', '(False)', '(False)'], {}), '(out_filename, f_target, mic0pos, False, False)\n', (9219, 9266), True, 'import usvcam.tool as tool\n'), ((295, 312), 'platform.system', 'platform.system', ([], {}), '()\n', (310, 312), False, 'import platform\n'), ((541, 554), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (552, 554), True, 'import pyrealsense2 as rs\n'), ((569, 580), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (578, 580), True, 'import pyrealsense2 as rs\n'), ((922, 
937), 'pyrealsense2.pointcloud', 'rs.pointcloud', ([], {}), '()\n', (935, 937), True, 'import pyrealsense2 as rs\n'), ((1498, 1522), 'copy.deepcopy', 'copy.deepcopy', (['markerpos'], {}), '(markerpos)\n', (1511, 1522), False, 'import copy\n'), ((1562, 1606), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', (['color_image', 'dict_aruco'], {}), '(color_image, dict_aruco)\n', (1581, 1606), False, 'from cv2 import aruco\n'), ((1625, 1641), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (1633, 1641), True, 'import numpy as np\n'), ((2714, 2745), 'numpy.zeros', 'np.zeros', (['[200, w, 3]', 'np.uint8'], {}), '([200, w, 3], np.uint8)\n', (2722, 2745), True, 'import numpy as np\n'), ((3195, 3262), 'numpy.zeros', 'np.zeros', (['(color_image.shape[0], color_image.shape[1], 1)', 'np.uint8'], {}), '((color_image.shape[0], color_image.shape[1], 1), np.uint8)\n', (3203, 3262), True, 'import numpy as np\n'), ((3864, 3892), 'numpy.clip', 'np.clip', (['u', '(0)', '(ch - 1)'], {'out': 'u'}), '(u, 0, ch - 1, out=u)\n', (3871, 3892), True, 'import numpy as np\n'), ((3895, 3923), 'numpy.clip', 'np.clip', (['v', '(0)', '(cw - 1)'], {'out': 'v'}), '(v, 0, cw - 1, out=v)\n', (3902, 3923), True, 'import numpy as np\n'), ((4266, 4312), 'pyrealsense2.rs2_project_point_to_pixel', 'rs.rs2_project_point_to_pixel', (['color_intrin', 'p'], {}), '(color_intrin, p)\n', (4295, 4312), True, 'import pyrealsense2 as rs\n'), ((4496, 4513), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4510, 4513), False, 'import yaml\n'), ((4562, 4595), 'numpy.asarray', 'np.asarray', (["usvcam_cfg['mic0pos']"], {}), "(usvcam_cfg['mic0pos'])\n", (4572, 4595), True, 'import numpy as np\n'), ((5734, 5766), 'numpy.zeros', 'np.zeros', (['[4, number_of_samples]'], {}), '([4, number_of_samples])\n', (5742, 5766), True, 'import numpy as np\n'), ((6307, 6340), 'h5py.File', 'h5py.File', (['out_filename'], {'mode': '"""w"""'}), "(out_filename, mode='w')\n", (6316, 6340), False, 'import h5py\n'), ((6351, 
6418), 'usvcam.tool.save_intrinsics', 'tool.save_intrinsics', (['f', '"""/camera_param/depth_intrin"""', 'depth_intrin'], {}), "(f, '/camera_param/depth_intrin', depth_intrin)\n", (6371, 6418), True, 'import usvcam.tool as tool\n'), ((6423, 6490), 'usvcam.tool.save_intrinsics', 'tool.save_intrinsics', (['f', '"""/camera_param/color_intrin"""', 'color_intrin'], {}), "(f, '/camera_param/color_intrin', color_intrin)\n", (6443, 6490), True, 'import usvcam.tool as tool\n'), ((6495, 6584), 'usvcam.tool.save_extrinsics', 'tool.save_extrinsics', (['f', '"""/camera_param/depth_to_color_extrin"""', 'depth_to_color_extrin'], {}), "(f, '/camera_param/depth_to_color_extrin',\n depth_to_color_extrin)\n", (6515, 6584), True, 'import usvcam.tool as tool\n'), ((7454, 7498), 'cv2.cvtColor', 'cv2.cvtColor', (['color_image', 'cv2.COLOR_BGR2RGB'], {}), '(color_image, cv2.COLOR_BGR2RGB)\n', (7466, 7498), False, 'import cv2\n'), ((8965, 9003), 'numpy.concatenate', 'np.concatenate', (['(color_image, img_snd)'], {}), '((color_image, img_snd))\n', (8979, 9003), True, 'import numpy as np\n'), ((9008, 9041), 'cv2.imshow', 'cv2.imshow', (['app_winname', 'img_disp'], {}), '(app_winname, img_disp)\n', (9018, 9041), False, 'import cv2\n'), ((9075, 9089), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9086, 9089), False, 'import cv2\n'), ((2611, 2667), 'cv2.circle', 'cv2.circle', (['color_image', 'px', '(8)', '[0, 255, 0]'], {'thickness': '(1)'}), '(color_image, px, 8, [0, 255, 0], thickness=1)\n', (2621, 2667), False, 'import cv2\n'), ((4344, 4367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4365, 4367), False, 'import datetime\n'), ((7848, 7886), 'copy.deepcopy', 'copy.deepcopy', (['daq_buf[:, 0:daq_buf_n]'], {}), '(daq_buf[:, 0:daq_buf_n])\n', (7861, 7886), False, 'import copy\n'), ((1895, 1931), 'numpy.mean', 'np.mean', (['corners[i][0, :, :]'], {'axis': '(0)'}), '(corners[i][0, :, :], axis=0)\n', (1902, 1931), True, 'import numpy as np\n'), ((1977, 1996), 
'numpy.isnan', 'np.isnan', (['markerpos'], {}), '(markerpos)\n', (1985, 1996), True, 'import numpy as np\n'), ((2077, 2126), 'numpy.linalg.norm', 'np.linalg.norm', (['(markerpos - markerpos_pre)'], {'axis': '(1)'}), '(markerpos - markerpos_pre, axis=1)\n', (2091, 2126), True, 'import numpy as np\n'), ((4069, 4097), 'numpy.median', 'np.median', (['v_marker1'], {'axis': '(0)'}), '(v_marker1, axis=0)\n', (4078, 4097), True, 'import numpy as np\n'), ((4100, 4128), 'numpy.median', 'np.median', (['v_marker2'], {'axis': '(0)'}), '(v_marker2, axis=0)\n', (4109, 4128), True, 'import numpy as np\n'), ((7978, 7990), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (7987, 7990), True, 'import numpy as np\n'), ((3577, 3593), 'numpy.asanyarray', 'np.asanyarray', (['v'], {}), '(v)\n', (3590, 3593), True, 'import numpy as np\n'), ((3649, 3665), 'numpy.asanyarray', 'np.asanyarray', (['t'], {}), '(t)\n', (3662, 3665), True, 'import numpy as np\n'), ((8416, 8449), 'h5py.File', 'h5py.File', (['out_filename'], {'mode': '"""a"""'}), "(out_filename, mode='a')\n", (8425, 8449), False, 'import h5py\n'), ((8761, 8778), 'platform.system', 'platform.system', ([], {}), '()\n', (8776, 8778), False, 'import platform\n'), ((8813, 8837), 'winsound.Beep', 'winsound.Beep', (['(1000)', '(100)'], {}), '(1000, 100)\n', (8826, 8837), False, 'import winsound\n')] |
# -*- coding: utf-8 -*-
"""
@author: Tobias
Beschreibung:
Implementierung der DGL des EEG Modells nach "Dynamic causal models of steady-state responses",
hier jedoch in der vereinfachten Version von "A neural mass model for MEG/EEG:
coupling and neuronal dynamics", Friston, 2003.
Funktionsweise:
Die Zustandgleichungen werden mit dem RK4 oder Eulerverfahren gelöst. Um eine Simulation zu starten, müssen folgende
Startparameter übergeben werden:
u: Anregungen/Stimulus
thetha: Laterale, Vorwarts und rueckwaerts Kopplung, sowie Stimulusauswirkungsmatrix
Die Parameter x und tstep werden aus dem Runge-Kutta-Verfahren übernommen.
Pythonversion:
3.5.1
"""
import numpy as np
def stateEquations(x,u,theta,tstep):
    """
    Right-hand side of the simplified Jansen-Rit / Friston (2003) neural
    mass model for an N-node network.

    Parameters
    ----------
    x : np.ndarray, shape (12*N, T)
        State trajectory; column ``tstep`` holds the current state
        (12 state variables per node).
    u : np.ndarray, shape (n_inputs, T)
        External stimulus.
    theta : sequence
        (AL, AB, AF, C): lateral / backward / forward coupling matrices and
        the stimulus gain matrix C with shape (N, n_inputs). AL/AB/AF are
        unused in this simplified version.
    tstep : int
        Time index at which to evaluate the derivatives (used by RK4/Euler).

    Returns
    -------
    np.ndarray, shape (12*N, 1)
        Time derivative of the full state vector at time ``tstep``.
    """
    AL = theta[0]
    AB = theta[1]
    AF = theta[2]
    C = theta[3]
    # Synaptic rate constants and gains (simplified parameter set).
    k_ex = 4.
    k_in = 16.
    H_ex = 0.08
    H_in = 0.32
    # Network size: 12 state variables per node. Integer division is
    # essential here -- a float N would break the np.vsplit index tuples.
    N = x.shape[0] // 12
    # Slice the stacked state into per-population components. x* are
    # potentials, v* currents; p = pyramidal cells, s = spiny stellate cells,
    # i = inhibitory interneurons; _ex/_in = excitatory/inhibitory part.
    xp_ex = np.vsplit(x,(0,N))[1]
    vp_ex = np.vsplit(x,(N,2*N))[1]
    xp_in = np.vsplit(x,(2*N,3*N))[1]
    vp_in = np.vsplit(x,(3*N,4*N))[1]
    xp_ges = np.vsplit(x,(4*N,5*N))[1]
    xs = np.vsplit(x,(5*N,6*N))[1]
    vs = np.vsplit(x,(6*N,7*N))[1]
    xi_ex = np.vsplit(x,(7*N,8*N))[1]
    vi_ex = np.vsplit(x,(8*N,9*N))[1]
    xi_in = np.vsplit(x,(9*N,10*N))[1]
    vi_in = np.vsplit(x,(10*N,11*N))[1]
    xi_ges = np.vsplit(x,(11*N,12*N))[1]
    # States unused in the simplified model keep zero derivatives. Sized N
    # (the former hard-coded 3 only worked for three-node networks).
    xp_in_dot = np.zeros(N)
    vp_in_dot = np.zeros(N)
    xp_ges_dot = np.zeros(N)
    xi_in_dot = np.zeros(N)
    vi_in_dot = np.zeros(N)
    xi_ges_dot = np.zeros(N)
    # Pyramidal cells: excitatory (spiny) minus inhibitory (interneuron) drive.
    xp_ex_dot = vp_ex[:,tstep]
    vp_ex_dot = k_ex*H_ex*sig2(xs[:,tstep]-xi_ex[:,tstep])-2.*k_ex*vp_ex[:,tstep]-k_ex**2.*xp_ex[:,tstep]
    # Spiny stellate cells: pyramidal output plus external stimulus C*u.
    xs_dot = vs[:,tstep]
    vs_dot = k_ex*H_ex*(sig2(xp_ex[:,tstep])+np.dot(C,u[:,tstep]))-2.*k_ex*vs[:,tstep]-k_ex**2.*xs[:,tstep]
    # Inhibitory interneurons: driven by pyramidal output.
    xi_ex_dot = vi_ex[:,tstep]
    vi_ex_dot = k_in*H_in*sig2(xp_ex[:,tstep])-2.*k_in*vi_ex[:,tstep]-k_in**2.*xi_ex[:,tstep]
    # Stack all derivatives back into one (12*N, 1) column vector.
    x_dot = np.hstack([[xp_ex_dot],[vp_ex_dot],[xp_in_dot],[vp_in_dot],[xp_ges_dot],[xs_dot],[vs_dot],[xi_ex_dot],[vi_ex_dot],[xi_in_dot],[vi_in_dot],[xi_ges_dot]]).T
    return x_dot


def sig2(x):
    """Sigmoid mapping membrane potential to firing rate (Jansen & Rit
    parameters, as used in "A neural mass model for MEG/EEG")."""
    r = 0.56
    c1 = 135.
    v0 = 6.
    c2 = 0.8*135.
    e0 = 5.
    return (c1*e0/(1+np.exp(r*(v0-c2*x))))
#
| [
"numpy.hstack",
"numpy.size",
"numpy.exp",
"numpy.vsplit",
"numpy.zeros",
"numpy.dot"
] | [((1201, 1217), 'numpy.size', 'np.size', (['x[:, 0]'], {}), '(x[:, 0])\n', (1208, 1217), True, 'import numpy as np\n'), ((1572, 1592), 'numpy.vsplit', 'np.vsplit', (['x', '(0, N)'], {}), '(x, (0, N))\n', (1581, 1592), True, 'import numpy as np\n'), ((1608, 1632), 'numpy.vsplit', 'np.vsplit', (['x', '(N, 2 * N)'], {}), '(x, (N, 2 * N))\n', (1617, 1632), True, 'import numpy as np\n'), ((1653, 1681), 'numpy.vsplit', 'np.vsplit', (['x', '(2 * N, 3 * N)'], {}), '(x, (2 * N, 3 * N))\n', (1662, 1681), True, 'import numpy as np\n'), ((1691, 1719), 'numpy.vsplit', 'np.vsplit', (['x', '(3 * N, 4 * N)'], {}), '(x, (3 * N, 4 * N))\n', (1700, 1719), True, 'import numpy as np\n'), ((1737, 1765), 'numpy.vsplit', 'np.vsplit', (['x', '(4 * N, 5 * N)'], {}), '(x, (4 * N, 5 * N))\n', (1746, 1765), True, 'import numpy as np\n'), ((1777, 1805), 'numpy.vsplit', 'np.vsplit', (['x', '(5 * N, 6 * N)'], {}), '(x, (5 * N, 6 * N))\n', (1786, 1805), True, 'import numpy as np\n'), ((1812, 1840), 'numpy.vsplit', 'np.vsplit', (['x', '(6 * N, 7 * N)'], {}), '(x, (6 * N, 7 * N))\n', (1821, 1840), True, 'import numpy as np\n'), ((1855, 1883), 'numpy.vsplit', 'np.vsplit', (['x', '(7 * N, 8 * N)'], {}), '(x, (7 * N, 8 * N))\n', (1864, 1883), True, 'import numpy as np\n'), ((1895, 1923), 'numpy.vsplit', 'np.vsplit', (['x', '(8 * N, 9 * N)'], {}), '(x, (8 * N, 9 * N))\n', (1904, 1923), True, 'import numpy as np\n'), ((1942, 1971), 'numpy.vsplit', 'np.vsplit', (['x', '(9 * N, 10 * N)'], {}), '(x, (9 * N, 10 * N))\n', (1951, 1971), True, 'import numpy as np\n'), ((1981, 2011), 'numpy.vsplit', 'np.vsplit', (['x', '(10 * N, 11 * N)'], {}), '(x, (10 * N, 11 * N))\n', (1990, 2011), True, 'import numpy as np\n'), ((2029, 2059), 'numpy.vsplit', 'np.vsplit', (['x', '(11 * N, 12 * N)'], {}), '(x, (11 * N, 12 * N))\n', (2038, 2059), True, 'import numpy as np\n'), ((2224, 2235), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2232, 2235), True, 'import numpy as np\n'), ((2236, 2247), 'numpy.zeros', 
'np.zeros', (['(3)'], {}), '(3)\n', (2244, 2247), True, 'import numpy as np\n'), ((2248, 2259), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2256, 2259), True, 'import numpy as np\n'), ((2260, 2271), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2268, 2271), True, 'import numpy as np\n'), ((2272, 2283), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2280, 2283), True, 'import numpy as np\n'), ((2284, 2295), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2292, 2295), True, 'import numpy as np\n'), ((2949, 3120), 'numpy.hstack', 'np.hstack', (['[[xp_ex_dot], [vp_ex_dot], [xp_in_dot], [vp_in_dot], [xp_ges_dot], [xs_dot],\n [vs_dot], [xi_ex_dot], [vi_ex_dot], [xi_in_dot], [vi_in_dot], [xi_ges_dot]]'], {}), '([[xp_ex_dot], [vp_ex_dot], [xp_in_dot], [vp_in_dot], [xp_ges_dot],\n [xs_dot], [vs_dot], [xi_ex_dot], [vi_ex_dot], [xi_in_dot], [vi_in_dot],\n [xi_ges_dot]])\n', (2958, 3120), True, 'import numpy as np\n'), ((3354, 3379), 'numpy.exp', 'np.exp', (['(r * (v0 - c2 * x))'], {}), '(r * (v0 - c2 * x))\n', (3360, 3379), True, 'import numpy as np\n'), ((2623, 2645), 'numpy.dot', 'np.dot', (['C', 'u[:, tstep]'], {}), '(C, u[:, tstep])\n', (2629, 2645), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.0.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% {"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19"}
import numpy as np
import pandas as pd
import os
import xgboost as xgb
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from scipy import sparse
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
import scipy as sp
from sklearn import linear_model
from functools import partial
from sklearn import metrics
from collections import Counter
import json
import lightgbm as lgb
# %% {"_uuid": "77161d0931b6ce8d627967c419f813ccf4c859f8"}
# The following 3 functions have been taken from Ben Hamner's github repository
# https://github.com/benhamner/Metrics
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
    """Build the confusion matrix between two raters' integer ratings.

    Rating bounds default to the min/max observed across both raters.
    """
    assert len(rater_a) == len(rater_b)
    if min_rating is None:
        min_rating = min(rater_a + rater_b)
    if max_rating is None:
        max_rating = max(rater_a + rater_b)
    num_ratings = int(max_rating - min_rating + 1)
    matrix = [[0] * num_ratings for _ in range(num_ratings)]
    for a, b in zip(rater_a, rater_b):
        matrix[a - min_rating][b - min_rating] += 1
    return matrix
def histogram(ratings, min_rating=None, max_rating=None):
    """Count how many times each rating value occurs.

    Returns a list of length (max_rating - min_rating + 1) where entry k is
    the number of ratings equal to min_rating + k.
    """
    if min_rating is None:
        min_rating = min(ratings)
    if max_rating is None:
        max_rating = max(ratings)
    num_ratings = int(max_rating - min_rating + 1)
    counts = [0] * num_ratings
    for rating in ratings:
        counts[rating - min_rating] += 1
    return counts
def quadratic_weighted_kappa(y, y_pred):
    """Quadratic weighted kappa between two integer rating vectors.

    Measures inter-rater agreement: values range from -1 (complete
    disagreement) to 1 (complete agreement); 0 is the chance level. Both
    inputs must have the same length and are cast to int arrays; the rating
    range is taken as the min/max observed across both vectors.
    """
    rater_a = np.array(y, dtype=int)
    rater_b = np.array(y_pred, dtype=int)
    assert len(rater_a) == len(rater_b)
    # The original left min/max unset, so they are always derived here.
    min_rating = min(min(rater_a), min(rater_b))
    max_rating = max(max(rater_a), max(rater_b))
    conf_mat = confusion_matrix(rater_a, rater_b,
                                min_rating, max_rating)
    num_ratings = len(conf_mat)
    num_scored_items = float(len(rater_a))
    hist_rater_a = histogram(rater_a, min_rating, max_rating)
    hist_rater_b = histogram(rater_b, min_rating, max_rating)
    numerator = 0.0
    denominator = 0.0
    for i in range(num_ratings):
        for j in range(num_ratings):
            # Chance-level count for cell (i, j) under rater independence.
            expected_count = hist_rater_a[i] * hist_rater_b[j] / num_scored_items
            # Quadratic disagreement weight, normalized to [0, 1].
            d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
            numerator += d * conf_mat[i][j] / num_scored_items
            denominator += d * expected_count / num_scored_items
    return 1.0 - numerator / denominator
# %% {"_uuid": "f9c15a9a24576fb4c720fb4fd9db4600220896d0"}
class OptimizedRounder(object):
    """Optimize the cut points that turn continuous regression output into
    the five discrete classes, by maximising quadratic weighted kappa."""

    def __init__(self):
        # Placeholder; replaced by scipy's OptimizeResult inside fit().
        self.coef_ = 0

    @staticmethod
    def _bucketize(X, coef):
        """Assign each prediction to the index of the first threshold it
        falls below (0..3), or 4 when it is above every threshold."""
        X_p = np.copy(X)
        for idx, pred in enumerate(X_p):
            bucket = 4
            for k, cut in enumerate(coef):
                if pred < cut:
                    bucket = k
                    break
            X_p[idx] = bucket
        return X_p

    def _kappa_loss(self, coef, X, y):
        """Negative QWK of the bucketised predictions (minimised by fit)."""
        return -quadratic_weighted_kappa(y, self._bucketize(X, coef))

    def fit(self, X, y):
        """Search for the four cut points with Nelder-Mead, starting from
        the natural midpoints between adjacent classes."""
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')

    def predict(self, X, coef):
        """Apply the given cut points to X and return the class labels."""
        return self._bucketize(X, coef)

    def coefficients(self):
        """Return the optimised cut points found by fit()."""
        return self.coef_['x']
# %% {"_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0", "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"}
# Load the PetFinder train/test tables.
train = pd.read_csv('../input/train/train.csv')
test = pd.read_csv('../input/test/test.csv')
# %% {"_uuid": "b8d7ffdf6906478c6ba00bd26405088eb8a36656"}
# train[train.AdoptionSpeed==0]
# %% {"_uuid": "3b4da8ff030a20a7daaa73ea910adc106d75f95f"}
# Pull Google NL sentiment (magnitude and score) for each train pet's
# description; pets without a sentiment JSON get a sentinel value of -1.
doc_sent_mag = []
doc_sent_score = []
nf_count = 0  # how many pets had no sentiment file
for pet in train.PetID.values:
    try:
        with open('../input/train_sentiment/' + pet + '.json', 'r') as f:
            sentiment = json.load(f)
        doc_sent_mag.append(sentiment['documentSentiment']['magnitude'])
        doc_sent_score.append(sentiment['documentSentiment']['score'])
    except FileNotFoundError:
        nf_count += 1
        doc_sent_mag.append(-1)
        doc_sent_score.append(-1)
# %% {"_uuid": "936005439ef1387902251657bc9c81cd834ca68d"}
train['doc_sent_mag'] = doc_sent_mag
train['doc_sent_score'] = doc_sent_score
# %% {"_uuid": "f33296918b38649dd1cf0bf209e5295b958de7cd"}
nf_count
# %% {"_uuid": "b0736bd6f47b95d965908c236d7f0e6e3b6c4a0d"}
# Same sentiment extraction for the test set.
doc_sent_mag = []
doc_sent_score = []
nf_count = 0
for pet in test.PetID.values:
    try:
        with open('../input/test_sentiment/' + pet + '.json', 'r') as f:
            sentiment = json.load(f)
        doc_sent_mag.append(sentiment['documentSentiment']['magnitude'])
        doc_sent_score.append(sentiment['documentSentiment']['score'])
    except FileNotFoundError:
        nf_count += 1
        doc_sent_mag.append(-1)
        doc_sent_score.append(-1)
# %% {"_uuid": "99f80c6f98c101f4648cf1ab2c9af1de56862ef6"}
test['doc_sent_mag'] = doc_sent_mag
test['doc_sent_score'] = doc_sent_score
# %% {"_uuid": "d963f18679dd3e13de5697bc94ec55a334695db5"}
nf_count
# %% {"_uuid": "c354ed2372c7b019755376d6c3a004b2223f78b2"}
# Label-encode RescuerID jointly over train+test so ids stay consistent.
lbl_enc = LabelEncoder()
lbl_enc.fit(train.RescuerID.values.tolist() + test.RescuerID.values.tolist())
train.RescuerID = lbl_enc.transform(train.RescuerID.values)
test.RescuerID = lbl_enc.transform(test.RescuerID.values)
# %% {"_uuid": "228b9ac44451329b2abca99629b859955666262c"}
# TF-IDF over the pet descriptions (1-3 grams), then compress the sparse
# matrix to 180 dense components with truncated SVD.
train_desc = train.Description.fillna("none").values
test_desc = test.Description.fillna("none").values
tfv = TfidfVectorizer(min_df=3, max_features=None,
        strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
        ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1,
        stop_words = 'english')
# Fit TFIDF
tfv.fit(list(train_desc) + list(test_desc))
X = tfv.transform(train_desc)
X_test = tfv.transform(test_desc)
svd = TruncatedSVD(n_components=180)
svd.fit(X)
X = svd.transform(X)
X_test = svd.transform(X_test)
# %% {"_uuid": "612000ae8312b3f78698a41add1787c528617132"}
# Regression target: the 0-4 adoption-speed class.
y = train.AdoptionSpeed
# %% {"_uuid": "287d378245614e0d7a2bbe4b7979681dc89587dc"}
y.value_counts()
# %% {"_uuid": "b3f01cfee25b40d08a1bebb306aa280c8c0d975b"}
# Append the SVD-compressed description features to the tabular columns.
train = np.hstack((train.drop(['Name', 'Description', 'PetID', 'AdoptionSpeed'], axis=1).values, X))
test = np.hstack((test.drop(['Name', 'Description', 'PetID'], axis=1).values, X_test))
# %% {"_uuid": "c63c99a4f66179cf71ac0ca1c484881796f2bd5f"}
# Out-of-fold predictions for train, summed (later averaged) test predictions.
train_predictions = np.zeros((train.shape[0], 1))
test_predictions = np.zeros((test.shape[0], 1))
zero_test_predictions = np.zeros((test.shape[0], 1))  # NOTE(review): never used below
FOLDS = 3
print("stratified k-folds")
skf = StratifiedKFold(n_splits=FOLDS, random_state=42, shuffle=True)
skf.get_n_splits(train, y)
cv_scores = []
fold = 1
coefficients = np.zeros((FOLDS, 4))  # per-fold optimised rounding thresholds
for train_idx, valid_idx in skf.split(train, y):
    xtrain, xvalid = train[train_idx], train[valid_idx]
    xtrain_text, xvalid_text = X[train_idx], X[valid_idx]  # NOTE(review): unused
    ytrain, yvalid = y.iloc[train_idx], y.iloc[valid_idx]
    # Inverse-frequency class weights; only printed, not passed to the model.
    w = y.value_counts()
    weights = {i : np.sum(w) / w[i] for i in w.index}
    print(weights)
    #model = xgb.XGBRegressor(n_estimators=500, nthread=-1, max_depth=19, learning_rate=0.01, min_child_weight = 150, colsample_bytree=0.8)
    lgb_params = {
        'boosting_type': 'gbdt',
        'objective': 'regression',
        'learning_rate': 0.005,
        'subsample': .8,
        'colsample_bytree': 0.8,
        'min_split_gain': 0.006,
        'min_child_samples': 150,
        'min_child_weight': 0.1,
        # 'max_depth' used to appear twice (17, then 11); the duplicate key
        # was removed so the effective value (11) is stated only once.
        'n_estimators': 10000,
        'num_leaves': 80,
        'silent': -1,
        'verbose': -1,
        'max_depth': 11,
        'random_state': 2018
    }
    model = lgb.LGBMRegressor(**lgb_params)
    model.fit(
        xtrain, ytrain,
        eval_set=[(xvalid, yvalid)],
        eval_metric='rmse',
        verbose=100,
        early_stopping_rounds=100
    )
    #model.fit(xtrain, ytrain)
    valid_preds = model.predict(xvalid, num_iteration=model.best_iteration_)
    # Tune the regression->class thresholds on this validation fold.
    optR = OptimizedRounder()
    optR.fit(valid_preds, yvalid.values)
    coefficients[fold-1,:] = optR.coefficients()
    valid_p = optR.predict(valid_preds, coefficients[fold-1,:])
    print("Valid Counts = ", Counter(yvalid.values))
    print("Predicted Counts = ", Counter(valid_p))
    test_preds = model.predict(test, num_iteration=model.best_iteration_)
    scr = quadratic_weighted_kappa(yvalid.values, valid_p)
    cv_scores.append(scr)
    print("Fold = {}. QWK = {}. Coef = {}".format(fold, scr, coefficients[fold-1,:]))
    print("\n")
    train_predictions[valid_idx] = valid_preds.reshape(-1, 1)
    test_predictions += test_preds.reshape(-1, 1)
    fold += 1
# Average the summed per-fold test predictions.
test_predictions = test_predictions * 1./FOLDS
print("Mean Score: {}. Std Dev: {}. Mean Coeff: {}".format(np.mean(cv_scores), np.std(cv_scores), np.mean(coefficients, axis=0)))
# %% {"_uuid": "1cf0f817ff7048f4d77d2f153d9fdbb0fb98a237"}
# %% {"_uuid": "13e0e8cde2f271c6783b87b0ff44b63a284169c6"}
# Re-fit the rounding thresholds on the full set of out-of-fold predictions.
optR = OptimizedRounder()
train_predictions = np.array([item for sublist in train_predictions for item in sublist])
optR.fit(train_predictions, y)
coefficients = optR.coefficients()
print(quadratic_weighted_kappa(y, optR.predict(train_predictions, coefficients)))
# Apply the tuned thresholds to the averaged test predictions and flatten.
predictions = optR.predict(test_predictions, coefficients).astype(int)
predictions = [item for sublist in predictions for item in sublist]
# %% {"_uuid": "4604b0219c467ee0bfe18a4491463fba7e04779e"}
sample = pd.read_csv('../input/test/sample_submission.csv')
# %% {"_uuid": "da9b6b80b21ddeabfcef556a6cb65f38ae675b2b"}
sample.AdoptionSpeed = predictions
# %% {"_uuid": "bfc98b6c249a187cfe25cdb4448382325166f3f4"}
sample.to_csv('submission.csv', index=False)
# %% {"_uuid": "87392765ce9a0b7dc1426323bf7be8d8aad230c5"}
sample.dtypes
# %% {"_uuid": "d920a49d4554faa12474b9fa6759c9aa1ee6931c"}
sample.AdoptionSpeed.value_counts()
# %% {"_uuid": "17004d159a6e1a67620d2b2b9126e36e243e7e9d"}
sample.head()
# %% {"_uuid": "0b4d3079f7d08aee467992f5be7e65bf7d6da045"}
| [
"numpy.copy",
"sklearn.preprocessing.LabelEncoder",
"numpy.mean",
"pandas.read_csv",
"scipy.optimize.minimize",
"lightgbm.LGBMRegressor",
"sklearn.decomposition.TruncatedSVD",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.zero... | [((5671, 5710), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train/train.csv"""'], {}), "('../input/train/train.csv')\n", (5682, 5710), True, 'import pandas as pd\n'), ((5718, 5755), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test/test.csv"""'], {}), "('../input/test/test.csv')\n", (5729, 5755), True, 'import pandas as pd\n'), ((7376, 7390), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (7388, 7390), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((7758, 7960), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(3)', 'max_features': 'None', 'strip_accents': '"""unicode"""', 'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""', 'ngram_range': '(1, 3)', 'use_idf': '(1)', 'smooth_idf': '(1)', 'sublinear_tf': '(1)', 'stop_words': '"""english"""'}), "(min_df=3, max_features=None, strip_accents='unicode',\n analyzer='word', token_pattern='\\\\w{1,}', ngram_range=(1, 3), use_idf=1,\n smooth_idf=1, sublinear_tf=1, stop_words='english')\n", (7773, 7960), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((8114, 8144), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(180)'}), '(n_components=180)\n', (8126, 8144), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((8697, 8726), 'numpy.zeros', 'np.zeros', (['(train.shape[0], 1)'], {}), '((train.shape[0], 1))\n', (8705, 8726), True, 'import numpy as np\n'), ((8746, 8774), 'numpy.zeros', 'np.zeros', (['(test.shape[0], 1)'], {}), '((test.shape[0], 1))\n', (8754, 8774), True, 'import numpy as np\n'), ((8799, 8827), 'numpy.zeros', 'np.zeros', (['(test.shape[0], 1)'], {}), '((test.shape[0], 1))\n', (8807, 8827), True, 'import numpy as np\n'), ((8873, 8935), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'FOLDS', 'random_state': '(42)', 'shuffle': '(True)'}), '(n_splits=FOLDS, random_state=42, shuffle=True)\n', (8888, 8935), False, 
'from sklearn.model_selection import train_test_split, KFold, StratifiedKFold\n'), ((9002, 9022), 'numpy.zeros', 'np.zeros', (['(FOLDS, 4)'], {}), '((FOLDS, 4))\n', (9010, 9022), True, 'import numpy as np\n'), ((11249, 11318), 'numpy.array', 'np.array', (['[item for sublist in train_predictions for item in sublist]'], {}), '([item for sublist in train_predictions for item in sublist])\n', (11257, 11318), True, 'import numpy as np\n'), ((11675, 11725), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test/sample_submission.csv"""'], {}), "('../input/test/sample_submission.csv')\n", (11686, 11725), True, 'import pandas as pd\n'), ((3116, 3144), 'numpy.array', 'np.array', (['rater_a'], {'dtype': 'int'}), '(rater_a, dtype=int)\n', (3124, 3144), True, 'import numpy as np\n'), ((3159, 3187), 'numpy.array', 'np.array', (['rater_b'], {'dtype': 'int'}), '(rater_b, dtype=int)\n', (3167, 3187), True, 'import numpy as np\n'), ((9913, 9944), 'lightgbm.LGBMRegressor', 'lgb.LGBMRegressor', ([], {}), '(**lgb_params)\n', (9930, 9944), True, 'import lightgbm as lgb\n'), ((4349, 4359), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (4356, 4359), True, 'import numpy as np\n'), ((4857, 4892), 'functools.partial', 'partial', (['self._kappa_loss'], {'X': 'X', 'y': 'y'}), '(self._kappa_loss, X=X, y=y)\n', (4864, 4892), False, 'from functools import partial\n'), ((4958, 5028), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['loss_partial', 'initial_coef'], {'method': '"""nelder-mead"""'}), "(loss_partial, initial_coef, method='nelder-mead')\n", (4978, 5028), True, 'import scipy as sp\n'), ((5076, 5086), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (5083, 5086), True, 'import numpy as np\n'), ((10438, 10460), 'collections.Counter', 'Counter', (['yvalid.values'], {}), '(yvalid.values)\n', (10445, 10460), False, 'from collections import Counter\n'), ((10495, 10511), 'collections.Counter', 'Counter', (['valid_p'], {}), '(valid_p)\n', (10502, 10511), False, 'from collections import 
Counter\n'), ((11012, 11030), 'numpy.mean', 'np.mean', (['cv_scores'], {}), '(cv_scores)\n', (11019, 11030), True, 'import numpy as np\n'), ((11032, 11049), 'numpy.std', 'np.std', (['cv_scores'], {}), '(cv_scores)\n', (11038, 11049), True, 'import numpy as np\n'), ((11051, 11080), 'numpy.mean', 'np.mean', (['coefficients'], {'axis': '(0)'}), '(coefficients, axis=0)\n', (11058, 11080), True, 'import numpy as np\n'), ((6097, 6109), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6106, 6109), False, 'import json\n'), ((6826, 6838), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6835, 6838), False, 'import json\n'), ((9293, 9302), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (9299, 9302), True, 'import numpy as np\n')] |
import mediapipe as mp
import cv2
import numpy as np
import time
# constants
ml = 150  # left margin (x offset) of the on-screen toolbar, in pixels
max_x, max_y = 250+ml, 50  # bottom-right corner of the toolbar band
curr_tool = "select tool"  # name of the currently selected tool
time_init = True  # True while no dwell-selection timer is running
rad = 40  # shrinking radius of the dwell-selection indicator circle
var_inits = False  # True once a shape's start point (xii, yii) is latched
thick = 4  # stroke thickness in pixels
prevx, prevy = 0,0  # previous fingertip position for freehand drawing
#get tools function
def getTool(x, ml=150):
	"""Map the fingertip x coordinate to the name of a toolbar tool.

	The toolbar occupies the horizontal band starting at ``ml`` and is
	split into 50 px wide slots: line, rectangle, draw, circle; anything
	to the right of the circle slot selects the eraser.

	Parameters
	----------
	x : int
		X coordinate of the index fingertip, in pixels.
	ml : int, optional
		Left margin of the toolbar.  Defaults to 150, matching the
		module-level constant, so existing one-argument calls behave
		exactly as before; passing it explicitly removes the previous
		hidden dependency on the global.

	Returns
	-------
	str
		One of "line", "rectangle", "draw", "circle" or "erase".
	"""
	if x < 50 + ml:
		return "line"
	elif x < 100 + ml:
		return "rectangle"
	elif x < 150 + ml:
		return "draw"
	elif x < 200 + ml:
		return "circle"
	#elif x < 250 + ml:
		#return "mouse"
	else:
		# the disabled "mouse" slot also falls through to the eraser
		return "erase"
def index_raised(yi, y9):
	"""Return True when the middle finger is raised.

	Image y coordinates grow downward, so the fingertip (yi) sitting more
	than 40 px above the knuckle (y9) means the finger is up.
	"""
	return (y9 - yi) > 40
hands = mp.solutions.hands
# single-hand detector/tracker
hand_landmark = hands.Hands(min_detection_confidence=0.6, min_tracking_confidence=0.6, max_num_hands=1)
draw = mp.solutions.drawing_utils
# drawing tools
# NOTE(review): imread returns None when tools.png is missing — the next
# line would then fail; assumes the image sits next to the script.
tools = cv2.imread("tools.png")
tools = tools.astype('uint8')
# white (255) canvas; strokes are written into it as 0 and it is later
# applied to the frame as a bitwise mask
mask = np.ones((480, 640))*255
mask = mask.astype('uint8')
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
# Main capture loop: read webcam frames, track the index fingertip, and
# either select a tool (by dwelling over the toolbar) or draw on the mask.
while True:
	_, frm = cap.read()
	frm = cv2.flip(frm, 1)  # mirror so on-screen motion matches hand motion
	rgb = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)
	op = hand_landmark.process(rgb)
	if op.multi_hand_landmarks:
		for i in op.multi_hand_landmarks:
			draw.draw_landmarks(frm, i, hands.HAND_CONNECTIONS)
			# landmark 8 is the index fingertip; scale to the 640x480 frame
			x, y = int(i.landmark[8].x*640), int(i.landmark[8].y*480)
			if x < max_x and y < max_y and x > ml:
				# Fingertip is inside the toolbar band: dwell to select a tool.
				if time_init:
					ctime = time.time()
					time_init = False
				ptime = time.time()
				# shrinking circle visualises the dwell progress
				cv2.circle(frm, (x, y), rad, (0,255,255), 2)
				rad -= 1
				if (ptime - ctime) > 0.8:
					curr_tool = getTool(x)
					print("your current tool set to : ", curr_tool)
					time_init = True
					rad = 40
			else:
				time_init = True
				rad = 40
				if curr_tool == "draw":
					# landmark 12 (middle fingertip) vs. 9 (middle knuckle):
					# drawing is active only while the middle finger is raised
					xi, yi = int(i.landmark[12].x*640), int(i.landmark[12].y*480)
					y9 = int(i.landmark[9].y*480)
					if index_raised(yi, y9):
						cv2.line(mask, (prevx, prevy), (x, y),0, thick)
						prevx, prevy = x, y
					else:
						prevx = x
						prevy = y
				elif curr_tool == "line":
					xi, yi = int(i.landmark[12].x*640), int(i.landmark[12].y*480)
					y9 = int(i.landmark[9].y*480)
					if index_raised(yi, y9):
						if not(var_inits):
							# latch the line's start point on the first frame
							xii, yii = x, y
							var_inits = True
						# live preview on the frame while the finger is up
						cv2.line(frm, (xii, yii), (x, y), (0,255,255), thick)
					else:
						if var_inits:
							# finger lowered: commit the line to the mask
							cv2.line(mask, (xii, yii), (x, y), 0, thick)
							var_inits = False
				elif curr_tool == "rectangle":
					xi, yi = int(i.landmark[12].x*640), int(i.landmark[12].y*480)
					y9 = int(i.landmark[9].y*480)
					if index_raised(yi, y9):
						if not(var_inits):
							xii, yii = x, y
							var_inits = True
						cv2.rectangle(frm, (xii, yii), (x, y), (255,255,0), thick)
					else:
						if var_inits:
							cv2.rectangle(mask, (xii, yii), (x, y), 0, thick)
							var_inits = False
				elif curr_tool == "circle":
					xi, yi = int(i.landmark[12].x*640), int(i.landmark[12].y*480)
					y9 = int(i.landmark[9].y*480)
					if index_raised(yi, y9):
						if not(var_inits):
							xii, yii = x, y
							var_inits = True
						cv2.circle(frm, (xii, yii), int(((xii-x)**2 + (yii-y)**2)**0.5), (0,255,255), thick)
					else:
						if var_inits:
							# NOTE(review): the committed circle passes colour
							# (0,255,255) instead of 0 like line/rectangle; on the
							# single-channel mask presumably only the first value
							# (0) takes effect — confirm against cv2 behaviour.
							cv2.circle(mask, (xii, yii), int(((xii-x)**2 + (yii-y)**2)**0.5), (0,255,255), thick)
							var_inits = False
				elif curr_tool == "erase":
					xi, yi = int(i.landmark[12].x*640), int(i.landmark[12].y*480)
					y9 = int(i.landmark[9].y*480)
					if index_raised(yi, y9):
						cv2.circle(frm, (x, y), 30, (0,0,0), -1)
						# restore the mask to white (255) to undo strokes
						cv2.circle(mask, (x, y), 30, 255, -1)
	# Apply the mask: drawn (0) pixels blank out the G and B channels.
	op = cv2.bitwise_and(frm, frm, mask=mask)
	frm[:, :, 1] = op[:, :, 1]
	frm[:, :, 2] = op[:, :, 2]
	# Overlay the toolbar image and the current tool's name.
	frm[:max_y, ml:max_x] = cv2.addWeighted(tools, 0.7, frm[:max_y, ml:max_x], 0.3, 0)
	cv2.putText(frm, curr_tool, (270+ml,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 2)
	cv2.imshow("Air writing", frm)
	if cv2.waitKey(1) == 27:  # Esc quits
		cv2.destroyAllWindows()
		cap.release()
		break
| [
"cv2.rectangle",
"numpy.ones",
"cv2.flip",
"cv2.line",
"cv2.bitwise_and",
"cv2.putText",
"cv2.imshow",
"cv2.addWeighted",
"cv2.waitKey",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"time.time",
"cv2.imread"
] | [((727, 750), 'cv2.imread', 'cv2.imread', (['"""tools.png"""'], {}), "('tools.png')\n", (737, 750), False, 'import cv2\n'), ((849, 883), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (865, 883), False, 'import cv2\n'), ((789, 808), 'numpy.ones', 'np.ones', (['(480, 640)'], {}), '((480, 640))\n', (796, 808), True, 'import numpy as np\n'), ((923, 939), 'cv2.flip', 'cv2.flip', (['frm', '(1)'], {}), '(frm, 1)\n', (931, 939), False, 'import cv2\n'), ((948, 984), 'cv2.cvtColor', 'cv2.cvtColor', (['frm', 'cv2.COLOR_BGR2RGB'], {}), '(frm, cv2.COLOR_BGR2RGB)\n', (960, 984), False, 'import cv2\n'), ((3403, 3439), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frm', 'frm'], {'mask': 'mask'}), '(frm, frm, mask=mask)\n', (3418, 3439), False, 'import cv2\n'), ((3522, 3580), 'cv2.addWeighted', 'cv2.addWeighted', (['tools', '(0.7)', 'frm[:max_y, ml:max_x]', '(0.3)', '(0)'], {}), '(tools, 0.7, frm[:max_y, ml:max_x], 0.3, 0)\n', (3537, 3580), False, 'import cv2\n'), ((3583, 3677), 'cv2.putText', 'cv2.putText', (['frm', 'curr_tool', '(270 + ml, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(2)'], {}), '(frm, curr_tool, (270 + ml, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,\n 255, 255), 2)\n', (3594, 3677), False, 'import cv2\n'), ((3670, 3700), 'cv2.imshow', 'cv2.imshow', (['"""Air writing"""', 'frm'], {}), "('Air writing', frm)\n", (3680, 3700), False, 'import cv2\n'), ((3706, 3720), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3717, 3720), False, 'import cv2\n'), ((3730, 3753), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3751, 3753), False, 'import cv2\n'), ((1322, 1333), 'time.time', 'time.time', ([], {}), '()\n', (1331, 1333), False, 'import time\n'), ((1339, 1385), 'cv2.circle', 'cv2.circle', (['frm', '(x, y)', 'rad', '(0, 255, 255)', '(2)'], {}), '(frm, (x, y), rad, (0, 255, 255), 2)\n', (1349, 1385), False, 'import cv2\n'), ((1275, 1286), 'time.time', 'time.time', ([], {}), '()\n', (1284, 
1286), False, 'import time\n'), ((1753, 1801), 'cv2.line', 'cv2.line', (['mask', '(prevx, prevy)', '(x, y)', '(0)', 'thick'], {}), '(mask, (prevx, prevy), (x, y), 0, thick)\n', (1761, 1801), False, 'import cv2\n'), ((2105, 2160), 'cv2.line', 'cv2.line', (['frm', '(xii, yii)', '(x, y)', '(0, 255, 255)', 'thick'], {}), '(frm, (xii, yii), (x, y), (0, 255, 255), thick)\n', (2113, 2160), False, 'import cv2\n'), ((2195, 2239), 'cv2.line', 'cv2.line', (['mask', '(xii, yii)', '(x, y)', '(0)', 'thick'], {}), '(mask, (xii, yii), (x, y), 0, thick)\n', (2203, 2239), False, 'import cv2\n'), ((2505, 2565), 'cv2.rectangle', 'cv2.rectangle', (['frm', '(xii, yii)', '(x, y)', '(255, 255, 0)', 'thick'], {}), '(frm, (xii, yii), (x, y), (255, 255, 0), thick)\n', (2518, 2565), False, 'import cv2\n'), ((2600, 2649), 'cv2.rectangle', 'cv2.rectangle', (['mask', '(xii, yii)', '(x, y)', '(0)', 'thick'], {}), '(mask, (xii, yii), (x, y), 0, thick)\n', (2613, 2649), False, 'import cv2\n'), ((3310, 3352), 'cv2.circle', 'cv2.circle', (['frm', '(x, y)', '(30)', '(0, 0, 0)', '(-1)'], {}), '(frm, (x, y), 30, (0, 0, 0), -1)\n', (3320, 3352), False, 'import cv2\n'), ((3356, 3393), 'cv2.circle', 'cv2.circle', (['mask', '(x, y)', '(30)', '(255)', '(-1)'], {}), '(mask, (x, y), 30, 255, -1)\n', (3366, 3393), False, 'import cv2\n')] |
'''
Plotter that collects all plotting functionality in one place.
Where available, it reuses the simple plotting helpers included in the different classes
and merges them together to create more meaningful plots.
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from warnings import warn
#from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
#from .electric import align_two_meters
import matplotlib as mpl
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
from nilmtk import TimeFrameGroup
import itertools
from nilmtk import TimeFrameGroup, TimeFrame
import matplotlib.dates as mdates
#############################################################
#region Nilm Plotting
def plot_overall_power_vs_disaggregation(main_meter, disaggregations, verbose = False):
    """ The plot for validating the NILM algorithm.
    Plots the disaggregation below the overall powerflow together with
    orientation lines.  Only the last 48 hours of the available data
    are shown.
    Parameters
    ----------
    main_meter : nilmtk.Electric or None
        The meter with the overall powerflow, plotted in the top subplot.
        If None, only the disaggregated flows are plotted.
    disaggregations : nilmtk.MeterGroup
        MeterGroup with all the disaggregated meters.
    verbose : bool
        Whether additional output is printed.
    Returns
    -------
    fig : matplotlib.figure.Figure
        The newly created figure.
    """
    # Create the main figure
    fig = plt.figure() #, tight_layout=True)
    # Create one bigger subplot for the overall power
    timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
    # restrict the plot window to the final 48 hours of data
    timeframe.start = timeframe.end - pd.Timedelta("48h")
    ax = fig.add_subplot(4,1,1)
    if not main_meter is None:
        main_meter.plot(ax, timeframe=timeframe, sample_period=2)
        ax.set_xlim([timeframe.start, timeframe.end])
        ax.set_xlabel('Time', fontsize=12)
        ax.set_title('Disaggregation', fontsize=14)
        #ax.set_ylabel('{0}'.format(i), fontsize=12)
    # Create multiple smaller ones for the disaggregated flows
    n = len(disaggregations.meters)
    # grid rows: 1.5 rows per meter, top third reserved for the main plot
    sections = math.ceil(n / 2 * 3)
    size_main_figure = math.ceil(sections / 3)
    for i, dis in enumerate(disaggregations.meters):
        if verbose:
            print(str(i) + "/" + str(n))
        sub_ax = fig.add_subplot(sections, 1, size_main_figure+i+1)
        dis.plot(sub_ax,timeframe=timeframe, legend = False, sample_period = 2)
        # share both axes with the main plot so the flows line up visually
        ax.get_shared_x_axes().join(ax, sub_ax)
        ax.get_shared_y_axes().join(ax, sub_ax)
        sub_ax.set_ylim(ax.get_ylim())
        if i != 2:
            ax.set_ylabel("")
        #sub_ax.set_xlim([timeframe.start, timeframe.end])
    # Link the axis
    plt.setp(ax.get_xticklabels(), visible=True)
    #fig.subplots_adjust(hspace=0.0)
    return fig
def plot_phases(building, interval = pd.Timedelta("1d"), verbose = False):
    ''' Plot the three mains phases (sitemeters) of a building.
    Parameters
    ----------
    building: nilmtk.building
        The building whose three sitemeters are plotted.
    interval: pd.Timedelta
        Length of the window to plot, starting at the first sample.
    verbose: bool
        Whether to print progress output.
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly created figure.
    '''
    fig = plt.figure()
    start = building.elec.sitemeters()[1].get_timeframe().start
    window = TimeFrameGroup([TimeFrame(start=start, end = start + interval)])
    phase_flows = []
    for phase in (1, 2, 3):
        if verbose:
            print("Load {0}/{1}".format(phase,3))
        meter = building.elec.sitemeters()[phase]
        phase_flows.append(meter.power_series_all_data(sections=window))
    combined = pd.concat(phase_flows, axis = 1)
    combined.columns = ['Phase 1', 'Phase 2', 'Phase 3']
    combined.plot(colors=['r', 'g', 'b'], ax = fig.add_subplot(111))
    return fig
def plot_stackplot(disaggregations, total_power = None, stacked = True, verbose = True):
    """ Plots a stackplot, which stacks all disaggregation results on top of each other.
    Only the last 48 hours of the available data are plotted.
    Parameters
    ----------
    disaggregations: nilmtk.MeterGroup
        Remember appliance 0 is the rest powerflow
    total_power: nilmtk.Electric (optional)
        Just for comparison an additional plot with the whole powerflow.
        Should be the same as all the diaggregated meters stacked together.
    stacked: bool
        Whether the appliance areas are stacked on top of each other
        (True) or drawn overlapping (False).
    verbose: bool
        Whether to print additional information
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plot figure
    """
    timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
    # restrict to the last 48 hours of data
    timeframe.start = timeframe.end - pd.Timedelta("48h")
    # Additional total power plot if demanded
    fig = plt.figure()
    if not total_power is None:
        ax = fig.add_subplot(211)
        total_power.power_series_all_data(sections=[timeframe], sample_period=2).plot(ax = ax)
        ax = fig.add_subplot(212)
    else:
        ax = fig.add_subplot(111)
    # The stacked plot
    # NOTE(review): 'all' shadows the Python builtin of the same name.
    all = pd.DataFrame(disaggregations.meters[0].power_series_all_data(sections=[timeframe], sample_period=2).rename('Rest'))
    for i, dis in enumerate(disaggregations.meters):
        if i == 0:
            continue  # meter 0 ('Rest') already seeded the frame above
        name = "Appliance " + str(i)
        if verbose:
            print(name)
        all[name] = dis.power_series_all_data(sections=[timeframe], sample_period=2)
    all = all.fillna(0)
    all.plot.area(ax = ax, stacked = stacked)
    # NOTE(review): 'nonposx' is deprecated in newer matplotlib (use
    # 'nonpositive'); a log scale on a time axis looks unintended — verify.
    ax.set_xscale("log", nonposx='clip')
    ax.set_xlim([timeframe.start, timeframe.end])
    return fig
def plot_segments(transitions, steady_states, ax = None):
    '''
    This function takes the events and plots the segments.
    Parameters
    ----------
    transitions: pd.DataFrame
        The transitions with the 'segment' field set; must contain a
        'starts' column, which is used to align segments in time.
    steady_states: pd.DataFrame
        The steady states (indexed by time) with an 'active average'
        column.  Note: this frame is modified in place ('segment' and
        'starts' columns are added and it is sorted by index).
    ax: matplotlib.axes.Axes, optional
        An axis object to plot into.  If None, a new figure with a
        single axis is created.
    Returns
    -------
    fig: matplotlib.figure.Figure
        The figure the segments were plotted into.
    '''
    # Prepare plot: only create a new figure when no target axis is given.
    # (The 'ax' argument is honoured now instead of being overwritten.)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    else:
        fig = ax.get_figure()
    #ax.xaxis.axis_date()
    # Sort segments to always plot lower segment on top
    steady_states['segment'] = transitions.set_index('starts')['segment']
    steady_states.sort_index(ascending = True, inplace = True)
    steady_states['starts'] = steady_states.index
    firsts = steady_states.groupby('segment').first()
    firsts = firsts.sort_values('starts', ascending = False).index
    # Fill_between does the trick
    for cur in firsts:
        rows = steady_states[steady_states['segment'] == cur]
        ax.fill_between(rows.index.to_pydatetime(), rows['active average'].values, 0, step='post')
    ax.set_xlabel("Time", fontsize = "12")
    ax.set_ylabel("Power [W]", fontsize = "12")
    ax.autoscale_view()
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    return fig
def plot_evaluation_assignments(sec_ground_truth, sec_disaggregations, assignments,
                                gt_meters = None, timeframe = None, verbose = False):
    '''
    This function plots the assignments of the preassignment during the NILM evaluation.
    The plot has three columns:
        - The original disaggregated meters
        - The ground_truth meters
        - the combination of the meters assigned to the ground truth meters.
    Parameters
    ----------
    sec_ground_truth: [nilmtk.TimeFrameGroup]
        The on-sections of the ground truth.
    sec_disaggregations: [nilmtk.TimeFrameGroup]
        The on sections of the disaggregated meters. Some of these purely
        disaggregated meters might belong to the same ground truth appliance.
    assignments: dict(int -> [int])
        A dictionary with its entries mapping from a number of the ground_truth meters to a
        list of disaggregation meters. This enables the combination of the disaggregation meters.
    gt_meters: nilmtk.Electric
        If set, the meters are used to get the captions for the plots
    timeframe: nilmtk.Timeframe
        A timeframe for which the plot shall be drawn. If kept None, the whole timeframe
        of the ground_truth is plotted.
    verbose: bool
        If additional output is generated
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plotted figure
    '''
    fig = plt.figure(figsize=(50,50)) #, tight_layout=True)
    if timeframe is None:
        timeframe = TimeFrameGroup(map(lambda cur: cur.get_timeframe(), sec_ground_truth)).get_timeframe()
    limit = TimeFrameGroup([timeframe])
    overall_length = max([len(sec_ground_truth), len(sec_disaggregations)])
    # Column 1: the raw disaggregated meters, before assignment
    for i, cur_nonzero in enumerate(sec_disaggregations):
        ax = fig.add_subplot(overall_length,3,1+i*3)
        limited = cur_nonzero.intersection(limit)
        if verbose:
            print(str(i) + ": " + str(len(limited._df)))
        limited.plot(ax=ax)
        ax.set_xlim([timeframe.start, timeframe.end])
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.setp(ax.get_yticklabels(), visible=False)
        ax.set_xlabel("Time")
        ax.set_ylabel("Activation")
    # Column 2: the ground-truth appliances
    for i, cur_nonzero in enumerate(sec_ground_truth):
        ax = fig.add_subplot(overall_length,3,2+i*3)
        limited = cur_nonzero.intersection(limit)
        if verbose:
            print(str(i) + ": " + str(len(limited._df)))
        limited.plot(ax=ax)
        if not gt_meters is None:
            ax.set_title(gt_meters.meters[i].appliances[0].metadata['type'])
        ax.set_xlim([timeframe.start, timeframe.end])
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.setp(ax.get_yticklabels(), visible=False)
        ax.set_xlabel("Time")
        ax.set_ylabel("Activation")
    # Column 3: the union of the disaggregated meters assigned to each
    # ground-truth appliance
    for i in range(len(sec_ground_truth)):
        cur_nonzero = TimeFrameGroup.union_many(map(lambda a: sec_disaggregations[a], assignments[i]))
        ax = fig.add_subplot(overall_length,3,3+i*3)
        limited = cur_nonzero.intersection(limit)
        if verbose:
            print(str(i) + ": " + str(len(limited._df)))
        limited.plot(ax=ax)
        ax.set_title(str(assignments[i]))
        ax.set_xlim([timeframe.start, timeframe.end])
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.setp(ax.get_yticklabels(), visible=False)
        ax.set_xlabel("Time")
        ax.set_ylabel("Activation")
    return fig
def plot_multiphase_event(original_powerflows, original_adapted, multiphase_events, section,
                          surrounding = 30, col = "active transition", plot_freq = "2s", verbose = False):
    ''' This function is used to plot multiphase events.
    It shows how the multiphase events are cut out and put inside separate poweflows.
    Parameters
    ----------
    original_powerflows: [pd.DataFrame]
        The original transients as DataFrame one per phase
    original_adapted: [pd.DataFrame]
        The new original phases where the multiphaseevents
        are removed.
    multiphase_events:
        The separated transients appearing in multiple phases.
    section: nilmtk.TimeFrame
        The section which shall be plotted.
    surrounding: int or pd.Timedelta
        Minutes in the original power flows plottet
        arround the interesting section.
    col: index
        Which is the power transient index
    plot_freq: str
        The frequency with which the powerflows are resampled before being plotted.
    verbose: bool
        Whether to print additional information
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plotted figure
    '''
    if not type(surrounding) is pd.Timedelta:
        surrounding = pd.Timedelta(minutes=surrounding)
    fig = plt.figure(figsize=(50,50)) #, tight_layout=True)
    plots_per_column = 3
    # one row per input group: original, adapted, multiphase events
    all_plots = [original_powerflows, original_adapted, multiphase_events]
    for i, cur_plot in enumerate(all_plots):
        for j, powerflow in enumerate(cur_plot):
            ax = fig.add_subplot(plots_per_column,3,i+j*3+1)
            limited = powerflow.loc[section.start-surrounding:section.end+surrounding][col]
            if verbose:
                print("Plot {0}:{1}".format(i,j))
            # anchor both window edges at zero so the cumsum below starts
            # and ends inside the plotted range
            limited.loc[section.start-surrounding] = 0
            limited.loc[section.end+surrounding] = 0
            # integrate the transients into a power level and resample
            limited = limited.cumsum().resample(plot_freq).ffill()
            limited.plot(ax=ax)
            ax.set_xlim([section.start-surrounding, section.end+surrounding])
            plt.setp(ax.get_xticklabels(), visible=False)
            plt.setp(ax.get_yticklabels(), visible=False)
    return fig
#endregion
################################################################
#region Cluster plotting
def plot_clustering(clusterers, elements, columns_to_project,
                    subtype_column="subtype", appliance_column="appliance", confidence_column = "confident",
                    print_confidence=True, filter=False, **plot_args):
    '''
    Plot clustered points in 2d or 3d space.  For K-means and GMM the
    cluster borders are plotted as well.
    Parameters
    ----------
    clusterers: {str -> scikit.GaussianMixture}
        The dictionary of available clusterers as built
        within the eventbased_combination clusterer.
    elements: pd.DataFrame
        The dataframe containing the elements to plot.
    columns_to_project: [index,...]
        The indices to project to.  Two entries produce a 2d plot,
        three entries a 3d plot.
    subtype_column: index
        The column defining the entry in the clusterers.
    appliance_column: index
        The column defining the appliance.
    confidence_column: index
        The column defining if the prediction was confident.
    print_confidence: int
        If not zero, the confidence interval which will be plotted.
        (Currently not yet supported for 3d plots.)
    filter: bool
        Whether only the confident points shall be plotted.
    plot_args: dict
        Additional arguments forwarded to the plot function,
        e.g. point size: s=0.1
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plot figure
    '''
    # Build the inputs shared by the 2d and 3d plotting backends.
    projected = elements[columns_to_project].values
    _, subtype_ids = np.unique(elements[subtype_column].values, return_inverse=True)
    # one label per (subtype, appliance) combination
    combined_labels = subtype_ids * 100 + elements[appliance_column].astype(int).values
    confident = elements[confidence_column].values
    # Dispatch on the dimensionality of the projection.
    dims = len(columns_to_project)
    if dims == 2:
        return plot_clustering_2d(clusterers, projected, combined_labels, confident, columns_to_project, print_confidence, filter, **plot_args)
    if dims == 3:
        return plot_clustering_3d(clusterers, projected, combined_labels, confident, columns_to_project, print_confidence, filter, **plot_args)
    raise Exception("Only 2d or 3d plot possible.")
def plot_clustering_2d(clusterers, data, labels, confidence, columns, print_confidence = 1, filter = False, **plot_kwargs):
    '''
    Plotting of points in 2d space. For K-means and gmm the borders are also plotted.
    Parameters
    ----------
    clusterers: {str -> scikit.GaussianMixture}
        The dictionary of relevant clusterers as built
        within the eventbased_combination clusterer.
    data: np.ndarray(,2)
        The data points to plot
    labels: np.ndarray(int)
        The labels the datapoints belong to
    confidence: np.ndarray(bool)
        Bool whether the point is seen as confident
    columns: str
        The columns which are printed as label of the axis.
    print_confidence: int
        If not zero, the confidence interval (in sigma) which will be plotted.
    filter: bool
        Whether only the confident points shall be plotted.
    plot_kwargs: dict
        Additional arguments forwarded to the plot function.
        Eg point size: s=0.1
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plot figure
    '''
    fig = plt.figure()
    plot_kwargs.setdefault('cmap', plt.cm.Set1)
    color = plot_kwargs["cmap"](labels)
    # Respect a caller-supplied point size; previously it was overwritten,
    # contradicting the documented plot_kwargs behaviour.
    plot_kwargs.setdefault("s", 5)
    # Confident datapoints fully opaque, unconfident ones half transparent.
    plt.scatter(data[confidence][:,0], data[confidence][:,1], c=color[confidence], alpha = 1, **plot_kwargs)
    plt.xlabel("Power Transition [W]") #columns[0]
    plt.ylabel("Power Peak [W]") #columns[1]
    if not filter:
        plt.scatter(data[~confidence][:,0], data[~confidence][:,1], c=color[~confidence], alpha = 0.5, **plot_kwargs)
    if not print_confidence:
        return fig
    # Print the confidence intervals if demanded
    for key in clusterers:
        for mean, covar in zip(clusterers[key].means_, clusterers[key].covariances_):
            v, w = np.linalg.eigh(covar)
            # Scale the eigenvalues to the requested sigma interval
            # (full axis lengths of the ellipse).
            v = print_confidence * np.sqrt(2.) * np.sqrt(v)
            w = w[0,:] / np.linalg.norm(w[0,:])
            # arctan2 avoids a division by zero for vertical principal axes;
            # the ellipse is symmetric under 180 deg, so the result is identical.
            angle = np.degrees(np.arctan2(w[1], w[0]))
            # Matplotlib >= 3.6 requires the rotation as keyword argument.
            ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180. + angle, color='black', fill = False, linewidth = 3)
            ell.set_clip_box(fig.bbox)
            ell.set_alpha(0.5)
            fig.axes[0].add_artist(ell)
    plt.show()
    return fig
def plot_clustering_3d(clusterers, data, labels, confidence, columns, print_confidence = True, filter = False, **plot_kwargs):
    '''
    Plotting of points in 3d space with optional colouring after assignments.
    clusterers: {str -> scikit.GaussianMixture}
        The dictionary of available clusterers as built
        within the eventbased_combination clusterer.
    data: np.ndarray(,3)
        The data points to plot
    labels: np.ndarray(int)
        The labels the datapoints belong to
    confidence: np.ndarray(bool)
        Bool whether the point is seen as confident
    columns: str
        The columns which are printed as label of the axis.
        Not yet used as plot labels.
    print_confidence: int
        If not zero, the confidence interval which will be plotted.
        Not yet supported for 3D!!!
    filter: bool
        Whether only the confident points shall be plotted.
    plot_kwargs: dict
        Additional arguments forwarded to the plot function.
        Eg point size: s=0.1
    Returns
    -------
    ax: matplotlib.figure.Axes
        The newly plot axes. One has to have a look how to make it a figure.
    '''
    plot_kwargs.setdefault('cmap', plt.cm.Set1)
    color = plot_kwargs["cmap"](labels)
    ax = plt.axes(projection='3d')
    # Bugfix: the original `s=plot+plot_kwargs["s"]` raised a NameError
    # (`plot` is undefined) and a KeyError when no "s" was passed. Fall back
    # to the small size already used for the unconfident points.
    point_size = plot_kwargs.get("s", 0.1)
    ax.scatter(data[confidence][:, 0], data[confidence][:, 1], data[confidence][:, 2],
               c=color[confidence], s=point_size)
    if not filter:
        ax.scatter(data[~confidence][:, 0], data[~confidence][:, 1], data[~confidence][:, 2],
                   c=color[~confidence], alpha=0.5, s=0.1)
    return ax
def plot_correlation_matrix(corr_base, corr_disag, corr_base_clustered, corr_disag_clustered, corr_all, out_dir="F:/"):
    '''
    Plot each correlation matrix as a heatmap and write it to an SVG file.
    Parameters
    ----------
    corr_base, corr_disag, corr_base_clustered, corr_disag_clustered, corr_all : pd.DataFrame
        Correlation matrices. Frames containing a 'cluster' column are
        sorted by cluster before plotting and the helper column is dropped.
    out_dir : str
        Prefix/directory where the SVG files are written. Defaults to the
        historical hard-coded location for backward compatibility.
    '''
    corrs = [corr_base, corr_disag, corr_base_clustered, corr_disag_clustered, corr_all]
    names = ["corr_households", "corr_disag", "corr_households_clustered", "corr_disag_clustered", "corr_all"]
    # Order the clustered frames by cluster id and drop the helper column.
    for cur in range(len(corrs)):
        if 'cluster' in corrs[cur].columns:
            corrs[cur] = corrs[cur].sort_values("cluster").drop(columns=['cluster'])
    # NOTE: the original recomputed `columns` once from corr_base and threw
    # the result away; that dead code is removed here.
    for name, corr in zip(names, corrs):
        # For ('hour', h)/('weekday', d) multi-level columns label the ticks
        # with the second level, otherwise with the first.
        columns = [cur[1] if cur[0] in ('hour', 'weekday') else cur[0]
                   for cur in corr.columns]
        sns.set_style("white")
        plt.figure()
        cax = plt.matshow(corr.values.astype(float), cmap = plt.cm.seismic, vmin=-1, vmax=1, aspect='auto')
        plt.colorbar(cax)
        plt.xticks(range(len(columns)), columns)
        plt.savefig(out_dir + name + ".svg", bbox_inches='tight')
def plot_correlations(orig_corrs, disag_corrs, cluster_corrs):
    '''
    Returns three columns of plots with the correlations of dimensions as Bar Diagram.
    For a single household!
    It takes place in three columns:
    1. The correlation of the original
    2. The correlation of the disaggregations
    3. The correlation of the clusters
    This plot shall visualize the quality of correlation within each diagram.
    Parameters
    ----------
    orig_corrs: pd.DataFrame
        The correlations for the different clusters.
        Row per correlation dimension.
    disag_corrs: pd.DataFrame
        The metergroup of disaggregations.
        Row per correlation dimension.
    cluster_corrs: pd.DataFrame
        The correlations of each of the clustered powerflows.
    Returns
    -------
    fig: matplotlib.figure.Figure
        The figure holding one subplot per (column, row) combination.
    '''
    fig = plt.figure()
    plots_per_column = len(disag_corrs)
    # One plot column per correlation source: original, disaggregated, clustered.
    # Bugfix: the loop previously iterated an undefined name `all_plots`.
    corrs = [orig_corrs, disag_corrs, cluster_corrs]
    for i, corr in enumerate(corrs):
        # Use the row position rather than the index label so the subplot
        # numbering works for non-integer dataframe indices as well.
        for j, (_, cur_corr) in enumerate(corr.iterrows()):
            ax = fig.add_subplot(plots_per_column, 3, i + j * 3 + 1)
            cur_corr.plot(ax=ax)
    # Bugfix: the figure was built but never returned.
    return fig
#endregion
################################################################
#region Forecast plotting
def plot_ext_data_relation(load, external_data, interval = None, smoothing = None):
    '''
    Plots a two scale plot for the load and the external data.
    Like this one can compare influence of external data to the powerflow.
    Parameters
    ---------
    load: pd.Series
        The load profile in KW
    external_data: pd.Series
        The external data one wants to compare the load to.
    interval: pd.Timeframe
        Optional definition of the region to plot.
    smoothing: int
        If set, the data is smoothed to the rolling average across the
        given dates. Reasonable since the correlation sometimes gets
        only visible within long term periods.
    Returns
    -------
    fig: matplotlib.figure.Figure
        The newly plot figure with two y-axes.
    '''
    # Bugfix: the original tested an undefined `ax`, so the figure creation
    # never executed and `fig` was unbound at the return statement.
    fig, ax1 = plt.subplots()
    # Honour the documented (previously ignored) interval restriction.
    if interval is not None:
        load = load[interval.start:interval.end]
        external_data = external_data[interval.start:interval.end]
    if smoothing is not None:
        # pd.Series.rolling_mean was removed from pandas long ago;
        # rolling(...).mean() is the supported API.
        load = load.rolling(smoothing).mean()
    load.plot(ax=ax1)
    # Second y-axis so both curves are readable despite different scales.
    ax2 = ax1.twinx()
    if smoothing is not None:
        external_data = external_data.rolling(smoothing).mean()
    external_data.plot(ax=ax2)
    return fig
def plot_forecast(forecasts, original_load, interval = None, ax = None, additional_data = None):
    ''' Plots the forecast along the real powerflow
    This is the main function to be used for visualization of forecasting quality.
    Parameters
    ---------
    forecasts: pd.DataFrame
        The forecasted values, one column per forecast.
    original_load: pd.Series
        The original load. Series contains at least interval of forecast.
    interval: pd.Timeframe
        Optional definition of the region to plot. If nothing given
        the timeframe of the forecasts dataframe is used, padded by
        24 hours on each side.
    ax: matplotlib.axes.Axes
        Optional axes to draw into; a new figure is created otherwise.
    additional_data: [pd.DataFrame] or [pd.Series]
        Additional data, which can be plotted. For example the residuals of
        the ARIMA model or the external power.
    Returns
    -------
    ax: matplotlib.axes.Axes
        The ax object with the forecasts.
    '''
    # Avoid the shared mutable default-argument pitfall ({} in the original).
    if additional_data is None:
        additional_data = []
    if interval is None:
        load = original_load[forecasts.index[0]-pd.Timedelta("24h"):forecasts.index[-1]+pd.Timedelta("24h")]
    else:
        load = original_load[interval.start:interval.end]
    if ax is None:
        fig, ax = plt.subplots()
    # One main plot and a distinct marker per forecast column.
    load.plot(ax=ax, linewidth=2)
    marker = itertools.cycle((',', '+', '.', 'o', '*'))
    for forecast in forecasts:
        forecasts[forecast].plot(ax=ax, marker=next(marker))
    # Finally plot additional data if available. (The original additionally
    # referenced the undefined names `model_fit` and `pyplot` here, which
    # raised a NameError as soon as additional data was passed.)
    for additional in additional_data:
        additional.plot(ax=ax, kind='kde')
    return ax
#endregion
################################################################
#region Elaborate Powerflow plotting
def plot_powerflow_from_events(events_list=None, column = 'active transition'):
    """
    Plot the cumulated powerflow reconstructed from event transitions.
    Currently not in use.
    Parameters
    ----------
    events_list : list of pd.DataFrame, optional
        Event frames; the `column` values of each frame are cumulated
        and plotted into the same axes.
    column : str
        Column holding the power transitions.
    Returns
    -------
    ax : matplotlib.axes.Axes
        The axes holding the cumulated curves.
    """
    # Avoid the mutable default-argument pitfall (was `events_list=[]`).
    if events_list is None:
        events_list = []
    fig, ax = plt.subplots(figsize=(8, 6))
    for events in events_list:
        events[column].cumsum().plot(ax=ax)
    return ax
#endregion
################################################################
#region Originally available plots
def plot_series(series, ax=None, fig=None, date_format='%d/%m/%y %H:%M:%S', tz_localize=True, **plot_kwargs):
    """Faster plot function
    Function is about 5 times faster than pd.Series.plot().
    Parameters
    ----------
    series : pd.Series
        Data to plot
    ax : matplotlib Axes, optional
        If not provided then will generate our own axes.
    fig : matplotlib.figure.Figure
    date_format : str, optional, default='%d/%m/%y %H:%M:%S'
    tz_localize : boolean, optional, default is True
        if False then display UTC times.
    plot_kwargs:
        Can also use all **plot_kwargs expected by `ax.plot`
    """
    # Nothing to draw for missing or empty data.
    if series is None or len(series) == 0:
        return ax
    ax = ax if ax is not None else plt.gca()
    fig = fig if fig is not None else plt.gcf()
    # Convert the datetime index to matplotlib ordinals in one vectorized
    # call instead of letting pandas do it point by point.
    ordinals = _to_ordinalf_np_vectorized(series.index.to_pydatetime())
    ax.plot(ordinals, series, **plot_kwargs)
    if tz_localize:
        tz = series.index.tzinfo
    else:
        tz = None  # display UTC times
    ax.xaxis.set_major_formatter(mdates.DateFormatter(date_format, tz=tz))
    ax.set_ylabel('watts')
    fig.autofmt_xdate()
    return ax
def plot_pairwise_heatmap(df, labels, edgecolors='w', cmap=mpl.cm.RdYlBu_r, log=False):
    """
    Plots a heatmap of a 'square' df
    Rows and columns are same and the values in this dataframe
    correspond to the computation b/w row,column.
    This plot can be used for plotting pairwise_correlation
    or pairwise_mutual_information or any method which works
    similarly
    Parameters
    ----------
    df : pd.DataFrame
        Square dataframe of pairwise values.
    labels : list
        Tick labels for both axes.
    edgecolors : str
        Colour of the lines between the heatmap cells.
    cmap : matplotlib colormap
    log : bool
        If True, colour on a logarithmic scale.
    """
    width = len(df.columns) / 4
    height = len(df.index) / 4
    fig, ax = plt.subplots(figsize=(width, height))
    heatmap = ax.pcolor(
        df,
        edgecolors=edgecolors,  # put white lines between squares in heatmap
        cmap=cmap,
        norm=mpl.colors.LogNorm() if log else None)
    ax.autoscale(tight=True)  # get rid of whitespace in margins of heatmap
    ax.set_aspect('equal')  # ensure heatmap cells are square
    ax.xaxis.set_ticks_position('top')  # put column labels at the top
    # Turn off ticks. Bugfix: the 'off' strings were removed from
    # tick_params in modern Matplotlib; booleans are required.
    ax.tick_params(bottom=False, top=False, left=False, right=False)
    plt.yticks(np.arange(len(df.index)) + 0.5, labels)
    plt.xticks(np.arange(len(df.columns)) + 0.5, labels, rotation=90)
    # ugliness from http://matplotlib.org/users/tight_layout_guide.html
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", "3%", pad="1%")
    plt.colorbar(heatmap, cax=cax)
#endregion
################################################################
# region Configurations
def latexify(fig_width=None, fig_height=None, columns=1, fontsize=10):
    """Set up matplotlib's RC params for LaTeX plotting.
    Call this before plotting a figure.
    <NAME>: 8.267 x 11.692 inches
    Parameters
    ----------
    fig_width : float, optional, inches
    fig_height : float, optional, inches
    columns : {1, 2}
    fontsize : int
        Font size applied to labels, titles, legend and ticks.
    """
    # Adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples.
    # Width and max height in inches for IEEE journals taken from
    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
    assert columns in [1, 2]
    if fig_width is None:
        # NOTE(review): common latexify snippets use `columns == 1` for the
        # narrow 3.39in width; confirm the inverted condition is intended.
        fig_width = 3.39 if columns == 2 else 6.9  # width in inches
    if fig_height is None:
        golden_mean = (math.sqrt(5) - 1.0) / 2.0  # aesthetic ratio
        fig_height = fig_width * golden_mean  # height in inches
    MAX_HEIGHT_INCHES = 8.0
    if fig_height > MAX_HEIGHT_INCHES:
        print("WARNING: fig_height too large:", fig_height,
              "so will reduce to", MAX_HEIGHT_INCHES, "inches.")
        fig_height = MAX_HEIGHT_INCHES
    # Apply one font size to every text element and fix the figure size.
    mpl.rcParams.update({
        'axes.labelsize': fontsize,
        'axes.titlesize': fontsize,
        'font.size': fontsize,
        'legend.fontsize': fontsize,
        'xtick.labelsize': fontsize,
        'ytick.labelsize': fontsize,
        'figure.figsize': [fig_width, fig_height],
        'font.family': 'serif',
    })
def format_axes(ax, spine_color='gray'):
    """Give *ax* a minimal two-spine look and return it.

    Hides the top/right spines, colours the left/bottom ones, and moves
    the ticks outward onto the remaining spines.
    """
    for hidden in ('top', 'right'):
        ax.spines[hidden].set_visible(False)
    for kept in ('left', 'bottom'):
        spine = ax.spines[kept]
        spine.set_color(spine_color)
        spine.set_linewidth(0.5)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_tick_params(direction='out', color=spine_color)
    # matplotlib.pyplot.tight_layout()
    return ax
#endregion
| [
"numpy.sqrt",
"nilmtk.TimeFrameGroup",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"seaborn.set_style",
"numpy.linalg.norm",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"pandas.DataFrame",
"numpy.linalg.eigh"... | [((1329, 1341), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1339, 1341), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2026), 'math.ceil', 'math.ceil', (['(n / 2 * 3)'], {}), '(n / 2 * 3)\n', (2015, 2026), False, 'import math\n'), ((2050, 2073), 'math.ceil', 'math.ceil', (['(sections / 3)'], {}), '(sections / 3)\n', (2059, 2073), False, 'import math\n'), ((2740, 2758), 'pandas.Timedelta', 'pd.Timedelta', (['"""1d"""'], {}), "('1d')\n", (2752, 2758), True, 'import pandas as pd\n'), ((3170, 3182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3180, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3546, 3570), 'pandas.concat', 'pd.concat', (['flows'], {'axis': '(1)'}), '(flows, axis=1)\n', (3555, 3570), True, 'import pandas as pd\n'), ((4566, 4578), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4576, 4578), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5904), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5902, 5904), True, 'import matplotlib.pyplot as plt\n'), ((8218, 8246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(50, 50)'}), '(figsize=(50, 50))\n', (8228, 8246), True, 'import matplotlib.pyplot as plt\n'), ((8413, 8440), 'nilmtk.TimeFrameGroup', 'TimeFrameGroup', (['[timeframe]'], {}), '([timeframe])\n', (8427, 8440), False, 'from nilmtk import TimeFrameGroup, TimeFrame\n'), ((11672, 11700), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(50, 50)'}), '(figsize=(50, 50))\n', (11682, 11700), True, 'import matplotlib.pyplot as plt\n'), ((14225, 14288), 'numpy.unique', 'np.unique', (['elements[subtype_column].values'], {'return_inverse': '(True)'}), '(elements[subtype_column].values, return_inverse=True)\n', (14234, 14288), True, 'import numpy as np\n'), ((15916, 15928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15926, 15928), True, 'import matplotlib.pyplot as plt\n'), ((16075, 16184), 'matplotlib.pyplot.scatter', 
'plt.scatter', (['data[confidence][:, 0]', 'data[confidence][:, 1]'], {'c': 'color[confidence]', 'alpha': '(1)'}), '(data[confidence][:, 0], data[confidence][:, 1], c=color[\n confidence], alpha=1, **plot_kwargs)\n', (16086, 16184), True, 'import matplotlib.pyplot as plt\n'), ((16184, 16218), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Power Transition [W]"""'], {}), "('Power Transition [W]')\n", (16194, 16218), True, 'import matplotlib.pyplot as plt\n'), ((16236, 16264), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Peak [W]"""'], {}), "('Power Peak [W]')\n", (16246, 16264), True, 'import matplotlib.pyplot as plt\n'), ((17179, 17189), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17187, 17189), True, 'import matplotlib.pyplot as plt\n'), ((18434, 18459), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (18442, 18459), True, 'import matplotlib.pyplot as plt\n'), ((21048, 21060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21058, 21060), True, 'import matplotlib.pyplot as plt\n'), ((24646, 24688), 'itertools.cycle', 'itertools.cycle', (["(',', '+', '.', 'o', '*')"], {}), "((',', '+', '.', 'o', '*'))\n", (24661, 24688), False, 'import itertools\n'), ((25299, 25327), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (25311, 25327), True, 'import matplotlib.pyplot as plt\n'), ((27447, 27484), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (27459, 27484), True, 'import matplotlib.pyplot as plt\n'), ((28245, 28268), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (28264, 28268), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((28328, 28358), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {'cax': 'cax'}), '(heatmap, cax=cax)\n', (28340, 28358), True, 'import matplotlib.pyplot as plt\n'), 
((30034, 30061), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['params'], {}), '(params)\n', (30053, 30061), True, 'import matplotlib as mpl\n'), ((1540, 1559), 'pandas.Timedelta', 'pd.Timedelta', (['"""48h"""'], {}), "('48h')\n", (1552, 1559), True, 'import pandas as pd\n'), ((4489, 4508), 'pandas.Timedelta', 'pd.Timedelta', (['"""48h"""'], {}), "('48h')\n", (4501, 4508), True, 'import pandas as pd\n'), ((5967, 5976), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5974, 5976), True, 'import matplotlib.pyplot as plt\n'), ((6735, 6764), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (6755, 6764), True, 'import matplotlib.dates as mdates\n'), ((11627, 11660), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': 'surrounding'}), '(minutes=surrounding)\n', (11639, 11660), True, 'import pandas as pd\n'), ((16311, 16425), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[~confidence][:, 0]', 'data[~confidence][:, 1]'], {'c': 'color[~confidence]', 'alpha': '(0.5)'}), '(data[~confidence][:, 0], data[~confidence][:, 1], c=color[~\n confidence], alpha=0.5, **plot_kwargs)\n', (16322, 16425), True, 'import matplotlib.pyplot as plt\n'), ((19742, 19764), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (19755, 19764), True, 'import seaborn as sns\n'), ((19773, 19785), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19783, 19785), True, 'import matplotlib.pyplot as plt\n'), ((19903, 19920), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cax'], {}), '(cax)\n', (19915, 19920), True, 'import matplotlib.pyplot as plt\n'), ((20076, 20132), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('F:/' + names + '.svg')"], {'bbox_inches': '"""tight"""'}), "('F:/' + names + '.svg', bbox_inches='tight')\n", (20087, 20132), True, 'import matplotlib.pyplot as plt\n'), ((22783, 22797), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22795, 22797), True, 'import 
matplotlib.pyplot as plt\n'), ((24527, 24541), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (24539, 24541), True, 'import matplotlib.pyplot as plt\n'), ((24890, 24919), 'pandas.DataFrame', 'pd.DataFrame', (['model_fit.resid'], {}), '(model_fit.resid)\n', (24902, 24919), True, 'import pandas as pd\n'), ((26616, 26625), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (26623, 26625), True, 'import matplotlib.pyplot as plt\n'), ((26661, 26670), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26668, 26670), True, 'import matplotlib.pyplot as plt\n'), ((26871, 26911), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['date_format'], {'tz': 'tz'}), '(date_format, tz=tz)\n', (26891, 26911), True, 'import matplotlib.dates as mdates\n'), ((3283, 3327), 'nilmtk.TimeFrame', 'TimeFrame', ([], {'start': 'start', 'end': '(start + interval)'}), '(start=start, end=start + interval)\n', (3292, 3327), False, 'from nilmtk import TimeFrameGroup, TimeFrame\n'), ((16652, 16673), 'numpy.linalg.eigh', 'np.linalg.eigh', (['covar'], {}), '(covar)\n', (16666, 16673), True, 'import numpy as np\n'), ((16865, 16887), 'numpy.arctan', 'np.arctan', (['(w[1] / w[0])'], {}), '(w[1] / w[0])\n', (16874, 16887), True, 'import numpy as np\n'), ((16969, 17066), 'matplotlib.patches.Ellipse', 'mpl.patches.Ellipse', (['mean', 'v[0]', 'v[1]', '(180.0 + angle)'], {'color': '"""black"""', 'fill': '(False)', 'linewidth': '(3)'}), "(mean, v[0], v[1], 180.0 + angle, color='black', fill=\n False, linewidth=3)\n", (16988, 17066), True, 'import matplotlib as mpl\n'), ((16723, 16733), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (16730, 16733), True, 'import numpy as np\n'), ((16822, 16845), 'numpy.linalg.norm', 'np.linalg.norm', (['w[0, :]'], {}), '(w[0, :])\n', (16836, 16845), True, 'import numpy as np\n'), ((27632, 27652), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (27650, 27652), True, 'import matplotlib as mpl\n'), ((29217, 29229), 'math.sqrt', 
'math.sqrt', (['(5)'], {}), '(5)\n', (29226, 29229), False, 'import math\n'), ((16709, 16721), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (16716, 16721), True, 'import numpy as np\n'), ((24360, 24379), 'pandas.Timedelta', 'pd.Timedelta', (['"""24h"""'], {}), "('24h')\n", (24372, 24379), True, 'import pandas as pd\n'), ((24400, 24419), 'pandas.Timedelta', 'pd.Timedelta', (['"""24h"""'], {}), "('24h')\n", (24412, 24419), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
import os
from math import ceil
from typing import Any, Dict, List, Union
import h5py
import nibabel as nib
import numpy as np
import torch
import tqdm
from dipy.io.stateful_tractogram import Space
from dipy.io.streamline import load_tractogram
from dipy.tracking.streamline import length as slength
from nibabel.affines import apply_affine
from nibabel.streamlines import ArraySequence, Tractogram
from dwi_ml.data.dataset.single_subject_containers import (
MRIDataVolume, SubjectData)
from dwi_ml.data.processing.space.world_to_vox import convert_world_to_vox
from dwi_ml.experiment.timer import Timer
from dwi_ml.tracking.step_tracker import (StepTracker,
PreInitializedStepTracker)
from dwi_ml.tracking.utils import StoppingFlags, count_flags
class TrackerAbstract(object):
    """Use an existing model to track on a new subject.

    Loads the subject's input volume from an HDF5 dataset, prepares seeds
    (from a binary mask or an existing tractogram) and grows streamlines
    with a trained model, forward and then backwards from each seed.
    """
    def __init__(self, model: torch.nn.Module,
                 dataset_file: str, subject_id: str,
                 seeding_file: str, tracking_file: str = None,
                 rng_seed: int = 1234, n_seeds_per_voxel: int = 1,
                 seeding_ref: str = None, use_gpu: bool = True,
                 add_neighborhood: float = None,
                 add_previous_dir: bool = False):
        """
        Parameters
        ----------
        model: torch.nn.Module
            Trained model that will generate the tracking directions.
            MUST HAVE A sample_tracking_directions FUNCTION AND A eval FUNCTION.
        dataset_file : str
            Path to dataset file (.hdf5).
        subject_id : str
            Subject id to fetch from the dataset file.
        seeding_file : str
            Path to seeding mask (.nii.gz) or seeding streamlines (.tck|.trk).
        tracking_file : str (optional)
            Path to binary tracking mask (.nii.gz).
        rng_seed : int
            Random seed.
        n_seeds_per_voxel : int
            Number of random seeds to be initialized in each voxel of the
            seeding mask.
        seeding_ref : str
            Path to reference file necessary if `seeding_file` is a tractogram.
        use_gpu : bool
            If False, do not use the GPU for tracking.
        add_neighborhood : float (optional)
            If given, add neighboring information to the input signal at the
            given distance in each axis (in mm).
        add_previous_dir : bool (optional)
            If given, add the streamline previous direction to the input signal.
        """
        self.rng = np.random.RandomState(seed=rng_seed)
        self.n_seeds_per_voxel = n_seeds_per_voxel
        self.use_gpu = use_gpu
        # Load subject
        with h5py.File(dataset_file, 'r') as hdf_file:
            assert subject_id in list(hdf_file.keys()), \
                "Subject {} not found in file: {}".format(subject_id,
                                                          dataset_file)
            self.tracto_data = SubjectData.create_from_hdf(hdf_file[subject_id])
        self.tracto_data.input_dv.subject_id = subject_id
        # Decide by file extension whether seeds come from a mask or a tractogram.
        ext = os.path.splitext(seeding_file)[1]
        if ext in ['.nii', '.gz']:
            # Load seeding mask (should be a binary image)
            seeding_image = nib.load(seeding_file)
            self.seeding = seeding_image.get_fdata()
            self.affine_seedsvox2rasmm = seeding_image.affine
        elif ext in ['.tck', '.trk']:
            # Load seeding streamlines
            if seeding_ref is None:
                raise ValueError("A reference is necessary to load a "
                                 "tractogram; please use --seeding-ref")
            seeding_ref_img = nib.load(seeding_ref)
            # Streamline seeds are brought to voxel space, centered on voxels.
            seeding_tractogram = load_tractogram(seeding_file, seeding_ref_img,
                                               to_space=Space.VOX)
            seeding_tractogram.to_center()
            self.seeding = seeding_tractogram.streamlines
            self.affine_seedsvox2rasmm = seeding_ref_img.affine
        # Load tracking mask if given
        self.tracking_dv = None
        if tracking_file:
            tracking_image = nib.load(tracking_file)
            self.tracking_dv = MRIDataVolume(
                data=tracking_image.get_fdata(dtype=np.float32),
                affine_vox2rasmm=tracking_image.affine)
        # Compute affine to bring seeds into DWI voxel space
        # affine_seedsvox2dwivox : seeds voxel space => rasmm space => dwi voxel space
        affine_rasmm2dwivox = np.linalg.inv(
            self.tracto_data.input_dv.affine_vox2rasmm)
        self.affine_seedsvox2dwivox = np.dot(
            affine_rasmm2dwivox, self.affine_seedsvox2rasmm)
        # Other parameters
        self.add_neighborhood = add_neighborhood
        self.add_previous_dir = add_previous_dir
        # Put the model in evaluation mode for inference-only tracking.
        self.model = model
        self.model.eval()
    @staticmethod
    def _load_model(model_path: str, hyperparameters: Dict[str, Any]):
        # Subclasses must provide the model-specific loading logic.
        raise NotImplementedError
    @staticmethod
    def _run_tracker(tracker: StepTracker, seeds: Union[np.ndarray, List]) \
            -> Tractogram:
        """Runs a tracker, starting from the provided seeds, and returns the
        final tractogram.
        Parameters
        ----------
        tracker : StepTracker
            Tracker that will grow streamlines
        seeds : np.ndarray with shape (n_streamlines, 3) or (n_streamlines,
            n_points, 3), or list of np.ndarray with shape (n_points, 3)
            Initial starting points or initial streamlines.
        Returns
        -------
        tractogram : nib.Tractogram
            Tractogram containing all streamlines and stopping information.
        """
        tractogram = None
        tracker.initialize(seeds)
        length_stopping_criterion = \
            tracker.stopping_criteria[StoppingFlags.STOPPING_LENGTH]
        # Grow all streamlines step by step under no_grad (inference only);
        # the progress bar upper bound is the maximum allowed step count.
        with torch.no_grad(), \
            tqdm.tqdm(range(length_stopping_criterion.keywords['max_nb_steps'])
                      ) as pbar:
            for _ in pbar:
                tracker.grow_step()
                # Collect streamlines that stopped during this step.
                if tractogram is None:
                    tractogram = tracker.harvest()
                else:
                    tractogram += tracker.harvest()
                if tracker.is_finished_tracking():
                    pbar.close()
                    break
        return tractogram
    @staticmethod
    def _get_tracking_seeds_from_mask(mask: np.ndarray,
                                      affine_seedsvox2dwivox: np.ndarray,
                                      n_seeds_per_voxel: int,
                                      rng: np.random.RandomState) -> np.ndarray:
        """Given a binary seeding mask, get seeds in DWI voxel space using the
        provided affine.
        Parameters
        ----------
        mask : np.ndarray with shape (X,Y,Z)
            Binary seeding mask.
        affine_seedsvox2dwivox : np.ndarray
            Affine to bring the seeds from their voxel space to the input voxel
            space.
        n_seeds_per_voxel : int
            Number of seeds to generate in each voxel
        rng : np.random.RandomState
            Random number generator
        Returns
        -------
        seeds : np.ndarray with shape (N_seeds, 3)
            Position of each initial tracking seeds
        """
        seeds = []
        indices = np.array(np.where(mask)).T
        for idx in indices:
            # Jitter each seed uniformly inside its voxel, then map the
            # positions into the DWI voxel space.
            seeds_in_seeding_voxel = idx + rng.uniform(-0.5, 0.5,
                                                       size=(n_seeds_per_voxel, 3))
            seeds_in_dwi_voxel = nib.affines.apply_affine(affine_seedsvox2dwivox,
                                                     seeds_in_seeding_voxel)
            seeds.extend(seeds_in_dwi_voxel)
        seeds = np.array(seeds, dtype=np.float32)
        return seeds
    def track(self, max_length: float, batch_size: int = None,
              step_size: float = None, max_angle: float = None,
              min_length: float = None) -> Tractogram:
        """Track a whole tractogram from the seeds. First run forward,
        then backwards using the streamlines that were tracked.
        Parameters
        ----------
        max_length : float
            Maximum streamline length in mm.
        batch_size : int (optional)
            Number of streamlines that should be tracked at the same time.
            If None, try with a full batch and divide by 2 until it fits into
            memory.
        step_size : float (optional)
            Step size in mm. If None, use the model outputs without scaling.
        max_angle : float
            Maximum angle in degrees that two consecutive segments can have
            between each other (corresponds to the maximum half-cone angle).
        min_length : float
            Minimum streamline length in mm.
            (If given, streamlines shorter than this length will be discarded).
        Returns
        -------
        tractogram : nib.Tractogram
            Tractogram with all the tracked streamlines.
        """
        if isinstance(self.seeding, np.ndarray):
            # Get random seeds from seeding mask
            seeds = self._get_tracking_seeds_from_mask(
                self.seeding, self.affine_seedsvox2dwivox,
                self.n_seeds_per_voxel, self.rng)
        else:
            # Use streamlines as seeds
            seeds = self.seeding
        # Compute minimum length voxel-wise
        if min_length:
            # NOTE(review): `convert_mm2vox` is not imported in this module
            # (the header imports `convert_world_to_vox`); confirm this name
            # resolves at runtime.
            min_length_vox = convert_mm2vox(
                min_length, self.tracto_data.input_dv.affine_vox2rasmm)
        # Initialize trackers
        # When the seeds are full streamlines, the forward pass must start
        # from pre-initialized streamlines as well.
        if isinstance(seeds, (list, ArraySequence)):
            forward_tracker_cls = PreInitializedStepTracker
        else:
            forward_tracker_cls = StepTracker
        forward_step_tracker = forward_tracker_cls(model=self.model,
                                                   input_dv=self.tracto_data.input_dv,
                                                   mask_dv=self.tracking_dv,
                                                   step_size=step_size,
                                                   add_neighborhood=self.add_neighborhood,
                                                   add_previous_dir=self.add_previous_dir,
                                                   max_length=max_length,
                                                   max_angle=max_angle,
                                                   use_gpu=self.use_gpu)
        # The backwards pass always restarts from the (flipped) forward
        # streamlines, so it is always pre-initialized.
        backwards_step_tracker = PreInitializedStepTracker(model=self.model,
                                                           input_dv=self.tracto_data.input_dv,
                                                           mask_dv=self.tracking_dv,
                                                           step_size=step_size,
                                                           add_neighborhood=self.add_neighborhood,
                                                           add_previous_dir=self.add_previous_dir,
                                                           max_length=max_length,
                                                           max_angle=max_angle,
                                                           use_gpu=self.use_gpu)
        if step_size:
            print("Tracking using a step size of {:.3f} mm "
                  "({:.3f} voxels)".format(step_size, forward_step_tracker.step_size_vox))
        else:
            print("Tracking using the model output without scaling")
        print("Tracking from {} seeds".format(len(seeds)))
        if batch_size is None:
            batch_size = len(seeds)
        # Try batch sizes until it fits into memory (divide by 1.25 if it
        # doesn't and try again)
        while True:
            print("Trying a batch size of {} streamlines".format(batch_size))
            n_iter = int(ceil(len(seeds) / batch_size))
            try:
                tractogram = None
                for i, start in enumerate(range(0, len(seeds), batch_size)):
                    end = start + batch_size
                    print("Iteration {} of {}".format(i + 1, n_iter))
                    # Forward tracking
                    with Timer("Forward pass", newline=True, color='green'):
                        batch_tractogram = self._run_tracker(forward_step_tracker,
                                                             seeds[start:end])
                    # Report why the forward streamlines stopped.
                    stopping_flags = batch_tractogram.data_per_streamline['stopping_flags'].astype(np.uint8)
                    print("Forward pass stopped because of - mask: {:,}\t "
                          "curvature: {:,}\t length: {:,}".format(
                              count_flags(stopping_flags, StoppingFlags.STOPPING_MASK),
                              count_flags(stopping_flags, StoppingFlags.STOPPING_CURVATURE),
                              count_flags(stopping_flags, StoppingFlags.STOPPING_LENGTH)))
                    # Backwards tracking
                    # Flip streamlines to initialize backwards tracker
                    streamlines_init = [s[::-1] for s in batch_tractogram.streamlines]
                    with Timer("Backwards pass", newline=True, color='green'):
                        batch_tractogram = self._run_tracker(backwards_step_tracker,
                                                             streamlines_init)
                    stopping_flags = batch_tractogram.data_per_streamline['stopping_flags'].astype(np.uint8)
                    print("Backwards pass stopped because of - mask: {:,}\t "
                          "curvature: {:,}\t length: {:,}".format(
                              count_flags(stopping_flags, StoppingFlags.STOPPING_MASK),
                              count_flags(stopping_flags, StoppingFlags.STOPPING_CURVATURE),
                              count_flags(stopping_flags, StoppingFlags.STOPPING_LENGTH)))
                    # Filter short streamlines
                    if min_length:
                        lengths_vox = slength(batch_tractogram.streamlines)
                        to_keep = np.where(lengths_vox > min_length_vox)
                        print("Removing {} streamlines that were under {} mm".format(
                            len(batch_tractogram) - len(to_keep[0]), min_length))
                        # Make a copy because indexing an ArraySequence creates
                        # a "view" with the same _data property, which causes problems
                        # when extending tractograms
                        batch_tractogram = batch_tractogram[to_keep].copy()
                    if tractogram is None:
                        tractogram = batch_tractogram
                    else:
                        tractogram += batch_tractogram
                return tractogram
            except MemoryError:
                # Batch did not fit into RAM; shrink and retry from scratch.
                print("Not enough memory for a batch size of {} streamlines".format(batch_size))
                batch_size = int(batch_size / 1.25)
                if batch_size <= 0:
                    raise MemoryError("Not enough memory! You might need a "
                                      "bigger graphics card!")
            except RuntimeError as e:
                # Torch raises RuntimeError for GPU out-of-memory; shrink the
                # batch for those, re-raise anything else.
                if "out of memory" in e.args[0] or "CuDNN error" in e.args[0]:
                    print("Not enough memory for a batch size of {} streamlines"
                          .format(batch_size))
                    batch_size = int(batch_size / 1.25)
                    if batch_size <= 0:
                        raise MemoryError("Not enough memory! You might need a "
                                          "bigger graphics card!")
                else:
                    raise e
| [
"nibabel.affines.apply_affine",
"nibabel.load",
"numpy.where",
"dwi_ml.tracking.step_tracker.PreInitializedStepTracker",
"os.path.splitext",
"h5py.File",
"dwi_ml.tracking.utils.count_flags",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"dwi_ml.experiment.timer.Timer",
"dipy.io.streamline.lo... | [((2573, 2609), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'rng_seed'}), '(seed=rng_seed)\n', (2594, 2609), True, 'import numpy as np\n'), ((4543, 4600), 'numpy.linalg.inv', 'np.linalg.inv', (['self.tracto_data.input_dv.affine_vox2rasmm'], {}), '(self.tracto_data.input_dv.affine_vox2rasmm)\n', (4556, 4600), True, 'import numpy as np\n'), ((4652, 4707), 'numpy.dot', 'np.dot', (['affine_rasmm2dwivox', 'self.affine_seedsvox2rasmm'], {}), '(affine_rasmm2dwivox, self.affine_seedsvox2rasmm)\n', (4658, 4707), True, 'import numpy as np\n'), ((7843, 7876), 'numpy.array', 'np.array', (['seeds'], {'dtype': 'np.float32'}), '(seeds, dtype=np.float32)\n', (7851, 7876), True, 'import numpy as np\n'), ((10557, 10848), 'dwi_ml.tracking.step_tracker.PreInitializedStepTracker', 'PreInitializedStepTracker', ([], {'model': 'self.model', 'input_dv': 'self.tracto_data.input_dv', 'mask_dv': 'self.tracking_dv', 'step_size': 'step_size', 'add_neighborhood': 'self.add_neighborhood', 'add_previous_dir': 'self.add_previous_dir', 'max_length': 'max_length', 'max_angle': 'max_angle', 'use_gpu': 'self.use_gpu'}), '(model=self.model, input_dv=self.tracto_data.\n input_dv, mask_dv=self.tracking_dv, step_size=step_size,\n add_neighborhood=self.add_neighborhood, add_previous_dir=self.\n add_previous_dir, max_length=max_length, max_angle=max_angle, use_gpu=\n self.use_gpu)\n', (10582, 10848), False, 'from dwi_ml.tracking.step_tracker import StepTracker, PreInitializedStepTracker\n'), ((2729, 2757), 'h5py.File', 'h5py.File', (['dataset_file', '"""r"""'], {}), "(dataset_file, 'r')\n", (2738, 2757), False, 'import h5py\n'), ((3003, 3052), 'dwi_ml.data.dataset.single_subject_containers.SubjectData.create_from_hdf', 'SubjectData.create_from_hdf', (['hdf_file[subject_id]'], {}), '(hdf_file[subject_id])\n', (3030, 3052), False, 'from dwi_ml.data.dataset.single_subject_containers import MRIDataVolume, SubjectData\n'), ((3130, 3160), 'os.path.splitext', 
'os.path.splitext', (['seeding_file'], {}), '(seeding_file)\n', (3146, 3160), False, 'import os\n'), ((3286, 3308), 'nibabel.load', 'nib.load', (['seeding_file'], {}), '(seeding_file)\n', (3294, 3308), True, 'import nibabel as nib\n'), ((4173, 4196), 'nibabel.load', 'nib.load', (['tracking_file'], {}), '(tracking_file)\n', (4181, 4196), True, 'import nibabel as nib\n'), ((5913, 5928), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5926, 5928), False, 'import torch\n'), ((7651, 7723), 'nibabel.affines.apply_affine', 'nib.affines.apply_affine', (['affine_seedsvox2dwivox', 'seeds_in_seeding_voxel'], {}), '(affine_seedsvox2dwivox, seeds_in_seeding_voxel)\n', (7675, 7723), True, 'import nibabel as nib\n'), ((3711, 3732), 'nibabel.load', 'nib.load', (['seeding_ref'], {}), '(seeding_ref)\n', (3719, 3732), True, 'import nibabel as nib\n'), ((3766, 3832), 'dipy.io.streamline.load_tractogram', 'load_tractogram', (['seeding_file', 'seeding_ref_img'], {'to_space': 'Space.VOX'}), '(seeding_file, seeding_ref_img, to_space=Space.VOX)\n', (3781, 3832), False, 'from dipy.io.streamline import load_tractogram\n'), ((7422, 7436), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7430, 7436), True, 'import numpy as np\n'), ((12218, 12268), 'dwi_ml.experiment.timer.Timer', 'Timer', (['"""Forward pass"""'], {'newline': '(True)', 'color': '"""green"""'}), "('Forward pass', newline=True, color='green')\n", (12223, 12268), False, 'from dwi_ml.experiment.timer import Timer\n'), ((13166, 13218), 'dwi_ml.experiment.timer.Timer', 'Timer', (['"""Backwards pass"""'], {'newline': '(True)', 'color': '"""green"""'}), "('Backwards pass', newline=True, color='green')\n", (13171, 13218), False, 'from dwi_ml.experiment.timer import Timer\n'), ((14015, 14052), 'dipy.tracking.streamline.length', 'slength', (['batch_tractogram.streamlines'], {}), '(batch_tractogram.streamlines)\n', (14022, 14052), True, 'from dipy.tracking.streamline import length as slength\n'), ((14087, 14125), 'numpy.where', 
'np.where', (['(lengths_vox > min_length_vox)'], {}), '(lengths_vox > min_length_vox)\n', (14095, 14125), True, 'import numpy as np\n'), ((12710, 12766), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_MASK'], {}), '(stopping_flags, StoppingFlags.STOPPING_MASK)\n', (12721, 12766), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n'), ((12792, 12853), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_CURVATURE'], {}), '(stopping_flags, StoppingFlags.STOPPING_CURVATURE)\n', (12803, 12853), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n'), ((12879, 12937), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_LENGTH'], {}), '(stopping_flags, StoppingFlags.STOPPING_LENGTH)\n', (12890, 12937), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n'), ((13664, 13720), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_MASK'], {}), '(stopping_flags, StoppingFlags.STOPPING_MASK)\n', (13675, 13720), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n'), ((13746, 13807), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_CURVATURE'], {}), '(stopping_flags, StoppingFlags.STOPPING_CURVATURE)\n', (13757, 13807), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n'), ((13833, 13891), 'dwi_ml.tracking.utils.count_flags', 'count_flags', (['stopping_flags', 'StoppingFlags.STOPPING_LENGTH'], {}), '(stopping_flags, StoppingFlags.STOPPING_LENGTH)\n', (13844, 13891), False, 'from dwi_ml.tracking.utils import StoppingFlags, count_flags\n')] |
import os
import numpy as np
from PIL import Image
class Calibration:
    """Coordinate transforms for a single KITTI frame.

    Wraps the matrices parsed from a KITTI calibration file and converts
    points between the LiDAR frame, the rectified camera frame and the
    image plane of the left color camera (P2).
    """
    def __init__(self, calibs):
        # Camera projection matrices, each 3 x 4.
        self.P0 = calibs['P0']
        self.P1 = calibs['P1']
        self.P2 = calibs['P2']
        self.P3 = calibs['P3']
        # Rectifying rotation (3 x 3) and rigid-body transforms (3 x 4).
        self.R0 = calibs['R0_rect']
        self.V2C = calibs['Tr_velo_to_cam']
        self.I2V = calibs['Tr_imu_to_velo']

    # -- intrinsics of camera 2, derived lazily from P2 --

    @property
    def cu(self):
        """Principal point, x (pixels)."""
        return self.P2[0, 2]

    @property
    def cv(self):
        """Principal point, y (pixels)."""
        return self.P2[1, 2]

    @property
    def fu(self):
        """Focal length, x (pixels)."""
        return self.P2[0, 0]

    @property
    def fv(self):
        """Focal length, y (pixels)."""
        return self.P2[1, 1]

    @property
    def tx(self):
        """x-translation of camera 2 recovered from the last column of P2."""
        return self.P2[0, 3] / (-self.fu)

    @property
    def ty(self):
        """y-translation of camera 2 recovered from the last column of P2."""
        return self.P2[1, 3] / (-self.fv)

    @property
    def stereo_baseline(self):
        """Baseline term between cameras 2 and 3 (difference of x offsets)."""
        return self.P2[0, 3] - self.P3[0, 3]

    def cart_to_hom(self, pts):
        """Append a homogeneous coordinate: (N, d) -> (N, d + 1)."""
        ones = np.ones((pts.shape[0], 1), dtype=np.float32)
        return np.hstack((pts, ones))

    def lidar_to_rect(self, pts_lidar):
        """LiDAR coordinates (N, 3) -> rectified camera coordinates (N, 3)."""
        pts_hom = self.cart_to_hom(pts_lidar)
        return pts_hom @ self.V2C.T @ self.R0.T

    def rect_to_img(self, pts_rect):
        """Project rectified-frame points (N, 3) onto the image plane.

        Returns:
            pts_img: (N, 2) pixel coordinates.
            pts_rect_depth: (N,) depth of each point in the camera frame.
        """
        pts_hom = self.cart_to_hom(pts_rect)
        projected = pts_hom @ self.P2.T
        pts_img = (projected[:, 0:2].T / pts_hom[:, 2]).T
        pts_rect_depth = projected[:, 2] - self.P2.T[3, 2]
        return pts_img, pts_rect_depth

    def lidar_to_img(self, pts_lidar):
        """LiDAR coordinates (N, 3) -> image plane, via the rectified frame."""
        pts_rect = self.lidar_to_rect(pts_lidar)
        return self.rect_to_img(pts_rect)

    def img_to_rect(self, u, v, depth_rect):
        """Back-project pixels with known depth into the rectified frame.

        Args:
            u, v: (N,) pixel coordinates.
            depth_rect: (N,) depths in the rectified camera frame.
        Returns:
            (N, 3) points in rectified camera coordinates.
        """
        x = (u - self.cu) * depth_rect / self.fu + self.tx
        y = (v - self.cv) * depth_rect / self.fv + self.ty
        cols = (x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1))
        return np.concatenate(cols, axis=1)

    def depthmap_to_rect(self, depth_map):
        """Lift a dense depth map (H, W) into rectified-frame points.

        Returns:
            pts_rect: (H*W, 3) points.
            x_idxs, y_idxs: (H*W,) pixel indices of each point.
        """
        x_range = np.arange(0, depth_map.shape[1])
        y_range = np.arange(0, depth_map.shape[0])
        grid_x, grid_y = np.meshgrid(x_range, y_range)
        x_idxs = grid_x.reshape(-1)
        y_idxs = grid_y.reshape(-1)
        depth = depth_map[y_idxs, x_idxs]
        pts_rect = self.img_to_rect(x_idxs, y_idxs, depth)
        return pts_rect, x_idxs, y_idxs

    def disparity_map_to_rect(self, disparity_map, epsilon=1e-6):
        """Disparity map -> rectified-frame points (depth = baseline / disparity)."""
        depth_map = self.stereo_baseline / (disparity_map + epsilon)
        return self.depthmap_to_rect(depth_map)

    def disparity_map_to_depth_map(self, disparity_map, epsilon=1e-6):
        """Disparity map -> depth map via the stereo baseline."""
        return self.stereo_baseline / (disparity_map + epsilon)

    def corners3d_to_img_boxes(self, corners3d):
        """Project 3D box corners onto the image.

        Args:
            corners3d: (N, 8, 3) corners in rectified camera coordinates.
        Returns:
            boxes: (N, 4) axis-aligned [x1, y1, x2, y2] pixel boxes.
            boxes_corner: (N, 8, 2) projected corner pixels.
        """
        n = corners3d.shape[0]
        hom = np.concatenate((corners3d, np.ones((n, 8, 1))), axis=2)  # (N, 8, 4)
        img_pts = np.matmul(hom, self.P2.T)  # (N, 8, 3)
        x = img_pts[:, :, 0] / img_pts[:, :, 2]
        y = img_pts[:, :, 1] / img_pts[:, :, 2]
        boxes = np.stack((x.min(axis=1), y.min(axis=1),
                          x.max(axis=1), y.max(axis=1)), axis=1)
        boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
        return boxes, boxes_corner

    def camera_dis_to_rect(self, u, v, d):
        """Convert camera-ray distances to rectified-frame points.

        Here d is the Euclidean distance from the camera center
        (d^2 = x^2 + y^2 + z^2), not the depth along z; u, v must lie
        inside the image for the result to be valid.
        """
        assert self.fu == self.fv, '%.8f != %.8f' % (self.fu, self.fv)
        fd = np.sqrt((u - self.cu) ** 2 + (v - self.cv) ** 2 + self.fu ** 2)
        x = (u - self.cu) * d / fd + self.tx
        y = (v - self.cv) * d / fd + self.ty
        z = np.sqrt(d ** 2 - x ** 2 - y ** 2)
        cols = (x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1))
        return np.concatenate(cols, axis=1)
def load_calib(kitti_root, split, imgid):
    """Parse the KITTI calibration file of one frame.

    Args:
        kitti_root: KITTI dataset root directory.
        split: split directory name (e.g. 'training').
        imgid: frame id as int, or a pre-formatted 6-digit string.
    Returns:
        Calibration built from the parsed matrices.
    """
    if isinstance(imgid, int):
        imgid = '%06d' % imgid
    calib_path = os.path.join(kitti_root, 'object', split, 'calib', imgid + '.txt')
    parsed = {}
    with open(calib_path) as f:
        # The last line of the file (normally empty) is dropped.
        for line in f.readlines()[:-1]:
            fields = line.strip().split(':')
            parsed[fields[0]] = [float(v) for v in fields[1].split()]
    # R0_rect is 3 x 3; every other entry reshapes to 3 x 4.
    shape_of = {'R0_rect': (3, 3)}
    keys = ('P0', 'P1', 'P2', 'P3', 'R0_rect', 'Tr_velo_to_cam', 'Tr_imu_to_velo')
    calibs = {k: np.array(parsed[k]).reshape(shape_of.get(k, (3, 4))) for k in keys}
    return Calibration(calibs)
def load_image_2(kitti_root, split, imgid):
    """Load the left color image (image_2) of one frame.

    Args:
        kitti_root: KITTI dataset root directory.
        split: split directory name (e.g. 'training').
        imgid: frame id as int, or a pre-formatted 6-digit string
            (accepted for consistency with load_calib).
    Returns:
        The PIL image opened from <root>/object/<split>/image_2/<id>.png.
    """
    # Guard matches load_calib: '%06d' % imgid raises TypeError on strings.
    if isinstance(imgid, int):
        imgid = '%06d' % imgid
    img_dir = os.path.join(kitti_root, 'object', split, 'image_2')
    absolute_path = os.path.join(img_dir, imgid + '.png')
    rgb = Image.open(absolute_path)
    return rgb
from enum import IntEnum
class KITTIObjectClass(IntEnum):
    """Object categories appearing in KITTI label files.

    IntEnum so members compare equal to their integer ids and can be used
    directly as class indices; looked up by name via KITTIObjectClass[cls].
    """
    Car = 1
    Van = 2
    Truck = 3
    Pedestrian = 4
    Person_sitting = 5
    Cyclist = 6
    Tram = 7
    Misc = 8
    DontCare = 9
class KITTIObject3D:
    """One object annotation from a KITTI label file.

    Fields mirror the KITTI label format: 2D box (x1, y1, x2, y2) in
    pixels, 3D box size (h, w, l) and center (x, y, z), observation
    angle alpha, yaw ry, plus truncation/occlusion metadata.
    """
    cls: KITTIObjectClass
    truncated: float
    occluded: float
    alpha: float
    x1: float
    y1: float
    x2: float
    y2: float
    h: float
    w: float
    l: float
    x: float
    y: float
    z: float
    ry: float

    def __init__(self, cls: KITTIObjectClass, truncated: float, occluded: float, alpha: float,
                 x1: float, y1: float, x2: float, y2: float,
                 h: float, w: float, l: float,
                 x: float, y: float, z: float, ry: float) -> None:
        super().__init__()
        # Assign in declaration order for readability.
        self.cls = cls
        self.truncated = truncated
        self.occluded = occluded
        self.alpha = alpha
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.h = h
        self.w = w
        self.l = l
        self.x = x
        self.y = y
        self.z = z
        self.ry = ry
def load_label_2(kitti_root, split, imgid):
    """Read the camera-frame object labels (label_2) of one frame.

    Args:
        kitti_root: KITTI dataset root directory.
        split: split directory name (e.g. 'training').
        imgid: frame id as int, or a pre-formatted 6-digit string
            (accepted for consistency with load_calib).
    Returns:
        list of KITTIObject3D, one per non-empty label line.
    """
    if isinstance(imgid, int):
        imgid = '%06d' % imgid
    label_2_dir = os.path.join(kitti_root, 'object', split, 'label_2')
    absolute_path = os.path.join(label_2_dir, imgid + '.txt')
    with open(absolute_path) as f:
        lines = f.read().splitlines()
    labels = []
    # Loop variable renamed from `l`: the original shadowed it with the
    # unpacked box length `l`, which also masks the line being parsed.
    for line in lines:
        items = line.split()
        if not items:
            continue  # tolerate trailing blank lines
        cls = items[0]
        (truncated, occluded, alpha, x1, y1, x2, y2,
         h, w, l, x, y, z, ry) = map(float, items[1:])
        label = KITTIObject3D(KITTIObjectClass[cls], truncated, occluded, alpha,
                              x1, y1, x2, y2, h, w, l, x, y, z, ry)
        labels.append(label)
    return labels
def load_label_3(kitti_root, split, imgid):
    """Read the right-camera object labels (label_3) of one frame.

    Args:
        kitti_root: KITTI dataset root directory.
        split: split directory name (e.g. 'training').
        imgid: frame id as int, or a pre-formatted 6-digit string
            (accepted for consistency with load_calib).
    Returns:
        list of KITTIObject3D, one per non-empty label line.
    """
    if isinstance(imgid, int):
        imgid = '%06d' % imgid
    label_3_dir = os.path.join(kitti_root, 'object', split, 'label_3')
    absolute_path = os.path.join(label_3_dir, imgid + '.txt')
    with open(absolute_path) as f:
        lines = f.read().splitlines()
    labels = []
    # Loop variable renamed from `l`: the original shadowed it with the
    # unpacked box length `l`, which also masks the line being parsed.
    for line in lines:
        items = line.split()
        if not items:
            continue  # tolerate trailing blank lines
        cls = items[0]
        (truncated, occluded, alpha, x1, y1, x2, y2,
         h, w, l, x, y, z, ry) = map(float, items[1:])
        label = KITTIObject3D(KITTIObjectClass[cls], truncated, occluded, alpha,
                              x1, y1, x2, y2, h, w, l, x, y, z, ry)
        labels.append(label)
    return labels
| [
"PIL.Image.open",
"numpy.sqrt",
"numpy.ones",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.matmul",
"numpy.min",
"numpy.meshgrid",
"numpy.arange"
] | [((5562, 5612), 'os.path.join', 'os.path.join', (['kitti_root', '"""object"""', 'split', '"""calib"""'], {}), "(kitti_root, 'object', split, 'calib')\n", (5574, 5612), False, 'import os\n'), ((5633, 5672), 'os.path.join', 'os.path.join', (['calib_dir', "(imgid + '.txt')"], {}), "(calib_dir, imgid + '.txt')\n", (5645, 5672), False, 'import os\n'), ((6444, 6496), 'os.path.join', 'os.path.join', (['kitti_root', '"""object"""', 'split', '"""image_2"""'], {}), "(kitti_root, 'object', split, 'image_2')\n", (6456, 6496), False, 'import os\n'), ((6517, 6554), 'os.path.join', 'os.path.join', (['img_dir', "(imgid + '.png')"], {}), "(img_dir, imgid + '.png')\n", (6529, 6554), False, 'import os\n'), ((6565, 6590), 'PIL.Image.open', 'Image.open', (['absolute_path'], {}), '(absolute_path)\n', (6575, 6590), False, 'from PIL import Image\n'), ((7788, 7840), 'os.path.join', 'os.path.join', (['kitti_root', '"""object"""', 'split', '"""label_2"""'], {}), "(kitti_root, 'object', split, 'label_2')\n", (7800, 7840), False, 'import os\n'), ((7861, 7902), 'os.path.join', 'os.path.join', (['label_2_dir', "(imgid + '.txt')"], {}), "(label_2_dir, imgid + '.txt')\n", (7873, 7902), False, 'import os\n'), ((8445, 8497), 'os.path.join', 'os.path.join', (['kitti_root', '"""object"""', 'split', '"""label_3"""'], {}), "(kitti_root, 'object', split, 'label_3')\n", (8457, 8497), False, 'import os\n'), ((8518, 8559), 'os.path.join', 'os.path.join', (['label_3_dir', "(imgid + '.txt')"], {}), "(label_3_dir, imgid + '.txt')\n", (8530, 8559), False, 'import os\n'), ((1961, 1992), 'numpy.dot', 'np.dot', (['pts_rect_hom', 'self.P2.T'], {}), '(pts_rect_hom, self.P2.T)\n', (1967, 1992), True, 'import numpy as np\n'), ((3081, 3113), 'numpy.arange', 'np.arange', (['(0)', 'depth_map.shape[1]'], {}), '(0, depth_map.shape[1])\n', (3090, 3113), True, 'import numpy as np\n'), ((3132, 3164), 'numpy.arange', 'np.arange', (['(0)', 'depth_map.shape[0]'], {}), '(0, depth_map.shape[0])\n', (3141, 3164), True, 'import 
numpy as np\n'), ((3190, 3219), 'numpy.meshgrid', 'np.meshgrid', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (3201, 3219), True, 'import numpy as np\n'), ((4210, 4245), 'numpy.matmul', 'np.matmul', (['corners3d_hom', 'self.P2.T'], {}), '(corners3d_hom, self.P2.T)\n', (4219, 4245), True, 'import numpy as np\n'), ((5114, 5177), 'numpy.sqrt', 'np.sqrt', (['((u - self.cu) ** 2 + (v - self.cv) ** 2 + self.fu ** 2)'], {}), '((u - self.cu) ** 2 + (v - self.cv) ** 2 + self.fu ** 2)\n', (5121, 5177), True, 'import numpy as np\n'), ((5284, 5317), 'numpy.sqrt', 'np.sqrt', (['(d ** 2 - x ** 2 - y ** 2)'], {}), '(d ** 2 - x ** 2 - y ** 2)\n', (5291, 5317), True, 'import numpy as np\n'), ((1633, 1662), 'numpy.dot', 'np.dot', (['self.V2C.T', 'self.R0.T'], {}), '(self.V2C.T, self.R0.T)\n', (1639, 1662), True, 'import numpy as np\n'), ((4365, 4382), 'numpy.min', 'np.min', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4371, 4382), True, 'import numpy as np\n'), ((4384, 4401), 'numpy.min', 'np.min', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4390, 4401), True, 'import numpy as np\n'), ((4419, 4436), 'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4425, 4436), True, 'import numpy as np\n'), ((4438, 4455), 'numpy.max', 'np.max', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4444, 4455), True, 'import numpy as np\n'), ((1339, 1383), 'numpy.ones', 'np.ones', (['(pts.shape[0], 1)'], {'dtype': 'np.float32'}), '((pts.shape[0], 1), dtype=np.float32)\n', (1346, 1383), True, 'import numpy as np\n'), ((4140, 4167), 'numpy.ones', 'np.ones', (['(sample_num, 8, 1)'], {}), '((sample_num, 8, 1))\n', (4147, 4167), True, 'import numpy as np\n'), ((5875, 5896), 'numpy.array', 'np.array', (["lines['P0']"], {}), "(lines['P0'])\n", (5883, 5896), True, 'import numpy as np\n'), ((5934, 5955), 'numpy.array', 'np.array', (["lines['P1']"], {}), "(lines['P1'])\n", (5942, 5955), True, 'import numpy as np\n'), ((5993, 6014), 'numpy.array', 'np.array', (["lines['P2']"], {}), 
"(lines['P2'])\n", (6001, 6014), True, 'import numpy as np\n'), ((6052, 6073), 'numpy.array', 'np.array', (["lines['P3']"], {}), "(lines['P3'])\n", (6060, 6073), True, 'import numpy as np\n'), ((6116, 6142), 'numpy.array', 'np.array', (["lines['R0_rect']"], {}), "(lines['R0_rect'])\n", (6124, 6142), True, 'import numpy as np\n'), ((6192, 6225), 'numpy.array', 'np.array', (["lines['Tr_velo_to_cam']"], {}), "(lines['Tr_velo_to_cam'])\n", (6200, 6225), True, 'import numpy as np\n'), ((6275, 6308), 'numpy.array', 'np.array', (["lines['Tr_imu_to_velo']"], {}), "(lines['Tr_imu_to_velo'])\n", (6283, 6308), True, 'import numpy as np\n')] |
# bi-directional srnn within pkg
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR,MultiStepLR
import math
import torch.nn.functional as F
from torch.utils import data
from SRNN_layers.spike_dense import *#spike_dense,readout_integrator
from SRNN_layers.spike_neuron import *#output_Neuron
from SRNN_layers.spike_rnn import *# spike_rnn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device: ',device)
def normalize(data_set, Vmax, Vmin):
    """Min-max scale *data_set* to [0, 1] using the extrema Vmax/Vmin."""
    value_range = Vmax - Vmin
    return (data_set - Vmin) / value_range
# Pre-extracted feature files (f40/t100 presumably = 40 features x 100
# timesteps per utterance — confirm against the extraction script).
train_data = np.load('./f40/train_f40_t100.npy')
test_data = np.load('./f40/test_f40_t100.npy')
valid_data = np.load('./f40/valid_f40_t100.npy')
num_channels = 39
use_channels = 39
# Per-channel extrema of the TRAINING split only; the same statistics are
# reused to scale test/valid so all splits share one normalization.
Vmax = np.max(train_data[:,:,:use_channels],axis=(0,1))
Vmin = np.min(train_data[:,:,:use_channels],axis=(0,1))
# b_j0_value comes from the SRNN_layers star import above.
print(train_data.shape,Vmax.shape,b_j0_value)
# Each array is (sample, time, channel); the first num_channels columns
# are inputs, the remaining columns are the one-hot targets.
train_x = normalize(train_data[:,:,:use_channels],Vmax,Vmin)
train_y = train_data[:,:,num_channels:]
test_x = normalize(test_data[:,:,:num_channels],Vmax,Vmin)
test_y = test_data[:,:,num_channels:]
valid_x = normalize(valid_data[:,:,:num_channels],Vmax,Vmin)
valid_y = valid_data[:,:,num_channels:]
print('input dataset shap: ',train_x.shape)
print('output dataset shap: ',train_y.shape)
_,seq_length,input_dim = train_x.shape
_,_,output_dim = train_y.shape
batch_size =16*8
# spike_neuron.b_j0_value = 1.59
torch.manual_seed(0)  # fixed seed for reproducibility
def get_DataLoader(train_x, train_y, batch_size=200):
    """Wrap numpy inputs/targets into a shuffling DataLoader.

    train_y is one-hot over its last axis and is converted to class
    indices (argmax) before batching.
    """
    inputs = torch.Tensor(train_x)
    targets = torch.Tensor(np.argmax(train_y, axis=-1))
    dataset = data.TensorDataset(inputs, targets)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Shuffled loaders for each split (targets converted to class indices).
train_loader = get_DataLoader(train_x,train_y,batch_size=batch_size)
test_loader = get_DataLoader(test_x,test_y,batch_size=batch_size)
valid_loader = get_DataLoader(valid_x,valid_y,batch_size=batch_size)
class RNN_s(nn.Module):
    """Bidirectional spiking RNN.

    A forward and a backward spike_rnn consume the input sequence in
    opposite directions; their spike trains are concatenated, averaged
    over 5 spiking sub-steps per frame and read out by a non-spiking
    integrator into per-frame class log-probabilities.
    """
    def __init__(self,criterion,device,delay=0):
        super(RNN_s, self).__init__()
        self.criterion = criterion  # per-frame loss, e.g. nn.NLLLoss
        self.delay = delay
        #self.network = [input_dim,128,128,256,output_dim]
        # Layer sizes: [input, forward hidden, backward hidden, output].
        self.network = [39,256,256,output_dim]
        # Forward-direction spiking RNN; time constants drawn from a
        # multi-modal normal initializer (per-mode means and stds).
        self.rnn_fw1 = spike_rnn(self.network[0],self.network[1],
                                    tau_initializer='multi_normal',
                                    tauM=[20,20,20,20],tauM_inital_std=[1,5,5,5],
                                    tauAdp_inital=[200,200,250,200],tauAdp_inital_std=[5,50,100,50],
                                    device=device)
        # Backward-direction spiking RNN (fed the sequence reversed).
        self.rnn_bw1 = spike_rnn(self.network[0],self.network[2],
                                    tau_initializer='multi_normal',
                                    tauM=[20,20,20,20],tauM_inital_std=[5,5,5,5],
                                    tauAdp_inital=[200,200,150,200],tauAdp_inital_std=[5,50,30,10],
                                    device=device)
        # Readout over the concatenated fw+bw spike vectors.
        self.dense_mean = readout_integrator(self.network[2]+self.network[1],self.network[3],
                                    tauM=3,tauM_inital_std=1,device=device)
    def forward(self, input,labels=None):
        """Run the network over a batch.

        Args:
            input: (batch, seq, channels); each frame is presented for
                5 consecutive spiking time steps.
            labels: optional (batch, seq) class indices; when given, the
                criterion loss is accumulated over frames.
        Returns:
            predictions: (seq, batch, n_classes) log-probabilities as a
                CPU tensor, and the accumulated loss (0 without labels).
        """
        b,s,c = input.shape
        # Reset membrane/adaptation state at the start of every batch.
        self.rnn_fw1.set_neuron_state(b)
        self.rnn_bw1.set_neuron_state(b)
        self.dense_mean.set_neuron_state(b)
        loss = 0
        predictions = []
        fw_spikes = []
        bw_spikes = []
        mean_tensor = 0
        # Pass 1: run both directions, 5 spiking steps per input frame.
        for l in range(s*5):
            input_fw=input[:,l//5,:].float()
            # NOTE(review): -l//5 floors toward -inf, so the backward
            # stream reads frame 0 at l=0 then frame -1 for l=1..5 —
            # confirm this off-by-one alignment is intended.
            input_bw=input[:,-l//5,:].float()
            mem_layer1, spike_layer1 = self.rnn_fw1.forward(input_fw)
            mem_layer2, spike_layer2 = self.rnn_bw1.forward(input_bw)
            fw_spikes.append(spike_layer1)
            # Prepend so bw_spikes ends up in forward temporal order.
            bw_spikes.insert(0,spike_layer2)
        # Pass 2: merge directions and read out once per input frame.
        for k in range(s*5):
            # bw_idx computed but unused — bw_spikes[k] is taken directly.
            bw_idx = int(k//5)*5 + (4 - int(k%5))
            second_tensor = bw_spikes[k]#[bw_idx]
            merge_spikes = torch.cat((fw_spikes[k], second_tensor), -1)
            mean_tensor += merge_spikes
            if k %5 ==4:
                # Average the 5 sub-step spike vectors before readout.
                mem_layer3 = self.dense_mean(mean_tensor/5.)# mean or accumulate
                output = F.log_softmax(mem_layer3,dim=-1)#
                predictions.append(output.data.cpu().numpy())
                if labels is not None:
                    loss += self.criterion(output, labels[:, k//5])
                mean_tensor = 0
        predictions = torch.tensor(predictions)
        return predictions, loss
def test(data_loader, after_num_frames=0):
    """Frame-level accuracy of the global `model` over `data_loader`.

    Every frame of every sequence counts as one sample; returns
    correct / total as a numpy scalar.
    """
    n_correct = 0.
    n_frames = 0
    for images, labels in data_loader:
        images = images.view(-1, seq_length, input_dim).to(device)
        labels = labels.view((-1, seq_length)).long().to(device)
        predictions, _ = model(images)
        # Argmax over the class dimension of (seq, batch, n_classes).
        _, predicted = torch.max(predictions.data, 2)
        predicted = predicted.cpu().t()  # -> (batch, seq) to match labels
        n_correct += (predicted == labels.cpu()).sum()
        n_frames += predicted.numel()
    return n_correct.data.cpu().numpy() / n_frames
def test_vote(data_loader,after_num_frames=0):
    """Frame accuracy with majority voting over the 5 sub-predictions.

    The model emits 5 predictions per input frame; each frame's class is
    the majority vote of those 5, compared against the frame label.
    """
    test_acc = 0.
    sum_samples = 0
    for i, (images, labels) in enumerate(data_loader):
        images = images.view(-1, seq_length, input_dim).to(device)
        labels = labels.view((-1,seq_length)).long().to(device)
        predictions, _ = model(images)
        _, predicted = torch.max(predictions.data, 2)
        labels = labels.cpu()#.data.numpy()
        # numel counts all 5 sub-predictions per frame, while votes add 1
        # per frame; the final ratio is rescaled by 5 to compensate.
        sum_samples = sum_samples + predicted.numel()
        predicted = predicted.cpu().t().data.numpy()
        for j in range(seq_length):
            # The 5 consecutive sub-predictions belonging to frame j.
            res_tsp = predicted[:,j*5:(j+1)*5]
            lab_tsp = labels[:,j]
            for k in range(len(labels)):
                if i==0 and k==1:
                    # Debug peek at one sample's votes per evaluation run.
                    print(lab_tsp[k], res_tsp[k,:])
                counts = np.bincount(res_tsp[k,:])
                pred = np.argmax(counts)  # majority-voted class
                if pred == lab_tsp[k]:
                    test_acc += 1
            # if lab_tsp[k] in res_tsp[k,:]:
            #     test_acc += 1.
    #for j in range(5):
        #test_acc += (predicted[:,np.arange(seq_length)*5+j] == labels).sum()
    return test_acc / sum_samples*5
def train(model,loader,optimizer,scheduler=None,num_epochs=10):
    """Train `model` on `loader`, checkpointing on best validation accuracy.

    Saves the full model under 'model/<valid_acc>-bi-srnn-v3_MN-v1.pth'
    whenever validation accuracy improves (after training accuracy has
    passed 0.30). Returns the list of per-epoch training accuracies.
    """
    best_acc = 0
    path = 'model/' # .pth'
    acc_list=[]
    print(model.rnn_fw1.b_j0)
    for epoch in range(num_epochs):
        train_acc = 0
        train_loss_sum = 0
        sum_samples = 0
        for i, (images, labels) in enumerate(loader):
            images = images.view(-1, seq_length, input_dim).requires_grad_().to(device)
            labels = labels.view((-1,seq_length)).long().to(device)
            optimizer.zero_grad()
            # Passing labels makes the model accumulate the loss internally.
            predictions, train_loss = model(images, labels)
            _, predicted = torch.max(predictions.data, 2)
            train_loss.backward()
            train_loss_sum += train_loss
            optimizer.step()
            labels = labels.cpu()
            predicted = predicted.cpu().t()  # -> (batch, seq) to match labels
            train_acc += (predicted == labels).sum()
            sum_samples = sum_samples + predicted.numel()
            torch.cuda.empty_cache()
        if scheduler is not None:
            scheduler.step()
        train_acc = train_acc.data.cpu().numpy() / sum_samples
        valid_acc = test(valid_loader)
        # Only checkpoint once training has clearly started converging.
        if valid_acc>best_acc and train_acc>0.30:
            best_acc = valid_acc
            torch.save(model, path+str(best_acc)[:7]+'-bi-srnn-v3_MN-v1.pth')
        acc_list.append(train_acc)
        print('epoch: {:3d}, Train Loss: {:.4f}, Train Acc: {:.4f},Valid Acc: {:.4f}'.format(epoch,
                                                                           train_loss_sum.item()/len(loader)/(seq_length),
                                                                           train_acc,valid_acc), flush=True)
    return acc_list
num_epochs = 200
criterion = nn.NLLLoss()#nn.CrossEntropyLoss()
# NOTE(review): this freshly built model is immediately replaced by the
# checkpoint below — the construction only matters if the load is removed.
model = RNN_s(criterion=criterion,device=device)
model = torch.load('./model/0.65942-bi-srnn-v3_MN.pth') # v1: only MN initialize fw rnn
# model = torch.load('./model/0.64553-bi-srnn-v3_MN.pth') # v2: MN initialize fw and bw rnn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device:",device)
model.to(device)
# print(model.parameters())
# for name, param in model.named_parameters():
#     if param.requires_grad:
#         print(name)
learning_rate =1e-3
# Weight/bias tensors that train at the base learning rate.
base_params = [
                model.rnn_fw1.dense.weight,model.rnn_fw1.dense.bias,
                model.rnn_fw1.recurrent.weight,model.rnn_fw1.recurrent.bias,
                model.rnn_bw1.dense.weight,model.rnn_bw1.dense.bias,
                model.rnn_bw1.recurrent.weight,model.rnn_bw1.recurrent.bias,
                model.dense_mean.dense.weight,model.dense_mean.dense.bias]
optimizer = torch.optim.Adagrad([
                                {'params': base_params},
                                {'params': model.rnn_fw1.tau_adp, 'lr': learning_rate * 5},
                                {'params': model.rnn_bw1.tau_adp, 'lr': learning_rate * 5},
                                {'params': model.rnn_fw1.tau_m, 'lr': learning_rate * 2},
                                {'params': model.rnn_bw1.tau_m, 'lr': learning_rate * 2},
                                {'params': model.dense_mean.tau_m, 'lr': learning_rate * 2}],
                        lr=learning_rate,eps=1e-5)
# NOTE(review): this Adamax optimizer REPLACES the Adagrad one above, so
# the tau parameters and their boosted learning rates are not trained —
# confirm this is intentional.
optimizer = torch.optim.Adamax([
                                {'params': base_params}],
                        lr=learning_rate)
scheduler = StepLR(optimizer, step_size=100, gamma=.5) # LIF
# training network
# with scheduler
test_acc = test(test_loader)
print(test_acc)
train_acc_list = train(model,train_loader,optimizer,scheduler,num_epochs=num_epochs)
test_acc = test(test_loader)
print(test_acc)
# print(test_vote(test_loader))
# q = 'abcdefghijklmnopqrstuvwxyz'
# fw = []
# bw = []
# for i in range(len(q)):
# for j in range(5):
# fw.append(q[i]+str(j))
# bw.insert(0,q[-i-1]+str(j))
# d_bw = []
# for k in range(len(fw)):
# bw_idx = int(k//5)*5 + 4 - int(k%5)
# d_bw.append(bw[bw_idx])
# print(fw)
# print(bw)
# print(d_bw)
| [
"torch.max",
"torch.cuda.is_available",
"numpy.max",
"numpy.min",
"torch.Tensor",
"numpy.argmax",
"torch.nn.NLLLoss",
"torch.nn.functional.log_softmax",
"numpy.bincount",
"torch.cuda.empty_cache",
"torch.cat",
"torch.manual_seed",
"torch.optim.Adagrad",
"torch.load",
"torch.optim.lr_sche... | [((613, 648), 'numpy.load', 'np.load', (['"""./f40/train_f40_t100.npy"""'], {}), "('./f40/train_f40_t100.npy')\n", (620, 648), True, 'import numpy as np\n'), ((661, 695), 'numpy.load', 'np.load', (['"""./f40/test_f40_t100.npy"""'], {}), "('./f40/test_f40_t100.npy')\n", (668, 695), True, 'import numpy as np\n'), ((709, 744), 'numpy.load', 'np.load', (['"""./f40/valid_f40_t100.npy"""'], {}), "('./f40/valid_f40_t100.npy')\n", (716, 744), True, 'import numpy as np\n'), ((790, 842), 'numpy.max', 'np.max', (['train_data[:, :, :use_channels]'], {'axis': '(0, 1)'}), '(train_data[:, :, :use_channels], axis=(0, 1))\n', (796, 842), True, 'import numpy as np\n'), ((846, 898), 'numpy.min', 'np.min', (['train_data[:, :, :use_channels]'], {'axis': '(0, 1)'}), '(train_data[:, :, :use_channels], axis=(0, 1))\n', (852, 898), True, 'import numpy as np\n'), ((1455, 1475), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1472, 1475), False, 'import torch\n'), ((8202, 8214), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (8212, 8214), True, 'import torch.nn as nn\n'), ((8294, 8341), 'torch.load', 'torch.load', (['"""./model/0.65942-bi-srnn-v3_MN.pth"""'], {}), "('./model/0.65942-bi-srnn-v3_MN.pth')\n", (8304, 8341), False, 'import torch\n'), ((9122, 9517), 'torch.optim.Adagrad', 'torch.optim.Adagrad', (["[{'params': base_params}, {'params': model.rnn_fw1.tau_adp, 'lr': \n learning_rate * 5}, {'params': model.rnn_bw1.tau_adp, 'lr': \n learning_rate * 5}, {'params': model.rnn_fw1.tau_m, 'lr': learning_rate *\n 2}, {'params': model.rnn_bw1.tau_m, 'lr': learning_rate * 2}, {'params':\n model.dense_mean.tau_m, 'lr': learning_rate * 2}]"], {'lr': 'learning_rate', 'eps': '(1e-05)'}), "([{'params': base_params}, {'params': model.rnn_fw1.\n tau_adp, 'lr': learning_rate * 5}, {'params': model.rnn_bw1.tau_adp,\n 'lr': learning_rate * 5}, {'params': model.rnn_fw1.tau_m, 'lr': \n learning_rate * 2}, {'params': model.rnn_bw1.tau_m, 'lr': learning_rate 
*\n 2}, {'params': model.dense_mean.tau_m, 'lr': learning_rate * 2}], lr=\n learning_rate, eps=1e-05)\n", (9141, 9517), False, 'import torch\n'), ((9539, 9602), 'torch.optim.Adamax', 'torch.optim.Adamax', (["[{'params': base_params}]"], {'lr': 'learning_rate'}), "([{'params': base_params}], lr=learning_rate)\n", (9557, 9602), False, 'import torch\n'), ((9625, 9668), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(100)', 'gamma': '(0.5)'}), '(optimizer, step_size=100, gamma=0.5)\n', (9631, 9668), False, 'from torch.optim.lr_scheduler import StepLR, MultiStepLR\n'), ((1651, 1718), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (1666, 1718), False, 'from torch.utils import data\n'), ((454, 479), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (477, 479), False, 'import torch\n'), ((1567, 1588), 'torch.Tensor', 'torch.Tensor', (['train_x'], {}), '(train_x)\n', (1579, 1588), False, 'import torch\n'), ((4461, 4486), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (4473, 4486), False, 'import torch\n'), ((4862, 4892), 'torch.max', 'torch.max', (['predictions.data', '(2)'], {}), '(predictions.data, 2)\n', (4871, 4892), False, 'import torch\n'), ((5594, 5624), 'torch.max', 'torch.max', (['predictions.data', '(2)'], {}), '(predictions.data, 2)\n', (5603, 5624), False, 'import torch\n'), ((8500, 8525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8523, 8525), False, 'import torch\n'), ((1603, 1630), 'numpy.argmax', 'np.argmax', (['train_y'], {'axis': '(-1)'}), '(train_y, axis=-1)\n', (1612, 1630), True, 'import numpy as np\n'), ((3969, 4013), 'torch.cat', 'torch.cat', (['(fw_spikes[k], second_tensor)', '(-1)'], {}), '((fw_spikes[k], second_tensor), -1)\n', (3978, 4013), False, 'import torch\n'), ((7047, 7077), 'torch.max', 'torch.max', 
(['predictions.data', '(2)'], {}), '(predictions.data, 2)\n', (7056, 7077), False, 'import torch\n'), ((7410, 7434), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7432, 7434), False, 'import torch\n'), ((4199, 4232), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['mem_layer3'], {'dim': '(-1)'}), '(mem_layer3, dim=-1)\n', (4212, 4232), True, 'import torch.nn.functional as F\n'), ((6054, 6080), 'numpy.bincount', 'np.bincount', (['res_tsp[k, :]'], {}), '(res_tsp[k, :])\n', (6065, 6080), True, 'import numpy as np\n'), ((6103, 6120), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (6112, 6120), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
implement the cnn network with tensorflow
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import xt.model.impala.vtrace_tf as vtrace
from tensorflow.python.util import deprecation
from xt.framework.register import Registers
from xt.model import XTModel
from xt.model.impala.default_config import GAMMA, LR
from xt.model.tf_compat import (
DTYPE_MAP,
AdamOptimizer,
Conv2D,
Flatten,
Lambda,
Saver,
global_variables_initializer,
piecewise_constant,
tf,
)
from xt.model.atari_model import get_atari_filter
from xt.model.tf_utils import TFVariables, restore_tf_variable
from xt.util.common import import_config
deprecation._PRINT_DEPRECATION_WARNINGS = False
@Registers.model
class ImpalaCNNNetV2(XTModel):
"""docstring for ActorNetwork."""
def __init__(self, model_info):
model_config = model_info.get("model_config", dict())
import_config(globals(), model_config)
self.dtype = DTYPE_MAP.get(model_config.get("dtype", "float32"))
self.state_dim = model_info["state_dim"]
self.action_dim = model_info["action_dim"]
self.filter_arch = get_atari_filter(self.state_dim)
self.ph_state = None
self.ph_adv = None
self.out_actions = None
self.policy_logits, self.baseline = None, None
self.ph_behavior_logits = None
self.ph_actions = None
self.ph_dones = None
self.ph_rewards = None
self.loss, self.optimizer, self.train_op = None, None, None
self.grad_norm_clip = 40.0
self.sample_batch_steps = 50
self.saver = None
self.explore_paras = None
self.actor_var = None # store weights for agent
super(ImpalaCNNNetV2, self).__init__(model_info)
def create_model(self, model_info):
self.ph_state = tf.placeholder(
tf.int8, shape=(None, *self.state_dim,), name="state_input"
)
with tf.variable_scope("explore_agent"):
state_input = Lambda(lambda x: tf.cast(x, dtype="float32") / 128.0)(
self.ph_state
)
last_layer = state_input
for (out_size, kernel, stride) in self.filter_arch[:-1]:
last_layer = Conv2D(out_size, (kernel, kernel),
strides=(stride, stride), activation="relu",
padding="same")(last_layer)
# last convolution
(out_size, kernel, stride) = self.filter_arch[-1]
convolution_layer = Conv2D(out_size, (kernel, kernel),
strides=(stride, stride), activation="relu",
padding="valid")(last_layer)
self.policy_logits = tf.squeeze(
Conv2D(self.action_dim, (1, 1), padding="same")(convolution_layer),
axis=[1, 2])
baseline_flat = Flatten()(convolution_layer)
self.baseline = tf.squeeze(
tf.layers.dense(
inputs=baseline_flat,
units=1,
activation=None,
kernel_initializer=norm_initializer(0.01),
),
1,
)
self.out_actions = tf.squeeze(
tf.multinomial(
self.policy_logits, num_samples=1, output_dtype=tf.int32
),
1,
name="out_action",
)
# create learner
self.ph_behavior_logits = tf.placeholder(
self.dtype, shape=(None, self.action_dim), name="ph_behavior_logits"
)
self.ph_actions = tf.placeholder(tf.int32, shape=(None,), name="ph_action")
self.ph_dones = tf.placeholder(tf.bool, shape=(None,), name="ph_dones")
self.ph_rewards = tf.placeholder(self.dtype, shape=(None,), name="ph_rewards")
# Split the tensor into batches at known episode cut boundaries.
# [batch_count * batch_step] -> [batch_step, batch_count]
batch_step = self.sample_batch_steps
def split_batches(tensor, drop_last=False):
batch_count = tf.shape(tensor)[0] // batch_step
reshape_tensor = tf.reshape(
tensor, tf.concat([[batch_count, batch_step], tf.shape(tensor)[1:]], axis=0))
# swap B and T axes
res = tf.transpose(
reshape_tensor, [1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0])))
)
if drop_last:
return res[:-1]
return res
self.loss = vtrace_loss(
behavior_policy_logits=split_batches(
self.ph_behavior_logits, drop_last=True
),
target_policy_logits=split_batches(self.policy_logits, drop_last=True),
actions=split_batches(self.ph_actions, drop_last=True),
discounts=split_batches(
tf.cast(~self.ph_dones, tf.float32) * GAMMA, drop_last=True
),
rewards=split_batches(
tf.clip_by_value(self.ph_rewards, -1, 1), drop_last=True
),
values=split_batches(self.baseline, drop_last=True),
bootstrap_value=split_batches(self.baseline)[-1],
)
opt_type = "adam"
learning_rate, global_step = self._get_lr()
if opt_type == "adam":
optimizer = AdamOptimizer(LR)
# optimizer = AdamOptimizer(learning_rate)
elif opt_type == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(
LR, decay=0.99, epsilon=0.1, centered=True
)
else:
raise KeyError("invalid opt_type: {}".format(opt_type))
grads_and_vars = optimizer.compute_gradients(self.loss)
capped_gvs = [
(grad if grad is None else tf.clip_by_norm(
grad, clip_norm=self.grad_norm_clip),
var) for grad, var in grads_and_vars
]
self.train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
self.actor_var = TFVariables(self.out_actions, self.sess)
self.sess.run(global_variables_initializer())
# self.saver = Saver(max_to_keep=100)
self.explore_paras = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="explore_agent"
)
self.saver = Saver({t.name: t for t in self.explore_paras}, max_to_keep=100)
return True
@staticmethod
def _get_lr(values=None, boundaries=None):
"""make dynamic learning rate"""
values = [0.0025, 0.002, 0.001]
boundaries = np.array([20000 / 1000, 200000 / 1000]).astype(np.int32).tolist()
global_step = tf.Variable(0, trainable=False, dtype=tf.int32)
learning_rate = piecewise_constant(global_step, boundaries, values)
return learning_rate, global_step
def train(self, state, label):
"""train with sess.run"""
behavior_logits, actions, dones, rewards = label
with self.graph.as_default():
_, loss = self.sess.run(
[self.train_op, self.loss],
feed_dict={
self.ph_state: state,
self.ph_behavior_logits: behavior_logits,
self.ph_actions: actions,
self.ph_dones: dones,
self.ph_rewards: rewards,
},
)
return loss
def predict(self, state):
"""
action_logits, action_val, value
Do predict use the newest model.
:param state:
:return:
"""
with self.graph.as_default():
feed_dict = {self.ph_state: state}
return self.sess.run(
[self.policy_logits, self.baseline, self.out_actions], feed_dict
)
def save_model(self, file_name):
"""save model without meta graph"""
ck_name = self.saver.save(
self.sess, save_path=file_name, write_meta_graph=False
)
return ck_name
def load_model(self, model_name, by_name=False):
"""load model with inference variables."""
# print(">> load model: {}".format(model_name))
# self.saver.restore(self.sess, model_name)
restore_tf_variable(self.sess, self.explore_paras, model_name)
def set_weights(self, weights):
"""set weight with memory tensor"""
with self.graph.as_default():
self.actor_var.set_weights(weights)
def get_weights(self):
"""get weights"""
# print("model get weight")
with self.graph.as_default():
return self.actor_var.get_weights()
def compute_baseline_loss(advantages):
"""Loss for the baseline, summed over the time dimension.
Multiply by 0.5 to match the standard update rule:"""
# d(loss) / d(baseline) = advantage
return 0.5 * tf.reduce_sum(tf.square(advantages))
def compute_entropy_loss(logits):
"""calculate entropy loss"""
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
entropy_per_timestep = tf.reduce_sum(-policy * log_policy, axis=-1)
return -tf.reduce_sum(entropy_per_timestep)
def compute_policy_gradient_loss(logits, actions, advantages):
"""calculate policy gradient loss"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=logits
)
advantages = tf.stop_gradient(advantages)
policy_gradient_loss_per_timestep = cross_entropy * advantages
return tf.reduce_sum(policy_gradient_loss_per_timestep)
def vtrace_loss(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
):
"""vtrace loss from impala algorithm."""
# clip reward
# clipped_rewards = tf.clip_by_value(rewards, -1, 1)
# discounts = tf.to_float(~dones) * FLAGS.discounting
with tf.device("/cpu"):
vtrace_returns = vtrace.from_logits(
behaviour_policy_logits=behavior_policy_logits,
target_policy_logits=target_policy_logits,
actions=actions,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
)
total_loss = compute_policy_gradient_loss(
target_policy_logits, actions, vtrace_returns.pg_advantages
)
total_loss += 0.5 * compute_baseline_loss(vtrace_returns.vs - values)
total_loss += 0.01 * compute_entropy_loss(target_policy_logits)
return total_loss
def norm_initializer(std=0.5):
"""custom norm initializer for op"""
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
| [
"xt.model.tf_compat.tf.variable_scope",
"numpy.array",
"xt.model.tf_compat.tf.placeholder",
"xt.model.tf_compat.tf.nn.sparse_softmax_cross_entropy_with_logits",
"xt.model.tf_compat.tf.Variable",
"xt.model.tf_compat.Flatten",
"xt.model.tf_compat.piecewise_constant",
"xt.model.tf_utils.restore_tf_variab... | [((9117, 9138), 'xt.model.tf_compat.tf.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (9130, 9138), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9156, 9181), 'xt.model.tf_compat.tf.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {}), '(logits)\n', (9173, 9181), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9209, 9253), 'xt.model.tf_compat.tf.reduce_sum', 'tf.reduce_sum', (['(-policy * log_policy)'], {'axis': '(-1)'}), '(-policy * log_policy, axis=-1)\n', (9222, 9253), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9428, 9505), 'xt.model.tf_compat.tf.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'actions', 'logits': 'logits'}), '(labels=actions, logits=logits)\n', (9474, 9505), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9537, 9565), 'xt.model.tf_compat.tf.stop_gradient', 'tf.stop_gradient', (['advantages'], {}), '(advantages)\n', (9553, 9565), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9644, 9692), 'xt.model.tf_compat.tf.reduce_sum', 'tf.reduce_sum', (['policy_gradient_loss_per_timestep'], {}), '(policy_gradient_loss_per_timestep)\n', (9657, 9692), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((1219, 1251), 'xt.model.atari_model.get_atari_filter', 'get_atari_filter', 
(['self.state_dim'], {}), '(self.state_dim)\n', (1235, 1251), False, 'from xt.model.atari_model import get_atari_filter\n'), ((1908, 1982), 'xt.model.tf_compat.tf.placeholder', 'tf.placeholder', (['tf.int8'], {'shape': '(None, *self.state_dim)', 'name': '"""state_input"""'}), "(tf.int8, shape=(None, *self.state_dim), name='state_input')\n", (1922, 1982), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((3622, 3711), 'xt.model.tf_compat.tf.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': '(None, self.action_dim)', 'name': '"""ph_behavior_logits"""'}), "(self.dtype, shape=(None, self.action_dim), name=\n 'ph_behavior_logits')\n", (3636, 3711), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((3756, 3813), 'xt.model.tf_compat.tf.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': '"""ph_action"""'}), "(tf.int32, shape=(None,), name='ph_action')\n", (3770, 3813), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((3838, 3893), 'xt.model.tf_compat.tf.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '(None,)', 'name': '"""ph_dones"""'}), "(tf.bool, shape=(None,), name='ph_dones')\n", (3852, 3893), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((3920, 3980), 'xt.model.tf_compat.tf.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': '(None,)', 'name': '"""ph_rewards"""'}), "(self.dtype, shape=(None,), name='ph_rewards')\n", (3934, 3980), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((6182, 6222), 
'xt.model.tf_utils.TFVariables', 'TFVariables', (['self.out_actions', 'self.sess'], {}), '(self.out_actions, self.sess)\n', (6193, 6222), False, 'from xt.model.tf_utils import TFVariables, restore_tf_variable\n'), ((6354, 6428), 'xt.model.tf_compat.tf.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""explore_agent"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='explore_agent')\n", (6371, 6428), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((6473, 6536), 'xt.model.tf_compat.Saver', 'Saver', (['{t.name: t for t in self.explore_paras}'], {'max_to_keep': '(100)'}), '({t.name: t for t in self.explore_paras}, max_to_keep=100)\n', (6478, 6536), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((6814, 6861), 'xt.model.tf_compat.tf.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'dtype': 'tf.int32'}), '(0, trainable=False, dtype=tf.int32)\n', (6825, 6861), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((6886, 6937), 'xt.model.tf_compat.piecewise_constant', 'piecewise_constant', (['global_step', 'boundaries', 'values'], {}), '(global_step, boundaries, values)\n', (6904, 6937), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((8374, 8436), 'xt.model.tf_utils.restore_tf_variable', 'restore_tf_variable', (['self.sess', 'self.explore_paras', 'model_name'], {}), '(self.sess, self.explore_paras, model_name)\n', (8393, 8436), False, 'from xt.model.tf_utils import TFVariables, restore_tf_variable\n'), ((9266, 9301), 'xt.model.tf_compat.tf.reduce_sum', 'tf.reduce_sum', (['entropy_per_timestep'], {}), 
'(entropy_per_timestep)\n', (9279, 9301), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((10058, 10075), 'xt.model.tf_compat.tf.device', 'tf.device', (['"""/cpu"""'], {}), "('/cpu')\n", (10067, 10075), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((10102, 10323), 'xt.model.impala.vtrace_tf.from_logits', 'vtrace.from_logits', ([], {'behaviour_policy_logits': 'behavior_policy_logits', 'target_policy_logits': 'target_policy_logits', 'actions': 'actions', 'discounts': 'discounts', 'rewards': 'rewards', 'values': 'values', 'bootstrap_value': 'bootstrap_value'}), '(behaviour_policy_logits=behavior_policy_logits,\n target_policy_logits=target_policy_logits, actions=actions, discounts=\n discounts, rewards=rewards, values=values, bootstrap_value=bootstrap_value)\n', (10120, 10323), True, 'import xt.model.impala.vtrace_tf as vtrace\n'), ((10977, 10993), 'xt.model.tf_compat.tf.constant', 'tf.constant', (['out'], {}), '(out)\n', (10988, 10993), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((2020, 2054), 'xt.model.tf_compat.tf.variable_scope', 'tf.variable_scope', (['"""explore_agent"""'], {}), "('explore_agent')\n", (2037, 2054), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((5496, 5513), 'xt.model.tf_compat.AdamOptimizer', 'AdamOptimizer', (['LR'], {}), '(LR)\n', (5509, 5513), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((6246, 6276), 'xt.model.tf_compat.global_variables_initializer', 'global_variables_initializer', ([], {}), '()\n', 
(6274, 6276), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((9012, 9033), 'xt.model.tf_compat.tf.square', 'tf.square', (['advantages'], {}), '(advantages)\n', (9021, 9033), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((2623, 2724), 'xt.model.tf_compat.Conv2D', 'Conv2D', (['out_size', '(kernel, kernel)'], {'strides': '(stride, stride)', 'activation': '"""relu"""', 'padding': '"""valid"""'}), "(out_size, (kernel, kernel), strides=(stride, stride), activation=\n 'relu', padding='valid')\n", (2629, 2724), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((2998, 3007), 'xt.model.tf_compat.Flatten', 'Flatten', ([], {}), '()\n', (3005, 3007), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((3382, 3454), 'xt.model.tf_compat.tf.multinomial', 'tf.multinomial', (['self.policy_logits'], {'num_samples': '(1)', 'output_dtype': 'tf.int32'}), '(self.policy_logits, num_samples=1, output_dtype=tf.int32)\n', (3396, 3454), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((5629, 5698), 'xt.model.tf_compat.tf.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['LR'], {'decay': '(0.99)', 'epsilon': '(0.1)', 'centered': '(True)'}), '(LR, decay=0.99, epsilon=0.1, centered=True)\n', (5654, 5698), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((10847, 10870), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (10862, 10870), 
True, 'import numpy as np\n'), ((2317, 2417), 'xt.model.tf_compat.Conv2D', 'Conv2D', (['out_size', '(kernel, kernel)'], {'strides': '(stride, stride)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(out_size, (kernel, kernel), strides=(stride, stride), activation=\n 'relu', padding='same')\n", (2323, 2417), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((2872, 2919), 'xt.model.tf_compat.Conv2D', 'Conv2D', (['self.action_dim', '(1, 1)'], {'padding': '"""same"""'}), "(self.action_dim, (1, 1), padding='same')\n", (2878, 2919), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((4245, 4261), 'xt.model.tf_compat.tf.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4253, 4261), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((5153, 5193), 'xt.model.tf_compat.tf.clip_by_value', 'tf.clip_by_value', (['self.ph_rewards', '(-1)', '(1)'], {}), '(self.ph_rewards, -1, 1)\n', (5169, 5193), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((5938, 5990), 'xt.model.tf_compat.tf.clip_by_norm', 'tf.clip_by_norm', (['grad'], {'clip_norm': 'self.grad_norm_clip'}), '(grad, clip_norm=self.grad_norm_clip)\n', (5953, 5990), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((5027, 5062), 'xt.model.tf_compat.tf.cast', 'tf.cast', (['(~self.ph_dones)', 'tf.float32'], {}), '(~self.ph_dones, tf.float32)\n', (5034, 5062), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, 
piecewise_constant, tf\n'), ((6726, 6765), 'numpy.array', 'np.array', (['[20000 / 1000, 200000 / 1000]'], {}), '([20000 / 1000, 200000 / 1000])\n', (6734, 6765), True, 'import numpy as np\n'), ((10919, 10933), 'numpy.square', 'np.square', (['out'], {}), '(out)\n', (10928, 10933), True, 'import numpy as np\n'), ((2099, 2126), 'xt.model.tf_compat.tf.cast', 'tf.cast', (['x'], {'dtype': '"""float32"""'}), "(x, dtype='float32')\n", (2106, 2126), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((4382, 4398), 'xt.model.tf_compat.tf.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4390, 4398), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n'), ((4542, 4558), 'xt.model.tf_compat.tf.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4550, 4558), False, 'from xt.model.tf_compat import DTYPE_MAP, AdamOptimizer, Conv2D, Flatten, Lambda, Saver, global_variables_initializer, piecewise_constant, tf\n')] |
import numpy as np
import matplotlib.pyplot as plt
from policy_iterative_evaluation import print_policy, print_values
from grid_world import standard_grid, negative_grid
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
def random_action(a, epsilon = 0.1):
p = np.random.random()
if p < (1 - epsilon):
return a
else:
return np.random.choice(ALL_POSSIBLE_ACTIONS)
def max_dict(d):
max_val = float('-inf')
max_key = None
for k,v in d.items():
if v > max_val:
max_val = v
max_key = k
return max_key, max_val
def play_game(grid, policy):
s = (2,0)
a = random_action(policy[s])
grid.set_state(s)
states = grid.all_states()
states_actions_rewards = [(s,a,0)]
while True:
r = grid.move(a)
s = grid.current_state()
if grid.game_over():
states_actions_rewards.append((s, None, r))
break;
else:
a = random_action(policy[s])
states_actions_rewards.append((s,a,r))
G = 0
states_actions_returns = []
first = True
for s,a,r in reversed(states_actions_rewards):
if first:
first = False
else:
states_actions_returns.append((s,a,G))
G = r + GAMMA * G
states_actions_returns.reverse()
return states_actions_returns
if __name__ == '__main__':
grid = negative_grid(step_cost = -0.5)
states = grid.all_states()
print ("Rewards:")
print_values(grid.rewards, grid)
#initialize Policy
policy = {}
for s in grid.actions.keys():
policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
#Intialize Q(s,a) and Returns
Q = {}
returns = {}
for s in states:
if s in grid.actions:
Q[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
Q[s][a] = 0
returns[(s,a)] = []
else:
pass
deltas = []
for t in range(10000):
biggest_change = 0
if t % 1000 == 0:
print (t)
states_actions_returns = play_game(grid, policy)
seen_state_action_pairs = set()
for s,a,G in states_actions_returns:
sa = (s,a)
if sa not in seen_state_action_pairs:
old_q = Q[s][a]
returns[sa].append(G)
Q[s][a] = np.mean(returns[sa])
biggest_change = max(biggest_change, np.abs(old_q - Q[s][a]))
seen_state_action_pairs.add(sa)
deltas.append(biggest_change)
for s in policy.keys():
a, _ = max_dict(Q[s])
policy[s] = a
plt.plot(deltas)
plt.show()
V = {}
for s in policy.keys():
V[s] = max_dict(Q[s])[1]
print("Value:")
print_values(V, grid)
print("Policy:")
print_policy(policy, grid)
| [
"numpy.mean",
"numpy.abs",
"policy_iterative_evaluation.print_policy",
"numpy.random.random",
"numpy.random.choice",
"matplotlib.pyplot.plot",
"policy_iterative_evaluation.print_values",
"grid_world.negative_grid",
"matplotlib.pyplot.show"
] | [((284, 302), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (300, 302), True, 'import numpy as np\n'), ((1466, 1495), 'grid_world.negative_grid', 'negative_grid', ([], {'step_cost': '(-0.5)'}), '(step_cost=-0.5)\n', (1479, 1495), False, 'from grid_world import standard_grid, negative_grid\n'), ((1561, 1593), 'policy_iterative_evaluation.print_values', 'print_values', (['grid.rewards', 'grid'], {}), '(grid.rewards, grid)\n', (1573, 1593), False, 'from policy_iterative_evaluation import print_policy, print_values\n'), ((2754, 2770), 'matplotlib.pyplot.plot', 'plt.plot', (['deltas'], {}), '(deltas)\n', (2762, 2770), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2786), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2784, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2915), 'policy_iterative_evaluation.print_values', 'print_values', (['V', 'grid'], {}), '(V, grid)\n', (2906, 2915), False, 'from policy_iterative_evaluation import print_policy, print_values\n'), ((2943, 2969), 'policy_iterative_evaluation.print_policy', 'print_policy', (['policy', 'grid'], {}), '(policy, grid)\n', (2955, 2969), False, 'from policy_iterative_evaluation import print_policy, print_values\n'), ((377, 415), 'numpy.random.choice', 'np.random.choice', (['ALL_POSSIBLE_ACTIONS'], {}), '(ALL_POSSIBLE_ACTIONS)\n', (393, 415), True, 'import numpy as np\n'), ((1691, 1729), 'numpy.random.choice', 'np.random.choice', (['ALL_POSSIBLE_ACTIONS'], {}), '(ALL_POSSIBLE_ACTIONS)\n', (1707, 1729), True, 'import numpy as np\n'), ((2462, 2482), 'numpy.mean', 'np.mean', (['returns[sa]'], {}), '(returns[sa])\n', (2469, 2482), True, 'import numpy as np\n'), ((2537, 2560), 'numpy.abs', 'np.abs', (['(old_q - Q[s][a])'], {}), '(old_q - Q[s][a])\n', (2543, 2560), True, 'import numpy as np\n')] |
# Importing files from this project
import ResNet
import MCTS
import argparse
import Files
from keras.optimizers import SGD
# import Config and Gamelogic, from the game you want to train/play
parser = argparse.ArgumentParser(description='Command line for AZ!')
parser.add_argument("--game", default= "TicTacToe",
choices= ["TicTacToe", "FourInARow"], required= False, help= "Choose one of the games from the list")
parser.add_argument("--numSearch", type = int, default = 100, help = "This is number of searches preformed by MCTS")
parser.add_argument("--opponent", type = str, default= "4r_7", help = "Choose the agent you want to play against")
args = parser.parse_args()
typeOfGame = args.game
if typeOfGame == "TicTacToe":
print("Skal spille TicTacToe")
from TicTacToe import Config
from TicTacToe import Gamelogic
elif typeOfGame == "FourInARow":
print("Skal spille FourInARow")
from FourInARow import Config
from FourInARow import Gamelogic
from loss import softmax_cross_entropy_with_logits, softmax
#Importing other libraries
import numpy as np
# Creating and returning a tree with properties specified from the input
def get_tree(config, agent, game, dirichlet_noise=True):
tree = MCTS.MCTS(game, game.get_board(), agent, config)
return tree
# Generating data by self-play
def generate_data(game, agent, config, num_sim=100, games=1):
tree = get_tree(config, agent, game)
x = []
y_policy = []
y_value = []
for _ in range(games):
game.__init__()
history = []
policy_targets = []
player_moved_list = []
positions = []
while not game.is_final():
tree.reset_search()
tree.root.board_state = game.get_board()
tree.search_series(num_sim)
temp_move = tree.get_temperature_move(tree.root)
history.append(temp_move)
policy_targets.append(np.array(tree.get_posterior_probabilities()))
if typeOfGame == "FourInARow":
(tree.get_prior_probabilities(game.get_board().reshape(1, 6, 7, 2)))
if typeOfGame == "TicTacToe":
(tree.get_prior_probabilities(game.get_board().reshape(1,3,3,2)))
player_moved_list.append(game.get_turn())
positions.append(np.array(game.get_board()))
game.execute_move(temp_move)
print("________________")
game.print_board()
print("________________")
game_outcome = game.get_outcome()
if game_outcome == [1, -1]:
print("X vant")
elif game_outcome == [-1, 1]:
print ("O vant")
else:
print("Uavgjort")
value_targets = [game_outcome[x] for x in player_moved_list]
x = x + positions
y_policy = y_policy + policy_targets
y_value = y_value + value_targets
return np.array(x), np.array(y_policy), np.array(y_value)
# Training AlphaZero by generating data from self-play and fitting the network
def train(game, config, num_filters, num_res_blocks, num_sim=125, epochs=50, games_each_epoch=10,
batch_size=32, num_train_epochs=10):
h, w, d = config.board_dims[1:]
agent = ResNet.ResNet.build(h, w, d, num_filters, config.policy_output_dim, num_res_blocks=num_res_blocks)
agent.compile(loss=[softmax_cross_entropy_with_logits, 'mean_squared_error'],
optimizer=SGD(lr=0.001, momentum=0.9))
agent.summary()
for epoch in range(epochs):
x, y_pol, y_val = generate_data(game, agent, config, num_sim=num_sim, games=games_each_epoch)
print("Epoch")
print(x.shape)
raw = agent.predict(x)
for num in range(len(x)):
print("targets-predictions")
print(y_pol[num], y_val[num])
print(softmax(y_pol[num], raw[0][num]), raw[1][num])
agent.fit(x=x, y=[y_pol, y_val], batch_size=min(batch_size, len(x)), epochs=num_train_epochs, callbacks=[])
agent.save_weights("Models/"+Config.name+"/"+str(epoch)+".h5")
return agent
# Returns the best legal move based on the predictions from
# The neural network
def choose_best_legal_move(legal_moves, y_pred):
best_move = np.argmax(y_pred)
print("Best move", best_move)
if (y_pred[best_move] == 0):
return None
if best_move in legal_moves:
return best_move
else:
y_pred[best_move] = 0
return choose_best_legal_move(legal_moves, y_pred)
if __name__ == '__main__':
if typeOfGame == "FourInARow":
train(Gamelogic.FourInARow(), Config, 128, 7)
elif typeOfGame == "TicTacToe":
train(Gamelogic.TicTacToe(), Config, 128, 4)
| [
"ResNet.ResNet.build",
"argparse.ArgumentParser",
"FourInARow.Gamelogic.FourInARow",
"numpy.argmax",
"loss.softmax",
"numpy.array",
"keras.optimizers.SGD",
"FourInARow.Gamelogic.TicTacToe"
] | [((204, 263), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command line for AZ!"""'}), "(description='Command line for AZ!')\n", (227, 263), False, 'import argparse\n'), ((3252, 3354), 'ResNet.ResNet.build', 'ResNet.ResNet.build', (['h', 'w', 'd', 'num_filters', 'config.policy_output_dim'], {'num_res_blocks': 'num_res_blocks'}), '(h, w, d, num_filters, config.policy_output_dim,\n num_res_blocks=num_res_blocks)\n', (3271, 3354), False, 'import ResNet\n'), ((4256, 4273), 'numpy.argmax', 'np.argmax', (['y_pred'], {}), '(y_pred)\n', (4265, 4273), True, 'import numpy as np\n'), ((2927, 2938), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2935, 2938), True, 'import numpy as np\n'), ((2940, 2958), 'numpy.array', 'np.array', (['y_policy'], {}), '(y_policy)\n', (2948, 2958), True, 'import numpy as np\n'), ((2960, 2977), 'numpy.array', 'np.array', (['y_value'], {}), '(y_value)\n', (2968, 2977), True, 'import numpy as np\n'), ((3461, 3488), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'momentum': '(0.9)'}), '(lr=0.001, momentum=0.9)\n', (3464, 3488), False, 'from keras.optimizers import SGD\n'), ((4596, 4618), 'FourInARow.Gamelogic.FourInARow', 'Gamelogic.FourInARow', ([], {}), '()\n', (4616, 4618), False, 'from FourInARow import Gamelogic\n'), ((3857, 3889), 'loss.softmax', 'softmax', (['y_pol[num]', 'raw[0][num]'], {}), '(y_pol[num], raw[0][num])\n', (3864, 3889), False, 'from loss import softmax_cross_entropy_with_logits, softmax\n'), ((4686, 4707), 'FourInARow.Gamelogic.TicTacToe', 'Gamelogic.TicTacToe', ([], {}), '()\n', (4705, 4707), False, 'from FourInARow import Gamelogic\n')] |
import string
import numpy as np
from scipy.spatial import distance
def softmax(array):
    """Numerically stable softmax: exponentials are shifted by the array max."""
    shifted_exp = np.exp(array - np.max(array))
    return shifted_exp / np.sum(shifted_exp)
def cosine_similarity(a, b):
    """Cosine similarity between two vectors: dot(a, b) / (|a| * |b|)."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
def norm_hamming(string1, string2):
    """Normalized Hamming distance (fraction of positions that differ)."""
    chars1 = list(string1)
    chars2 = list(string2)
    return distance.hamming(chars1, chars2)
def jaccard_binary(x, y):
    """Jaccard similarity of two binary vectors: |x AND y| / |x OR y|."""
    overlap = np.logical_and(x, y).sum()
    combined = np.logical_or(x, y).sum()
    return overlap / float(combined)
def jaccard_similarity(doc1, doc2):
    """Jaccard similarity between the word sets of two documents.

    Both documents are lowercased and split on whitespace; the score is
    |intersection| / |union| of the resulting word sets.

    Bug fix: the original raised ZeroDivisionError when both documents were
    empty; two empty word sets are now treated as identical (score 1.0).
    """
    words_doc1 = set(doc1.lower().split())
    words_doc2 = set(doc2.lower().split())
    union = words_doc1 | words_doc2
    if not union:
        # Both documents contain no words: trivially identical.
        return 1.0
    intersection = words_doc1 & words_doc2
    return float(len(intersection)) / len(union)
| [
"numpy.logical_and",
"numpy.logical_or",
"numpy.max",
"numpy.dot",
"numpy.linalg.norm"
] | [((603, 623), 'numpy.logical_and', 'np.logical_and', (['x', 'y'], {}), '(x, y)\n', (617, 623), True, 'import numpy as np\n'), ((636, 655), 'numpy.logical_or', 'np.logical_or', (['x', 'y'], {}), '(x, y)\n', (649, 655), True, 'import numpy as np\n'), ((308, 320), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (314, 320), True, 'import numpy as np\n'), ((322, 339), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (336, 339), True, 'import numpy as np\n'), ((340, 357), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (354, 357), True, 'import numpy as np\n'), ((180, 193), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (186, 193), True, 'import numpy as np\n'), ((216, 229), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (222, 229), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
import numpy as np
import tf_utils as tu
def getT_wue_w():
    """4x4 homogeneous transform that flips the Y axis.

    Presumably maps between the UE world frame and the standard world frame
    (left- vs right-handed), per the naming convention T_<to>_<from>.
    """
    flip_y = np.eye(4)
    flip_y[1, 1] = -1.0
    return flip_y
def getT_w_wue():
    """Inverse of getT_wue_w()."""
    T_wue_w = getT_wue_w()
    return np.linalg.inv(T_wue_w)
def getT_c_cue():
    """4x4 axis-permutation transform between the two camera conventions.

    NOTE(review): the local variable in the original was named T_cue_c even
    though the function is getT_c_cue; the returned matrix is unchanged.
    """
    perm = np.zeros((4, 4))
    perm[0, 1] = 1.0
    perm[1, 2] = -1.0
    perm[2, 0] = 1.0
    perm[3, 3] = 1.0
    return perm
def getT_cue_c():
    """Inverse of getT_c_cue()."""
    T_c_cue = getT_c_cue()
    return np.linalg.inv(T_c_cue)
def DEP_eulerToRotmatUE(roll_deg, pitch_deg, yaw_deg):
    """DEPRECATED Euler-angle -> rotation-matrix conversion for UE poses.

    Known to be wrong (see the assert below, which makes the body
    unreachable); kept only for reference.  Use eulerToRotmatUE() instead.
    """
    # the code below is from
    # https://github.com/EpicGames/UnrealEngine/blob/release/Engine/Source/Runtime/Core/Public/Math/RotationTranslationMatrix.h#L32
    # but the inverse of the yaw angle is needed to make things work...
    assert False, "This implementation is wrong."
    roll = np.deg2rad(roll_deg)
    pitch = np.deg2rad(pitch_deg)
    yaw = np.deg2rad(-yaw_deg)
    sr, cr = np.sin(roll), np.cos(roll)
    sp, cp = np.sin(pitch), np.cos(pitch)
    sy, cy = np.sin(yaw), np.cos(yaw)
    R = np.zeros((3, 3))
    R[0, 0] = cp * cy
    R[0, 1] = cp * sy
    R[0, 2] = sp
    R[1, 0] = sr * sp * cy - cr * sy
    R[1, 1] = sr * sp * sy + cr * cy
    R[1, 2] = - sr * cp
    R[2, 0] = - (cr * sp * cy + sr * sy)
    R[2, 1] = cy * sr - cr * sp * sy
    R[2, 2] = cr * cp
    return R
# Consistent with the UE reference implementation:
# https://github.com/EpicGames/UnrealEngine/blob/f794321ffcad597c6232bc706304c0c9b4e154b2/Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp#L540
def eulerToRotmatUE(roll_deg, pitch_deg, yaw_deg):
    """Rotation matrix for UE Euler angles: R = Rz(yaw) @ Ry(-pitch) @ Rx(-roll)."""
    rotations = [
        tu.RotMatZ(yaw_deg),
        tu.RotMatY(-pitch_deg),
        tu.RotMatX(-roll_deg),
    ]
    return np.linalg.multi_dot(rotations)
def xyzpyrToTwcUE(xyzpyr):
    """Build a 4x4 UE camera pose from [x, y, z, pitch, yaw, roll] (degrees)."""
    position = xyzpyr[0:3]
    pitch, yaw, roll = xyzpyr[3], xyzpyr[4], xyzpyr[5]
    Twc_ue = np.eye(4)
    Twc_ue[0:3, 0:3] = eulerToRotmatUE(roll, pitch, yaw)
    Twc_ue[0:3, 3] = np.array(position)
    return Twc_ue
def ueTwcToStandard(Twc_ue):
    """Convert a UE-convention camera pose into the standard convention."""
    world_fix = getT_w_wue()
    cam_fix = getT_cue_c()
    return np.linalg.multi_dot([world_fix, Twc_ue, cam_fix])
def standardTwcToUE(Twc):
    """Convert a standard-convention camera pose into the UE convention."""
    world_fix = getT_wue_w()
    cam_fix = getT_c_cue()
    return np.linalg.multi_dot([world_fix, Twc, cam_fix])
if __name__ == '__main__':
    # Library of transform helpers only; nothing to run when invoked directly.
    pass
| [
"numpy.eye",
"numpy.linalg.multi_dot",
"numpy.sin",
"numpy.array",
"numpy.deg2rad",
"numpy.zeros",
"numpy.cos",
"tf_utils.RotMatZ",
"tf_utils.RotMatX",
"tf_utils.RotMatY"
] | [((100, 109), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (106, 109), True, 'import numpy as np\n'), ((245, 261), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (253, 261), True, 'import numpy as np\n'), ((782, 802), 'numpy.deg2rad', 'np.deg2rad', (['roll_deg'], {}), '(roll_deg)\n', (792, 802), True, 'import numpy as np\n'), ((815, 836), 'numpy.deg2rad', 'np.deg2rad', (['pitch_deg'], {}), '(pitch_deg)\n', (825, 836), True, 'import numpy as np\n'), ((847, 867), 'numpy.deg2rad', 'np.deg2rad', (['(-yaw_deg)'], {}), '(-yaw_deg)\n', (857, 867), True, 'import numpy as np\n'), ((998, 1014), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1006, 1014), True, 'import numpy as np\n'), ((1523, 1544), 'tf_utils.RotMatX', 'tu.RotMatX', (['(-roll_deg)'], {}), '(-roll_deg)\n', (1533, 1544), True, 'import tf_utils as tu\n'), ((1559, 1581), 'tf_utils.RotMatY', 'tu.RotMatY', (['(-pitch_deg)'], {}), '(-pitch_deg)\n', (1569, 1581), True, 'import tf_utils as tu\n'), ((1594, 1613), 'tf_utils.RotMatZ', 'tu.RotMatZ', (['yaw_deg'], {}), '(yaw_deg)\n', (1604, 1613), True, 'import tf_utils as tu\n'), ((1626, 1671), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[R_yaw, R_pitch, R_roll]'], {}), '([R_yaw, R_pitch, R_roll])\n', (1645, 1671), True, 'import numpy as np\n'), ((1758, 1767), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1764, 1767), True, 'import numpy as np\n'), ((1852, 1865), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (1860, 1865), True, 'import numpy as np\n'), ((882, 894), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (888, 894), True, 'import numpy as np\n'), ((896, 908), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (902, 908), True, 'import numpy as np\n'), ((922, 935), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (928, 935), True, 'import numpy as np\n'), ((937, 950), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (943, 950), True, 'import numpy as np\n'), ((964, 975), 'numpy.sin', 'np.sin', (['yaw'], 
{}), '(yaw)\n', (970, 975), True, 'import numpy as np\n'), ((977, 988), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (983, 988), True, 'import numpy as np\n')] |
#
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import pytest
from sklearn.metrics import accuracy_score
from cuml.naive_bayes import MultinomialNB
from cuml.naive_bayes import BernoulliNB
from cuml.common.input_utils import sparse_scipy_to_cp
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.naive_bayes import MultinomialNB as skNB
from sklearn.naive_bayes import BernoulliNB as skBNB
import math
import numpy as np
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_basic_fit_predict_sparse(x_dtype, y_dtype, nlp_20news):
"""
Cupy Test
"""
X, y = nlp_20news
X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
y = y.astype(y_dtype)
# Priming it seems to lower the end-to-end runtime
model = MultinomialNB()
model.fit(X, y)
cp.cuda.Stream.null.synchronize()
model = MultinomialNB()
model.fit(X, y)
y_hat = model.predict(X)
y_hat = cp.asnumpy(y_hat)
y = cp.asnumpy(y)
assert accuracy_score(y, y_hat) >= 0.924
@pytest.mark.parametrize("x_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_sparse_integral_dtype_fails(x_dtype, y_dtype, nlp_20news):
X, y = nlp_20news
X = X.astype(x_dtype)
y = y.astype(y_dtype)
# Priming it seems to lower the end-to-end runtime
model = MultinomialNB()
with pytest.raises(ValueError):
model.fit(X, y)
X = X.astype(cp.float32)
model.fit(X, y)
X = X.astype(x_dtype)
with pytest.raises(ValueError):
model.predict(X)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64,
cp.int32])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_basic_fit_predict_dense_numpy(x_dtype, y_dtype,
nlp_20news):
"""
Cupy Test
"""
X, y = nlp_20news
n_rows = 500
X = sparse_scipy_to_cp(X, cp.float32).tocsr()[:n_rows]
y = y[:n_rows].astype(y_dtype)
model = MultinomialNB()
model.fit(np.ascontiguousarray(cp.asnumpy(X.todense()).astype(x_dtype)), y)
y_hat = model.predict(X).get()
modelsk = skNB()
modelsk.fit(X.get(), y.get())
y_sk = model.predict(X.get())
assert_allclose(y_hat, y_sk)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32,
cp.float32, cp.float64])
def test_multinomial_partial_fit(x_dtype, y_dtype, nlp_20news):
chunk_size = 500
X, y = nlp_20news
X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
y = y.astype(y_dtype)
X = X.tocsr()
model = MultinomialNB()
classes = np.unique(y)
total_fit = 0
for i in range(math.ceil(X.shape[0] / chunk_size)):
upper = i*chunk_size+chunk_size
if upper > X.shape[0]:
upper = -1
if upper > 0:
x = X[i*chunk_size:upper]
y_c = y[i*chunk_size:upper]
else:
x = X[i*chunk_size:]
y_c = y[i*chunk_size:]
model.partial_fit(x, y_c, classes=classes)
total_fit += (upper - (i*chunk_size))
if upper == -1:
break
y_hat = model.predict(X)
y_hat = cp.asnumpy(y_hat)
y = cp.asnumpy(y)
assert accuracy_score(y, y_hat) >= 0.924
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_predict_proba(x_dtype, y_dtype, nlp_20news):
X, y = nlp_20news
cu_X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
cu_y = y.astype(y_dtype)
cu_X = cu_X.tocsr()
y = y.get()
cuml_model = MultinomialNB()
sk_model = skNB()
cuml_model.fit(cu_X, cu_y)
sk_model.fit(X, y)
cuml_proba = cuml_model.predict_proba(cu_X).get()
sk_proba = sk_model.predict_proba(X)
assert_allclose(cuml_proba, sk_proba, atol=1e-6, rtol=1e-2)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_predict_log_proba(x_dtype, y_dtype, nlp_20news):
X, y = nlp_20news
cu_X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
cu_y = y.astype(y_dtype)
cu_X = cu_X.tocsr()
y = y.get()
cuml_model = MultinomialNB()
sk_model = skNB()
cuml_model.fit(cu_X, cu_y)
sk_model.fit(X, y)
cuml_proba = cuml_model.predict_log_proba(cu_X).get()
sk_proba = sk_model.predict_log_proba(X)
assert_allclose(cuml_proba, sk_proba, atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_score(x_dtype, y_dtype, nlp_20news):
X, y = nlp_20news
cu_X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
cu_y = y.astype(y_dtype)
cu_X = cu_X.tocsr()
y = y.get()
cuml_model = MultinomialNB()
sk_model = skNB()
cuml_model.fit(cu_X, cu_y)
sk_model.fit(X, y)
cuml_score = cuml_model.score(cu_X, cu_y)
sk_score = sk_model.score(X, y)
THRES = 1e-4
assert sk_score - THRES <= cuml_score <= sk_score + THRES
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("is_sparse", [True, False])
def test_bernoulli(x_dtype, y_dtype, is_sparse, nlp_20news):
X, y = nlp_20news
n_rows = 500
X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
y = y.astype(y_dtype)
X = X.tocsr()[:n_rows]
y = y[:n_rows]
if not is_sparse:
X = X.todense()
sk_model = skBNB()
cuml_model = BernoulliNB()
sk_model.fit(X.get(), y.get())
cuml_model.fit(X, y)
sk_score = sk_model.score(X.get(), y.get())
cuml_score = cuml_model.score(X, y)
cuml_proba = cuml_model.predict_log_proba(X).get()
sk_proba = sk_model.predict_log_proba(X.get())
THRES = 1e-3
assert_array_equal(sk_model.class_count_, cuml_model.class_count_.get())
assert_allclose(sk_model.class_log_prior_,
cuml_model.class_log_prior_.get(), 1e-6)
assert_allclose(cuml_proba, sk_proba, atol=1e-2, rtol=1e-2)
assert sk_score - THRES <= cuml_score <= sk_score + THRES
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32,
cp.float32, cp.float64])
def test_bernoulli_partial_fit(x_dtype, y_dtype, nlp_20news):
chunk_size = 500
X, y = nlp_20news
X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
y = y.astype(y_dtype)
X = X.tocsr()
model = BernoulliNB()
modelsk = skBNB()
classes = np.unique(y)
for i in range(math.ceil(X.shape[0] / chunk_size)):
upper = i*chunk_size+chunk_size
if upper > X.shape[0]:
upper = -1
if upper > 0:
x = X[i*chunk_size:upper]
y_c = y[i*chunk_size:upper]
else:
x = X[i*chunk_size:]
y_c = y[i*chunk_size:]
model.partial_fit(x, y_c, classes=classes)
modelsk.partial_fit(x.get(), y_c.get(), classes=classes.get())
if upper == -1:
break
y_hat = model.predict(X).get()
y_sk = modelsk.predict(X.get())
assert_allclose(y_hat, y_sk)
| [
"cupy.asnumpy",
"cupy.cuda.Stream.null.synchronize",
"numpy.unique",
"math.ceil",
"numpy.testing.assert_allclose",
"pytest.mark.parametrize",
"sklearn.naive_bayes.MultinomialNB",
"cuml.naive_bayes.BernoulliNB",
"pytest.raises",
"sklearn.naive_bayes.BernoulliNB",
"cuml.naive_bayes.MultinomialNB",... | [((1020, 1080), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (1043, 1080), False, 'import pytest\n'), ((1082, 1138), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (1105, 1138), False, 'import pytest\n'), ((1675, 1731), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.int32, cp.int64]'], {}), "('x_dtype', [cp.int32, cp.int64])\n", (1698, 1731), False, 'import pytest\n'), ((1733, 1789), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (1756, 1789), False, 'import pytest\n'), ((2220, 2290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64, cp.int32]'], {}), "('x_dtype', [cp.float32, cp.float64, cp.int32])\n", (2243, 2290), False, 'import pytest\n'), ((2329, 2385), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (2352, 2385), False, 'import pytest\n'), ((2955, 3015), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (2978, 3015), False, 'import pytest\n'), ((3017, 3087), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.float32, cp.float64]'], {}), "('y_dtype', [cp.int32, cp.float32, cp.float64])\n", (3040, 3087), False, 'import pytest\n'), ((4019, 4079), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (4042, 4079), False, 'import pytest\n'), ((4081, 4137), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', 
'[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (4104, 4137), False, 'import pytest\n'), ((4633, 4693), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (4656, 4693), False, 'import pytest\n'), ((4695, 4751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (4718, 4751), False, 'import pytest\n'), ((5259, 5319), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (5282, 5319), False, 'import pytest\n'), ((5321, 5377), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (5344, 5377), False, 'import pytest\n'), ((5868, 5928), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (5891, 5928), False, 'import pytest\n'), ((5930, 5986), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.int64]'], {}), "('y_dtype', [cp.int32, cp.int64])\n", (5953, 5986), False, 'import pytest\n'), ((5988, 6039), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_sparse"""', '[True, False]'], {}), "('is_sparse', [True, False])\n", (6011, 6039), False, 'import pytest\n'), ((6959, 7019), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_dtype"""', '[cp.float32, cp.float64]'], {}), "('x_dtype', [cp.float32, cp.float64])\n", (6982, 7019), False, 'import pytest\n'), ((7021, 7091), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y_dtype"""', '[cp.int32, cp.float32, cp.float64]'], {}), "('y_dtype', [cp.int32, cp.float32, cp.float64])\n", (7044, 7091), False, 'import pytest\n'), ((1419, 1434), 'cuml.naive_bayes.MultinomialNB', 
'MultinomialNB', ([], {}), '()\n', (1432, 1434), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((1460, 1493), 'cupy.cuda.Stream.null.synchronize', 'cp.cuda.Stream.null.synchronize', ([], {}), '()\n', (1491, 1493), True, 'import cupy as cp\n'), ((1507, 1522), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (1520, 1522), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((1586, 1603), 'cupy.asnumpy', 'cp.asnumpy', (['y_hat'], {}), '(y_hat)\n', (1596, 1603), True, 'import cupy as cp\n'), ((1612, 1625), 'cupy.asnumpy', 'cp.asnumpy', (['y'], {}), '(y)\n', (1622, 1625), True, 'import cupy as cp\n'), ((2001, 2016), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2014, 2016), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((2696, 2711), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2709, 2711), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((2843, 2849), 'sklearn.naive_bayes.MultinomialNB', 'skNB', ([], {}), '()\n', (2847, 2849), True, 'from sklearn.naive_bayes import MultinomialNB as skNB\n'), ((2923, 2951), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_hat', 'y_sk'], {}), '(y_hat, y_sk)\n', (2938, 2951), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3347, 3362), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (3360, 3362), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((3378, 3390), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3387, 3390), True, 'import numpy as np\n'), ((3930, 3947), 'cupy.asnumpy', 'cp.asnumpy', (['y_hat'], {}), '(y_hat)\n', (3940, 3947), True, 'import cupy as cp\n'), ((3956, 3969), 'cupy.asnumpy', 'cp.asnumpy', (['y'], {}), '(y)\n', (3966, 3969), True, 'import cupy as cp\n'), ((4375, 4390), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (4388, 4390), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((4406, 4412), 
'sklearn.naive_bayes.MultinomialNB', 'skNB', ([], {}), '()\n', (4410, 4412), True, 'from sklearn.naive_bayes import MultinomialNB as skNB\n'), ((4570, 4630), 'numpy.testing.assert_allclose', 'assert_allclose', (['cuml_proba', 'sk_proba'], {'atol': '(1e-06)', 'rtol': '(0.01)'}), '(cuml_proba, sk_proba, atol=1e-06, rtol=0.01)\n', (4585, 4630), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4993, 5008), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (5006, 5008), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((5024, 5030), 'sklearn.naive_bayes.MultinomialNB', 'skNB', ([], {}), '()\n', (5028, 5030), True, 'from sklearn.naive_bayes import MultinomialNB as skNB\n'), ((5196, 5255), 'numpy.testing.assert_allclose', 'assert_allclose', (['cuml_proba', 'sk_proba'], {'atol': '(0.01)', 'rtol': '(0.01)'}), '(cuml_proba, sk_proba, atol=0.01, rtol=0.01)\n', (5211, 5255), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((5607, 5622), 'cuml.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (5620, 5622), False, 'from cuml.naive_bayes import MultinomialNB\n'), ((5638, 5644), 'sklearn.naive_bayes.MultinomialNB', 'skNB', ([], {}), '()\n', (5642, 5644), True, 'from sklearn.naive_bayes import MultinomialNB as skNB\n'), ((6331, 6338), 'sklearn.naive_bayes.BernoulliNB', 'skBNB', ([], {}), '()\n', (6336, 6338), True, 'from sklearn.naive_bayes import BernoulliNB as skBNB\n'), ((6356, 6369), 'cuml.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {}), '()\n', (6367, 6369), False, 'from cuml.naive_bayes import BernoulliNB\n'), ((6834, 6893), 'numpy.testing.assert_allclose', 'assert_allclose', (['cuml_proba', 'sk_proba'], {'atol': '(0.01)', 'rtol': '(0.01)'}), '(cuml_proba, sk_proba, atol=0.01, rtol=0.01)\n', (6849, 6893), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((7349, 7362), 'cuml.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {}), '()\n', (7360, 7362), False, 
'from cuml.naive_bayes import BernoulliNB\n'), ((7377, 7384), 'sklearn.naive_bayes.BernoulliNB', 'skBNB', ([], {}), '()\n', (7382, 7384), True, 'from sklearn.naive_bayes import BernoulliNB as skBNB\n'), ((7400, 7412), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (7409, 7412), True, 'import numpy as np\n'), ((7990, 8018), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_hat', 'y_sk'], {}), '(y_hat, y_sk)\n', (8005, 8018), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((1638, 1662), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1652, 1662), False, 'from sklearn.metrics import accuracy_score\n'), ((2027, 2052), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2040, 2052), False, 'import pytest\n'), ((2165, 2190), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2178, 2190), False, 'import pytest\n'), ((3430, 3464), 'math.ceil', 'math.ceil', (['(X.shape[0] / chunk_size)'], {}), '(X.shape[0] / chunk_size)\n', (3439, 3464), False, 'import math\n'), ((3982, 4006), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (3996, 4006), False, 'from sklearn.metrics import accuracy_score\n'), ((7433, 7467), 'math.ceil', 'math.ceil', (['(X.shape[0] / chunk_size)'], {}), '(X.shape[0] / chunk_size)\n', (7442, 7467), False, 'import math\n'), ((1278, 1308), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (1296, 1308), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((3242, 3272), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (3260, 3272), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((4239, 4269), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (4257, 4269), False, 'from 
cuml.common.input_utils import sparse_scipy_to_cp\n'), ((4857, 4887), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (4875, 4887), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((5471, 5501), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (5489, 5501), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((6149, 6179), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (6167, 6179), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((7244, 7274), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'x_dtype'], {}), '(X, x_dtype)\n', (7262, 7274), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n'), ((2597, 2630), 'cuml.common.input_utils.sparse_scipy_to_cp', 'sparse_scipy_to_cp', (['X', 'cp.float32'], {}), '(X, cp.float32)\n', (2615, 2630), False, 'from cuml.common.input_utils import sparse_scipy_to_cp\n')] |
# Change: Modifying so that the ends are straight coarse bricks
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
import csv
import os
def NodeGen2DV45(x0,xl,y0,yl,z0,elemLenX,elemLenY,numElemX,numElemY,shiftX,shiftY):
    """Generate the node table [id, x, y, z] for a 45-degree staggered grid.

    Rows of numElemX nodes at y = y0 + j*elemLenY alternate with staggered
    rows of numElemX - 1 nodes offset by (shiftX, shiftY); a final plain row
    closes the grid.  Node ids are consecutive integers starting at 1.
    NOTE(review): yl and elemLenX are accepted but unused, as in the
    original implementation; requires numElemY >= 2.
    """
    main_xs = np.linspace(x0, xl, numElemX)
    stag_xs = np.linspace(x0 + shiftX, xl - shiftX, numElemX - 1)

    rows = []
    node_id = 1
    for j in range(int(numElemY) - 1):
        base_y = y0 + j * elemLenY
        for x in main_xs:
            rows.append([node_id, x, base_y, z0])
            node_id += 1
        for x in stag_xs:
            rows.append([node_id, x, base_y + shiftY, z0])
            node_id += 1
    # Closing (top) row of main nodes.
    top_y = y0 + (j + 1) * elemLenY
    for x in main_xs:
        rows.append([node_id, x, top_y, z0])
        node_id += 1
    return np.array(rows)
def NodeGen2DV90(x0,xl,y0,yl,z0,elemLenX,elemLenY,numElemX,numElemY):
    """Generate the node table [id, x, y, z] for a regular (90-degree) grid.

    Produces numElemY rows of numElemX nodes, rows spaced elemLenY apart in
    y.  Returns (Node, elemLenX, elemLenY) where elemLenX is recomputed from
    the actual node spacing.  NOTE(review): yl and the incoming elemLenX are
    unused, matching the original implementation.
    """
    xs = np.linspace(x0, xl, numElemX)
    rows = []
    node_id = 1
    for j in range(int(numElemY)):
        row_y = y0 + j * elemLenY
        for x in xs:
            rows.append([node_id, x, row_y, z0])
            node_id += 1
    Node = np.array(rows)
    return Node, xs[1] - xs[0], elemLenY
def FindNodes(loc, Node):
    """Return the rows of Node whose (x, y) exactly match each point in loc.

    loc is an (n, 2) array of target coordinates; the matches for each
    target are concatenated in order.  Comparison is exact (==), so the
    coordinates must be exactly representable.
    """
    selected = np.array([[0, 0, 0, 0]])  # dummy row, stripped before return
    for xi, yi in loc:
        candidates = Node[Node[:, 1] == xi]
        selected = np.append(selected, candidates[candidates[:, 2] == yi], axis=0)
    return selected[1:]
def FindNodeRange(loc, Node, elemLenX, elemLenY):
    """Rows of Node inside the box [x, x + 1.5*elemLenX] x [y, y + 1.5*elemLenY].

    A 1e-5 tolerance is subtracted from the lower-left corner so nodes lying
    exactly on it are not lost to floating-point round-off.
    """
    x_lo = loc[0] - 1e-5
    y_lo = loc[1] - 1e-5
    x_hi = x_lo + 1.5 * elemLenX
    y_hi = y_lo + 1.5 * elemLenY
    in_box = ((Node[:, 1] >= x_lo) & (Node[:, 1] <= x_hi)
              & (Node[:, 2] >= y_lo) & (Node[:, 2] <= y_hi))
    return Node[in_box]
def FindBoundariesV2(Node,x0,xl,y0,yl,numElemX,numElemY):
    """Collect boundary node sets of the rectangle [x0, xl] x [y0, yl].

    Returns (NCorners, NBtmEdge, NTopEdge, NLeftEdge, NRightEdge, NBoundary)
    where NCorners holds only the (x0, y0) corner and NBoundary concatenates
    bottom, right, top and left edges (so corner nodes appear twice).
    """
    def edge_nodes(xs, ys):
        # Look up the nodes sitting on the given coordinate lists.
        return FindNodes(np.transpose(np.array([xs, ys])), Node)

    NCorners = FindNodes(np.array([[x0, y0]]), Node)

    x_span = np.linspace(x0, xl, numElemX)
    y_span = np.linspace(y0, yl, numElemY)

    NBtmEdge = edge_nodes(x_span, np.ones(np.shape(x_span)) * y0)
    NTopEdge = edge_nodes(x_span, np.ones(np.shape(x_span)) * yl)
    NLeftEdge = edge_nodes(np.ones(np.shape(y_span)) * x0, y_span)
    NRightEdge = edge_nodes(np.ones(np.shape(y_span)) * xl, y_span)

    NBoundary = np.concatenate((NBtmEdge, NRightEdge, NTopEdge, NLeftEdge), axis=0)
    return NCorners, NBtmEdge, NTopEdge, NLeftEdge, NRightEdge, NBoundary
def FindBoundaries(Node,specLenX,specLenY,elemLenX,elemLenY):
    """Collect corner and edge node sets of the rectangle [0, specLenX] x [0, specLenY].

    Returns (NCorners, NBtmEdge, NTopEdge, NLeftEdge, NRightEdge, NBoundary)
    with NBoundary concatenating bottom, right, top and left edges.
    NOTE(review): np.arange excludes its stop value, so the far-end node of
    each edge (x = specLenX / y = specLenY) is NOT included here, unlike
    FindBoundariesV2 which uses np.linspace -- confirm this is intended.
    """
    # Find corners
    loc=np.array([[0,0],[specLenX,0],[0,specLenY],[specLenX,specLenY]])
    NCorners= FindNodes(loc,Node)
    # Find bottom edge
    Xrange=np.arange(0,specLenX,elemLenX)
    Yrange=np.ones(np.shape(Xrange))*0
    loc=np.transpose(np.array([Xrange,Yrange]))
    NBtmEdge= FindNodes(loc,Node)
    # Find top edge
    Xrange=np.arange(0,specLenX,elemLenX)
    Yrange=np.ones(np.shape(Xrange))*specLenY
    loc=np.transpose(np.array([Xrange,Yrange]))
    NTopEdge= FindNodes(loc,Node)
    # Find left edge
    Yrange=np.arange(0,specLenY,elemLenY)
    Xrange=np.ones(np.shape(Yrange))*0
    loc=np.transpose(np.array([Xrange,Yrange]))
    NLeftEdge= FindNodes(loc,Node)
    # Find right edge
    Yrange=np.arange(0,specLenY,elemLenY)
    Xrange=np.ones(np.shape(Yrange))*specLenX
    loc=np.transpose(np.array([Xrange,Yrange]))
    NRightEdge= FindNodes(loc,Node)
    # Concatenate edges in bottom -> right -> top -> left order.
    NBoundary=np.append(NBtmEdge,NRightEdge,axis=0)
    NBoundary=np.append(NBoundary,NTopEdge,axis=0)
    NBoundary=np.append(NBoundary,NLeftEdge,axis=0)
    return NCorners,NBtmEdge,NTopEdge,NLeftEdge,NRightEdge,NBoundary
def DefineElem2D45(Node,NBtmEdge,NTopEdge,NLeftEdge,NRightEdge,shiftX,shiftY):
    """Build 2D connectivity for the 45-degree staggered mesh.

    Four-node quads are formed around every node and three-node elements
    ("pyramids" here) close each boundary edge.  Candidate corner nodes are
    located with nearest-neighbour KD-tree queries at fixed offsets; an
    element is emitted only when all query hits are distinct nodes.
    Returns (ElemQuad, ElemPyrd, eleCount) where eleCount is the next
    unused element id.
    NOTE(review): distance_upper_bound=2 assumes node spacings well below
    2 length units -- confirm for coarser meshes.
    """
    A=spatial.cKDTree(Node[:,1:3])
    # Find nearest
    XYPnt=np.array([0.0,0.0])
    ElemQuad=np.array([[0,0,0,0,0]])
    ElemPyrd=np.array([[0,0,0,0]])
    eleCount=1
    # Triangles along the bottom edge.
    for i in range(0,len(NBtmEdge)):
        idx=np.ones([1,3])*-1
        XYPnt=NBtmEdge[i,1:3]
        distance1,idx1 = A.query([XYPnt[0]+shiftX,XYPnt[1]+shiftY],k=1,distance_upper_bound=2)
        distance2,idx2 = A.query([XYPnt[0],XYPnt[1]],k=1,distance_upper_bound=2)
        distance3,idx3 = A.query([XYPnt[0]+2*shiftX,XYPnt[1]],k=1,distance_upper_bound=2)
        idx=[idx1,idx2,idx3]
        idxTmp=np.unique(idx)
        if len(idxTmp)==3:
            ElemPyrd=np.append(ElemPyrd,[[eleCount,Node[idx[0],0],Node[idx[1],0],Node[idx[2],0] ]],axis=0)
            eleCount=eleCount+1
    # Triangles along the top edge (apex staggered downwards).
    for i in range(0,len(NTopEdge)):
        idx=np.ones([1,3])*-1
        XYPnt=NTopEdge[i,1:3]
        distance1,idx1 = A.query([XYPnt[0]+shiftX,XYPnt[1]-shiftY],k=1,distance_upper_bound=2)
        distance2,idx2 = A.query([XYPnt[0]+2*shiftX,XYPnt[1]],k=1,distance_upper_bound=2)
        distance3,idx3 = A.query([XYPnt[0],XYPnt[1]],k=1,distance_upper_bound=2)
        idx=[idx1,idx2,idx3]
        idxTmp=np.unique(idx)
        if len(idxTmp)==3:
            ElemPyrd=np.append(ElemPyrd,[[eleCount,Node[idx[0],0],Node[idx[1],0],Node[idx[2],0] ]],axis=0)
            eleCount=eleCount+1
    # Triangles along the left edge.
    for i in range(0,len(NLeftEdge)):
        idx=np.ones([1,3])*-1
        XYPnt=NLeftEdge[i,1:3]
        distance1,idx1 = A.query([XYPnt[0],XYPnt[1]],k=1,distance_upper_bound=2)
        distance2,idx2 = A.query([XYPnt[0]+shiftX,XYPnt[1]+shiftY],k=1,distance_upper_bound=2)
        distance3,idx3 = A.query([XYPnt[0],XYPnt[1]+2*shiftY],k=1,distance_upper_bound=2)
        idx=[idx1,idx2,idx3]
        idxTmp=np.unique(idx)
        if len(idxTmp)==3:
            ElemPyrd=np.append(ElemPyrd,[[eleCount,Node[idx[0],0],Node[idx[1],0],Node[idx[2],0] ]],axis=0)
            eleCount=eleCount+1
    # Triangles along the right edge.
    for i in range(0,len(NRightEdge)):
        idx=np.ones([1,3])*-1
        XYPnt=NRightEdge[i,1:3]
        distance1,idx1 = A.query([XYPnt[0],XYPnt[1]+2*shiftY],k=1,distance_upper_bound=2)
        distance2,idx2 = A.query([XYPnt[0]-shiftX,XYPnt[1]+shiftY],k=1,distance_upper_bound=2)
        distance3,idx3 = A.query([XYPnt[0],XYPnt[1]],k=1,distance_upper_bound=2)
        idx=[idx1,idx2,idx3]
        idxTmp=np.unique(idx)
        if len(idxTmp)==3:
            ElemPyrd=np.append(ElemPyrd,[[eleCount,Node[idx[0],0],Node[idx[1],0],Node[idx[2],0] ]],axis=0)
            eleCount=eleCount+1
    # Quads around every node (diamond of four staggered neighbours).
    for i in range(0,len(Node)):
        idx=np.ones([1,4])*-1
        XYPnt=Node[i,1:3]
        distance1,idx1 = A.query([XYPnt[0]+shiftX,XYPnt[1]-shiftY],k=1,distance_upper_bound=2)
        distance2,idx2 = A.query([XYPnt[0]+2*shiftX,XYPnt[1]],k=1,distance_upper_bound=2)
        distance3,idx3 = A.query([XYPnt[0]+shiftX,XYPnt[1]+shiftY],k=1,distance_upper_bound=2)
        distance4,idx4 = A.query([XYPnt[0],XYPnt[1]],k=1,distance_upper_bound=2)
        idx=[idx1,idx2,idx3,idx4]
        idxTmp=np.unique(idx)
        if len(idxTmp)==4:
            ElemQuad=np.append(ElemQuad,[[eleCount,Node[idx[0],0],Node[idx[1],0],Node[idx[2],0],Node[idx[3],0] ]],axis=0)
            eleCount=eleCount+1
    # Strip the dummy first rows used to seed the arrays.
    ElemQuad=ElemQuad[1:len(ElemQuad)]
    ElemPyrd=ElemPyrd[1:len(ElemPyrd)]
    return ElemQuad,ElemPyrd,eleCount
def DefineElem2D90(Node, shiftX, shiftY):
    """Build quad connectivity for a regular grid via nearest-node lookup.

    For every node, the four corners of a candidate quad are located with
    KD-tree nearest-neighbour queries at offsets (0,0), (+shiftX,0),
    (+shiftX,+shiftY) and (0,+shiftY).  A quad is emitted only when the
    four hits are distinct nodes.  Returns (ElemQuad, eleCount) where
    eleCount is the next unused element id.
    """
    tree = spatial.cKDTree(Node[:, 1:3])
    quads = np.array([[0, 0, 0, 0, 0]])  # dummy row, stripped before return
    elem_id = 1
    offsets = ((0.0, 0.0), (shiftX, 0.0), (shiftX, shiftY), (0.0, shiftY))
    for row in Node:
        px, py = row[1], row[2]
        corner_idx = [tree.query([px + dx, py + dy], k=1,
                                 distance_upper_bound=2)[1]
                      for dx, dy in offsets]
        if len(np.unique(corner_idx)) == 4:
            quads = np.append(
                quads,
                [[elem_id] + [Node[k, 0] for k in corner_idx]],
                axis=0)
            elem_id += 1
    return quads[1:len(quads)], elem_id
def NodeGen3D(Node, specLenZ1):
    """Sweep a 2D node layer through the thickness planes in specLenZ1.

    Layer 0 is Node itself; each subsequent plane i copies the in-plane
    coordinates, sets column 3 to specLenZ1[i], and offsets the node ids by
    i * jmp so every layer's ids stay unique.

    Args:
        Node:      array with columns [id, x, y, z].
        specLenZ1: z-coordinate of each node plane (length = number of planes).

    Returns:
        (Node3D, jmp): the stacked node table and the per-layer id offset
        (the next power of ten above the largest 2D node id).
    """
    # Per-layer id offset: smallest power of ten exceeding the max node id.
    jmp = 10 ** np.ceil(np.log10(np.abs(max(Node[:, 0]) + 1)))
    Node3D = Node
    for layer in range(1, len(specLenZ1)):
        plane = np.ones(np.shape(Node))
        plane[:, 0] = Node[:, 0] + layer * jmp      # shifted node ids
        plane[:, 1:3] = Node[:, 1:3]                # same in-plane coords
        plane[:, 3] = specLenZ1[layer]              # this plane's z
        Node3D = np.append(Node3D, plane, axis=0)
    return Node3D, jmp
#def NodeGen3DV2(Node,zt,maxNode):
# jmp=10**(np.ceil(np.log10(np.abs(maxNode + 1))))
#
# # Creating 3D Node points
# Node3D=Node
# NodeTmp=np.ones(np.shape(Node))
# NodeTmp[:,0]=Node[:,0]+np.ones(np.shape(Node[:,0]))*jmp
# NodeTmp[:,1:3]=Node[:,1:3]
# NodeTmp[:,3]=zt
# Node3D=np.append(Node3D,NodeTmp,axis=0)
#
# return Node3D,jmp
def DefineElem3D(ElemQuad, ElemPyrd, jmpNode, specLenZ1, plyStack):
    """Extrude 2D quad/pyramid connectivity through the laminate thickness.

    Each 2D element (triangular "pyramid" or quad) is turned into a 3D element
    by appending its node ids shifted by jmpNode (the next layer's node ids).
    The extrusion is then repeated for every ply in plyStack, offsetting node
    ids by (i-1)*jmpNode and element ids by (i-1)*jmpElem.  Elements are
    routed into one of three buckets keyed on the ply code:
    -1 -> interface (Int), -2 -> cohesive zone (Czm), anything else -> ply.
    For every ply, writeEleSetV2/writeSecOriV2 are called to append the
    corresponding Abaqus element-set and section/orientation records.

    Args:
        ElemQuad:  2D quad element table [elem_id, n1..n4].
        ElemPyrd:  2D triangular element table [elem_id, n1..n3].
        jmpNode:   node-id offset between successive node planes.
        specLenZ1: z-coordinates of the node planes (len = plies + 1).
        plyStack:  per-ply code (angle in degrees, or -1 / -2, see above).

    Returns:
        Tuple of (ElemPyrd3DPly, ElemQuad3DPly, ElemPyrd3DInt, ElemQuad3DInt,
        ElemPyrd3DCzm, ElemQuad3DCzm, ElemSetPly, ElemSetInt, ElemSetCzm).
    """
    # Creating 3D pyramid elements points - 1st ply:
    # second node group = first group shifted to the next node plane.
    EleTmp=ElemPyrd[:,1:len(ElemPyrd)]
    EleTmp=EleTmp+np.ones(np.shape(ElemPyrd[:,1:len(ElemPyrd)]))*jmpNode
    ElemPyrd3D=np.append(ElemPyrd,EleTmp,axis=1)
    ElemPyrd3DPly=ElemPyrd3D
    # Generate dummy initial interface row (stripped before returning).
    ElemPyrd3DInt=np.zeros(np.shape(ElemPyrd3DPly[0,:]))
    ElemPyrd3DInt=ElemPyrd3DInt.reshape(1,len(ElemPyrd3DInt))
    # Generate dummy initial interface CZM row (stripped before returning).
    ElemPyrd3DCzm=np.zeros(np.shape(ElemPyrd3DPly[0,:]))
    ElemPyrd3DCzm=ElemPyrd3DCzm.reshape(1,len(ElemPyrd3DCzm))
    # Creating 3D quad elements points - 1st ply
    EleTmp=ElemQuad[:,1:len(ElemQuad)]
    EleTmp=EleTmp+np.ones(np.shape(ElemQuad[:,1:len(ElemQuad)]))*jmpNode
    ElemQuad3D=np.append(ElemQuad,EleTmp,axis=1)
    ElemQuad3DPly=ElemQuad3D
    # Generate dummy initial interface row (stripped before returning).
    ElemQuad3DInt=np.zeros(np.shape(ElemQuad3DPly[0,:]))
    ElemQuad3DInt=ElemQuad3DInt.reshape(1,len(ElemQuad3DInt))
    # Generate dummy initial interface CZM row (stripped before returning).
    ElemQuad3DCzm=np.zeros(np.shape(ElemQuad3DPly[0,:]))
    ElemQuad3DCzm=ElemQuad3DCzm.reshape(1,len(ElemQuad3DCzm))
    ElemSetPly=[]
    ElemSetInt=[]
    ElemSetCzm=[]
    # Per-ply element-id offset: next power of ten above the max element id.
    jmpElem=10**(np.ceil(np.log10(np.abs(max(ElemQuad3D[:,0]) + 1))))
    for i in range(1,len(specLenZ1)):
        ElemSet=[]
        # Pyramid elements: shift node ids and element ids to ply i.
        EleTmpNds=ElemPyrd3D[:,1:len(ElemPyrd3D)]
        EleTmpNds=EleTmpNds+np.ones(np.shape(ElemPyrd3D[:,1:len(ElemPyrd3D)]))*(i-1)*jmpNode
        EleTmpNums=ElemPyrd3D[:,0]
        EleTmpNums=EleTmpNums+np.ones(np.shape(ElemPyrd3D[:,0]))*(i-1)*jmpElem
        EleTmpNums=EleTmpNums.reshape(len(EleTmpNums),1)
        EleTmpAdd=np.append(EleTmpNums,EleTmpNds,axis=1)
        # NOTE(review): the ElemSet* accumulators below re-append the entire
        # bucket column every ply, so earlier ids appear repeatedly — looks
        # like it was meant to append EleTmpAdd[:,0] only; verify with callers
        # (the driver script currently ignores these return values).
        if plyStack[i-1]==-1:
            ElemPyrd3DInt=np.append(ElemPyrd3DInt,EleTmpAdd,axis=0)
            ElemSetInt=np.append(ElemSetInt,ElemPyrd3DInt[:,0])
        elif plyStack[i-1]==-2:
            ElemPyrd3DCzm=np.append(ElemPyrd3DCzm,EleTmpAdd,axis=0)
            ElemSetCzm=np.append(ElemSetCzm,ElemPyrd3DCzm[:,0])
        else:
            ElemPyrd3DPly=np.append(ElemPyrd3DPly,EleTmpAdd,axis=0)
            ElemSetPly=np.append(ElemSetPly,ElemPyrd3DPly[:,0])
        ElemSet=np.append(ElemSet,EleTmpAdd[:,0])
        # Quad element: same shift-and-route as the pyramids above.
        EleTmpNds=ElemQuad3D[:,1:len(ElemQuad3D)]
        EleTmpNds=EleTmpNds+np.ones(np.shape(ElemQuad3D[:,1:len(ElemQuad3D)]))*(i-1)*jmpNode
        EleTmpNums=ElemQuad3D[:,0]
        EleTmpNums=EleTmpNums+np.ones(np.shape(ElemQuad3D[:,0]))*(i-1)*jmpElem
        EleTmpNums=EleTmpNums.reshape(len(EleTmpNums),1)
        EleTmpAdd=np.append(EleTmpNums,EleTmpNds,axis=1)
        if plyStack[i-1]==-1:
            ElemQuad3DInt=np.append(ElemQuad3DInt,EleTmpAdd,axis=0)
            ElemSetInt=np.append(ElemSetInt,ElemQuad3DInt[:,0])
        elif plyStack[i-1]==-2:
            ElemQuad3DCzm=np.append(ElemQuad3DCzm,EleTmpAdd,axis=0)
            ElemSetCzm=np.append(ElemSetCzm,ElemQuad3DCzm[:,0])
        else:
            ElemQuad3DPly=np.append(ElemQuad3DPly,EleTmpAdd,axis=0)
            ElemSetPly=np.append(ElemSetPly,ElemQuad3DPly[:,0])
        ElemSet=np.append(ElemSet,EleTmpAdd[:,0])
        # Append this ply's element set and section/orientation to the .inp files.
        writeEleSetV2(ElemSet,i)
        writeSecOriV2(ElemSet,plyStack[i-1],i)
    # Delete initial dummy row from each accumulator.
    ElemPyrd3DInt=ElemPyrd3DInt[1:len(ElemPyrd3DInt)]
    ElemQuad3DInt=ElemQuad3DInt[1:len(ElemQuad3DInt)]
    ElemPyrd3DCzm=ElemPyrd3DCzm[1:len(ElemPyrd3DCzm)]
    ElemQuad3DCzm=ElemQuad3DCzm[1:len(ElemQuad3DCzm)]
    return ElemPyrd3DPly,ElemQuad3DPly,ElemPyrd3DInt,ElemQuad3DInt,ElemPyrd3DCzm,ElemQuad3DCzm,ElemSetPly,ElemSetInt,ElemSetCzm
#def DefineElem3DV2(ElemQuad,ElemPyrd,jmpNode):
# # Creating 3D pyramid elements points - 1st ply
# EleTmp=ElemPyrd[:,1:len(ElemPyrd)]
# EleTmp=EleTmp+np.ones(np.shape(ElemPyrd[:,1:len(ElemPyrd)]))*jmpNode
# ElemPyrd3D=np.append(ElemPyrd,EleTmp,axis=1)
# ElemPyrd3D=ElemPyrd3D
#
# # Creating 3D quad elements points - 1st ply
# EleTmp=ElemQuad[:,1:len(ElemQuad)]
# EleTmp=EleTmp+np.ones(np.shape(ElemQuad[:,1:len(ElemQuad)]))*jmpNode
# ElemQuad3D=np.append(ElemQuad,EleTmp,axis=1)
# ElemQuad3D=ElemQuad3D
#
# # initialize element set
# ElemSet=[]
# # increment in element number
# jmpElem=10**(np.ceil(np.log10(np.abs(max(ElemQuad3D[:,0]) + 1))))
#
# # Pyramid elements
# EleTmpNds=ElemPyrd3D[:,1:len(ElemPyrd3D)]
# EleTmpNds=EleTmpNds+np.ones(np.shape(ElemPyrd3D[:,1:len(ElemPyrd3D)]))*jmpNode
# EleTmpNums=ElemPyrd3D[:,0]
# EleTmpNums=EleTmpNums+np.ones(np.shape(ElemPyrd3D[:,0]))*jmpElem
# EleTmpNums=EleTmpNums.reshape(len(EleTmpNums),1)
# EleTmpAdd=np.append(EleTmpNums,EleTmpNds,axis=1)
# ElemPyrd3D=np.append(ElemPyrd3D,EleTmpAdd,axis=0)
# ElemSet=np.append(ElemSet,ElemPyrd3D[:,0])
#
# # Quad element
# EleTmpNds=ElemQuad3D[:,1:len(ElemQuad3D)]
# EleTmpNds=EleTmpNds+np.ones(np.shape(ElemQuad3D[:,1:len(ElemQuad3D)]))*jmpNode
# EleTmpNums=ElemQuad3D[:,0]
# EleTmpNums=EleTmpNums+np.ones(np.shape(ElemQuad3D[:,0]))*jmpElem
# EleTmpNums=EleTmpNums.reshape(len(EleTmpNums),1)
# EleTmpAdd=np.append(EleTmpNums,EleTmpNds,axis=1)
# ElemQuad3D=np.append(ElemQuad3D,EleTmpAdd,axis=0)
# ElemSet=np.append(ElemSet,ElemQuad3D[:,0])
#
# return ElemPyrd3D,ElemQuad3D,ElemSet
def DefineThk(specLenZ0, PlyStack, thkPly, thkInt, thkCzm):
    """Accumulate the z-coordinate of every node plane through the laminate.

    Starting from specLenZ0, each entry of PlyStack adds one layer thickness:
    code -1 adds thkInt (interface), -2 adds thkCzm (cohesive zone), and any
    other code (a fiber angle) adds thkPly.

    Args:
        specLenZ0: z-coordinate of the bottom plane.
        PlyStack:  per-layer code sequence.
        thkPly:    ply thickness.
        thkInt:    interface thickness.
        thkCzm:    cohesive-zone thickness.

    Returns:
        1-D array of plane z-coordinates, length len(PlyStack) + 1.
    """
    # Special layer codes map to their own thickness; everything else is a ply.
    special_thickness = {-1: thkInt, -2: thkCzm}
    levels = [specLenZ0]
    z = specLenZ0
    for code in PlyStack:
        z = z + special_thickness.get(code, thkPly)
        levels.append(z)
    return np.array(levels)
#def writeEleSet(ElemSet):
# f = open('EleSetFile.inp', 'w')
# for i in range(0,len(ElemSet)):
# elemTmp='*ELSET, GENERATE, ELSET=SET'+str(int(ElemSet[i,0]))
# f.write("%s\n" % elemTmp) #
# elemTmp=str(int(ElemSet[i,1]))+','+str(int(ElemSet[i,2]))+str(int(ElemSet[i,1]))+','+str(int(ElemSet[i,2]))+str(int(ElemSet[i,1]))+','+str(int(ElemSet[i,2]))+str(int(ElemSet[i,1]))+','+str(int(ElemSet[i,2]))
# f.write("%s\n" % elemTmp)
# f.close()
def writeEleSetV2(ElemSet, idt):
    """Append an Abaqus *ELSET block named SET<idt> to EleSetFile.inp.

    The element ids are written as CSV rows of eight values each (the Abaqus
    datalines limit), with any remainder on a final shorter row.

    Args:
        ElemSet: 1-D array of element ids (cast to int before writing).
        idt:     set identifier appended to the 'SET' name.
    """
    ElemSet = ElemSet.astype(int)
    full_rows = len(ElemSet) // 8
    # Single context manager replaces the original open/close + re-open dance;
    # mode 'a+' appends and creates the file if it does not exist yet.
    with open('EleSetFile.inp', 'a+') as f:
        f.write("%s\n" % ('*ELSET, ELSET=SET' + str(idt)))
        writer = csv.writer(f)
        # Complete rows of eight ids each.
        writer.writerows(ElemSet[0:full_rows * 8].reshape(full_rows, 8))
        # Trailing partial row, if any.
        if len(ElemSet) % 8 > 0:
            writer.writerow(ElemSet[full_rows * 8:len(ElemSet)])
def writeNodeSetV2(NodeSet, idt):
    """Append an Abaqus *NSET block named NSET<idt> to NodeSetFile.inp.

    The node ids are written as CSV rows of eight values each (the Abaqus
    datalines limit), with any remainder on a final shorter row.

    Args:
        NodeSet: 1-D array of node ids (cast to int before writing).
        idt:     string identifier appended to the 'NSET' name.
    """
    NodeSet = NodeSet.astype(int)
    full_rows = len(NodeSet) // 8
    # Single context manager replaces the original open/close + re-open dance;
    # mode 'a+' appends and creates the file if it does not exist yet.
    with open('NodeSetFile.inp', 'a+') as f:
        f.write("%s\n" % ('*NSET, NSET=NSET' + idt))
        writer = csv.writer(f)
        # Complete rows of eight ids each.
        writer.writerows(NodeSet[0:full_rows * 8].reshape(full_rows, 8))
        # Trailing partial row, if any.
        if len(NodeSet) % 8 > 0:
            writer.writerow(NodeSet[full_rows * 8:len(NodeSet)])
#def writeSecOri(ElemSet,PlyStack):
# f = open('SecOri.inp', 'w+')
# for i in range(0,len(ElemSet)):
# if PlyStack[i]==-1:
# txtTmp1='*Orientation, name=PlyOri-'+str(int(ElemSet[i,0]))
# txtTmp2='1., 0., 0., 0., 1., 0.,'
# txtTmp3='3, 0'
# txtTmp4='*Solid Section, elset=SET'+str(int(ElemSet[i,0]))+', orientation=PlyOri-'+str(int(ElemSet[i,0]))+', material=matInt'
# elif PlyStack[i]==-2:
# txtTmp1='*Orientation, name=PlyOri-'+str(int(ElemSet[i,0]))
# txtTmp2='1., 0., 0., 0., 1., 0.,'
# txtTmp3='3, 0'
# txtTmp4='*Solid Section, elset=SET'+str(int(ElemSet[i,0]))+', orientation=PlyOri-'+str(int(ElemSet[i,0]))+', material=matCzm'
# else:
# txtTmp1='*Orientation, name=PlyOri-'+str(int(ElemSet[i,0]))
# txtTmp2='1., 0., 0., 0., 1., 0.,'
# txtTmp3='3,'+ str(PlyStack[i])
# txtTmp4='*Solid Section, elset=SET'+str(int(ElemSet[i,0]))+', orientation=PlyOri-'+str(int(ElemSet[i,0]))+', material=matLamina'
# txtTmp5=','
#
# f.write("%s\n" % txtTmp1) #
# f.write("%s\n" % txtTmp2) #
# f.write("%s\n" % txtTmp3) #
# f.write("%s\n" % txtTmp4) #
# f.write("%s\n" % txtTmp5) #
# f.close()
def writeSecOriV2(ElemSet, PlyStack, idt):
    """Append an orientation + solid-section record for one ply to SecOri.inp.

    The five written lines are: *Orientation header, the (identity) direction
    cosines, the rotation axis/angle line, the *Solid Section referencing set
    SET<idt>, and a trailing ',' dataline.

    Args:
        ElemSet:  unused here; kept for signature compatibility with callers.
        PlyStack: this ply's code — -1 = interface (matInt), -2 = cohesive
                  (matCzm), otherwise a fiber angle in degrees (matLamina).
        idt:      ply index used in the set/orientation names.
    """
    # Only the rotation line and the material differ between the three cases;
    # note the interface/CZM angle line is '3, 0' (with a space) while the
    # ply line is '3,<angle>' (no space), matching the original output.
    if PlyStack == -1:
        axis_line, material = '3, 0', 'matInt'
    elif PlyStack == -2:
        axis_line, material = '3, 0', 'matCzm'
    else:
        axis_line, material = '3,' + str(PlyStack), 'matLamina'
    with open('SecOri.inp', 'a+') as f:
        f.write("%s\n" % ('*Orientation, name=PlyOri-' + str(idt)))
        f.write("%s\n" % '1., 0., 0., 0., 1., 0.,')
        f.write("%s\n" % axis_line)
        f.write("%s\n" % ('*Solid Section, elset=SET' + str(idt) +
                          ', orientation=PlyOri-' + str(idt) +
                          ', material=' + material))
        f.write("%s\n" % ',')
def plotElem(Elem, Node):
    """Draw the edges of every 2D element onto the current matplotlib axes.

    Each element row is [elem_id, node ids...]; node coordinates are looked
    up by id in Node (columns [id, x, y, ...]).  Edges are colour-coded by
    position: r, g, k and (for quads) b, closing back to the first vertex.

    Args:
        Elem: element table, triangles (3 ids) or quads (4 ids) per row.
        Node: node table used to resolve ids to x/y coordinates.
    """
    edge_colors = ('r', 'g', 'k', 'b')
    for row in Elem.astype(int):
        x = []
        y = []
        # Resolve each node id to its coordinates.
        for node_id in row[1:]:
            sel = Node[:, 0] == node_id
            x = np.append(x, Node[sel, 1], axis=0)
            y = np.append(y, Node[sel, 2], axis=0)
        #plt.scatter(x,y)
        # Walk the polygon, wrapping the last edge back to vertex 0.
        n_vert = len(row) - 1
        for e in range(n_vert):
            nxt = (e + 1) % n_vert
            plt.plot([x[e], x[nxt]], [y[e], y[nxt]], edge_colors[e])
###############################################################################
# Driver: build a three-region (left grip / 45-deg gauge / right grip) 2D mesh,
# sweep it through the laminate thickness, and emit Abaqus set/orientation
# fragments plus CSV node/element connectivity tables.
###############################################################################
# Inputs
# Stacking sequence: fiber angle in degrees per layer; -1 marks an interface
# layer, -2 marks a cohesive-zone (CZM) layer.
plyStack=[45,-1,-45,-1,45,-1,-45,-45,-1,45,-1,-45,-1,45]
#plyStack=[45,-2,-1,-2,-45,-2,-1,-2,45,-2,-1,-2,-45,-45,-2,-1,-2,45,-2,-1,-2,-45,-2,-1,-2,45]
elemLen=0.025 #0.0015*np.sqrt(2)/2; # desired element size
fiberAngle=45*np.pi/180   # gauge-region fiber angle, radians
specLenX=2.75             # gauge length
blockLen=0.25             # grip block length on each side
specLenYRatio=0.37        # specimen width as fraction of gauge length
thkPly=0.0075             # ply thickness
thkInt=0.0012             # interface thickness
thkCzm=0.0                # cohesive-zone thickness
specLenZ0=0               # z of the bottom node plane
meshAspectRatio=1         # elemLenY = elemLenX * meshAspectRatio
# Non zero start point: x breakpoints of grip / gauge / grip regions
x0=0.0
x1=blockLen
x2=x1+specLenX
x3=x2+blockLen
y0=0
yl=y0+specLenX*specLenYRatio
z0=0
# Delete output files from a previous run.  Fix: the original unconditional
# os.remove raised FileNotFoundError on a fresh run; a missing file is fine.
for stale in ('EleSetFile.inp', 'SecOri.inp', 'NodeSetFile.inp'):
    if os.path.exists(stale):
        os.remove(stale)
# Derived parameters
elemLenX=np.sqrt(2)*elemLen # desired element length X
numElemX=int(np.ceil(specLenX/elemLenX)+1) # calculate desired element count
nodePartTemp=np.linspace(x1,x2,numElemX) # partition based on number of elements requested
elemLenX=nodePartTemp[1]-nodePartTemp[0] # actual element length from partitioning
elemLenY=elemLenX*meshAspectRatio # desired element length Y
numElemY=int(np.ceil(yl/elemLenY)+1)
nodePartTemp=np.linspace(y0,yl,numElemY) # partition based on number of elements requested
elemLenY=nodePartTemp[1]-nodePartTemp[0] # actual element length from partitioning
specLenZ1=DefineThk(specLenZ0,plyStack,thkPly,thkInt,thkCzm)
# Shift distance between staggered node rows in the 45-deg gauge region
shiftX=elemLenX*np.cos(fiberAngle)**2
shiftY=elemLenY*np.cos(fiberAngle)*np.sin(fiberAngle)
# Generate node arrays for the three regions, offsetting ids to keep them unique
NodeL, elemLenXBL,elemLenYBL=NodeGen2DV90(x0,x1,y0,yl,z0,elemLenX,elemLenY,6,numElemY)
maxNodeNum=np.max(NodeL[:,0])
NodeC=NodeGen2DV45(x1,x2,y0,yl,z0,elemLenX,elemLenY,numElemX,numElemY,shiftX,shiftY)
NodeC[:,0]=NodeC[:,0]+maxNodeNum
maxNodeNum=np.max(NodeC[:,0])
NodeR, elemLenXBR,elemLenYBR=NodeGen2DV90(x2,x3,y0,yl,z0,elemLenX,elemLenY,6,numElemY)
NodeR[:,0]=NodeR[:,0]+maxNodeNum
maxNodeNum=np.max(NodeR[:,0])
# Find boundaries
# <NCorners,NEdgeY0,NEdgeY1,NEdgeX0,NEdgeX1,NBoundary>
NCornersL,NEdgeY0L,NEdgeY1L,NEdgeX0L,NEdgeX1L,NBoundaryL=FindBoundariesV2(NodeL,x0,x1,y0,yl,numElemX,numElemY)
NCorners,NEdgeY0,NEdgeY1,NEdgeX0,NEdgeX1,NBoundary=FindBoundariesV2(NodeC,x1,x2,y0,yl,numElemX,numElemY)
NCornersR,NEdgeY0R,NEdgeY1R,NEdgeX0R,NEdgeX1R,NBoundaryR=FindBoundariesV2(NodeR,x2,x3,y0,yl,numElemX,numElemY)
# Stitch the regions: reuse seam node ids on the shared left/center and
# center/right edges so the regions become one connected mesh.
for i in range(0,len(NEdgeX1L)):
    NodeC[(NodeC[:,0]==NEdgeX0[i,0]).nonzero()[0][0],0]=NEdgeX1L[i,0]
for i in range(0,len(NEdgeX1)):
    NodeR[(NodeR[:,0]==NEdgeX0R[i,0]).nonzero()[0][0],0]=NEdgeX1[i,0]
# Define 2D elements
# <ElemQuadL,ElemPyrdL,maxElemNoL>
ElemQuadL,maxElemNoL=DefineElem2D90(NodeL,elemLenXBL,elemLenYBL)
# Fix: renamed from maxNodeNum — this variable tracks ELEMENT ids, not node ids.
maxElemNum=np.max(ElemQuadL[:,0])
ElemQuadC,ElemPyrdC,maxElemNoC=DefineElem2D45(NodeC,NEdgeY0,NEdgeY1,NEdgeX0,NEdgeX1,shiftX,shiftY)
ElemPyrdC[:,0]=ElemPyrdC[:,0]+maxElemNum
ElemQuadC[:,0]=ElemQuadC[:,0]+maxElemNum
maxElemNum=np.max(ElemQuadC[:,0])
ElemQuadR,maxElemNoR=DefineElem2D90(NodeR,elemLenXBR,elemLenYBR)
ElemQuadR[:,0]=ElemQuadR[:,0]+maxElemNum
maxElemNum=np.max(ElemQuadR[:,0])
##
#plotElem(ElemQuadL,NodeL)
#plotElem(ElemQuadR,NodeR)
#plotElem(ElemQuadC,NodeC)
#plotElem(ElemPyrdC,NodeC)
# Collect nodes; duplicate ids introduced by the seam stitching are dropped.
Node=NodeL
Node=np.append(Node,NodeC,axis=0)
Node=np.append(Node,NodeR,axis=0)
Tmp,NodeIdx=np.unique(Node[:,0],return_index=True)
Node=Node[NodeIdx]
# Collect elements
# Quad elements
ElemQuad=ElemQuadL
ElemQuad=np.append(ElemQuad,ElemQuadC,axis=0)
ElemQuad=np.append(ElemQuad,ElemQuadR,axis=0)
# Pyramid elements
ElemPyrd=ElemPyrdC
# Find boundaries of the merged 2D mesh
NCorners,NEdgeY0,NEdgeY1,NEdgeX0,NEdgeX1,NBoundary=FindBoundaries(Node,x3,yl,elemLenX,elemLenY)
# Generate 3D nodes using thickness sweep
Node3D,jmpNode=NodeGen3D(Node,specLenZ1)
# Find boundaries of the swept mesh and record the Abaqus node sets
NCorners3D,NEdgeY03D,NEdgeY13D,NEdgeX03D,NEdgeX13D,NBoundary3D=FindBoundariesV2(Node3D,x0,x3,y0,yl,numElemX,numElemY)
writeNodeSetV2(NEdgeX03D[:,0],'X0')
writeNodeSetV2(NEdgeX13D[:,0],'X1')
writeNodeSetV2(NEdgeY03D[:,0],'Y0')
writeNodeSetV2(NEdgeY13D[:,0],'Y1')
writeNodeSetV2(NCorners3D[:,0],'Crns')
writeNodeSetV2(NEdgeX0L[:,0],'X0BtmEdge')
# Define 3D elements (also appends element sets / section orientations)
ElemPyrd3DPly,ElemQuad3DPly,ElemPyrd3DInt,ElemQuad3DInt,ElemPyrd3DCzm,ElemQuad3DCzm,ElemSetPly,ElemSetInt,ElemSetCzm=DefineElem3D(ElemQuad,ElemPyrd,jmpNode,specLenZ1,plyStack)
## Write data to csv file.
np.savetxt("Node3D.csv", Node3D, delimiter=",", fmt=('%1.2i','%1.6f','%1.6f','%1.6f'))
#
np.savetxt("ElemPyrd3DPly.csv", ElemPyrd3DPly, delimiter=",", fmt='%1.2i')
np.savetxt("ElemQuad3DPly.csv", ElemQuad3DPly, delimiter=",", fmt='%1.2i')
#if min(plyStack)==-1:
np.savetxt("ElemPyrd3DInt.csv", ElemPyrd3DInt, delimiter=",", fmt='%1.2i')
np.savetxt("ElemQuad3DInt.csv", ElemQuad3DInt, delimiter=",", fmt='%1.2i')
#if min(plyStack)==-1:
np.savetxt("ElemPyrd3DCzm.csv", ElemPyrd3DCzm, delimiter=",", fmt='%1.2i')
np.savetxt("ElemQuad3DCzm.csv", ElemQuad3DCzm, delimiter=",", fmt='%1.2i')
#
## Print stats
#print('Total nodes: ',max(Node3D[:,0]))
#print('Total pyramid elements - Ply: ',max(ElemPyrd3DPly[:,0]))
#print('Total quad elements - Ply: ',max(ElemQuad3DPly[:,0]))
##if min(plyStack)==-1:
#print('Total pyramid elements - Interface: ',max(ElemPyrd3DInt[:,0]))
#print('Total quad elements - Interface: ',max(ElemQuad3DInt[:,0]))
##if min(plyStack)==-1:
#print('Total pyramid elements - Czm: ',max(ElemPyrd3DCzm[:,0]))
#print('Total quad elements - Czm: ',max(ElemQuad3DCzm[:,0]))
#print('Total elements: ',max(ElemPyrd3DPly[:,0])+max(ElemQuad3DPly[:,0]))
| [
"numpy.ceil",
"numpy.sqrt",
"numpy.unique",
"scipy.spatial.cKDTree",
"numpy.ones",
"csv.writer",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.savetxt",
"numpy.sin",
"numpy.shape",
"numpy.arange",
"os.remove"
] | [((22006, 22033), 'os.remove', 'os.remove', (['"""EleSetFile.inp"""'], {}), "('EleSetFile.inp')\n", (22015, 22033), False, 'import os\n'), ((22035, 22058), 'os.remove', 'os.remove', (['"""SecOri.inp"""'], {}), "('SecOri.inp')\n", (22044, 22058), False, 'import os\n'), ((22060, 22088), 'os.remove', 'os.remove', (['"""NodeSetFile.inp"""'], {}), "('NodeSetFile.inp')\n", (22069, 22088), False, 'import os\n'), ((22263, 22292), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'numElemX'], {}), '(x1, x2, numElemX)\n', (22274, 22292), True, 'import numpy as np\n'), ((22542, 22571), 'numpy.linspace', 'np.linspace', (['y0', 'yl', 'numElemY'], {}), '(y0, yl, numElemY)\n', (22553, 22571), True, 'import numpy as np\n'), ((23006, 23025), 'numpy.max', 'np.max', (['NodeL[:, 0]'], {}), '(NodeL[:, 0])\n', (23012, 23025), True, 'import numpy as np\n'), ((23157, 23176), 'numpy.max', 'np.max', (['NodeC[:, 0]'], {}), '(NodeC[:, 0])\n', (23163, 23176), True, 'import numpy as np\n'), ((23310, 23329), 'numpy.max', 'np.max', (['NodeR[:, 0]'], {}), '(NodeR[:, 0])\n', (23316, 23329), True, 'import numpy as np\n'), ((24086, 24109), 'numpy.max', 'np.max', (['ElemQuadL[:, 0]'], {}), '(ElemQuadL[:, 0])\n', (24092, 24109), True, 'import numpy as np\n'), ((24307, 24330), 'numpy.max', 'np.max', (['ElemQuadC[:, 0]'], {}), '(ElemQuadC[:, 0])\n', (24313, 24330), True, 'import numpy as np\n'), ((24452, 24475), 'numpy.max', 'np.max', (['ElemQuadR[:, 0]'], {}), '(ElemQuadR[:, 0])\n', (24458, 24475), True, 'import numpy as np\n'), ((24630, 24660), 'numpy.append', 'np.append', (['Node', 'NodeC'], {'axis': '(0)'}), '(Node, NodeC, axis=0)\n', (24639, 24660), True, 'import numpy as np\n'), ((24665, 24695), 'numpy.append', 'np.append', (['Node', 'NodeR'], {'axis': '(0)'}), '(Node, NodeR, axis=0)\n', (24674, 24695), True, 'import numpy as np\n'), ((24707, 24747), 'numpy.unique', 'np.unique', (['Node[:, 0]'], {'return_index': '(True)'}), '(Node[:, 0], return_index=True)\n', (24716, 24747), True, 'import numpy 
as np\n'), ((24835, 24873), 'numpy.append', 'np.append', (['ElemQuad', 'ElemQuadC'], {'axis': '(0)'}), '(ElemQuad, ElemQuadC, axis=0)\n', (24844, 24873), True, 'import numpy as np\n'), ((24882, 24920), 'numpy.append', 'np.append', (['ElemQuad', 'ElemQuadR'], {'axis': '(0)'}), '(ElemQuad, ElemQuadR, axis=0)\n', (24891, 24920), True, 'import numpy as np\n'), ((25769, 25862), 'numpy.savetxt', 'np.savetxt', (['"""Node3D.csv"""', 'Node3D'], {'delimiter': '""","""', 'fmt': "('%1.2i', '%1.6f', '%1.6f', '%1.6f')"}), "('Node3D.csv', Node3D, delimiter=',', fmt=('%1.2i', '%1.6f',\n '%1.6f', '%1.6f'))\n", (25779, 25862), True, 'import numpy as np\n'), ((25860, 25934), 'numpy.savetxt', 'np.savetxt', (['"""ElemPyrd3DPly.csv"""', 'ElemPyrd3DPly'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemPyrd3DPly.csv', ElemPyrd3DPly, delimiter=',', fmt='%1.2i')\n", (25870, 25934), True, 'import numpy as np\n'), ((25936, 26010), 'numpy.savetxt', 'np.savetxt', (['"""ElemQuad3DPly.csv"""', 'ElemQuad3DPly'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemQuad3DPly.csv', ElemQuad3DPly, delimiter=',', fmt='%1.2i')\n", (25946, 26010), True, 'import numpy as np\n'), ((26036, 26110), 'numpy.savetxt', 'np.savetxt', (['"""ElemPyrd3DInt.csv"""', 'ElemPyrd3DInt'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemPyrd3DInt.csv', ElemPyrd3DInt, delimiter=',', fmt='%1.2i')\n", (26046, 26110), True, 'import numpy as np\n'), ((26112, 26186), 'numpy.savetxt', 'np.savetxt', (['"""ElemQuad3DInt.csv"""', 'ElemQuad3DInt'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemQuad3DInt.csv', ElemQuad3DInt, delimiter=',', fmt='%1.2i')\n", (26122, 26186), True, 'import numpy as np\n'), ((26212, 26286), 'numpy.savetxt', 'np.savetxt', (['"""ElemPyrd3DCzm.csv"""', 'ElemPyrd3DCzm'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemPyrd3DCzm.csv', ElemPyrd3DCzm, delimiter=',', fmt='%1.2i')\n", (26222, 26286), True, 'import numpy as np\n'), ((26288, 26362), 'numpy.savetxt', 
'np.savetxt', (['"""ElemQuad3DCzm.csv"""', 'ElemQuad3DCzm'], {'delimiter': '""","""', 'fmt': '"""%1.2i"""'}), "('ElemQuad3DCzm.csv', ElemQuad3DCzm, delimiter=',', fmt='%1.2i')\n", (26298, 26362), True, 'import numpy as np\n'), ((291, 320), 'numpy.linspace', 'np.linspace', (['x0', 'xl', 'numElemX'], {}), '(x0, xl, numElemX)\n', (302, 320), True, 'import numpy as np\n'), ((423, 474), 'numpy.linspace', 'np.linspace', (['(x0 + shiftX)', '(xl - shiftX)', '(numElemX - 1)'], {}), '(x0 + shiftX, xl - shiftX, numElemX - 1)\n', (434, 474), True, 'import numpy as np\n'), ((578, 602), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (586, 602), True, 'import numpy as np\n'), ((1294, 1323), 'numpy.linspace', 'np.linspace', (['x0', 'xl', 'numElemX'], {}), '(x0, xl, numElemX)\n', (1305, 1323), True, 'import numpy as np\n'), ((1460, 1484), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (1468, 1484), True, 'import numpy as np\n'), ((2624, 2644), 'numpy.array', 'np.array', (['[[x0, y0]]'], {}), '([[x0, y0]])\n', (2632, 2644), True, 'import numpy as np\n'), ((2721, 2750), 'numpy.linspace', 'np.linspace', (['x0', 'xl', 'numElemX'], {}), '(x0, xl, numElemX)\n', (2732, 2750), True, 'import numpy as np\n'), ((2913, 2942), 'numpy.linspace', 'np.linspace', (['x0', 'xl', 'numElemX'], {}), '(x0, xl, numElemX)\n', (2924, 2942), True, 'import numpy as np\n'), ((3106, 3135), 'numpy.linspace', 'np.linspace', (['y0', 'yl', 'numElemY'], {}), '(y0, yl, numElemY)\n', (3117, 3135), True, 'import numpy as np\n'), ((3301, 3330), 'numpy.linspace', 'np.linspace', (['y0', 'yl', 'numElemY'], {}), '(y0, yl, numElemY)\n', (3312, 3330), True, 'import numpy as np\n'), ((3477, 3516), 'numpy.append', 'np.append', (['NBtmEdge', 'NRightEdge'], {'axis': '(0)'}), '(NBtmEdge, NRightEdge, axis=0)\n', (3486, 3516), True, 'import numpy as np\n'), ((3530, 3568), 'numpy.append', 'np.append', (['NBoundary', 'NTopEdge'], {'axis': '(0)'}), '(NBoundary, NTopEdge, 
axis=0)\n', (3539, 3568), True, 'import numpy as np\n'), ((3582, 3621), 'numpy.append', 'np.append', (['NBoundary', 'NLeftEdge'], {'axis': '(0)'}), '(NBoundary, NLeftEdge, axis=0)\n', (3591, 3621), True, 'import numpy as np\n'), ((3790, 3860), 'numpy.array', 'np.array', (['[[0, 0], [specLenX, 0], [0, specLenY], [specLenX, specLenY]]'], {}), '([[0, 0], [specLenX, 0], [0, specLenY], [specLenX, specLenY]])\n', (3798, 3860), True, 'import numpy as np\n'), ((3931, 3963), 'numpy.arange', 'np.arange', (['(0)', 'specLenX', 'elemLenX'], {}), '(0, specLenX, elemLenX)\n', (3940, 3963), True, 'import numpy as np\n'), ((4125, 4157), 'numpy.arange', 'np.arange', (['(0)', 'specLenX', 'elemLenX'], {}), '(0, specLenX, elemLenX)\n', (4134, 4157), True, 'import numpy as np\n'), ((4327, 4359), 'numpy.arange', 'np.arange', (['(0)', 'specLenY', 'elemLenY'], {}), '(0, specLenY, elemLenY)\n', (4336, 4359), True, 'import numpy as np\n'), ((4524, 4556), 'numpy.arange', 'np.arange', (['(0)', 'specLenY', 'elemLenY'], {}), '(0, specLenY, elemLenY)\n', (4533, 4556), True, 'import numpy as np\n'), ((4709, 4748), 'numpy.append', 'np.append', (['NBtmEdge', 'NRightEdge'], {'axis': '(0)'}), '(NBtmEdge, NRightEdge, axis=0)\n', (4718, 4748), True, 'import numpy as np\n'), ((4762, 4800), 'numpy.append', 'np.append', (['NBoundary', 'NTopEdge'], {'axis': '(0)'}), '(NBoundary, NTopEdge, axis=0)\n', (4771, 4800), True, 'import numpy as np\n'), ((4814, 4853), 'numpy.append', 'np.append', (['NBoundary', 'NLeftEdge'], {'axis': '(0)'}), '(NBoundary, NLeftEdge, axis=0)\n', (4823, 4853), True, 'import numpy as np\n'), ((5017, 5046), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['Node[:, 1:3]'], {}), '(Node[:, 1:3])\n', (5032, 5046), False, 'from scipy import spatial\n'), ((5083, 5103), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5091, 5103), True, 'import numpy as np\n'), ((5117, 5144), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0]])\n', (5125, 5144), True, 
'import numpy as np\n'), ((5155, 5179), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (5163, 5179), True, 'import numpy as np\n'), ((8606, 8635), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['Node[:, 1:3]'], {}), '(Node[:, 1:3])\n', (8621, 8635), False, 'from scipy import spatial\n'), ((8672, 8692), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8680, 8692), True, 'import numpy as np\n'), ((8706, 8733), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0]])\n', (8714, 8733), True, 'import numpy as np\n'), ((10629, 10664), 'numpy.append', 'np.append', (['ElemPyrd', 'EleTmp'], {'axis': '(1)'}), '(ElemPyrd, EleTmp, axis=1)\n', (10638, 10664), True, 'import numpy as np\n'), ((11206, 11241), 'numpy.append', 'np.append', (['ElemQuad', 'EleTmp'], {'axis': '(1)'}), '(ElemQuad, EleTmp, axis=1)\n', (11215, 11241), True, 'import numpy as np\n'), ((16033, 16054), 'numpy.array', 'np.array', (['[specLenZ0]'], {}), '([specLenZ0])\n', (16041, 16054), True, 'import numpy as np\n'), ((22123, 22133), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (22130, 22133), True, 'import numpy as np\n'), ((22861, 22879), 'numpy.sin', 'np.sin', (['fiberAngle'], {}), '(fiberAngle)\n', (22867, 22879), True, 'import numpy as np\n'), ((2010, 2050), 'numpy.append', 'np.append', (['NCorners', 'NCornersTmp'], {'axis': '(0)'}), '(NCorners, NCornersTmp, axis=0)\n', (2019, 2050), True, 'import numpy as np\n'), ((2812, 2838), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (2820, 2838), True, 'import numpy as np\n'), ((3004, 3030), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (3012, 3030), True, 'import numpy as np\n'), ((3197, 3223), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (3205, 3223), True, 'import numpy as np\n'), ((3392, 3418), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (3400, 3418), True, 
'import numpy as np\n'), ((4024, 4050), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (4032, 4050), True, 'import numpy as np\n'), ((4225, 4251), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (4233, 4251), True, 'import numpy as np\n'), ((4420, 4446), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (4428, 4446), True, 'import numpy as np\n'), ((4624, 4650), 'numpy.array', 'np.array', (['[Xrange, Yrange]'], {}), '([Xrange, Yrange])\n', (4632, 4650), True, 'import numpy as np\n'), ((5622, 5636), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (5631, 5636), True, 'import numpy as np\n'), ((6251, 6265), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (6260, 6265), True, 'import numpy as np\n'), ((6882, 6896), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (6891, 6896), True, 'import numpy as np\n'), ((7511, 7525), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (7520, 7525), True, 'import numpy as np\n'), ((8218, 8232), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (8227, 8232), True, 'import numpy as np\n'), ((9251, 9265), 'numpy.unique', 'np.unique', (['idx'], {}), '(idx)\n', (9260, 9265), True, 'import numpy as np\n'), ((9927, 9961), 'numpy.append', 'np.append', (['Node3D', 'NodeTmp'], {'axis': '(0)'}), '(Node3D, NodeTmp, axis=0)\n', (9936, 9961), True, 'import numpy as np\n'), ((10761, 10790), 'numpy.shape', 'np.shape', (['ElemPyrd3DPly[0, :]'], {}), '(ElemPyrd3DPly[0, :])\n', (10769, 10790), True, 'import numpy as np\n'), ((10926, 10955), 'numpy.shape', 'np.shape', (['ElemPyrd3DPly[0, :]'], {}), '(ElemPyrd3DPly[0, :])\n', (10934, 10955), True, 'import numpy as np\n'), ((11342, 11371), 'numpy.shape', 'np.shape', (['ElemQuad3DPly[0, :]'], {}), '(ElemQuad3DPly[0, :])\n', (11350, 11371), True, 'import numpy as np\n'), ((11511, 11540), 'numpy.shape', 'np.shape', (['ElemQuad3DPly[0, :]'], {}), '(ElemQuad3DPly[0, :])\n', (11519, 11540), 
True, 'import numpy as np\n'), ((12182, 12222), 'numpy.append', 'np.append', (['EleTmpNums', 'EleTmpNds'], {'axis': '(1)'}), '(EleTmpNums, EleTmpNds, axis=1)\n', (12191, 12222), True, 'import numpy as np\n'), ((12720, 12755), 'numpy.append', 'np.append', (['ElemSet', 'EleTmpAdd[:, 0]'], {}), '(ElemSet, EleTmpAdd[:, 0])\n', (12729, 12755), True, 'import numpy as np\n'), ((13130, 13170), 'numpy.append', 'np.append', (['EleTmpNums', 'EleTmpNds'], {'axis': '(1)'}), '(EleTmpNums, EleTmpNds, axis=1)\n', (13139, 13170), True, 'import numpy as np\n'), ((13712, 13747), 'numpy.append', 'np.append', (['ElemSet', 'EleTmpAdd[:, 0]'], {}), '(ElemSet, EleTmpAdd[:, 0])\n', (13721, 13747), True, 'import numpy as np\n'), ((16302, 16327), 'numpy.append', 'np.append', (['specLenZ1', 'thk'], {}), '(specLenZ1, thk)\n', (16311, 16327), True, 'import numpy as np\n'), ((17181, 17194), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (17191, 17194), False, 'import csv\n'), ((17846, 17859), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (17856, 17859), False, 'import csv\n'), ((22183, 22211), 'numpy.ceil', 'np.ceil', (['(specLenX / elemLenX)'], {}), '(specLenX / elemLenX)\n', (22190, 22211), True, 'import numpy as np\n'), ((22503, 22525), 'numpy.ceil', 'np.ceil', (['(yl / elemLenY)'], {}), '(yl / elemLenY)\n', (22510, 22525), True, 'import numpy as np\n'), ((22802, 22820), 'numpy.cos', 'np.cos', (['fiberAngle'], {}), '(fiberAngle)\n', (22808, 22820), True, 'import numpy as np\n'), ((22842, 22860), 'numpy.cos', 'np.cos', (['fiberAngle'], {}), '(fiberAngle)\n', (22848, 22860), True, 'import numpy as np\n'), ((344, 360), 'numpy.shape', 'np.shape', (['nodeX1'], {}), '(nodeX1)\n', (352, 360), True, 'import numpy as np\n'), ((1347, 1363), 'numpy.shape', 'np.shape', (['nodeX1'], {}), '(nodeX1)\n', (1355, 1363), True, 'import numpy as np\n'), ((2769, 2785), 'numpy.shape', 'np.shape', (['Xrange'], {}), '(Xrange)\n', (2777, 2785), True, 'import numpy as np\n'), ((2961, 2977), 'numpy.shape', 
'np.shape', (['Xrange'], {}), '(Xrange)\n', (2969, 2977), True, 'import numpy as np\n'), ((3154, 3170), 'numpy.shape', 'np.shape', (['Yrange'], {}), '(Yrange)\n', (3162, 3170), True, 'import numpy as np\n'), ((3349, 3365), 'numpy.shape', 'np.shape', (['Yrange'], {}), '(Yrange)\n', (3357, 3365), True, 'import numpy as np\n'), ((3982, 3998), 'numpy.shape', 'np.shape', (['Xrange'], {}), '(Xrange)\n', (3990, 3998), True, 'import numpy as np\n'), ((4176, 4192), 'numpy.shape', 'np.shape', (['Xrange'], {}), '(Xrange)\n', (4184, 4192), True, 'import numpy as np\n'), ((4378, 4394), 'numpy.shape', 'np.shape', (['Yrange'], {}), '(Yrange)\n', (4386, 4394), True, 'import numpy as np\n'), ((4575, 4591), 'numpy.shape', 'np.shape', (['Yrange'], {}), '(Yrange)\n', (4583, 4591), True, 'import numpy as np\n'), ((5250, 5265), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (5257, 5265), True, 'import numpy as np\n'), ((5687, 5784), 'numpy.append', 'np.append', (['ElemPyrd', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0]]]'], {'axis': '(0)'}), '(ElemPyrd, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0]]], axis=0)\n', (5696, 5784), True, 'import numpy as np\n'), ((5875, 5890), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (5882, 5890), True, 'import numpy as np\n'), ((6316, 6413), 'numpy.append', 'np.append', (['ElemPyrd', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0]]]'], {'axis': '(0)'}), '(ElemPyrd, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0]]], axis=0)\n', (6325, 6413), True, 'import numpy as np\n'), ((6505, 6520), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (6512, 6520), True, 'import numpy as np\n'), ((6947, 7044), 'numpy.append', 'np.append', (['ElemPyrd', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0]]]'], {'axis': '(0)'}), '(ElemPyrd, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0]]], axis=0)\n', (6956, 7044), True, 'import numpy as np\n'), 
((7137, 7152), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (7144, 7152), True, 'import numpy as np\n'), ((7576, 7673), 'numpy.append', 'np.append', (['ElemPyrd', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0]]]'], {'axis': '(0)'}), '(ElemPyrd, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0]]], axis=0)\n', (7585, 7673), True, 'import numpy as np\n'), ((7757, 7772), 'numpy.ones', 'np.ones', (['[1, 4]'], {}), '([1, 4])\n', (7764, 7772), True, 'import numpy as np\n'), ((8283, 8397), 'numpy.append', 'np.append', (['ElemQuad', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0], Node[idx[3], 0]]\n ]'], {'axis': '(0)'}), '(ElemQuad, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0], Node[idx[3], 0]]], axis=0)\n', (8292, 8397), True, 'import numpy as np\n'), ((8799, 8814), 'numpy.ones', 'np.ones', (['[1, 4]'], {}), '([1, 4])\n', (8806, 8814), True, 'import numpy as np\n'), ((9318, 9432), 'numpy.append', 'np.append', (['ElemQuad', '[[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[2], 0], Node[idx[3], 0]]\n ]'], {'axis': '(0)'}), '(ElemQuad, [[eleCount, Node[idx[0], 0], Node[idx[1], 0], Node[idx[\n 2], 0], Node[idx[3], 0]]], axis=0)\n', (9327, 9432), True, 'import numpy as np\n'), ((9757, 9771), 'numpy.shape', 'np.shape', (['Node'], {}), '(Node)\n', (9765, 9771), True, 'import numpy as np\n'), ((12279, 12322), 'numpy.append', 'np.append', (['ElemPyrd3DInt', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemPyrd3DInt, EleTmpAdd, axis=0)\n', (12288, 12322), True, 'import numpy as np\n'), ((12345, 12387), 'numpy.append', 'np.append', (['ElemSetInt', 'ElemPyrd3DInt[:, 0]'], {}), '(ElemSetInt, ElemPyrd3DInt[:, 0])\n', (12354, 12387), True, 'import numpy as np\n'), ((13227, 13270), 'numpy.append', 'np.append', (['ElemQuad3DInt', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemQuad3DInt, EleTmpAdd, axis=0)\n', (13236, 13270), True, 'import numpy as np\n'), ((13293, 13335), 'numpy.append', 'np.append', (['ElemSetInt', 
'ElemQuad3DInt[:, 0]'], {}), '(ElemSetInt, ElemQuad3DInt[:, 0])\n', (13302, 13335), True, 'import numpy as np\n'), ((17421, 17434), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (17431, 17434), False, 'import csv\n'), ((18087, 18100), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (18097, 18100), False, 'import csv\n'), ((20828, 20883), 'numpy.append', 'np.append', (['x', 'Node[Node[:, 0] == Elem[i, k], 1]'], {'axis': '(0)'}), '(x, Node[Node[:, 0] == Elem[i, k], 1], axis=0)\n', (20837, 20883), True, 'import numpy as np\n'), ((20893, 20948), 'numpy.append', 'np.append', (['y', 'Node[Node[:, 0] == Elem[i, k], 2]'], {'axis': '(0)'}), '(y, Node[Node[:, 0] == Elem[i, k], 2], axis=0)\n', (20902, 20948), True, 'import numpy as np\n'), ((21004, 21045), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[0], x[1]]', '[y[0], y[1]]', '"""r"""'], {}), "([x[0], x[1]], [y[0], y[1]], 'r')\n", (21012, 21045), True, 'import matplotlib.pyplot as plt\n'), ((21055, 21096), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1], x[2]]', '[y[1], y[2]]', '"""g"""'], {}), "([x[1], x[2]], [y[1], y[2]], 'g')\n", (21063, 21096), True, 'import matplotlib.pyplot as plt\n'), ((21106, 21147), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[2], x[0]]', '[y[2], y[0]]', '"""k"""'], {}), "([x[2], x[0]], [y[2], y[0]], 'k')\n", (21114, 21147), True, 'import matplotlib.pyplot as plt\n'), ((21184, 21225), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[0], x[1]]', '[y[0], y[1]]', '"""r"""'], {}), "([x[0], x[1]], [y[0], y[1]], 'r')\n", (21192, 21225), True, 'import matplotlib.pyplot as plt\n'), ((21235, 21276), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1], x[2]]', '[y[1], y[2]]', '"""g"""'], {}), "([x[1], x[2]], [y[1], y[2]], 'g')\n", (21243, 21276), True, 'import matplotlib.pyplot as plt\n'), ((21286, 21327), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[2], x[3]]', '[y[2], y[3]]', '"""k"""'], {}), "([x[2], x[3]], [y[2], y[3]], 'k')\n", (21294, 21327), True, 'import matplotlib.pyplot as plt\n'), ((21337, 21378), 
'matplotlib.pyplot.plot', 'plt.plot', (['[x[3], x[0]]', '[y[3], y[0]]', '"""b"""'], {}), "([x[3], x[0]], [y[3], y[0]], 'b')\n", (21345, 21378), True, 'import matplotlib.pyplot as plt\n'), ((492, 508), 'numpy.shape', 'np.shape', (['nodeX2'], {}), '(nodeX2)\n', (500, 508), True, 'import numpy as np\n'), ((12446, 12489), 'numpy.append', 'np.append', (['ElemPyrd3DCzm', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemPyrd3DCzm, EleTmpAdd, axis=0)\n', (12455, 12489), True, 'import numpy as np\n'), ((12513, 12555), 'numpy.append', 'np.append', (['ElemSetCzm', 'ElemPyrd3DCzm[:, 0]'], {}), '(ElemSetCzm, ElemPyrd3DCzm[:, 0])\n', (12522, 12555), True, 'import numpy as np\n'), ((12596, 12639), 'numpy.append', 'np.append', (['ElemPyrd3DPly', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemPyrd3DPly, EleTmpAdd, axis=0)\n', (12605, 12639), True, 'import numpy as np\n'), ((12662, 12704), 'numpy.append', 'np.append', (['ElemSetPly', 'ElemPyrd3DPly[:, 0]'], {}), '(ElemSetPly, ElemPyrd3DPly[:, 0])\n', (12671, 12704), True, 'import numpy as np\n'), ((13406, 13449), 'numpy.append', 'np.append', (['ElemQuad3DCzm', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemQuad3DCzm, EleTmpAdd, axis=0)\n', (13415, 13449), True, 'import numpy as np\n'), ((13472, 13514), 'numpy.append', 'np.append', (['ElemSetCzm', 'ElemQuad3DCzm[:, 0]'], {}), '(ElemSetCzm, ElemQuad3DCzm[:, 0])\n', (13481, 13514), True, 'import numpy as np\n'), ((13576, 13619), 'numpy.append', 'np.append', (['ElemQuad3DPly', 'EleTmpAdd'], {'axis': '(0)'}), '(ElemQuad3DPly, EleTmpAdd, axis=0)\n', (13585, 13619), True, 'import numpy as np\n'), ((13642, 13684), 'numpy.append', 'np.append', (['ElemSetPly', 'ElemQuad3DPly[:, 0]'], {}), '(ElemSetPly, ElemQuad3DPly[:, 0])\n', (13651, 13684), True, 'import numpy as np\n'), ((9813, 9833), 'numpy.shape', 'np.shape', (['Node[:, 0]'], {}), '(Node[:, 0])\n', (9821, 9833), True, 'import numpy as np\n'), ((12064, 12090), 'numpy.shape', 'np.shape', (['ElemPyrd3D[:, 0]'], {}), '(ElemPyrd3D[:, 0])\n', (12072, 12090), True, 'import 
numpy as np\n'), ((13012, 13038), 'numpy.shape', 'np.shape', (['ElemQuad3D[:, 0]'], {}), '(ElemQuad3D[:, 0])\n', (13020, 13038), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import choice
# grayscale images
fashion_mnist = keras.datasets.fashion_mnist # load dataset
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() # split into testing and training
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Data Preprocessing
# Data should be in between 0 and 1
train_images = train_images / 255.0
test_images = test_images / 255.0
# The model The neural network Sequential -> sequential neural network (not recurrent or convolutional)
# input layer: Flatten -> Take shape 28 x 28 (matrix) and flatten it into 784 pixels
# hidden layer: Dense layer (all neurons in the prev. layer are connected to the ones in this), 128 neurons,
# relu as activation function
# output layer: Dense layer, 10 output neurons, softmax as the activation function 10 output neurons because there are
# 10 classes to detect.
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)), # input layer (1)
keras.layers.Dense(128, activation='relu'), # hidden layer (2)
keras.layers.Dense(10, activation='softmax') # output layer (3)
])
# Architecture is defined
# Compile the model
# Optimizer: adam does the gradient descent etc.
# Loss function
# Metrics: we want to see the accuracy
# These are the hyper parameters
# The amount of neurons etc are also hyper parameters.
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Training the model
# Epochs is another hyper parameter
model.fit(train_images, train_labels, epochs=10)
print('')
# Test the model on the testing data
test_loss, test_accuracy = model.evaluate(test_images, test_labels, verbose=1)
print('Test accuracy:', test_accuracy)
# Make predictions
predictions = model.predict(test_images)
print(class_names[np.argmax(predictions[0])])
plt.figure()
plt.imshow(test_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
choice.do_it(test_images=test_images, test_labels=test_labels, model=model)
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"choice.do_it",
"matplotlib.pyplot.colorbar",
"numpy.argmax",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((1980, 1992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1990, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2019), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_images[0]'], {}), '(test_images[0])\n', (2003, 2019), True, 'import matplotlib.pyplot as plt\n'), ((2020, 2034), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2032, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2050), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2043, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2051, 2061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2059, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2138), 'choice.do_it', 'choice.do_it', ([], {'test_images': 'test_images', 'test_labels': 'test_labels', 'model': 'model'}), '(test_images=test_images, test_labels=test_labels, model=model)\n', (2075, 2138), False, 'import choice\n'), ((1952, 1977), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (1961, 1977), True, 'import numpy as np\n')] |
import cv2
import numpy
import math
from .cluster import Clusters, Cluster
CV_HARRIS_CORNER_THRESHOLD = 10e-03
MIN_CORNER_CLUSTER = 1
def init_harris_corners_and_cluster(monochrome_pil_img, polar_side_maximums, polar_side_minimums, origin):
harris_img = cv2.cornerHarris(numpy.array(monochrome_pil_img), 3, 3, 0.04)
harris_corners = []
for x in range(0, harris_img.shape[0]):
for y in range(0, harris_img.shape[1]):
if harris_img[x,y] > CV_HARRIS_CORNER_THRESHOLD:
harris_corners.append((x,y))
maxes_and_mins = list(polar_side_maximums)
for i in range(0, len(polar_side_minimums)):
maxes_and_mins.append(polar_side_minimums[i])
for i in range(0, len(maxes_and_mins)):
radius = maxes_and_mins[i][1]
angle = maxes_and_mins[i][0]
dx = int(radius * math.cos(angle))
dy = int(radius * math.sin(angle))
pixel = (origin[0] + dx, origin[1] - dy)
maxes_and_mins[i] = Cluster(pixel)
clusters = Clusters(harris_corners, maxes_and_mins)
clusters.fit_data_to_clusters(1, 0)
#remove_clusters_with_corners_under_threshold
i = 0
while i < len(clusters):
if len(clusters[i]) <= MIN_CORNER_CLUSTER:
del clusters[i]
else:
i += 1
return len(clusters)
| [
"math.cos",
"numpy.array",
"math.sin"
] | [((277, 308), 'numpy.array', 'numpy.array', (['monochrome_pil_img'], {}), '(monochrome_pil_img)\n', (288, 308), False, 'import numpy\n'), ((839, 854), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (847, 854), False, 'import math\n'), ((882, 897), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (890, 897), False, 'import math\n')] |
import numpy as np
from nose.plugins.attrib import attr
from cStringIO import StringIO
from nose.tools import raises
from microscopes.lda import utils
def test_docs_from_document_term_matrix():
dtm = [[2, 1], [3, 2]]
docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_document_term_matrix_with_vocab():
dtm = [[2, 1], [3, 2]]
docs = [['cat', 'cat', 2], ['cat', 'cat', 'cat', 2, 2]]
gen_docs = utils.docs_from_document_term_matrix(dtm, vocab=['cat', 2])
assert gen_docs == docs
def test_docs_from_dtm_with_gaps():
dtm = [[2, 0, 1], [1, 1, 1]]
docs = [[0, 0, 2], [0, 1, 2]]
assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_numpy_dtp():
dtm = np.array([[2, 1], [3, 2]])
docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_ldac_simple():
stream = StringIO()
stream.write("2 0:2 1:1\n2 0:3 1:2")
stream.seek(0) # rewind stream
docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
assert utils.docs_from_ldac(stream) == docs
stream = StringIO()
stream.write("2 1:1 0:2\n3 2:1 0:3 1:1")
stream.seek(0) # rewind stream
docs = [[1, 0, 0], [2, 0, 0, 0, 1]]
assert utils.docs_from_ldac(stream) == docs
@raises(AssertionError)
def test_bad_ldac_data():
stream = StringIO()
stream.write("2 0:1")
stream.seek(0) # rewind stream
utils.docs_from_ldac(stream)
def test_num_terms():
docs = [[0, 1, 2], [1, 2, 3]]
assert utils.num_terms(docs) == 4
def test_row_major_form_conversion():
l = [[1, 2, 3, 4], [1, 2, 3], [1, 2, 3, 4, 5, 6]]
rmf = utils.ragged_array_to_row_major_form(l)
assert utils.row_major_form_to_ragged_array(*rmf) == l
| [
"microscopes.lda.utils.num_terms",
"cStringIO.StringIO",
"microscopes.lda.utils.docs_from_ldac",
"microscopes.lda.utils.ragged_array_to_row_major_form",
"microscopes.lda.utils.docs_from_document_term_matrix",
"numpy.array",
"nose.tools.raises",
"microscopes.lda.utils.row_major_form_to_ragged_array"
] | [((1329, 1351), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (1335, 1351), False, 'from nose.tools import raises\n'), ((483, 542), 'microscopes.lda.utils.docs_from_document_term_matrix', 'utils.docs_from_document_term_matrix', (['dtm'], {'vocab': "['cat', 2]"}), "(dtm, vocab=['cat', 2])\n", (519, 542), False, 'from microscopes.lda import utils\n'), ((781, 807), 'numpy.array', 'np.array', (['[[2, 1], [3, 2]]'], {}), '([[2, 1], [3, 2]])\n', (789, 807), True, 'import numpy as np\n'), ((958, 968), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (966, 968), False, 'from cStringIO import StringIO\n'), ((1147, 1157), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (1155, 1157), False, 'from cStringIO import StringIO\n'), ((1391, 1401), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (1399, 1401), False, 'from cStringIO import StringIO\n'), ((1467, 1495), 'microscopes.lda.utils.docs_from_ldac', 'utils.docs_from_ldac', (['stream'], {}), '(stream)\n', (1487, 1495), False, 'from microscopes.lda import utils\n'), ((1696, 1735), 'microscopes.lda.utils.ragged_array_to_row_major_form', 'utils.ragged_array_to_row_major_form', (['l'], {}), '(l)\n', (1732, 1735), False, 'from microscopes.lda import utils\n'), ((275, 316), 'microscopes.lda.utils.docs_from_document_term_matrix', 'utils.docs_from_document_term_matrix', (['dtm'], {}), '(dtm)\n', (311, 316), False, 'from microscopes.lda import utils\n'), ((687, 728), 'microscopes.lda.utils.docs_from_document_term_matrix', 'utils.docs_from_document_term_matrix', (['dtm'], {}), '(dtm)\n', (723, 728), False, 'from microscopes.lda import utils\n'), ((859, 900), 'microscopes.lda.utils.docs_from_document_term_matrix', 'utils.docs_from_document_term_matrix', (['dtm'], {}), '(dtm)\n', (895, 900), False, 'from microscopes.lda import utils\n'), ((1096, 1124), 'microscopes.lda.utils.docs_from_ldac', 'utils.docs_from_ldac', (['stream'], {}), '(stream)\n', (1116, 1124), False, 'from microscopes.lda 
import utils\n'), ((1289, 1317), 'microscopes.lda.utils.docs_from_ldac', 'utils.docs_from_ldac', (['stream'], {}), '(stream)\n', (1309, 1317), False, 'from microscopes.lda import utils\n'), ((1565, 1586), 'microscopes.lda.utils.num_terms', 'utils.num_terms', (['docs'], {}), '(docs)\n', (1580, 1586), False, 'from microscopes.lda import utils\n'), ((1747, 1789), 'microscopes.lda.utils.row_major_form_to_ragged_array', 'utils.row_major_form_to_ragged_array', (['*rmf'], {}), '(*rmf)\n', (1783, 1789), False, 'from microscopes.lda import utils\n')] |
"""
Process the raw data to generate the tables we will use to..
implement the estimation.
"""
import numpy as np
import pandas as pd
from bld.project_paths import project_paths_join as ppj
# Prepare the original dataset.
raw_dict = {}
for dataset in 'pop_9314', 'pop_1517', 'gross_claims', 'gross_premiums', \
'hhl_finworth', 'cpi_oecd', 'paper_table':
raw_dict[dataset] = pd.read_csv(ppj('IN_DATA', '{}.csv'.format(dataset)))
earliest = min(np.unique(raw_dict['pop_9314']['TIME'].values))
before_miss_year = 2002
# Process the population data.
def pop_data():
"""
Select the population data which we will use to calculate the per capita..
based values.
"""
pop_raw1 = raw_dict['pop_9314']
pop_raw2 = raw_dict['pop_1517']
pop_filoc = pop_raw1[pop_raw1['LOCATION'].str.contains('USA')]
pop_filsb = pop_filoc[
(pop_filoc['Subject'].str.contains('Population mid-year estimates Total')) &
(pop_filoc['Subject'].str.contains('growth') == False)
].reset_index(drop=True)
pop_tv = pop_filsb[['TIME', 'Value']]
pop_tv['Value'] = pop_tv['Value'].apply(lambda x: x * 1000)
pop_9314 = pop_tv.rename(columns={'TIME': 'Year', 'Value': 'Population'})
pop_raw2['Value'] = pop_raw2['Value'].apply(lambda x: x * 1000)
pop_1517 = pop_raw2.rename(columns={'TIME': 'Year', 'Value': 'Population'})
final_pop = pd.merge(pop_9314, pop_1517, how='outer')
return final_pop
# Process the premiums data.
def premiums_data():
"""
Generate the premiums table.
Returns:
final_prem (pd.DataFrame)
"""
prem_raw = raw_dict['gross_premiums']
prem_fil = prem_raw[prem_raw['LOCATION'].str.contains('USA')]
[['TIME', 'Value']]
prem_fil['Value'] = prem_fil['Value'] * 1e6
final_prem = prem_fil.rename(
columns={'TIME': 'Year', 'Value': 'Premiums'}).reset_index(drop=True)
return final_prem
# Process the claims data.
def _clam_fil():
"""
Filter out the redundant claims data.
Returns:
known_clam (pd.DataFrame): claims table we have known
"""
clam_raw = raw_dict['gross_claims']
clam_fil = clam_raw[clam_raw['COU'].str.contains('USA') &
clam_raw['ITYP'].str.contains('TOT') &
clam_raw['OWN'].str.contains('TOT') &
clam_raw['Unit Code'].str.contains('USD') &
clam_raw['DB_RA'].str.contains('Total') &
clam_raw['Currency'].str.contains('US Dollars')
][['Year', 'Value']].reset_index(drop=True)
known_clam = clam_fil.rename(columns={'Value': 'Claims (mild)'})
return known_clam
def _known_growth(clam_table):
"""
Goal: use the mean annual growth rate we have known during the year..
2002-2017 to roughly complement the missing values (1993-2001).
Args:
clam_table (pd.DataFrame): known clean gross claims table.
Returns:
mean_growth (float64)
"""
growth_claims = []
gclaims_list = clam_table['Claims (mild)'].tolist()
for i in range(len(gclaims_list) - 1):
growth_claims.append(
(gclaims_list[i + 1] - gclaims_list[i]) / gclaims_list[i])
outliers = [max(growth_claims), min(growth_claims)]
for i in outliers:
growth_claims.remove(i)
mean_growth = np.mean(np.array(growth_claims))
return mean_growth
def _guess_clam(clam_table, growth_mean):
"""
Generate the guessed claims during the year 1993-2001.
Args:
clam_table (pd.DataFrame): known clean gross claims table
growth_mean (float64): mean growth rate of claims during 2002-2017
Returns:
guess_claims (list): guessed results
"""
guess_claims = []
guess_claims.append(clam_table['Claims (mild)'].tolist()[0])
for i in range(before_miss_year - earliest):
guess_claims.append((guess_claims[i] / (1 + growth_mean)))
guess_claims.reverse()
guess_claims = guess_claims[:-1]
return guess_claims
def _outlier_clam(clam_table, guessed_claims):
"""
Generate the claims and premiums tables in dollars with claim outlier.
Args:
clam_table (pd.DataFrame): known clean gross claims table
guessed_claims (list): guessed claims based on the known growth rate
Returns:
outlier_claims (pd.DataFrame): claims table with outlier
"""
year_before = []
for i in range(earliest, before_miss_year):
year_before.append(i)
clam_dict = {'Year': year_before, 'Claims (mild)': guessed_claims}
clam_df = pd.DataFrame(clam_dict).round(2)
only_claims = pd.merge(clam_df, clam_table, how='outer')
only_claims['Claims'] = only_claims['Claims (mild)'] * 1e6
only_claims = only_claims.drop(columns=['Claims (mild)'])
only_prems = premiums_data()
outlier_claims = pd.merge(only_claims, only_prems)
return outlier_claims
def _merge_clam(clam_table, guessed_claims):
"""
There is an outlier in the data (2011). The sharp rising of claims is..
due to the serious earthquake in the U.S. in 2011. So we replace it by
the mean of claims in 2010 and 2012. You can find the figure in the
presentation sildes.
Get rid of the outlier
Args:
clam_table (pd.DataFrame): known clean gross claims table
guessed_claims (list): guessed claims based on the known growth rate
Returns:
final_claims (pd.DataFrame):
final claims table after replacing the outlier
"""
final_claims = _outlier_clam(clam_table, guessed_claims)
adjust_claims = np.mean(final_claims[
(final_claims['Year'] == 2010) | (final_claims['Year'] == 2012)
].iloc[:, 1].values)
final_claims.iloc[18, 1] = adjust_claims
return final_claims
def claims_data():
"""
Generate the final claims data.
Returns:
final_table (pd.DataFrame)
"""
known_table = _clam_fil()
known_growth = _known_growth(known_table)
guessed_table = _guess_clam(known_table, known_growth)
final_table = _merge_clam(known_table, guessed_table)
return final_table
# Process the net worth data.
def wealth_data():
"""
Generate the net worth (per capita) table.
Returns:
final_weal (pd.DataFrame)
"""
weal_raw = raw_dict['hhl_finworth']
weal_fil = weal_raw[weal_raw['LOCATION'].str.contains('USA')]
final_weal = weal_fil[['TIME', 'Value']].rename(
columns={'TIME': 'Year', 'Value': 'Wealth'}).reset_index(drop=True)
return final_weal
# Produce the table on a per capita basis.
def table_pc():
"""
Generate the final table without price and moving-average adjustments.
Returns:
nonadj_pc (pd.DataFrame)
"""
gross_premiums = premiums_data()
gross_claims = claims_data()
wealth = wealth_data()
population = pop_data()
gross_precl = pd.merge(gross_premiums, gross_claims)
gross_table = pd.merge(gross_precl, population)
for i in 'Premiums', 'Claims':
gross_table[i] = gross_table['{}'.format(i)] / gross_table['Population']
pc_table = gross_table[['Year', 'Premiums', 'Claims']]
nonadj_pc = pd.merge(pc_table, wealth, how='outer')
return nonadj_pc
# Recalculate the table using the constant 2015 dollars.
def _cpi_data():
"""Get the U.S. CPI data."""
cpi_raw = raw_dict['cpi_oecd']
cpi_fil = cpi_raw[cpi_raw['LOCATION'].str.contains('USA')][['TIME', 'Value']].reset_index(drop=True)
final_cpi = cpi_fil.rename(columns={'TIME': 'Year', 'Value': 'CPI'})
return final_cpi
def cpi_adjust(nonadj_table, cpi_table):
"""
Use CPI data to recalculate the values in constant 2015 dollars, formula..
is (e.g. 2004): money in 2004 * (CPI_2015 / CPI_2004)
Arg:
nonadj_table (pd.DataFrame): claims/premiums/wealth with inflation
cpi_table (pd.DataFrame): CPI data of the U.S.
Returns:
final_constant (pd.DataFrame): adjusted table in constant 2015 dollars
"""
mer_table = pd.merge(nonadj_table, cpi_table)
temp_np = mer_table.iloc[:, 1:5].values
n_year = temp_np.shape[0]
for i in range(0, n_year):
temp_np[i:i + 1, 0:3] = temp_np[i:i + 1, 0:3] * \
(temp_np[n_year - 3:n_year - 2, 3] / temp_np[i:i + 1, 3])
mer_table.iloc[:, 1:] = temp_np
final_constant = mer_table.round(0).drop(columns=['CPI'])
return final_constant
# Recalculate the values with five-year moving average method to overcome the
# problem of possible noise
def five_moving(nonadj_table, cpi_table):
"""
Use five-moving average method to stabilize data.
Arg:
nonadj_table (pd.DataFrame): claims/premiums/wealth with inflation
cpi_table (pd.DataFrame): CPI data of the U.S.
Returns:
stab (pd.DataFrame): our final dataset for estimation
"""
nonstab = cpi_adjust(nonadj_table, cpi_table)
nonstab_np = nonstab.iloc[:, 1:3].values
n_year = nonstab_np.shape[0]
for i in range(0, n_year - 4):
nonstab_np[i + 2] = np.mean(nonstab_np[i:i + 5], axis=0)
nonstab.iloc[2:23, 1:3] = nonstab_np[2:23]
stab = nonstab.iloc[2:23].reset_index(drop=True).round(0)
stab = stab[['Year', 'Wealth', 'Premiums', 'Claims']]
return stab
# Output functions
def save_data(known_claim, guess_claim, nonadj_table, cpi_table):
"""
Generate the final tables (including the table in original paper).
Arg:
known_claim (pd.DataFrame): known gross claims table
guess_claim (pd.DataFrame):
guessed claims based on the known growth rate
nonadj_table (pd.DataFrame): claims/premiums/wealth with inflation
cpi_table (pd.DataFrame): CPI data of the U.S.
"""
known_claim = _clam_fil()
growth_mean = _known_growth(known_claim)
guess_claim = _guess_clam(known_claim, growth_mean)
clam_olt = _outlier_clam(known_claim, guess_claim)
clam_olt.to_csv(ppj('OUT_DATA', 'claims_outlier.csv'),
index=False, sep=',')
cpi_adj = cpi_adjust(nonadj_table, cpi_table)
cpi_adj.to_csv(ppj('OUT_DATA', 'cpi_adjust.csv'),
index=False, sep=',')
stab = five_moving(nonadj_table, cpi_table)
stab.to_csv(ppj('OUT_DATA', 'recent_table.csv'), index=False, sep=',')
data_in_paper = raw_dict['paper_table']
data_in_paper.to_csv(
ppj('OUT_DATA', 'szpiro_table.csv'), index=False, sep=',')
# Export the data we need
if __name__ == '__main__':
claim_known = _clam_fil()
mean_growth = _known_growth(claim_known)
claim_guess = _guess_clam(claim_known, mean_growth)
no_cpi_adj = table_pc()
cpi_table = _cpi_data()
save_data(claim_known, claim_guess, no_cpi_adj, cpi_table)
| [
"numpy.mean",
"numpy.unique",
"bld.project_paths.project_paths_join",
"pandas.merge",
"numpy.array",
"pandas.DataFrame"
] | [((462, 508), 'numpy.unique', 'np.unique', (["raw_dict['pop_9314']['TIME'].values"], {}), "(raw_dict['pop_9314']['TIME'].values)\n", (471, 508), True, 'import numpy as np\n'), ((1393, 1434), 'pandas.merge', 'pd.merge', (['pop_9314', 'pop_1517'], {'how': '"""outer"""'}), "(pop_9314, pop_1517, how='outer')\n", (1401, 1434), True, 'import pandas as pd\n'), ((4664, 4706), 'pandas.merge', 'pd.merge', (['clam_df', 'clam_table'], {'how': '"""outer"""'}), "(clam_df, clam_table, how='outer')\n", (4672, 4706), True, 'import pandas as pd\n'), ((4888, 4921), 'pandas.merge', 'pd.merge', (['only_claims', 'only_prems'], {}), '(only_claims, only_prems)\n', (4896, 4921), True, 'import pandas as pd\n'), ((5635, 5743), 'numpy.mean', 'np.mean', (["final_claims[(final_claims['Year'] == 2010) | (final_claims['Year'] == 2012)\n ].iloc[:, 1].values"], {}), "(final_claims[(final_claims['Year'] == 2010) | (final_claims['Year'] ==\n 2012)].iloc[:, 1].values)\n", (5642, 5743), True, 'import numpy as np\n'), ((6930, 6968), 'pandas.merge', 'pd.merge', (['gross_premiums', 'gross_claims'], {}), '(gross_premiums, gross_claims)\n', (6938, 6968), True, 'import pandas as pd\n'), ((6987, 7020), 'pandas.merge', 'pd.merge', (['gross_precl', 'population'], {}), '(gross_precl, population)\n', (6995, 7020), True, 'import pandas as pd\n'), ((7214, 7253), 'pandas.merge', 'pd.merge', (['pc_table', 'wealth'], {'how': '"""outer"""'}), "(pc_table, wealth, how='outer')\n", (7222, 7253), True, 'import pandas as pd\n'), ((8069, 8102), 'pandas.merge', 'pd.merge', (['nonadj_table', 'cpi_table'], {}), '(nonadj_table, cpi_table)\n', (8077, 8102), True, 'import pandas as pd\n'), ((3376, 3399), 'numpy.array', 'np.array', (['growth_claims'], {}), '(growth_claims)\n', (3384, 3399), True, 'import numpy as np\n'), ((9094, 9130), 'numpy.mean', 'np.mean', (['nonstab_np[i:i + 5]'], {'axis': '(0)'}), '(nonstab_np[i:i + 5], axis=0)\n', (9101, 9130), True, 'import numpy as np\n'), ((9992, 10029), 
'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""claims_outlier.csv"""'], {}), "('OUT_DATA', 'claims_outlier.csv')\n", (9995, 10029), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((10143, 10176), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""cpi_adjust.csv"""'], {}), "('OUT_DATA', 'cpi_adjust.csv')\n", (10146, 10176), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((10284, 10319), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""recent_table.csv"""'], {}), "('OUT_DATA', 'recent_table.csv')\n", (10287, 10319), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((10422, 10457), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""szpiro_table.csv"""'], {}), "('OUT_DATA', 'szpiro_table.csv')\n", (10425, 10457), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((4612, 4635), 'pandas.DataFrame', 'pd.DataFrame', (['clam_dict'], {}), '(clam_dict)\n', (4624, 4635), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : logisticRegression.py
@Author : <NAME>
@Emial : <EMAIL>
@Date : 2022/02/23 16:35
@Description : 逻辑斯蒂回归
"""
import time
import numpy as np
def loadData(fileName):
"""
加载数据
@Args:
fileName: 需要加载的文件
@Returns:
dataList: 数据集
labelLise: 标签集
@Riase:
"""
dataList = []
labelList = []
# 打开文件
fr = open(fileName, 'r')
for line in fr.readlines():
curLine = line.strip().split(',')
if int(curLine[0]) == 0:
labelList.append(1)
else:
labelList.append(0)
dataList.append([int(num) / 255 for num in curLine[1:]])
return dataList, labelList
def predict(w, x):
"""
预测标签
@Args:
w: 权重
x: 样本
@Returns:
预测结果
@Riase:
"""
wx = np.dot(w, x)
P1 = np.exp(wx) / (1 + np.exp(wx))
if P1 >= 0.5:
return 1
return 0
def logisticRegression(trainDataList, trainLabelList, iter=20):
"""
逻辑斯蒂回归训练
@Args:
trainDataList: 训练集
trainLabelList: 训练集标签
iter: 迭代次数, default=20
@Returns:
w: 训练得到的权重
@Riase:
"""
# 将w与b合在一起,x需要增加一维
for i in range(len(trainDataList)):
trainDataList[i].append(1)
trainDataList = np.array(trainDataList)
w = np.zeros(trainDataList.shape[1])
# 设置步长
h = 0.001
# 迭代iter次进行随机梯度下降
for i in range(iter):
# 每次迭代遍历所有样本,进行随机梯度下降
print(f"Epoch: {i}, remaining {iter - i} ......")
for j in range(trainDataList.shape[0]):
# 随机梯度上升部分
# 我们需要极大化似然函数但是似然函数由于有求和项,
# 并不能直接对w求导得出最优w,所以针对似然函数求和部分中每一项进行单独地求导w,
# 得到针对该样本的梯度,并进行梯度上升(因为是要求似然函数的极大值,
# 所以是梯度上升,如果是极小值就梯度下降。梯度上升是加号,下降是减号)
# 求和式中每一项单独对w求导结果为:xi * yi - (exp(w * xi) * xi) / (1 + exp(w * xi))
wx = np.dot(w, trainDataList[j])
xi = trainDataList[j]
yi = trainLabelList[j]
# 梯度上升
w += h * (xi * yi - (np.exp(wx) * xi) / ( 1 + np.exp(wx)))
return w
def model_test(testDataList, testLabelList, w):
"""
模型测试
@Args:
testDataList: 测试数据集
testLabelList: 测试数据集标签
w: 学习到的权重
@Returns:
准确率
@Riase:
"""
for i in range(len(testDataList)):
testDataList[i].append(1)
# 错误值计数
errorCnt = 0
for i in range(len(testDataList)):
if testLabelList[i] != predict(w, testDataList[i]):
errorCnt += 1
# 返回正确率
return 1 - errorCnt / len(testDataList)
if __name__ == '__main__':
start = time.time()
# 获取训练集
print("Start read transSet......")
trainData, trainLabel = loadData("../data/mnist_train.csv")
# 获取测试集
print("Start read testSet......")
testData, testLabel = loadData("../data/mnist_test.csv")
# 开始训练
print("Start to train......")
w = logisticRegression(trainData, trainLabel)
# 验证正确率
print("Start to test......")
accuracy = model_test(testData, testLabel, w)
print(f"The accuracy is: {accuracy}")
end = time.time()
print(f"Time span: {end - start}") | [
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"time.time"
] | [((918, 930), 'numpy.dot', 'np.dot', (['w', 'x'], {}), '(w, x)\n', (924, 930), True, 'import numpy as np\n'), ((1392, 1415), 'numpy.array', 'np.array', (['trainDataList'], {}), '(trainDataList)\n', (1400, 1415), True, 'import numpy as np\n'), ((1424, 1456), 'numpy.zeros', 'np.zeros', (['trainDataList.shape[1]'], {}), '(trainDataList.shape[1])\n', (1432, 1456), True, 'import numpy as np\n'), ((2732, 2743), 'time.time', 'time.time', ([], {}), '()\n', (2741, 2743), False, 'import time\n'), ((3217, 3228), 'time.time', 'time.time', ([], {}), '()\n', (3226, 3228), False, 'import time\n'), ((940, 950), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (946, 950), True, 'import numpy as np\n'), ((958, 968), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (964, 968), True, 'import numpy as np\n'), ((1979, 2006), 'numpy.dot', 'np.dot', (['w', 'trainDataList[j]'], {}), '(w, trainDataList[j])\n', (1985, 2006), True, 'import numpy as np\n'), ((2129, 2139), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (2135, 2139), True, 'import numpy as np\n'), ((2154, 2164), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (2160, 2164), True, 'import numpy as np\n')] |
import mobula
import mobula.layers as L
import numpy as np
def go_eltwise(op):
a = np.array([1,0,6]).astype(np.float)
b = np.array([4,5,3]).astype(np.float)
print ("a: ", a)
print ("b: ", b)
data1 = L.Data(a)
data2 = L.Data(b)
coeffs = np.array([-1.0,1.2])
l = L.Eltwise([data1,data2], op = op, coeffs = coeffs)
l.reshape()
l.forward()
print ("Y: ", l.Y)
dY = np.array([7, 8, 9]).astype(np.float)
l.dY = dY
print ("dY: ", l.dY)
l.backward()
print ("dX: ", l.dX[0], l.dX[1])
c0, c1 = coeffs
if op == L.Eltwise.SUM:
Y = c0 * a + c1 * b
dX0 = c0 * dY
dX1 = c1 * dY
elif op == L.Eltwise.PROD:
Y = a * b * c0 * c1
dX0 = b * dY * c0 * c1
dX1 = a * dY * c0 * c1
elif op == L.Eltwise.MAX:
Y = np.max([c0*a,c1*b], 0)
i = np.argmax([c0*a,c1*b], 0)
dX0 = np.zeros(a.shape)
dX1 = np.zeros(b.shape)
dX0[i == 0] = dY[i == 0] * c0
dX1[i == 1] = dY[i == 1] * c1
print ("Y", l.Y, Y)
assert np.allclose(l.Y, Y)
assert np.allclose(l.dX[0], dX0)
assert np.allclose(l.dX[1], dX1)
def test_eltwise():
    """Run the Eltwise layer check once for every supported operation."""
    cases = (
        ("SUM", L.Eltwise.SUM),
        ("PROD", L.Eltwise.PROD),
        ("MAX", L.Eltwise.MAX),
    )
    for op_name, op in cases:
        print ("TEST " + op_name)
        go_eltwise(op)
| [
"numpy.allclose",
"mobula.layers.Data",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.zeros",
"mobula.layers.Eltwise"
] | [((220, 229), 'mobula.layers.Data', 'L.Data', (['a'], {}), '(a)\n', (226, 229), True, 'import mobula.layers as L\n'), ((242, 251), 'mobula.layers.Data', 'L.Data', (['b'], {}), '(b)\n', (248, 251), True, 'import mobula.layers as L\n'), ((265, 286), 'numpy.array', 'np.array', (['[-1.0, 1.2]'], {}), '([-1.0, 1.2])\n', (273, 286), True, 'import numpy as np\n'), ((294, 341), 'mobula.layers.Eltwise', 'L.Eltwise', (['[data1, data2]'], {'op': 'op', 'coeffs': 'coeffs'}), '([data1, data2], op=op, coeffs=coeffs)\n', (303, 341), True, 'import mobula.layers as L\n'), ((1064, 1083), 'numpy.allclose', 'np.allclose', (['l.Y', 'Y'], {}), '(l.Y, Y)\n', (1075, 1083), True, 'import numpy as np\n'), ((1095, 1120), 'numpy.allclose', 'np.allclose', (['l.dX[0]', 'dX0'], {}), '(l.dX[0], dX0)\n', (1106, 1120), True, 'import numpy as np\n'), ((1132, 1157), 'numpy.allclose', 'np.allclose', (['l.dX[1]', 'dX1'], {}), '(l.dX[1], dX1)\n', (1143, 1157), True, 'import numpy as np\n'), ((88, 107), 'numpy.array', 'np.array', (['[1, 0, 6]'], {}), '([1, 0, 6])\n', (96, 107), True, 'import numpy as np\n'), ((131, 150), 'numpy.array', 'np.array', (['[4, 5, 3]'], {}), '([4, 5, 3])\n', (139, 150), True, 'import numpy as np\n'), ((409, 428), 'numpy.array', 'np.array', (['[7, 8, 9]'], {}), '([7, 8, 9])\n', (417, 428), True, 'import numpy as np\n'), ((827, 854), 'numpy.max', 'np.max', (['[c0 * a, c1 * b]', '(0)'], {}), '([c0 * a, c1 * b], 0)\n', (833, 854), True, 'import numpy as np\n'), ((862, 892), 'numpy.argmax', 'np.argmax', (['[c0 * a, c1 * b]', '(0)'], {}), '([c0 * a, c1 * b], 0)\n', (871, 892), True, 'import numpy as np\n'), ((902, 919), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (910, 919), True, 'import numpy as np\n'), ((934, 951), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (942, 951), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.