code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#Name: <NAME>
#e-mail: <EMAIL>
"""
Python code for getting slice from a nifti volume
"""
import numpy as np
import SimpleITK as sitk
import os
def get_axial_Slice_from_Nifti(path_to_volume,coord):
    """Extract one axial slice from a Nifti volume stored next to this module.

    Parameters
    ----------
    path_to_volume: String, path to nifti volume (appended to this module's
        directory)
    coord: Integer, axial coordinate (index along the first array axis)

    Returns
    -------
    axial_slice: 2D Numpy Array
    """
    full_path = os.path.dirname(__file__) + path_to_volume
    # SimpleITK loads the volume; GetArrayFromImage yields a (z, y, x) array.
    volume = sitk.GetArrayFromImage(sitk.ReadImage(full_path))
    return np.asarray(volume[coord, :, :])
| [
"numpy.asarray",
"os.path.dirname",
"SimpleITK.GetArrayFromImage"
] | [((561, 588), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (583, 588), True, 'import SimpleITK as sitk\n'), ((647, 670), 'numpy.asarray', 'np.asarray', (['axial_slice'], {}), '(axial_slice)\n', (657, 670), True, 'import numpy as np\n'), ((502, 527), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (517, 527), False, 'import os\n')] |
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for patch based image processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import struct
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import preprocess
import utils
from models.utils import get_net
from trainer import make_estimator
FLAGS = tf.flags.FLAGS
PATCH_H_COUNT = 3
PATCH_W_COUNT = 3
PATCH_COUNT = PATCH_H_COUNT * PATCH_W_COUNT
# It's supposed to be in the root folder, which is also pwd when running, if the
# instructions in the README are followed. Hence not a flag.
PERMUTATION_PATH = 'permutations_100_max.bin'
def apply_model(image_fn,
                is_training,
                num_outputs,
                perms,
                make_signature=False):
    """Creates the patch based model output from patches representations.

    Args:
      image_fn: function returns image tensor.
      is_training: is training flag used for batch norm and drop out.
      num_outputs: number of output classes.
      perms: numpy array with shape [m, k], element range [0, PATCH_COUNT). k
        stands for the patch numbers used in a permutation. m stands for the
        number of permutations. Each permutation is used to concat the patch
        inputs [n*PATCH_COUNT, h, w, c] into tensor with shape [n*m, h, w, c*k].
      make_signature: whether to create signature for hub module.

    Returns:
      out: output tensor with shape [n*m, 1, 1, num_outputs].

    Raises:
      ValueError: An error occurred when the architecture is unknown.
    """
    # Materialize the input tensor inside the current graph/module scope.
    images = image_fn()
    # Backbone network; embedding dimension is configurable through FLAGS.
    net = get_net(num_classes=FLAGS.get_flag_value('embed_dim', 1000))
    out, end_points = net(images, is_training,
                          weight_decay=FLAGS.get_flag_value('weight_decay', 1e-4))
    print(end_points)
    if not make_signature:
        # Train/eval path: permute the patch embeddings per `perms`, classify
        # the permutation with the FC head, and drop the 1x1 spatial dims.
        out = permutate_and_concat_batch_patches(out, perms)
        out = fully_connected(out, num_outputs, is_training=is_training)
        out = tf.squeeze(out, [1, 2])
    if make_signature:
        # Export path: expose both the default output and the intermediate
        # activations as TF-Hub signatures.
        hub.add_signature(inputs={'image': images}, outputs=out)
        hub.add_signature(
            name='representation',
            inputs={'image': images},
            outputs=end_points)
    return out
def image_grid(images, ny, nx, padding=0):
    """Create a batch of image grids from a batch of images.

    Args:
      images: A batch of patches (B,N,H,W,C)
      ny: vertical number of images
      nx: horizontal number of images
      padding: number of zeros between images, if any.

    Returns:
      A tensor batch of image grids shaped (B,H*ny,W*nx,C), although that is a
      simplifying lie: if padding is used h/w will be different.
    """
    with tf.name_scope('grid_image'):
        if padding:
            # Pad only the spatial axes (H and W) of the (B,N,H,W,C) batch.
            pad_spec = [[0, 0], [0, 0], [padding, padding],
                        [padding, padding], [0, 0]]
            images = tf.pad(images, pad_spec)
        rows = []
        for y in range(ny):
            # Concatenate nx patches side by side along the width axis.
            row = tf.concat(
                [images[:, y * nx + x] for x in range(nx)], axis=-2)
            rows.append(row)
        # Stack the rows along the height axis.
        return tf.concat(rows, axis=-3)
def creates_estimator_model(images, labels, perms, num_classes, mode):
    """Creates EstimatorSpec for the patch based self supervised models.

    Args:
      images: images
      labels: self supervised labels (class indices)
      perms: patch permutations
      num_classes: number of different permutations
      mode: model's mode: training, eval or prediction

    Returns:
      EstimatorSpec
    """
    print(' +++ Mode: %s, images: %s, labels: %s' % (mode, images, labels))
    # Collapse all leading batch/patch dims into a single batch dim (NHWC).
    images = tf.reshape(images, shape=[-1] + images.get_shape().as_list()[-3:])
    if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
        # Train/eval: build the model directly under a 'module' variable scope.
        with tf.variable_scope('module'):
            image_fn = lambda: images
            logits = apply_model(
                image_fn=image_fn,
                is_training=(mode == tf.estimator.ModeKeys.TRAIN),
                num_outputs=num_classes,
                perms=perms,
                make_signature=False)
    else:
        # Prediction: wrap the model as a TF-Hub module so it can be exported
        # with serving signatures; input comes from a placeholder.
        input_shape = utils.str2intlist(
            FLAGS.get_flag_value('serving_input_shape', 'None,None,None,3'))
        image_fn = lambda: tf.placeholder(  # pylint: disable=g-long-lambda
            shape=input_shape,
            dtype=tf.float32)
        apply_model_function = functools.partial(
            apply_model,
            image_fn=image_fn,
            num_outputs=num_classes,
            perms=perms,
            make_signature=True)
        # Two graph variants are built: one tagged for training (batch norm /
        # dropout active) and one for inference.
        tf_hub_module_spec = hub.create_module_spec(
            apply_model_function, [(utils.TAGS_IS_TRAINING, {
                'is_training': True
            }), (set(), {
                'is_training': False
            })],
            drop_collections=['summaries'])
        tf_hub_module = hub.Module(tf_hub_module_spec, trainable=False, tags=set())
        hub.register_module_for_export(tf_hub_module, export_name='module')
        logits = tf_hub_module(images)
        # Prediction mode returns early: no loss/metrics needed.
        return make_estimator(mode, predictions=logits)
    # build loss and accuracy
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    loss = tf.reduce_mean(loss)
    # Metrics are passed as a (metric_fn, args) pair for the estimator.
    eval_metrics = (
        lambda labels, logits: {  # pylint: disable=g-long-lambda
            'accuracy': tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(logits, axis=-1))},
        [labels, logits])
    return make_estimator(mode, loss, eval_metrics, logits)
def fully_connected(inputs,
                    num_classes=100,
                    weight_decay=5e-4,
                    keep_prob=0.5,
                    is_training=True):
    """Two layers fully connected network copied from Alexnet fc7-fc8."""
    _, _, width, _ = inputs.get_shape().as_list()
    regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)
    # fc7: a conv whose kernel spans the full spatial extent of the input.
    x = tf.layers.conv2d(
        inputs,
        filters=4096,
        kernel_size=width,
        padding='same',
        kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
        bias_initializer=tf.constant_initializer(0.1),
        kernel_regularizer=regularizer)
    x = tf.layers.batch_normalization(
        x, momentum=0.997, epsilon=1e-5, fused=None, training=is_training)
    x = tf.nn.relu(x)
    if is_training:
        x = tf.nn.dropout(x, keep_prob=keep_prob)
    # fc8: 1x1 conv producing the class logits.
    x = tf.layers.conv2d(
        x,
        filters=num_classes,
        kernel_size=1,
        padding='same',
        kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=regularizer)
    return x
def generate_patch_locations():
    """Generates relative patch locations.

    Returns:
      (perms, count): an [8, 2] numpy array pairing each non-center patch
      index with the center patch (index 4) of a 3x3 grid, and its length.
    """
    center = 4
    pairs = [(idx, center) for idx in range(9) if idx != center]
    perms = np.array(pairs)
    return perms, len(perms)
def load_permutations():
    """Loads a set of pre-defined permutations.

    Reads PERMUTATION_PATH, a binary file of little-endian int32s: a header
    of (num_perms, c) followed by num_perms*c permutation entries.

    Returns:
      (perms, num_perms): an int array of shape [num_perms, c] with entries
      in [0, PATCH_COUNT), and the number of permutations.
    """
    with tf.gfile.Open(PERMUTATION_PATH, 'rb') as f:
        int32_size = 4
        # Header: two little-endian int32s (permutation count and length).
        s = f.read(int32_size * 2)
        [num_perms, c] = struct.unpack('<ll', s)
        perms = []
        for _ in range(num_perms * c):
            s = f.read(int32_size)
            x = struct.unpack('<l', s)
            perms.append(x[0])
        perms = np.reshape(perms, [num_perms, c])
    # The bin file used index [1,9] for permutation, updated to [0, 8] for index.
    perms = perms - 1
    assert np.min(perms) == 0 and np.max(perms) == PATCH_COUNT - 1
    return perms, num_perms
def permutate_and_concat_image_patches(patch_embeddings, perms):
    """Permutates patches from an image according to permutations.

    Args:
      patch_embeddings: input tensor with shape [PATCH_COUNT, h, w, c], where
        PATCH_COUNT is the patch number per image.
      perms: numpy array with shape [m, k], with element in range
        [0, PATCH_COUNT). Permutation is used to concat the patches.

    Returns:
      out: output tensor with shape [m, h, w, c*k].
    """
    _, h, w, c = patch_embeddings.get_shape().as_list()
    # `perms` can be a numpy array or a tensor; read its shape accordingly.
    if isinstance(perms, np.ndarray):
        num_perms, perm_len = perms.shape
    else:
        num_perms, perm_len = perms.get_shape().as_list()

    def permutate_patch(perm):
        # Select patches in permutation order, then fold the patch axis into
        # the channel axis: [k, h, w, c] -> [1, h, w, perm_len * c].
        permed = tf.gather(patch_embeddings, perm, axis=0)
        concat_tensor = tf.transpose(permed, perm=[1, 2, 3, 0])
        concat_tensor = tf.reshape(
            concat_tensor, shape=[-1, h, w, perm_len * c])
        return concat_tensor

    # One concatenated tensor per permutation, stacked on a leading axis.
    permed_patches = tf.stack([
        permutate_patch(perms[i]) for i in range(num_perms)
    ])
    return permed_patches
def permutate_and_concat_batch_patches(batch_patch_embeddings, perms):
    """Permutates patches from a mini batch according to permutations.

    Args:
      batch_patch_embeddings: input tensor with shape [n*PATCH_COUNT, h, w, c] or
        [n*PATCH_COUNT, c], where PATCH_COUNT is the patch number per image
        and n is the number of images in this mini batch.
      perms: numpy array with shape [m, k], with element in range
        [0, PATCH_COUNT). Permutation is used to concat the patches.

    Returns:
      out: output tensor with shape [n*m, h, w, c*k].
    """
    print(' +++ permutate patches input: %s' % batch_patch_embeddings)
    # Accept either spatial embeddings (rank 4) or flat vectors (rank 2,
    # treated as 1x1 spatial maps).
    if len(batch_patch_embeddings.get_shape().as_list()) == 4:
        _, h, w, c = batch_patch_embeddings.get_shape().as_list()
    elif len(batch_patch_embeddings.get_shape().as_list()) == 2:
        _, c = batch_patch_embeddings.get_shape().as_list()
        h, w = (1, 1)
    else:
        raise ValueError('Unexpected batch_patch_embeddings shape: %s' %
                         batch_patch_embeddings.get_shape().as_list())
    # Split the patch axis back out of the batch axis.
    patches = tf.reshape(batch_patch_embeddings, shape=[-1, PATCH_COUNT, h, w, c])
    # Permute/concat per image; relies on a statically known batch size.
    patches = tf.stack([
        permutate_and_concat_image_patches(patches[i], perms)
        for i in range(patches.get_shape().as_list()[0])
    ])
    patches = tf.reshape(patches, shape=[-1, h, w, perms.shape[1] * c])
    print(' +++ permutate patches output: %s' % batch_patch_embeddings)
    return patches
def get_patch_representation(
        images,
        hub_module,
        patch_preprocess='crop_patches,standardization',
        is_training=False,
        target_features=9000,
        pooling_fn=None,
        combine_patches='concat',
        signature='representation'):
    """Computes per-patch representations via a hub module and combines them.

    Args:
      images: input images, can be full image (NHWC) or image patchs (NPHWC).
      hub_module: hub module.
      patch_preprocess: preprocess applied to the image. Note that preprocess may
        require setting parameters in the FLAGS.config file.
      is_training: is training mode.
      target_features: target feature dimension. Note that the features might
        exceed this number if there're too many channels.
      pooling_fn: pooling method applied to the features.
      combine_patches: one of {'concat', 'max_pool', 'avg_pool'}.
      signature: signature for the hub module.

    Returns:
      out: output representation tensors.

    Raises:
      ValueError: unsupported combine_patches.
    """
    if patch_preprocess:
        preprocess_fn = preprocess.get_preprocess_fn(patch_preprocess, is_training)
        images = preprocess_fn({'image': images})['image']

    assert len(images.get_shape().as_list()) == 5, 'Shape must match NPHWC.'
    _, num_of_patches, h, w, c = images.get_shape().as_list()
    # Fold the patch axis into the batch axis before running the module.
    images = tf.reshape(images, shape=[-1, h, w, c])

    out_tensors = hub_module(
        images,
        signature=signature,
        as_dict=True)

    if combine_patches == 'concat':
        # Patches get concatenated channel-wise, so split the budget per patch.
        target_features = target_features // num_of_patches
    if pooling_fn is not None:
        out_tensors = pooling_fn(out_tensors)

    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both. Only existing keys are reassigned, so
    # iterating the live dict is safe.
    for k, t in out_tensors.items():
        if len(t.get_shape().as_list()) == 2:
            t = t[:, None, None, :]
        assert len(t.get_shape().as_list()) == 4, 'Unsupported rank %d' % len(
            t.get_shape().as_list())
        # Take patch-dimension out of batch-dimension: [NP]HWC -> NPHWC
        t = tf.reshape(t, [-1, num_of_patches] + t.get_shape().as_list()[-3:])
        if combine_patches == 'concat':
            # [N, P, H, W, C] -> [N, H, W, P*C]
            _, p, h, w, c = t.get_shape().as_list()
            out_tensors[k] = tf.reshape(
                tf.transpose(t, perm=[0, 2, 3, 4, 1]), tf.stack([-1, h, w, p * c]))
        elif combine_patches == 'max_pool':
            # Reduce max on P channel of NPHWC.
            out_tensors[k] = tf.reduce_max(t, axis=1)
        elif combine_patches == 'avg_pool':
            # Reduce mean on P channel of NPHWC.
            out_tensors[k] = tf.reduce_mean(t, axis=1)
        else:
            raise ValueError(
                'Unsupported combine patches method %s.' % combine_patches)
    return out_tensors
| [
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"trainer.make_estimator",
"tensorflow.reduce_max",
"tensorflow.layers.batch_normalization",
"tensorflow.nn.relu",
"tensorflow.gather",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.s... | [((5372, 5448), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (5418, 5448), True, 'import tensorflow as tf\n'), ((5465, 5485), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (5479, 5485), True, 'import tensorflow as tf\n'), ((5717, 5765), 'trainer.make_estimator', 'make_estimator', (['mode', 'loss', 'eval_metrics', 'logits'], {}), '(mode, loss, eval_metrics, logits)\n', (5731, 5765), False, 'from trainer import make_estimator\n'), ((6097, 6149), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', ([], {'scale': 'weight_decay'}), '(scale=weight_decay)\n', (6129, 6149), True, 'import tensorflow as tf\n'), ((6426, 6530), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'momentum': '(0.997)', 'epsilon': '(1e-05)', 'fused': 'None', 'training': 'is_training'}), '(net, momentum=0.997, epsilon=1e-05, fused=\n None, training=is_training)\n', (6455, 6530), True, 'import tensorflow as tf\n'), ((6540, 6555), 'tensorflow.nn.relu', 'tf.nn.relu', (['net'], {}), '(net)\n', (6550, 6555), True, 'import tensorflow as tf\n'), ((9747, 9815), 'tensorflow.reshape', 'tf.reshape', (['batch_patch_embeddings'], {'shape': '[-1, PATCH_COUNT, h, w, c]'}), '(batch_patch_embeddings, shape=[-1, PATCH_COUNT, h, w, c])\n', (9757, 9815), True, 'import tensorflow as tf\n'), ((9973, 10030), 'tensorflow.reshape', 'tf.reshape', (['patches'], {'shape': '[-1, h, w, perms.shape[1] * c]'}), '(patches, shape=[-1, h, w, perms.shape[1] * c])\n', (9983, 10030), True, 'import tensorflow as tf\n'), ((11439, 11478), 'tensorflow.reshape', 'tf.reshape', (['images'], {'shape': '[-1, h, w, c]'}), '(images, shape=[-1, h, w, c])\n', (11449, 11478), True, 'import tensorflow as tf\n'), ((2562, 2585), 'tensorflow.squeeze', 'tf.squeeze', (['out', '[1, 2]'], {}), '(out, [1, 
2])\n', (2572, 2585), True, 'import tensorflow as tf\n'), ((2612, 2668), 'tensorflow_hub.add_signature', 'hub.add_signature', ([], {'inputs': "{'image': images}", 'outputs': 'out'}), "(inputs={'image': images}, outputs=out)\n", (2629, 2668), True, 'import tensorflow_hub as hub\n'), ((2673, 2764), 'tensorflow_hub.add_signature', 'hub.add_signature', ([], {'name': '"""representation"""', 'inputs': "{'image': images}", 'outputs': 'end_points'}), "(name='representation', inputs={'image': images}, outputs=\n end_points)\n", (2690, 2764), True, 'import tensorflow_hub as hub\n'), ((3242, 3269), 'tensorflow.name_scope', 'tf.name_scope', (['"""grid_image"""'], {}), "('grid_image')\n", (3255, 3269), True, 'import tensorflow as tf\n'), ((4697, 4809), 'functools.partial', 'functools.partial', (['apply_model'], {'image_fn': 'image_fn', 'num_outputs': 'num_classes', 'perms': 'perms', 'make_signature': '(True)'}), '(apply_model, image_fn=image_fn, num_outputs=num_classes,\n perms=perms, make_signature=True)\n', (4714, 4809), False, 'import functools\n'), ((5179, 5246), 'tensorflow_hub.register_module_for_export', 'hub.register_module_for_export', (['tf_hub_module'], {'export_name': '"""module"""'}), "(tf_hub_module, export_name='module')\n", (5209, 5246), True, 'import tensorflow_hub as hub\n'), ((5293, 5333), 'trainer.make_estimator', 'make_estimator', (['mode'], {'predictions': 'logits'}), '(mode, predictions=logits)\n', (5307, 5333), False, 'from trainer import make_estimator\n'), ((6584, 6623), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['net'], {'keep_prob': 'keep_prob'}), '(net, keep_prob=keep_prob)\n', (6597, 6623), True, 'import tensorflow as tf\n'), ((7152, 7189), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['PERMUTATION_PATH', '"""rb"""'], {}), "(PERMUTATION_PATH, 'rb')\n", (7165, 7189), True, 'import tensorflow as tf\n'), ((7267, 7290), 'struct.unpack', 'struct.unpack', (['"""<ll"""', 's'], {}), "('<ll', s)\n", (7280, 7290), False, 'import struct\n'), ((7440, 7473), 
'numpy.reshape', 'np.reshape', (['perms', '[num_perms, c]'], {}), '(perms, [num_perms, c])\n', (7450, 7473), True, 'import numpy as np\n'), ((8365, 8406), 'tensorflow.gather', 'tf.gather', (['patch_embeddings', 'perm'], {'axis': '(0)'}), '(patch_embeddings, perm, axis=0)\n', (8374, 8406), True, 'import tensorflow as tf\n'), ((8427, 8466), 'tensorflow.transpose', 'tf.transpose', (['permed'], {'perm': '[1, 2, 3, 0]'}), '(permed, perm=[1, 2, 3, 0])\n', (8439, 8466), True, 'import tensorflow as tf\n'), ((8487, 8544), 'tensorflow.reshape', 'tf.reshape', (['concat_tensor'], {'shape': '[-1, h, w, perm_len * c]'}), '(concat_tensor, shape=[-1, h, w, perm_len * c])\n', (8497, 8544), True, 'import tensorflow as tf\n'), ((11177, 11236), 'preprocess.get_preprocess_fn', 'preprocess.get_preprocess_fn', (['patch_preprocess', 'is_training'], {}), '(patch_preprocess, is_training)\n', (11205, 11236), False, 'import preprocess\n'), ((3337, 3395), 'tensorflow.pad', 'tf.pad', (['images', '[[0, 0], [0, 0], padding, padding, [0, 0]]'], {}), '(images, [[0, 0], [0, 0], padding, padding, [0, 0]])\n', (3343, 3395), True, 'import tensorflow as tf\n'), ((4157, 4184), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""module"""'], {}), "('module')\n", (4174, 4184), True, 'import tensorflow as tf\n'), ((4567, 4618), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'input_shape', 'dtype': 'tf.float32'}), '(shape=input_shape, dtype=tf.float32)\n', (4581, 4618), True, 'import tensorflow as tf\n'), ((6275, 6318), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0.0)', '(0.005)'], {}), '(0.0, 0.005)\n', (6306, 6318), True, 'import tensorflow as tf\n'), ((6343, 6371), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (6366, 6371), True, 'import tensorflow as tf\n'), ((6756, 6799), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0.0)', '(0.005)'], {}), '(0.0, 0.005)\n', (6787, 6799), 
True, 'import tensorflow as tf\n'), ((6824, 6846), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6844, 6846), True, 'import tensorflow as tf\n'), ((7380, 7402), 'struct.unpack', 'struct.unpack', (['"""<l"""', 's'], {}), "('<l', s)\n", (7393, 7402), False, 'import struct\n'), ((7584, 7597), 'numpy.min', 'np.min', (['perms'], {}), '(perms)\n', (7590, 7597), True, 'import numpy as np\n'), ((7607, 7620), 'numpy.max', 'np.max', (['perms'], {}), '(perms)\n', (7613, 7620), True, 'import numpy as np\n'), ((12263, 12300), 'tensorflow.transpose', 'tf.transpose', (['t'], {'perm': '[0, 2, 3, 4, 1]'}), '(t, perm=[0, 2, 3, 4, 1])\n', (12275, 12300), True, 'import tensorflow as tf\n'), ((12302, 12329), 'tensorflow.stack', 'tf.stack', (['[-1, h, w, p * c]'], {}), '([-1, h, w, p * c])\n', (12310, 12329), True, 'import tensorflow as tf\n'), ((12436, 12460), 'tensorflow.reduce_max', 'tf.reduce_max', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (12449, 12460), True, 'import tensorflow as tf\n'), ((12567, 12592), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (12581, 12592), True, 'import tensorflow as tf\n'), ((5654, 5680), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (5663, 5680), True, 'import tensorflow as tf\n')] |
from numpy.core.numeric import identity
from .model import Model
from ..util.metrics import mse
import numpy as np
class LinearRegression(Model):
    def __init__(self, gd=False, epochs=1000, lr=0.001):
        """Linear regression Model.

        :param bool gd: if True uses gradient descent (GD) to train the
            model, otherwise the closed form solution. Default False.
        :param int epochs: number of epochs for GD
        :param float lr: learning rate for GD
        """
        super(LinearRegression, self).__init__()
        self.gd = gd
        self.theta = None
        self.epochs = epochs
        self.lr = lr

    def fit(self, dataset):
        """Fit the model to the dataset (closed form or gradient descent)."""
        X, Y = dataset.getXy()
        # add the x0=1 column that corresponds to the independent (bias) term
        X = np.hstack((np.ones((X.shape[0], 1)), X))
        self.X = X
        self.Y = Y
        # Closed form or GD
        if self.gd:
            self.train_gd(X, Y)
        else:
            self.train_closed(X, Y)
        self.is_fitted = True

    def cost(self):
        """Return half the MSE of the current theta on the training data."""
        y_pred = np.dot(self.X, self.theta)
        return mse(self.Y, y_pred) / 2

    def train_closed(self, X, Y):
        """Uses closed form linear algebra to fit the model.
        theta=inv(XT*X)*XT*y
        """
        self.theta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)

    def train_gd(self, X, Y):
        """Uses batch gradient descent to fit the model."""
        m = X.shape[0]
        n = X.shape[1]
        self.history = {}
        self.theta = np.zeros(n)
        for epoch in range(self.epochs):
            grad = 1 / m * (X.dot(self.theta) - Y).dot(X)  # gradient by definition
            self.theta -= self.lr * grad
            # BUG FIX: store a copy of theta. `self.theta[:]` on a numpy
            # array is a *view*, so every history entry would alias the same
            # (final) parameter vector after in-place updates.
            self.history[epoch] = [self.theta.copy(), self.cost()]

    def predict(self, X):
        """Predict the target for one sample X (1D, without the bias term)."""
        assert self.is_fitted, "Model must be fit before predicting"
        _x = np.hstack(([1], X))
        return np.dot(self.theta, _x)
class LinearRegressionReg(LinearRegression):
    def __init__(self, gd=False, epochs=100, lr=0.01, lbd=1):
        """Linear regression model with L2 regularization.

        :param bool gd: if True uses gradient descent (GD) to train the model
            otherwise closed form linear algebra. Default False.
        :param int epochs: Number of epochs for GD
        :param float lr: Learning rate for GD
        :param float lbd: lambda for the regularization"""
        super(LinearRegressionReg, self).__init__()
        self.gd = gd
        self.epochs = epochs
        self.lr = lr
        self.lbd = lbd
        self.theta = None

    def train_closed(self, X, Y):
        """Use closed form linear algebra to fit the model.
        theta=inv(XT*X+lbd*I')*XT*y
        where I' is the identity with I'[0, 0] = 0 so that the intercept is
        not regularized."""
        n = X.shape[1]
        identity = np.eye(n)
        identity[0, 0] = 0
        self.theta = np.linalg.inv(X.T.dot(X) + self.lbd * identity).dot(X.T).dot(Y)
        self.is_fitted = True

    def train_gd(self, X, Y):
        """Uses gradient descent with L2 regularization to fit the model."""
        m = X.shape[0]
        n = X.shape[1]
        self.history = {}
        self.theta = np.zeros(n)
        # BUG FIX: the penalty vector must have one entry per *feature* (n),
        # not per sample (m); np.full(m, ...) broadcasts wrongly unless m == n.
        lbds = np.full(n, self.lbd)
        lbds[0] = 0  # so that theta(0) is excluded from regularization
        for epoch in range(self.epochs):
            grad = (X.dot(self.theta) - Y).dot(X)  # unregularized gradient
            # BUG FIX: the L2 term contributes lbd * theta (elementwise,
            # intercept excluded) to the gradient, not the constant lbd.
            self.theta -= (self.lr / m) * (lbds * self.theta + grad)
            # BUG FIX: store a copy; `self.theta[:]` is a view that would
            # alias the same mutated array in every history entry.
            self.history[epoch] = [self.theta.copy(), self.cost()]

    # predict() and cost() are inherited unchanged from LinearRegression;
    # the previous verbatim duplicates were removed.
| [
"numpy.full",
"numpy.eye",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.dot"
] | [((913, 939), 'numpy.dot', 'np.dot', (['self.X', 'self.theta'], {}), '(self.X, self.theta)\n', (919, 939), True, 'import numpy as np\n'), ((1305, 1316), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1313, 1316), True, 'import numpy as np\n'), ((1657, 1676), 'numpy.hstack', 'np.hstack', (['([1], X)'], {}), '(([1], X))\n', (1666, 1676), True, 'import numpy as np\n'), ((1692, 1714), 'numpy.dot', 'np.dot', (['self.theta', '_x'], {}), '(self.theta, _x)\n', (1698, 1714), True, 'import numpy as np\n'), ((2544, 2553), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2550, 2553), True, 'import numpy as np\n'), ((2876, 2887), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2884, 2887), True, 'import numpy as np\n'), ((2904, 2924), 'numpy.full', 'np.full', (['m', 'self.lbd'], {}), '(m, self.lbd)\n', (2911, 2924), True, 'import numpy as np\n'), ((3349, 3368), 'numpy.hstack', 'np.hstack', (['([1], X)'], {}), '(([1], X))\n', (3358, 3368), True, 'import numpy as np\n'), ((3384, 3406), 'numpy.dot', 'np.dot', (['self.theta', '_x'], {}), '(self.theta, _x)\n', (3390, 3406), True, 'import numpy as np\n'), ((3445, 3471), 'numpy.dot', 'np.dot', (['self.X', 'self.theta'], {}), '(self.X, self.theta)\n', (3451, 3471), True, 'import numpy as np\n'), ((636, 660), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (643, 660), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.distributions import Categorical, Normal
import rl_sandbox.constants as c
class RLAgent():
    """Base agent pairing a model with a learning algorithm."""

    def __init__(self, model, learning_algorithm):
        self.model = model
        self.learning_algorithm = learning_algorithm

    def update(self, curr_obs, curr_h_state, action, reward, done, info, next_obs, next_h_state):
        """Forward one transition to the learning algorithm and return its result."""
        return self.learning_algorithm.update(
            curr_obs, curr_h_state, action, reward, done, info, next_obs,
            next_h_state)

    def compute_action(self, obs, **kwargs):
        # Subclasses must implement action selection.
        raise NotImplementedError

    def reset(self):
        """Return the initial hidden state for a new episode."""
        # Recurrent models expose an initializer; otherwise a NaN placeholder
        # hidden state is returned.
        if hasattr(self.model, c.INITIALIZE_HIDDEN_STATE):
            return self.model.initialize_hidden_state().numpy().astype(np.float32)
        return np.array([np.nan], dtype=np.float32)
class ACAgent(RLAgent):
    """Actor-critic agent: wraps a model exposing (deterministic_)compute_action."""

    def __init__(self, model, learning_algorithm, preprocess=lambda obs: obs):
        super().__init__(model=model,
                         learning_algorithm=learning_algorithm)
        # NOTE: the instance attribute intentionally holds the preprocessing
        # callable. A previous class-level `preprocess` method was dead code
        # (always shadowed by this attribute) and has been removed.
        self.preprocess = preprocess

    def _prepare(self, obs, hidden_state):
        """Convert obs/hidden state to batched tensors and apply preprocessing."""
        obs = torch.tensor(obs).unsqueeze(0)
        obs = self.preprocess(obs)
        hidden_state = torch.tensor(hidden_state).unsqueeze(0)
        return obs, hidden_state

    def compute_action(self, obs, hidden_state):
        """Sample an action; returns (action, hidden_state, act_info)."""
        obs, hidden_state = self._prepare(obs, hidden_state)
        action, value, hidden_state, log_prob, entropy, mean, variance = \
            self.model.compute_action(obs, hidden_state)
        act_info = {c.VALUE: value,
                    c.LOG_PROB: log_prob,
                    c.ENTROPY: entropy,
                    c.MEAN: mean,
                    c.VARIANCE: variance}
        return action, hidden_state, act_info

    def deterministic_action(self, obs, hidden_state):
        """Take the deterministic action; returns (action, hidden_state, act_info)."""
        obs, hidden_state = self._prepare(obs, hidden_state)
        action, value, hidden_state, log_prob, entropy = \
            self.model.deterministic_action(obs, hidden_state)
        act_info = {c.VALUE: value,
                    c.LOG_PROB: log_prob,
                    c.ENTROPY: entropy}
        return action, hidden_state, act_info
| [
"numpy.array",
"torch.tensor"
] | [((1110, 1146), 'numpy.array', 'np.array', (['[np.nan]'], {'dtype': 'np.float32'}), '([np.nan], dtype=np.float32)\n', (1118, 1146), True, 'import numpy as np\n'), ((1506, 1523), 'torch.tensor', 'torch.tensor', (['obs'], {}), '(obs)\n', (1518, 1523), False, 'import torch\n'), ((1595, 1621), 'torch.tensor', 'torch.tensor', (['hidden_state'], {}), '(hidden_state)\n', (1607, 1621), False, 'import torch\n'), ((2076, 2093), 'torch.tensor', 'torch.tensor', (['obs'], {}), '(obs)\n', (2088, 2093), False, 'import torch\n'), ((2165, 2191), 'torch.tensor', 'torch.tensor', (['hidden_state'], {}), '(hidden_state)\n', (2177, 2191), False, 'import torch\n')] |
from torch.utils.data import Dataset
from mol_tree import MolTree
import numpy as np
class MoleculeDataset(Dataset):
    """Dataset of SMILES strings, converted lazily to junction trees."""

    def __init__(self, data_file):
        # One SMILES per line; keep only the first whitespace-separated token.
        with open(data_file) as handle:
            self.data = [row.strip("\r\n ").split()[0] for row in handle]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Build the junction tree on the fly. MolTree holds self.mol and
        # self.smiles plus the clique nodes; this recomputation is the price
        # of not caching the junction tree representation.
        tree = MolTree(self.data[idx])
        tree.recover()
        tree.assemble()
        return tree
class PropDataset(Dataset):
    """Dataset pairing SMILES junction trees with per-molecule properties."""

    def __init__(self, data_file, prop_file):
        # Property values are loaded eagerly, one row per molecule.
        self.prop_data = np.loadtxt(prop_file)
        with open(data_file) as handle:
            self.data = [row.strip("\r\n ").split()[0] for row in handle]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        tree = MolTree(self.data[idx])
        tree.recover()
        tree.assemble()
        return tree, self.prop_data[idx]
"numpy.loadtxt",
"mol_tree.MolTree"
] | [((665, 680), 'mol_tree.MolTree', 'MolTree', (['smiles'], {}), '(smiles)\n', (672, 680), False, 'from mol_tree import MolTree\n'), ((861, 882), 'numpy.loadtxt', 'np.loadtxt', (['prop_file'], {}), '(prop_file)\n', (871, 882), True, 'import numpy as np\n'), ((1131, 1146), 'mol_tree.MolTree', 'MolTree', (['smiles'], {}), '(smiles)\n', (1138, 1146), False, 'from mol_tree import MolTree\n')] |
import numpy as np
import math
from scipy.special import comb
def pwm(x, n=4):
    r"""Return a list with the n first probability weighted moments (:math:`b_r`).

    .. math::
        b_r = \frac{\sum_{i=1}^{n_s} x_i {i \choose r}}{n_s {n_s - 1\choose r}}

    where :math:`n_s` is the size of the sample *x*.

    :param x: sample values
    :type x: list or numpy.array
    :param n: number of returned probability weighted moments (:math:`b_r`)
    :type n: int
    :return: (*list*) probability weighted moments (:math:`b_r`)
    """
    ordered = np.sort(x)
    size = len(ordered)
    moments = []
    for r in range(n):
        weighted_sum = sum(comb(i, r) * ordered[i] for i in range(size))
        moments.append(weighted_sum / (size * comb(size - 1, r)))
    return moments
def lmoments(x, n=4, ratio=True, lcv=False):
    r"""Return a list with the n first L-moments of the sample x.

    .. math::
        \lambda_{r + 1} = \sum_{k=0}^{r} (-1)^{r - k} {r \choose k} {r + k \choose k} b_k

    with :math:`0 \leq r \leq n - 1`, where :math:`b_k` are the probability
    weighted moments (see :func:`pwm`).

    If ratio is True, replace :math:`\lambda_r` with :math:`\lambda_r/\lambda_2`
    for :math:`r \geq 3` (L-skewness, L-kurtosis, ...). If lcv is True, replace
    :math:`\lambda_2` with the coefficient of L-variation
    :math:`\lambda_2/\lambda_1`.

    :param x: sample values
    :type x: list or numpy.array
    :param n: number of returned L-moments
    :type n: int
    :param ratio: if True, replace :math:`\lambda_r` with
        :math:`\lambda_r/\lambda_2` for :math:`r \geq 3`. Default True.
    :type ratio: bool
    :param lcv: if True, replace :math:`\lambda_2` with
        :math:`\lambda_2/\lambda_1`
    :type lcv: bool
    :return: (*list*) L-moments of the sample x
    """
    b = pwm(x, n)
    result = []
    for r in range(n):
        lam = sum((-1) ** (r - k) * comb(r, k) * comb(r + k, k) * b[k]
                  for k in range(r + 1))
        result.append(lam)
    # Order matters: ratios use lambda_2 before it is possibly replaced below.
    if ratio:
        result[2:] = [v / result[1] for v in result[2:]]
    if lcv:
        result[1] /= result[0]
    return result
def lmoments_parameter_estimation_generalized_logistic(lambda1, lambda2, tau):
    """Return the location, scale and shape of the generalized logistic distribution.

    Based on SUBROUTINE PELGLO of the LMOMENTS Fortran package version 3.04,
    July 2005.

    :param lambda1: L-moment-1
    :param lambda2: L-moment-2
    :param tau: L-moment-3 / L-moment-2
    :return: (*float*) location, scale and shape
    """
    assert lambda2 > 0 and -1 < -tau < 1
    shape = -tau
    try:
        a = math.sin(shape * math.pi) / (shape * math.pi)
    except ZeroDivisionError:
        # shape == 0 degenerates to the plain logistic distribution.
        return lambda1, lambda2, 0.0
    scale = lambda2 * a
    location = lambda1 - (scale / shape) * (1.0 - 1.0 / a)
    return location, scale, shape
def lmoments_parameter_estimation_gamma(lambda1, lambda2):
    r"""Return the shape and scale of the gamma distribution.

    Based on SUBROUTINE PELGAM of the LMOMENTS Fortran package version 3.04,
    July 2005. Uses rational-function approximations in the L-CV.

    :param lambda1: L-moment-1 (:math:`\lambda_1`)
    :type lambda1: float
    :param lambda2: L-moment-2 (:math:`\lambda_2`)
    :type lambda2: float
    :return: (*float*) shape (alpha) and scale; (None, None) for invalid input
    """
    # L-moments must satisfy 0 < lambda2 < lambda1 for a valid gamma fit.
    if lambda1 <= lambda2 or lambda2 <= 0.0:
        return None, None
    cv = lambda2 / lambda1
    if cv < 0.5:
        t = math.pi * cv * cv
        alpha = (1.0 - 0.3080 * t) / (t * (1.0 + t * (-0.05812 + t * 0.01765)))
    else:
        t = 1.0 - cv
        alpha = t * (0.7213 - t * 0.5947) / (1.0 + t * (-2.1817 + t * 1.2113))
    return alpha, lambda1 / alpha
def genloglogistic_cdf(x, loc, scale, shape):
    """Return the cumulative distribution function of the generalized logistic distribution

    Based on SUBROUTINE CDFGLO of the LMOMENTS Fortran package version 3.04, July 2005

    :param x: sample values
    :type x: numpy.array
    :param loc: location parameter (:math:`\mu`)
    :type loc: float
    :param scale: scale parameter (:math:`\sigma` > 0)
    :type scale: float
    :param shape: shape parameter (:math:`\kappa`)
    :type shape: float
    :return: (*numpy.array*) cdf
    """
    x = (x - loc) / scale
    # Round off floating-point noise so a numerically-zero shape hits the
    # logistic limit below.
    shape = round(shape, 15)
    if shape == 0:
        # Logistic limit of the GLO distribution.  Bug fix: the previous
        # implementation caught ZeroDivisionError, but NumPy division by zero
        # emits a RuntimeWarning and returns inf/nan instead of raising, so
        # shape == 0 silently produced nan values for array input.
        return 1.0 / (1 + np.exp(-x))
    y = np.log(1 - shape * x) / shape
    return 1.0 / (1 + np.exp(y))
| [
"numpy.log",
"scipy.special.comb",
"math.sin",
"numpy.sort",
"numpy.exp"
] | [((654, 664), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (661, 664), True, 'import numpy as np\n'), ((2959, 2980), 'math.sin', 'math.sin', (['(k * math.pi)'], {}), '(k * math.pi)\n', (2967, 2980), False, 'import math\n'), ((4571, 4592), 'numpy.log', 'np.log', (['(1 - shape * x)'], {}), '(1 - shape * x)\n', (4577, 4592), True, 'import numpy as np\n'), ((745, 760), 'scipy.special.comb', 'comb', (['(ns - 1)', 'r'], {}), '(ns - 1, r)\n', (749, 760), False, 'from scipy.special import comb\n'), ((4627, 4636), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4633, 4636), True, 'import numpy as np\n'), ((4694, 4704), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4700, 4704), True, 'import numpy as np\n'), ((698, 708), 'scipy.special.comb', 'comb', (['i', 'r'], {}), '(i, r)\n', (702, 708), False, 'from scipy.special import comb\n'), ((2269, 2283), 'scipy.special.comb', 'comb', (['(r + k)', 'k'], {}), '(r + k, k)\n', (2273, 2283), False, 'from scipy.special import comb\n'), ((2256, 2266), 'scipy.special.comb', 'comb', (['r', 'k'], {}), '(r, k)\n', (2260, 2266), False, 'from scipy.special import comb\n')] |
import tempfile
import numpy as np
import pytest
from openff.evaluator import unit
from openff.evaluator.backends import ComputeResources
from openff.evaluator.protocols.reweighting import (
ConcatenateObservables,
ConcatenateTrajectories,
ReweightDielectricConstant,
ReweightObservable,
)
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import get_data_filename
from openff.evaluator.utils.observables import ObservableArray, ObservableFrame
def test_concatenate_trajectories():
    """Concatenating a trajectory with itself should double its length."""
    import mdtraj

    pdb_path = get_data_filename("test/trajectories/water.pdb")
    dcd_path = get_data_filename("test/trajectories/water.dcd")
    reference = mdtraj.load(dcd_path, top=pdb_path)
    with tempfile.TemporaryDirectory() as scratch_directory:
        protocol = ConcatenateTrajectories("concatenate_protocol")
        # Feed the same trajectory twice so the output length is predictable.
        protocol.input_coordinate_paths = [pdb_path] * 2
        protocol.input_trajectory_paths = [dcd_path] * 2
        protocol.execute(scratch_directory, ComputeResources())
        combined = mdtraj.load(protocol.output_trajectory_path, top=pdb_path)
    assert len(combined) == len(reference) * 2
@pytest.mark.parametrize(
    "observables",
    # Cases: one ObservableArray, two ObservableArrays, one ObservableFrame,
    # and two ObservableFrames.
    [
        [ObservableArray(value=np.zeros((2, 3)) * unit.kelvin)],
        [ObservableArray(value=np.zeros((2, 3)) * unit.kelvin)] * 2,
        [
            ObservableFrame(
                {"Temperature": ObservableArray(value=np.zeros((2, 3)) * unit.kelvin)}
            )
        ],
        [
            ObservableFrame(
                {"Temperature": ObservableArray(value=np.zeros((2, 3)) * unit.kelvin)}
            )
        ]
        * 2,
    ],
)
def test_concatenate_observables(observables):
    """Concatenating N inputs of length 2 yields an output of length 2*N."""
    concatenate_protocol = ConcatenateObservables("")
    concatenate_protocol.input_observables = observables
    concatenate_protocol.execute()
    assert len(concatenate_protocol.output_observables) == 2 * len(observables)
def test_reweight_observables():
    """Check that reweighting a simple observable executes cleanly."""
    with tempfile.TemporaryDirectory() as scratch_dir:
        protocol = ReweightObservable("")
        protocol.observable = ObservableArray(value=np.zeros(10) * unit.kelvin)
        # Reference and target states use identical (zero) reduced potentials.
        protocol.reference_reduced_potentials = [
            ObservableArray(value=np.zeros(10) * unit.dimensionless)
        ]
        protocol.frame_counts = [10]
        protocol.target_reduced_potentials = ObservableArray(
            value=np.zeros(10) * unit.dimensionless
        )
        protocol.bootstrap_uncertainties = True
        # Don't require a minimum number of effective samples for this toy case.
        protocol.required_effective_samples = 0
        protocol.execute(scratch_dir, ComputeResources())
def test_reweight_dielectric_constant():
    """Check that reweighting dipole moments / volumes executes cleanly."""
    with tempfile.TemporaryDirectory() as scratch_dir:
        protocol = ReweightDielectricConstant("")
        protocol.dipole_moments = ObservableArray(
            value=np.zeros((10, 3)) * unit.elementary_charge * unit.nanometers
        )
        protocol.volumes = ObservableArray(
            value=np.ones((10, 1)) * unit.nanometer ** 3
        )
        # Reference and target states use identical (zero) reduced potentials.
        protocol.reference_reduced_potentials = [
            ObservableArray(value=np.zeros(10) * unit.dimensionless)
        ]
        protocol.target_reduced_potentials = ObservableArray(
            value=np.zeros(10) * unit.dimensionless
        )
        protocol.thermodynamic_state = ThermodynamicState(
            298.15 * unit.kelvin, 1.0 * unit.atmosphere
        )
        protocol.frame_counts = [10]
        protocol.bootstrap_uncertainties = True
        # Don't require a minimum number of effective samples for this toy case.
        protocol.required_effective_samples = 0
        protocol.execute(scratch_dir, ComputeResources())
| [
"tempfile.TemporaryDirectory",
"openff.evaluator.protocols.reweighting.ConcatenateObservables",
"openff.evaluator.utils.get_data_filename",
"numpy.zeros",
"numpy.ones",
"openff.evaluator.backends.ComputeResources",
"mdtraj.load",
"openff.evaluator.thermodynamics.ThermodynamicState",
"openff.evaluato... | [((585, 633), 'openff.evaluator.utils.get_data_filename', 'get_data_filename', (['"""test/trajectories/water.pdb"""'], {}), "('test/trajectories/water.pdb')\n", (602, 633), False, 'from openff.evaluator.utils import get_data_filename\n'), ((656, 704), 'openff.evaluator.utils.get_data_filename', 'get_data_filename', (['"""test/trajectories/water.dcd"""'], {}), "('test/trajectories/water.dcd')\n", (673, 704), False, 'from openff.evaluator.utils import get_data_filename\n'), ((732, 781), 'mdtraj.load', 'mdtraj.load', (['trajectory_path'], {'top': 'coordinate_path'}), '(trajectory_path, top=coordinate_path)\n', (743, 781), False, 'import mdtraj\n'), ((1964, 1990), 'openff.evaluator.protocols.reweighting.ConcatenateObservables', 'ConcatenateObservables', (['""""""'], {}), "('')\n", (1986, 1990), False, 'from openff.evaluator.protocols.reweighting import ConcatenateObservables, ConcatenateTrajectories, ReweightDielectricConstant, ReweightObservable\n'), ((792, 821), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (819, 821), False, 'import tempfile\n'), ((878, 925), 'openff.evaluator.protocols.reweighting.ConcatenateTrajectories', 'ConcatenateTrajectories', (['"""concatenate_protocol"""'], {}), "('concatenate_protocol')\n", (901, 925), False, 'from openff.evaluator.protocols.reweighting import ConcatenateObservables, ConcatenateTrajectories, ReweightDielectricConstant, ReweightObservable\n'), ((1210, 1287), 'mdtraj.load', 'mdtraj.load', (['concatenate_protocol.output_trajectory_path'], {'top': 'coordinate_path'}), '(concatenate_protocol.output_trajectory_path, top=coordinate_path)\n', (1221, 1287), False, 'import mdtraj\n'), ((2209, 2238), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2236, 2238), False, 'import tempfile\n'), ((2282, 2304), 'openff.evaluator.protocols.reweighting.ReweightObservable', 'ReweightObservable', (['""""""'], {}), "('')\n", (2300, 2304), False, 'from 
openff.evaluator.protocols.reweighting import ConcatenateObservables, ConcatenateTrajectories, ReweightDielectricConstant, ReweightObservable\n'), ((2943, 2972), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2970, 2972), False, 'import tempfile\n'), ((3016, 3046), 'openff.evaluator.protocols.reweighting.ReweightDielectricConstant', 'ReweightDielectricConstant', (['""""""'], {}), "('')\n", (3042, 3046), False, 'from openff.evaluator.protocols.reweighting import ConcatenateObservables, ConcatenateTrajectories, ReweightDielectricConstant, ReweightObservable\n'), ((3635, 3698), 'openff.evaluator.thermodynamics.ThermodynamicState', 'ThermodynamicState', (['(298.15 * unit.kelvin)', '(1.0 * unit.atmosphere)'], {}), '(298.15 * unit.kelvin, 1.0 * unit.atmosphere)\n', (3653, 3698), False, 'from openff.evaluator.thermodynamics import ThermodynamicState\n'), ((1162, 1180), 'openff.evaluator.backends.ComputeResources', 'ComputeResources', ([], {}), '()\n', (1178, 1180), False, 'from openff.evaluator.backends import ComputeResources\n'), ((2870, 2888), 'openff.evaluator.backends.ComputeResources', 'ComputeResources', ([], {}), '()\n', (2886, 2888), False, 'from openff.evaluator.backends import ComputeResources\n'), ((3926, 3944), 'openff.evaluator.backends.ComputeResources', 'ComputeResources', ([], {}), '()\n', (3942, 3944), False, 'from openff.evaluator.backends import ComputeResources\n'), ((2366, 2378), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (2374, 2378), True, 'import numpy as np\n'), ((2667, 2679), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (2675, 2679), True, 'import numpy as np\n'), ((3267, 3283), 'numpy.ones', 'np.ones', (['(10, 1)'], {}), '((10, 1))\n', (3274, 3283), True, 'import numpy as np\n'), ((3543, 3555), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (3551, 3555), True, 'import numpy as np\n'), ((1463, 1479), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1471, 1479), True, 'import 
numpy as np\n'), ((2487, 2499), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (2495, 2499), True, 'import numpy as np\n'), ((3125, 3142), 'numpy.zeros', 'np.zeros', (['(10, 3)'], {}), '((10, 3))\n', (3133, 3142), True, 'import numpy as np\n'), ((3409, 3421), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (3417, 3421), True, 'import numpy as np\n'), ((1528, 1544), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1536, 1544), True, 'import numpy as np\n'), ((1659, 1675), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1667, 1675), True, 'import numpy as np\n'), ((1810, 1826), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1818, 1826), True, 'import numpy as np\n')] |
from sklearn.preprocessing import scale
import numpy as np
def preprocess(X):
    """Center the data matrix by subtracting the per-column mean.

    :param X: data matrix of shape (n_samples, n_features)
    :return: the centered matrix, same shape as ``X``
    """
    # NOTE: cs229 also suggests scaling each feature to unit variance
    # (X / X.std(axis=0)), but sklearn's PCA does not, so only mean-centering
    # is applied here.  The previous version also computed
    # ``scale(X, axis=0)`` and discarded the result -- dead work, removed.
    return X - X.mean(axis=0)
def pca_with_svd(X, n_dimensions=2):
    """Project ``X`` onto its first ``n_dimensions`` principal components via SVD."""
    centered = preprocess(X)
    # Rows of v_t are the principal directions, ordered by singular value.
    _, _, v_t = np.linalg.svd(centered)
    components = v_t[:n_dimensions, :]
    return np.dot(centered, components.T)
def pca(X, n_dimensions=2, algorithm='default'):
    """Reduce ``X`` to ``n_dimensions`` via PCA.

    :param n_dimensions: the dimension of subspace of original space
    :param algorithm: can use default and svd method
    :return: the projected data
    """
    X = preprocess(X)
    if algorithm == 'svd':
        # pca_with_svd centers again; harmless since X is already centered.
        return pca_with_svd(X, n_dimensions)
    dimensions = X.shape[1]
    n_samples = X.shape[0]
    Sigma = np.zeros((dimensions, dimensions))
    for index in range(n_samples):
        # Outer product x x^T of one centered sample with itself.
        co_occurrence = np.dot(X[index, :][:, None], X[index, :][None, :])
        # NOTE(review): np.cov of an outer-product matrix is not the usual
        # covariance estimator (that would be ``Sigma += co_occurrence``);
        # verify this is intentional.
        Sigma += np.cov(co_occurrence)
    Sigma = Sigma/n_samples
    # eigenvalues can be duplicated
    eigenvalues, eigenvectors = np.linalg.eig(Sigma)
    # NOTE(review): eig_vals_sorted is computed but never used.
    eig_vals_sorted = np.sort(eigenvalues)
    eig_vecs_sorted = eigenvectors[:, eigenvalues.argsort()]
    # Eigenvectors of the n_dimensions largest eigenvalues (argsort ascending).
    result_eigen_vectors = eig_vecs_sorted[:, -n_dimensions:]
    # NOTE(review): np.flip with no axis reverses *both* axes of the projected
    # data (samples as well as components) -- confirm intended.
    reduced_x_self = np.flip(np.dot(X, result_eigen_vectors))
    return reduced_x_self
"sklearn.preprocessing.scale",
"numpy.zeros",
"numpy.linalg.eig",
"numpy.sort",
"numpy.linalg.svd",
"numpy.dot",
"numpy.cov"
] | [((358, 374), 'sklearn.preprocessing.scale', 'scale', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (363, 374), False, 'from sklearn.preprocessing import scale\n'), ((520, 536), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (533, 536), True, 'import numpy as np\n'), ((601, 627), 'numpy.dot', 'np.dot', (['X', 'result_vector.T'], {}), '(X, result_vector.T)\n', (607, 627), True, 'import numpy as np\n'), ((1021, 1055), 'numpy.zeros', 'np.zeros', (['(dimensions, dimensions)'], {}), '((dimensions, dimensions))\n', (1029, 1055), True, 'import numpy as np\n'), ((1304, 1324), 'numpy.linalg.eig', 'np.linalg.eig', (['Sigma'], {}), '(Sigma)\n', (1317, 1324), True, 'import numpy as np\n'), ((1348, 1368), 'numpy.sort', 'np.sort', (['eigenvalues'], {}), '(eigenvalues)\n', (1355, 1368), True, 'import numpy as np\n'), ((1116, 1166), 'numpy.dot', 'np.dot', (['X[index, :][:, None]', 'X[index, :][None, :]'], {}), '(X[index, :][:, None], X[index, :][None, :])\n', (1122, 1166), True, 'import numpy as np\n'), ((1184, 1205), 'numpy.cov', 'np.cov', (['co_occurrence'], {}), '(co_occurrence)\n', (1190, 1205), True, 'import numpy as np\n'), ((1523, 1554), 'numpy.dot', 'np.dot', (['X', 'result_eigen_vectors'], {}), '(X, result_eigen_vectors)\n', (1529, 1554), True, 'import numpy as np\n')] |
import os
import sqlite3 as db
import datetime
import socket
import numpy as np
import healpy as hp
import pandas as pd
import matplotlib.path as mplPath
from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec
from rubin_sim.site_models import FieldsDatabase
import rubin_sim
def smallest_signed_angle(a1, a2):
    """Return the smallest signed angular difference from a1 to a2 (radians).

    via https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles

    Parameters
    ----------
    a1, a2 : np.array
        Angles in radians.
    """
    full_circle = 2. * np.pi
    wrapped1 = a1 % full_circle
    wrapped2 = a2 % full_circle
    forward = (wrapped1 - wrapped2) % full_circle
    backward = (wrapped2 - wrapped1) % full_circle
    # Start from the positive direction, then negate wherever going the other
    # way around the circle is shorter.
    out = backward + 0
    flip = np.where(forward < backward)[0]
    out[flip] = -1. * forward[flip]
    return out
class int_rounded(object):
    """
    Class to help force comparisons be made on scaled up integers,
    preventing machine precision issues cross-platforms

    Parameters
    ----------
    inval : number-like thing
        Some number that we want to compare
    scale : float (1e5)
        How much to scale inval before rounding and converting to an int.
    """
    def __init__(self, inval, scale=1e5):
        self.initial = inval
        # Comparisons happen on the scaled-and-rounded integer value.
        self.value = np.round(inval * scale).astype(int)
        self.scale = scale

    def __eq__(self, other):
        return self.value == other.value

    def __ne__(self, other):
        return self.value != other.value

    def __lt__(self, other):
        return self.value < other.value

    def __le__(self, other):
        return self.value <= other.value

    def __gt__(self, other):
        return self.value > other.value

    def __ge__(self, other):
        return self.value >= other.value

    def __repr__(self):
        return str(self.initial)

    def __add__(self, other):
        # Arithmetic keeps the finer (smaller) scale of the two operands.
        out_scale = np.min([self.scale, other.scale])
        result = int_rounded(self.initial + other.initial, scale=out_scale)
        return result

    def __sub__(self, other):
        out_scale = np.min([self.scale, other.scale])
        result = int_rounded(self.initial - other.initial, scale=out_scale)
        return result

    def __mul__(self, other):
        out_scale = np.min([self.scale, other.scale])
        result = int_rounded(self.initial * other.initial, scale=out_scale)
        return result

    def __truediv__(self, other):
        # Bug fix: Python 3 dispatches the / operator to __truediv__; the
        # original only defined the Python-2 name __div__, so division between
        # int_rounded objects raised TypeError.
        out_scale = np.min([self.scale, other.scale])
        result = int_rounded(self.initial / other.initial, scale=out_scale)
        return result

    # Keep the legacy Python-2 name as an alias for backward compatibility.
    __div__ = __truediv__
def set_default_nside(nside=None):
    """
    Utility function to set a default nside value across the scheduler.

    XXX-there might be a better way to do this.

    Parameters
    ----------
    nside : int (None)
        A valid healpixel nside.
    """
    # The current default is stashed as an attribute on the function itself,
    # so the value persists across calls within the process.
    if not hasattr(set_default_nside, 'nside'):
        set_default_nside.nside = 32 if nside is None else nside
    if nside is not None:
        set_default_nside.nside = nside
    return set_default_nside.nside
def restore_scheduler(observationId, scheduler, observatory, filename, filter_sched=None):
    """Put the scheduler and observatory in the state they were in. Handy for checking reward fucnction

    Parameters
    ----------
    observationId : int
        The ID of the last observation that should be completed
    scheduler : rubin_sim.scheduler.scheduler object
        Scheduler object.
    observatory : rubin_sim.schedler.observatory.Model_observatory
        The observaotry object
    filename : str
        The output sqlite dayabase to use
    filter_sched : rubin_sim.scheduler.scheduler object
        The filter scheduler. Note that we don't look up the official end of the previous night,
        so there is potential for the loaded filters to not match.
    """
    sc = schema_converter()
    # load up the observations
    observations = sc.opsim2obs(filename)
    # Keep only observations up to and including the requested ID.
    good_obs = np.where(observations['ID'] <= observationId)[0]
    observations = observations[good_obs]
    # replay the observations back into the scheduler
    for obs in observations:
        scheduler.add_observation(obs)
        if filter_sched is not None:
            filter_sched.add_observation(obs)
    # NOTE(review): `obs` below is the last observation from the loop above;
    # an empty observation list would raise NameError here.
    if filter_sched is not None:
        # Make sure we have mounted the right filters for the night
        # XXX--note, this might not be exact, but should work most of the time.
        mjd_start_night = np.min(observations['mjd'][np.where(observations['night'] == obs['night'])])
        observatory.mjd = mjd_start_night
        conditions = observatory.return_conditions()
        filters_needed = filter_sched(conditions)
    else:
        filters_needed = ['u', 'g', 'r', 'i', 'y']
    # update the observatory
    # Advance the clock past the final visit and restore the pointing state.
    observatory.mjd = obs['mjd'] + observatory.observatory.visit_time(obs)/3600./24.
    observatory.observatory.parked = False
    observatory.observatory.current_RA_rad = obs['RA']
    observatory.observatory.current_dec_rad = obs['dec']
    observatory.observatory.current_rotSkyPos_rad = obs['rotSkyPos']
    observatory.observatory.cumulative_azimuth_rad = obs['cummTelAz']
    observatory.observatory.mounted_filters = filters_needed
    # Note that we haven't updated last_az_rad, etc, but those values should be ignored.
    return scheduler, observatory
def int_binned_stat(ids, values, statistic=np.mean):
    """
    Like scipy.binned_statistic, but for unique int ids
    """
    unique_ids = np.unique(ids)
    # Sort values by id so each id occupies one contiguous run.
    sort_indx = np.argsort(ids)
    sorted_ids = ids[sort_indx]
    sorted_values = values[sort_indx]
    # Left/right edges of each id's run within the sorted arrays.
    lefts = np.searchsorted(sorted_ids, unique_ids, side='left')
    rights = np.searchsorted(sorted_ids, unique_ids, side='right')
    stats = [statistic(sorted_values[lo:hi]) for lo, hi in zip(lefts, rights)]
    return unique_ids, np.array(stats)
def gnomonic_project_toxy(RA1, Dec1, RAcen, Deccen):
    """Calculate x/y projection of RA1/Dec1 in system with center at RAcen, Deccen.

    Input radians. Grabbed from sims_selfcal"""
    # also used in Global Telescope Network website
    sin_dec, cos_dec = np.sin(Dec1), np.cos(Dec1)
    sin_dec0, cos_dec0 = np.sin(Deccen), np.cos(Deccen)
    cos_dra = np.cos(RA1 - RAcen)
    # Cosine of the angular separation from the projection center.
    cosc = sin_dec0 * sin_dec + cos_dec0 * cos_dec * cos_dra
    x = cos_dec * np.sin(RA1 - RAcen) / cosc
    y = (cos_dec0 * sin_dec - sin_dec0 * cos_dec * cos_dra) / cosc
    return x, y
def gnomonic_project_tosky(x, y, RAcen, Deccen):
    """Calculate RA/Dec on sky of object with x/y and RA/Cen of field of view.

    Returns Ra/Dec in radians."""
    cos_dec0 = np.cos(Deccen)
    sin_dec0 = np.sin(Deccen)
    denom = cos_dec0 - y * sin_dec0
    RA = RAcen + np.arctan2(x, denom)
    Dec = np.arctan2(sin_dec0 + y * cos_dec0, np.sqrt(x * x + denom * denom))
    return RA, Dec
def match_hp_resolution(in_map, nside_out, UNSEEN2nan=True):
    """Utility to convert healpix map resolution if needed and change hp.UNSEEN values to
    np.nan.

    Parameters
    ----------
    in_map : np.array
        A valid healpix map
    nside_out : int
        The desired resolution to convert in_map to
    UNSEEN2nan : bool (True)
        If True, convert any hp.UNSEEN values to np.nan
    """
    nside_in = hp.npix2nside(np.size(in_map))
    # Only resample when the resolutions actually differ.
    if nside_in == nside_out:
        out_map = in_map
    else:
        out_map = hp.ud_grade(in_map, nside_out=nside_out)
    if UNSEEN2nan:
        out_map[np.where(out_map == hp.UNSEEN)] = np.nan
    return out_map
def raster_sort(x0, order=['x', 'y'], xbin=1.):
    """XXXX--depriciated, use tsp instead.

    Do a sort to scan a grid up and down. Simple starting guess to traveling salesman.

    Parameters
    ----------
    x0 : array
    order : list
        Keys for the order x0 should be sorted in.
    xbin : float (1.)
        The binsize to round off the first coordinate into

    returns
    -------
    array sorted so that it rasters up and down.
    """
    # NOTE(review): mutable default argument `order=['x', 'y']` -- safe only
    # as long as callers never mutate it.
    coords = x0.copy()
    bins = np.arange(coords[order[0]].min()-xbin/2., coords[order[0]].max()+3.*xbin/2., xbin)
    # digitize my bins
    coords[order[0]] = np.digitize(coords[order[0]], bins)
    order1 = np.argsort(coords, order=order)
    coords = coords[order1]
    # Indices (within the sorted array) where the secondary coordinate
    # decreases, i.e. where a new raster row starts.
    places_to_invert = np.where(np.diff(coords[order[-1]]) < 0)[0]
    if np.size(places_to_invert) > 0:
        places_to_invert += 1
        indx = np.arange(coords.size)
        index_sorted = np.zeros(indx.size, dtype=int)
        index_sorted[0:places_to_invert[0]] = indx[0:places_to_invert[0]]
        # Reverse the direction of every other row so the scan goes
        # up-down-up-down across the grid.
        for i, inv_pt in enumerate(places_to_invert[:-1]):
            if i % 2 == 0:
                index_sorted[inv_pt:places_to_invert[i+1]] = indx[inv_pt:places_to_invert[i+1]][::-1]
            else:
                index_sorted[inv_pt:places_to_invert[i+1]] = indx[inv_pt:places_to_invert[i+1]]
        # Handle the final (possibly partial) row with the parity it lands on.
        if np.size(places_to_invert) % 2 != 0:
            index_sorted[places_to_invert[-1]:] = indx[places_to_invert[-1]:][::-1]
        else:
            index_sorted[places_to_invert[-1]:] = indx[places_to_invert[-1]:]
        return order1[index_sorted]
    else:
        return order1
class schema_converter(object):
    """
    Record how to convert an observation array to the standard opsim schema
    """
    def __init__(self):
        # Conversion dictionary, keys are opsim schema, values are observation dtype names
        self.convert_dict = {'observationId': 'ID', 'night': 'night',
                             'observationStartMJD': 'mjd',
                             'observationStartLST': 'lmst', 'numExposures': 'nexp',
                             'visitTime': 'visittime', 'visitExposureTime': 'exptime',
                             'proposalId': 'survey_id', 'fieldId': 'field_id',
                             'fieldRA': 'RA', 'fieldDec': 'dec', 'altitude': 'alt', 'azimuth': 'az',
                             'filter': 'filter', 'airmass': 'airmass', 'skyBrightness': 'skybrightness',
                             'cloud': 'clouds', 'seeingFwhm500': 'FWHM_500',
                             'seeingFwhmGeom': 'FWHM_geometric', 'seeingFwhmEff': 'FWHMeff',
                             'fiveSigmaDepth': 'fivesigmadepth', 'slewTime': 'slewtime',
                             'slewDistance': 'slewdist', 'paraAngle': 'pa', 'rotTelPos': 'rotTelPos',
                             'rotSkyPos': 'rotSkyPos', 'moonRA': 'moonRA',
                             'moonDec': 'moonDec', 'moonAlt': 'moonAlt', 'moonAz': 'moonAz',
                             'moonDistance': 'moonDist', 'moonPhase': 'moonPhase',
                             'sunAlt': 'sunAlt', 'sunAz': 'sunAz', 'solarElong': 'solarElong', 'note':'note'}
        # Column(s) not bothering to remap: 'observationStartTime': None,
        # Inverse mapping: observation dtype name -> opsim column name.
        self.inv_map = {v: k for k, v in self.convert_dict.items()}
        # angles to converts
        # Opsim stores these columns in degrees; the in-memory observation
        # arrays keep them in radians.
        self.angles_rad2deg = ['fieldRA', 'fieldDec', 'altitude', 'azimuth', 'slewDistance',
                               'paraAngle', 'rotTelPos', 'rotSkyPos', 'moonRA', 'moonDec',
                               'moonAlt', 'moonAz', 'moonDistance', 'sunAlt', 'sunAz', 'solarElong',
                               'cummTelAz']
        # Put LMST into degrees too
        self.angles_hours2deg = ['observationStartLST']

    def obs2opsim(self, obs_array, filename=None, info=None, delete_past=False):
        """convert an array of observations into a pandas dataframe with Opsim schema

        Parameters
        ----------
        obs_array : np.array
            Observation records (dtype as produced by empty_observation).
        filename : str (None)
            If set, also write the dataframe to this sqlite database.
        info : (None)
            Optional run-info records written to an 'info' table.
        delete_past : bool (False)
            If True, remove an existing file at `filename` first.
        """
        if delete_past:
            try:
                os.remove(filename)
            except OSError:
                pass

        df = pd.DataFrame(obs_array)
        df = df.rename(index=str, columns=self.inv_map)
        for colname in self.angles_rad2deg:
            df[colname] = np.degrees(df[colname])
        for colname in self.angles_hours2deg:
            df[colname] = df[colname] * 360./24.

        if filename is not None:
            # NOTE(review): the sqlite connection is never explicitly closed.
            con = db.connect(filename)
            df.to_sql('observations', con, index=False)
            if info is not None:
                df = pd.DataFrame(info)
                df.to_sql('info', con)

    def opsim2obs(self, filename):
        """convert an opsim schema dataframe into an observation array.
        """
        con = db.connect(filename)
        df = pd.read_sql('select * from observations;', con)
        # Undo the unit conversions applied by obs2opsim.
        for key in self.angles_rad2deg:
            df[key] = np.radians(df[key])
        for key in self.angles_hours2deg:
            df[key] = df[key] * 24./360.

        df = df.rename(index=str, columns=self.convert_dict)

        blank = empty_observation()
        final_result = np.empty(df.shape[0], dtype=blank.dtype)
        # XXX-ugh, there has to be a better way.
        # Copy only the columns that round-trip through the mapping.
        for i, key in enumerate(df.columns):
            if key in self.inv_map.keys():
                final_result[key] = df[key].values

        return final_result
def empty_observation():
    """Return a numpy array that could be a handy observation record

    XXX: Should this really be "empty visit"? Should we have "visits" made
    up of multple "observations" to support multi-exposure time visits?

    XXX-Could add a bool flag for "observed". Then easy to track all proposed
    observations. Could also add an mjd_min, mjd_max for when an observation should be observed.
    That way we could drop things into the queue for DD fields.

    XXX--might be nice to add a generic "sched_note" str field, to record any metadata that
    would be useful to the scheduler once it's observed. and/or observationID.

    Returns
    -------
    numpy array

    The numpy fields have the following structure
    RA : float
       The Right Acension of the observation (center of the field) (Radians)
    dec : float
       Declination of the observation (Radians)
    mjd : float
       Modified Julian Date at the start of the observation (time shutter opens)
    exptime : float
       Total exposure time of the visit (seconds)
    filter : str
        The filter used. Should be one of u, g, r, i, z, y.
    rotSkyPos : float
        The rotation angle of the camera relative to the sky E of N (Radians)
    nexp : int
        Number of exposures in the visit.
    airmass : float
        Airmass at the center of the field
    FWHMeff : float
        The effective seeing FWHM at the center of the field. (arcsec)
    skybrightness : float
        The surface brightness of the sky background at the center of the
        field. (mag/sq arcsec)
    night : int
        The night number of the observation (days)
    flush_by_mjd : float
        If we hit this MJD, we should flush the queue and refill it.
    cummTelAz : float
        The cummulative telescope rotation in azimuth
    """
    # `names` and `types` must stay aligned pairwise -- they are zipped into
    # the structured dtype below.  schema_converter relies on these exact
    # field names when round-tripping through the opsim sqlite schema.
    names = ['ID', 'RA', 'dec', 'mjd', 'flush_by_mjd', 'exptime', 'filter', 'rotSkyPos', 'nexp',
             'airmass', 'FWHM_500', 'FWHMeff', 'FWHM_geometric', 'skybrightness', 'night',
             'slewtime', 'visittime', 'slewdist', 'fivesigmadepth',
             'alt', 'az', 'pa', 'clouds', 'moonAlt', 'sunAlt', 'note',
             'field_id', 'survey_id', 'block_id',
             'lmst', 'rotTelPos', 'moonAz', 'sunAz', 'sunRA', 'sunDec', 'moonRA', 'moonDec',
             'moonDist', 'solarElong', 'moonPhase', 'cummTelAz']

    types = [int, float, float, float, float, float, 'U1', float, int,
             float, float, float, float, float, int,
             float, float, float, float,
             float, float, float, float, float, float, 'U40',
             int, int, int,
             float, float, float, float, float, float, float, float,
             float, float, float, float]
    # One zero-initialized record; string fields start empty.
    result = np.zeros(1, dtype=list(zip(names, types)))
    return result
def scheduled_observation():
    """Make an array for pre-scheduling observations

    mjd_tol : float
        The tolerance on how early an observation can execute (days).
    """
    dtype_spec = [
        # Columns shared with the usual observation records.
        ('ID', int), ('RA', float), ('dec', float), ('mjd', float),
        ('flush_by_mjd', float), ('exptime', float), ('filter', 'U1'),
        ('rotSkyPos', float), ('nexp', float), ('note', 'U40'),
        # Extra columns constraining when the observation may execute.
        ('mjd_tol', float), ('dist_tol', float), ('alt_min', float),
        ('alt_max', float), ('HA_max', float), ('HA_min', float),
        ('observed', bool),
    ]
    return np.zeros(1, dtype=dtype_spec)
def read_fields():
    """Read in the Field coordinates

    Returns
    -------
    fields : `numpy.array`
        With RA and dec in radians.
    """
    field_db = FieldsDatabase()
    raw = np.array(list(field_db.get_field_set('select fieldId, fieldRA, fieldDEC from Field;')))
    # Sort rows by field ID so the output order is deterministic.
    raw = raw[raw[:, 0].argsort()]
    n_fields = np.size(raw[:, 1])
    result = np.zeros(n_fields, dtype=list(zip(['RA', 'dec'], [float, float])))
    # The database stores coordinates in degrees; convert to radians.
    result['RA'] = np.radians(raw[:, 1])
    result['dec'] = np.radians(raw[:, 2])
    return result
def hp_kd_tree(nside=None, leafsize=100, scale=1e5):
    """
    Generate a KD-tree of healpixel locations

    Parameters
    ----------
    nside : int
        A valid healpix nside
    leafsize : int (100)
        Leafsize of the kdtree

    Returns
    -------
    tree : scipy kdtree
    """
    if nside is None:
        nside = set_default_nside()
    # RA/dec of every healpixel center at this resolution.
    pixel_ids = np.arange(hp.nside2npix(nside))
    ra, dec = _hpid2RaDec(nside, pixel_ids)
    return _buildTree(ra, dec, leafsize, scale=scale)
class hp_in_lsst_fov(object):
    """
    Return the healpixels within a pointing. A very simple LSST camera model with
    no chip/raft gaps.
    """
    def __init__(self, nside=None, fov_radius=1.75, scale=1e5):
        """
        Parameters
        ----------
        fov_radius : float (1.75)
            Radius of the field of view in degrees
        """
        if nside is None:
            nside = set_default_nside()
        self.tree = hp_kd_tree(nside=nside, scale=scale)
        # Store the search radius as a scaled integer to match the tree units.
        self.radius = np.round(xyz_angular_radius(fov_radius) * scale).astype(int)
        self.scale = scale

    def __call__(self, ra, dec, **kwargs):
        """
        Parameters
        ----------
        ra : float
            RA in radians
        dec : float
            Dec in radians

        Returns
        -------
        indx : numpy array
            The healpixels that are within the FoV
        """
        xyz = _xyz_from_ra_dec(np.max(ra), np.max(dec))
        # Convert the unit vector to the tree's scaled-integer coordinates.
        xi, yi, zi = [np.round(component * self.scale).astype(int) for component in xyz]
        hits = self.tree.query_ball_point((xi, yi, zi), self.radius)
        return np.array(hits)
class hp_in_comcam_fov(object):
    """
    Return the healpixels within a ComCam pointing. Simple camera model
    with no chip gaps.
    """
    def __init__(self, nside=None, side_length=0.7):
        """
        Parameters
        ----------
        side_length : float (0.7)
            The length of one side of the square field of view (degrees).
        """
        if nside is None:
            nside = set_default_nside()
        self.nside = nside
        self.tree = hp_kd_tree(nside=nside)
        self.side_length = np.radians(side_length)
        # Circle inscribed in the square FoV: pixels inside are always in.
        self.inner_radius = xyz_angular_radius(side_length/2.)
        # Circle circumscribing the square: pixels outside are always out.
        self.outter_radius = xyz_angular_radius(side_length/2.*np.sqrt(2.))
        # The positions of the raft corners, unrotated
        self.corners_x = np.array([-self.side_length/2., -self.side_length/2., self.side_length/2.,
                                   self.side_length/2.])
        self.corners_y = np.array([self.side_length/2., -self.side_length/2., -self.side_length/2.,
                                   self.side_length/2.])

    def __call__(self, ra, dec, rotSkyPos=0.):
        """
        Parameters
        ----------
        ra : float
            RA in radians
        dec : float
            Dec in radians
        rotSkyPos : float
            The rotation angle of the camera in radians
        Returns
        -------
        indx : numpy array
            The healpixels that are within the FoV
        """
        x, y, z = _xyz_from_ra_dec(np.max(ra), np.max(dec))
        # Healpixels within the inner circle
        indices = self.tree.query_ball_point((x, y, z), self.inner_radius)
        # Healpixels withing the outer circle
        indices_all = np.array(self.tree.query_ball_point((x, y, z), self.outter_radius))
        # Only the annulus between the two circles needs the polygon test.
        indices_to_check = indices_all[np.in1d(indices_all, indices, invert=True)]
        cos_rot = np.cos(rotSkyPos)
        sin_rot = np.sin(rotSkyPos)
        # Rotate the square's corners by the camera rotation angle.
        x_rotated = self.corners_x*cos_rot - self.corners_y*sin_rot
        y_rotated = self.corners_x*sin_rot + self.corners_y*cos_rot
        # Draw the square that we want to check if points are in.
        bbPath = mplPath.Path(np.array([[x_rotated[0], y_rotated[0]],
                                       [x_rotated[1], y_rotated[1]],
                                       [x_rotated[2], y_rotated[2]],
                                       [x_rotated[3], y_rotated[3]],
                                       [x_rotated[0], y_rotated[0]]]))
        ra_to_check, dec_to_check = _hpid2RaDec(self.nside, indices_to_check)
        # Project the indices to check to the tangent plane, see if they fall inside the polygon
        x, y = gnomonic_project_toxy(ra_to_check, dec_to_check, ra, dec)
        for i, xcheck in enumerate(x):
            # I wonder if I can do this all at once rather than a loop?
            if bbPath.contains_point((x[i], y[i])):
                indices.append(indices_to_check[i])
        return np.array(indices)
def run_info_table(observatory, extra_info=None):
    """
    Make a little table for recording the information about a run
    """
    observatory_info = observatory.get_info()
    if extra_info is not None:
        for key in extra_info:
            observatory_info.append([key, extra_info[key]])
    observatory_info = np.array(observatory_info)

    n_header_rows = 3
    result = np.zeros(observatory_info[:, 0].size + n_header_rows,
                      dtype=list(zip(['Parameter', 'Value'], ['|U200', '|U200'])))
    # Rows 0-2: run metadata (date, host, software version).
    timestamp = datetime.datetime.now()
    result[0]['Parameter'] = 'Date, ymd'
    result[0]['Value'] = '%i, %i, %i' % (timestamp.year, timestamp.month, timestamp.day)
    result[1]['Parameter'] = 'hostname'
    result[1]['Value'] = socket.gethostname()
    result[2]['Parameter'] = 'rubin_sim.__version__'
    result[2]['Value'] = rubin_sim.__version__
    # Remaining rows: whatever the observatory (and caller) reported.
    result[n_header_rows:]['Parameter'] = observatory_info[:, 0]
    result[n_header_rows:]['Value'] = observatory_info[:, 1]
    return result
def inrange(inval, minimum=-1., maximum=1.):
    """
    Make sure values are within min/max
    """
    # Work on a copy so the caller's array is untouched.
    clipped = np.array(inval)
    clipped[np.where(clipped < minimum)] = minimum
    clipped[np.where(clipped > maximum)] = maximum
    return clipped
def warm_start(scheduler, observations, mjd_key='mjd'):
    """Feed a previously executed observation list back into a scheduler.

    Parameters
    ----------
    scheduler : scheduler object
        The scheduler to bring up to date.
    observations : np.array
        Array of observations (e.g., from sqlite2observations). Sorted in
        place chronologically before being replayed.
    mjd_key : str ('mjd')
        Name of the field to sort the observations by.

    Returns
    -------
    scheduler object
        The same scheduler, after every observation has been added.
    """
    # Replay must happen in time order; sort in place on the MJD column.
    observations.sort(order=mjd_key)
    for past_obs in observations:
        scheduler.add_observation(past_obs)
    return scheduler
def season_calc(night, offset=0, modulo=None, max_season=None, season_length=365.25, floor=True):
    """
    Compute what season a night is in with possible offset and modulo
    using convention that night -365 to 0 is season -1.
    Parameters
    ----------
    night : int or array
        The night we want to convert to a season
    offset : float or array (0)
        Offset to be applied to night (days)
    modulo : int (None)
        If the season should be modulated (i.e., so we can get all even years)
        (seasons, years w/default season_length)
    max_season : int (None)
        For any season above this value (before modulo), set to -1
    season_length : float (365.25)
        How long to consider one season (nights)
    floor : bool (True)
        If true, take the floor of the season. Otherwise, returns season as a float

    Returns
    -------
    np.array
        Season index per input night (int array if `floor`, float otherwise).
    """
    # Promote a scalar night to a 1-element array so the fancy indexing
    # below (result[neg], result[over_indx]) always works.
    if np.size(night) == 1:
        night = np.ravel(np.array([night]))
    result = night + offset
    result = result/season_length
    if floor:
        result = np.floor(result)
    if max_season is not None:
        # int_rounded is assumed to provide robust float comparison (defined
        # elsewhere in this module). Seasons past the cap are found *before*
        # any modulo wrapping changes their values.
        over_indx = np.where(int_rounded(result) >= int_rounded(max_season))
    if modulo is not None:
        # Nights before the start (negative season) must stay -1 rather
        # than being wrapped into a valid season by the modulo.
        neg = np.where(int_rounded(result) < int_rounded(0))
        result = result % modulo
        result[neg] = -1
    if max_season is not None:
        result[over_indx] = -1
    if floor:
        # np.floor above left a float array; return integer season indices.
        result = result.astype(int)
    return result
def create_season_offset(nside, sun_RA_rad):
    """Build a healpix map of season offsets (days) keyed to the sun's RA.

    Parameters
    ----------
    nside : int
        Healpix nside of the output map.
    sun_RA_rad : float
        RA of the sun (radians).

    Returns
    -------
    np.array
        Offset in days for each healpixel, so seasons roll properly with RA.
    """
    two_pi = 2. * np.pi
    pix_ids = np.arange(hp.nside2npix(nside))
    ra, dec = _hpid2RaDec(nside, pix_ids)
    # Phase of each pixel relative to the sun, wrapped into [0, 2*pi).
    phase = (ra - sun_RA_rad + two_pi) % two_pi
    # Convert the angular phase to days through one season cycle.
    days = phase * 365.25 / two_pi
    return -days - 365.25
class TargetoO(object):
    """Class to hold information about a target of opportunity object
    Parameters
    ----------
    tooid : int
        Unique ID for the ToO.
    footprint : np.array
        Healpix map. 1 for areas to observe, 0 for no observe.
    mjd_start : float
        The MJD the ToO starts
    duration : float
        Duration of the ToO (days).
    """
    def __init__(self, tooid, footprint, mjd_start, duration):
        # Simple value container; note the ToO id is stored as `self.id`.
        self.footprint = footprint
        self.duration = duration
        self.id = tooid
        self.mjd_start = mjd_start
class Sim_targetoO_server(object):
    """Serve target-of-opportunity objects that are active at a given MJD.

    Parameters
    ----------
    targetoO_list : list of TargetoO objects
        The ToO events to serve; each must have `mjd_start` and `duration`.
    """
    def __init__(self, targetoO_list):
        self.targetoO_list = targetoO_list
        # Pre-compute the active window [start, end) for every event.
        starts = [too.mjd_start for too in self.targetoO_list]
        lengths = [too.duration for too in self.targetoO_list]
        self.mjd_starts = np.array(starts)
        self.mjd_ends = self.mjd_starts + np.array(lengths)
    def __call__(self, mjd):
        """Return the list of ToOs active at `mjd`, or None if none are."""
        active = np.where((mjd > self.mjd_starts) & (mjd < self.mjd_ends))[0]
        if active.size == 0:
            return None
        return [self.targetoO_list[indx] for indx in active]
| [
"rubin_sim.utils.xyz_angular_radius",
"os.remove",
"numpy.arctan2",
"numpy.empty",
"rubin_sim.utils._hpid2RaDec",
"numpy.floor",
"healpy.ud_grade",
"numpy.argsort",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.unique",
"pandas.DataFrame",
"numpy.degrees",
"socket.gethostname",
"n... | [((5308, 5322), 'numpy.unique', 'np.unique', (['ids'], {}), '(ids)\n', (5317, 5322), True, 'import numpy as np\n'), ((5335, 5350), 'numpy.argsort', 'np.argsort', (['ids'], {}), '(ids)\n', (5345, 5350), True, 'import numpy as np\n'), ((5428, 5475), 'numpy.searchsorted', 'np.searchsorted', (['ordered_ids', 'uids'], {'side': '"""left"""'}), "(ordered_ids, uids, side='left')\n", (5443, 5475), True, 'import numpy as np\n'), ((5488, 5536), 'numpy.searchsorted', 'np.searchsorted', (['ordered_ids', 'uids'], {'side': '"""right"""'}), "(ordered_ids, uids, side='right')\n", (5503, 5536), True, 'import numpy as np\n'), ((7850, 7885), 'numpy.digitize', 'np.digitize', (['coords[order[0]]', 'bins'], {}), '(coords[order[0]], bins)\n', (7861, 7885), True, 'import numpy as np\n'), ((7899, 7930), 'numpy.argsort', 'np.argsort', (['coords'], {'order': 'order'}), '(coords, order=order)\n', (7909, 7930), True, 'import numpy as np\n'), ((16284, 16300), 'rubin_sim.site_models.FieldsDatabase', 'FieldsDatabase', ([], {}), '()\n', (16298, 16300), False, 'from rubin_sim.site_models import FieldsDatabase\n'), ((16570, 16594), 'numpy.radians', 'np.radians', (['fields[:, 1]'], {}), '(fields[:, 1])\n', (16580, 16594), True, 'import numpy as np\n'), ((16615, 16639), 'numpy.radians', 'np.radians', (['fields[:, 2]'], {}), '(fields[:, 2])\n', (16625, 16639), True, 'import numpy as np\n'), ((17074, 17098), 'rubin_sim.utils._hpid2RaDec', '_hpid2RaDec', (['nside', 'hpid'], {}), '(nside, hpid)\n', (17085, 17098), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n'), ((17110, 17152), 'rubin_sim.utils._buildTree', '_buildTree', (['ra', 'dec', 'leafsize'], {'scale': 'scale'}), '(ra, dec, leafsize, scale=scale)\n', (17120, 17152), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n'), ((21673, 21699), 'numpy.array', 'np.array', (['observatory_info'], {}), '(observatory_info)\n', (21681, 21699), True, 
'import numpy as np\n'), ((22005, 22028), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22026, 22028), False, 'import datetime\n'), ((22166, 22186), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (22184, 22186), False, 'import socket\n'), ((22525, 22540), 'numpy.array', 'np.array', (['inval'], {}), '(inval)\n', (22533, 22540), True, 'import numpy as np\n'), ((22553, 22578), 'numpy.where', 'np.where', (['(inval < minimum)'], {}), '(inval < minimum)\n', (22561, 22578), True, 'import numpy as np\n'), ((22618, 22643), 'numpy.where', 'np.where', (['(inval > maximum)'], {}), '(inval > maximum)\n', (22626, 22643), True, 'import numpy as np\n'), ((24754, 24780), 'rubin_sim.utils._hpid2RaDec', '_hpid2RaDec', (['nside', 'hpindx'], {}), '(nside, hpindx)\n', (24765, 24780), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n'), ((585, 600), 'numpy.where', 'np.where', (['(a < b)'], {}), '(a < b)\n', (593, 600), True, 'import numpy as np\n'), ((1709, 1742), 'numpy.min', 'np.min', (['[self.scale, other.scale]'], {}), '([self.scale, other.scale])\n', (1715, 1742), True, 'import numpy as np\n'), ((1892, 1925), 'numpy.min', 'np.min', (['[self.scale, other.scale]'], {}), '([self.scale, other.scale])\n', (1898, 1925), True, 'import numpy as np\n'), ((2075, 2108), 'numpy.min', 'np.min', (['[self.scale, other.scale]'], {}), '([self.scale, other.scale])\n', (2081, 2108), True, 'import numpy as np\n'), ((2258, 2291), 'numpy.min', 'np.min', (['[self.scale, other.scale]'], {}), '([self.scale, other.scale])\n', (2264, 2291), True, 'import numpy as np\n'), ((3787, 3832), 'numpy.where', 'np.where', (["(observations['ID'] <= observationId)"], {}), "(observations['ID'] <= observationId)\n", (3795, 3832), True, 'import numpy as np\n'), ((5676, 5698), 'numpy.array', 'np.array', (['stat_results'], {}), '(stat_results)\n', (5684, 5698), True, 'import numpy as np\n'), ((6364, 6378), 'numpy.cos', 'np.cos', 
(['Deccen'], {}), '(Deccen)\n', (6370, 6378), True, 'import numpy as np\n'), ((6417, 6437), 'numpy.arctan2', 'np.arctan2', (['x', 'denom'], {}), '(x, denom)\n', (6427, 6437), True, 'import numpy as np\n'), ((6496, 6526), 'numpy.sqrt', 'np.sqrt', (['(x * x + denom * denom)'], {}), '(x * x + denom * denom)\n', (6503, 6526), True, 'import numpy as np\n'), ((6988, 7003), 'numpy.size', 'np.size', (['in_map'], {}), '(in_map)\n', (6995, 7003), True, 'import numpy as np\n'), ((7058, 7098), 'healpy.ud_grade', 'hp.ud_grade', (['in_map'], {'nside_out': 'nside_out'}), '(in_map, nside_out=nside_out)\n', (7069, 7098), True, 'import healpy as hp\n'), ((8033, 8058), 'numpy.size', 'np.size', (['places_to_invert'], {}), '(places_to_invert)\n', (8040, 8058), True, 'import numpy as np\n'), ((8109, 8131), 'numpy.arange', 'np.arange', (['coords.size'], {}), '(coords.size)\n', (8118, 8131), True, 'import numpy as np\n'), ((8155, 8185), 'numpy.zeros', 'np.zeros', (['indx.size'], {'dtype': 'int'}), '(indx.size, dtype=int)\n', (8163, 8185), True, 'import numpy as np\n'), ((11316, 11339), 'pandas.DataFrame', 'pd.DataFrame', (['obs_array'], {}), '(obs_array)\n', (11328, 11339), True, 'import pandas as pd\n'), ((11961, 11981), 'sqlite3.connect', 'db.connect', (['filename'], {}), '(filename)\n', (11971, 11981), True, 'import sqlite3 as db\n'), ((11995, 12042), 'pandas.read_sql', 'pd.read_sql', (['"""select * from observations;"""', 'con'], {}), "('select * from observations;', con)\n", (12006, 12042), True, 'import pandas as pd\n'), ((12330, 12370), 'numpy.empty', 'np.empty', (['df.shape[0]'], {'dtype': 'blank.dtype'}), '(df.shape[0], dtype=blank.dtype)\n', (12338, 12370), True, 'import numpy as np\n'), ((16497, 16518), 'numpy.size', 'np.size', (['fields[:, 1]'], {}), '(fields[:, 1])\n', (16504, 16518), True, 'import numpy as np\n'), ((17038, 17058), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (17051, 17058), True, 'import healpy as hp\n'), ((18351, 18368), 
'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (18359, 18368), True, 'import numpy as np\n'), ((18901, 18924), 'numpy.radians', 'np.radians', (['side_length'], {}), '(side_length)\n', (18911, 18924), True, 'import numpy as np\n'), ((18953, 18990), 'rubin_sim.utils.xyz_angular_radius', 'xyz_angular_radius', (['(side_length / 2.0)'], {}), '(side_length / 2.0)\n', (18971, 18990), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n'), ((19144, 19257), 'numpy.array', 'np.array', (['[-self.side_length / 2.0, -self.side_length / 2.0, self.side_length / 2.0, \n self.side_length / 2.0]'], {}), '([-self.side_length / 2.0, -self.side_length / 2.0, self.\n side_length / 2.0, self.side_length / 2.0])\n', (19152, 19257), True, 'import numpy as np\n'), ((19300, 19413), 'numpy.array', 'np.array', (['[self.side_length / 2.0, -self.side_length / 2.0, -self.side_length / 2.0, \n self.side_length / 2.0]'], {}), '([self.side_length / 2.0, -self.side_length / 2.0, -self.\n side_length / 2.0, self.side_length / 2.0])\n', (19308, 19413), True, 'import numpy as np\n'), ((20243, 20260), 'numpy.cos', 'np.cos', (['rotSkyPos'], {}), '(rotSkyPos)\n', (20249, 20260), True, 'import numpy as np\n'), ((20279, 20296), 'numpy.sin', 'np.sin', (['rotSkyPos'], {}), '(rotSkyPos)\n', (20285, 20296), True, 'import numpy as np\n'), ((20885, 20926), 'rubin_sim.utils._hpid2RaDec', '_hpid2RaDec', (['self.nside', 'indices_to_check'], {}), '(self.nside, indices_to_check)\n', (20896, 20926), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n'), ((21329, 21346), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (21337, 21346), True, 'import numpy as np\n'), ((24024, 24038), 'numpy.size', 'np.size', (['night'], {}), '(night)\n', (24031, 24038), True, 'import numpy as np\n'), ((24182, 24198), 'numpy.floor', 'np.floor', (['result'], {}), '(result)\n', (24190, 24198), True, 'import numpy as np\n'), 
((24718, 24738), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (24731, 24738), True, 'import healpy as hp\n'), ((25736, 25791), 'numpy.array', 'np.array', (['[too.mjd_start for too in self.targetoO_list]'], {}), '([too.mjd_start for too in self.targetoO_list])\n', (25744, 25791), True, 'import numpy as np\n'), ((25812, 25866), 'numpy.array', 'np.array', (['[too.duration for too in self.targetoO_list]'], {}), '([too.duration for too in self.targetoO_list])\n', (25820, 25866), True, 'import numpy as np\n'), ((5949, 5963), 'numpy.sin', 'np.sin', (['Deccen'], {}), '(Deccen)\n', (5955, 5963), True, 'import numpy as np\n'), ((5966, 5978), 'numpy.sin', 'np.sin', (['Dec1'], {}), '(Dec1)\n', (5972, 5978), True, 'import numpy as np\n'), ((6013, 6032), 'numpy.cos', 'np.cos', (['(RA1 - RAcen)'], {}), '(RA1 - RAcen)\n', (6019, 6032), True, 'import numpy as np\n'), ((6039, 6051), 'numpy.cos', 'np.cos', (['Dec1'], {}), '(Dec1)\n', (6045, 6051), True, 'import numpy as np\n'), ((6054, 6073), 'numpy.sin', 'np.sin', (['(RA1 - RAcen)'], {}), '(RA1 - RAcen)\n', (6060, 6073), True, 'import numpy as np\n'), ((6385, 6399), 'numpy.sin', 'np.sin', (['Deccen'], {}), '(Deccen)\n', (6391, 6399), True, 'import numpy as np\n'), ((6459, 6473), 'numpy.sin', 'np.sin', (['Deccen'], {}), '(Deccen)\n', (6465, 6473), True, 'import numpy as np\n'), ((7169, 7199), 'numpy.where', 'np.where', (['(out_map == hp.UNSEEN)'], {}), '(out_map == hp.UNSEEN)\n', (7177, 7199), True, 'import numpy as np\n'), ((11466, 11489), 'numpy.degrees', 'np.degrees', (['df[colname]'], {}), '(df[colname])\n', (11476, 11489), True, 'import numpy as np\n'), ((11637, 11657), 'sqlite3.connect', 'db.connect', (['filename'], {}), '(filename)\n', (11647, 11657), True, 'import sqlite3 as db\n'), ((12105, 12124), 'numpy.radians', 'np.radians', (['df[key]'], {}), '(df[key])\n', (12115, 12124), True, 'import numpy as np\n'), ((18094, 18104), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (18100, 18104), True, 'import 
numpy as np\n'), ((18106, 18117), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (18112, 18117), True, 'import numpy as np\n'), ((19860, 19870), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (19866, 19870), True, 'import numpy as np\n'), ((19872, 19883), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (19878, 19883), True, 'import numpy as np\n'), ((20180, 20222), 'numpy.in1d', 'np.in1d', (['indices_all', 'indices'], {'invert': '(True)'}), '(indices_all, indices, invert=True)\n', (20187, 20222), True, 'import numpy as np\n'), ((20530, 20700), 'numpy.array', 'np.array', (['[[x_rotated[0], y_rotated[0]], [x_rotated[1], y_rotated[1]], [x_rotated[2],\n y_rotated[2]], [x_rotated[3], y_rotated[3]], [x_rotated[0], y_rotated[0]]]'], {}), '([[x_rotated[0], y_rotated[0]], [x_rotated[1], y_rotated[1]], [\n x_rotated[2], y_rotated[2]], [x_rotated[3], y_rotated[3]], [x_rotated[0\n ], y_rotated[0]]])\n', (20538, 20700), True, 'import numpy as np\n'), ((24070, 24087), 'numpy.array', 'np.array', (['[night]'], {}), '([night])\n', (24078, 24087), True, 'import numpy as np\n'), ((25968, 26025), 'numpy.where', 'np.where', (['((mjd > self.mjd_starts) & (mjd < self.mjd_ends))'], {}), '((mjd > self.mjd_starts) & (mjd < self.mjd_ends))\n', (25976, 26025), True, 'import numpy as np\n'), ((1113, 1136), 'numpy.round', 'np.round', (['(inval * scale)'], {}), '(inval * scale)\n', (1121, 1136), True, 'import numpy as np\n'), ((4319, 4366), 'numpy.where', 'np.where', (["(observations['night'] == obs['night'])"], {}), "(observations['night'] == obs['night'])\n", (4327, 4366), True, 'import numpy as np\n'), ((5981, 5995), 'numpy.cos', 'np.cos', (['Deccen'], {}), '(Deccen)\n', (5987, 5995), True, 'import numpy as np\n'), ((5998, 6010), 'numpy.cos', 'np.cos', (['Dec1'], {}), '(Dec1)\n', (6004, 6010), True, 'import numpy as np\n'), ((6088, 6102), 'numpy.cos', 'np.cos', (['Deccen'], {}), '(Deccen)\n', (6094, 6102), True, 'import numpy as np\n'), ((6103, 6115), 'numpy.sin', 'np.sin', (['Dec1'], {}), 
'(Dec1)\n', (6109, 6115), True, 'import numpy as np\n'), ((6146, 6165), 'numpy.cos', 'np.cos', (['(RA1 - RAcen)'], {}), '(RA1 - RAcen)\n', (6152, 6165), True, 'import numpy as np\n'), ((6480, 6494), 'numpy.cos', 'np.cos', (['Deccen'], {}), '(Deccen)\n', (6486, 6494), True, 'import numpy as np\n'), ((7991, 8017), 'numpy.diff', 'np.diff', (['coords[order[-1]]'], {}), '(coords[order[-1]])\n', (7998, 8017), True, 'import numpy as np\n'), ((8575, 8600), 'numpy.size', 'np.size', (['places_to_invert'], {}), '(places_to_invert)\n', (8582, 8600), True, 'import numpy as np\n'), ((11233, 11252), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (11242, 11252), False, 'import os\n'), ((11768, 11786), 'pandas.DataFrame', 'pd.DataFrame', (['info'], {}), '(info)\n', (11780, 11786), True, 'import pandas as pd\n'), ((18131, 18155), 'numpy.round', 'np.round', (['(x * self.scale)'], {}), '(x * self.scale)\n', (18139, 18155), True, 'import numpy as np\n'), ((18180, 18204), 'numpy.round', 'np.round', (['(y * self.scale)'], {}), '(y * self.scale)\n', (18188, 18204), True, 'import numpy as np\n'), ((18229, 18253), 'numpy.round', 'np.round', (['(z * self.scale)'], {}), '(z * self.scale)\n', (18237, 18253), True, 'import numpy as np\n'), ((19051, 19063), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (19058, 19063), True, 'import numpy as np\n'), ((6118, 6132), 'numpy.sin', 'np.sin', (['Deccen'], {}), '(Deccen)\n', (6124, 6132), True, 'import numpy as np\n'), ((6133, 6145), 'numpy.cos', 'np.cos', (['Dec1'], {}), '(Dec1)\n', (6139, 6145), True, 'import numpy as np\n'), ((17672, 17702), 'rubin_sim.utils.xyz_angular_radius', 'xyz_angular_radius', (['fov_radius'], {}), '(fov_radius)\n', (17690, 17702), False, 'from rubin_sim.utils import _hpid2RaDec, xyz_angular_radius, _buildTree, _xyz_from_ra_dec\n')] |
# A NADE that has Bernoullis for output distribution
from __future__ import division
from Model.Model import SizeParameter, TensorParameter
from NADE import NADE
from ParameterInitialiser import Gaussian
from Utils.Estimation import Estimation
from Utils.nnet import sigmoid, logsumexp
from Utils.theano_helpers import constantX, floatX
from itertools import izip
import numpy as np
import theano
import theano.tensor as T
class OrderlessBernoulliNADE(NADE):
    """Orderless, deep NADE with Bernoulli (binary) output distributions.

    Trained with masks that mark which input dimensions are "observed":
    `Wflags` feeds the mask itself into the first hidden layer, while `W1`
    feeds the masked input values, and the loss rescales by D/(D-d) to
    obtain an estimate of the full log-density under random orderings
    (cf. Uria, Murray & Larochelle, "A Deep and Tractable Density
    Estimator"). Python 2 / Theano code (uses `xrange` and `izip`).

    Parameters
    ----------
    n_visible : int
        Number of visible (input) dimensions.
    n_hidden : int
        Number of units per hidden layer.
    n_layers : int
        Number of hidden layers.
    nonlinearity : str ("RLU")
        Name of the hidden-unit nonlinearity.
    """
    def __init__(self, n_visible, n_hidden, n_layers, nonlinearity="RLU"):
        NADE.__init__(self, n_visible, n_hidden, nonlinearity)
        self.add_parameter(SizeParameter("n_layers"))
        self.n_layers = n_layers
        # First layer: Wflags maps the observation mask, W1 the masked values.
        self.add_parameter(TensorParameter("Wflags", (n_visible, n_hidden), theano=True), optimise=True, regularise=True)
        self.add_parameter(TensorParameter("W1", (n_visible, n_hidden), theano=True), optimise=True, regularise=True)
        self.add_parameter(TensorParameter("b1", (n_hidden), theano=True), optimise=True, regularise=False)
        if self.n_layers > 1:
            # Hidden-to-hidden layers, only allocated for deep models.
            self.add_parameter(TensorParameter("Ws", (n_layers, n_hidden, n_hidden), theano=True), optimise=True, regularise=True)
            self.add_parameter(TensorParameter("bs", (n_layers, n_hidden), theano=True), optimise=True, regularise=False)
        # Output layer: one Bernoulli logit per visible dimension.
        self.add_parameter(TensorParameter("V", (n_visible, n_hidden), theano=True), optimise=True, regularise=True)
        self.add_parameter(TensorParameter("c", (n_visible), theano=True), optimise=True, regularise=False)
        self.setup_n_orderings(1)
        self.recompile()
    @classmethod
    def create_from_params(cls, params):
        """Construct a model from a dict of sizes and parameter values."""
        n_visible, n_hidden, n_layers = (params["n_visible"], params["n_hidden"], params["n_layers"])
        model = cls(n_visible, n_hidden, n_layers, params["nonlinearity"])
        model.set_parameters(params)
        return model
    @classmethod
    def create_from_smaller_NADE(cls, small_NADE, add_n_hiddens=1, W_initialiser=Gaussian(std=0.01), marginal=None):
        """Grow a trained NADE by appending `add_n_hiddens` hidden layers.

        The first layer and existing hidden layers are copied from
        `small_NADE`; new layers and the output weights are drawn from
        `W_initialiser`. If `marginal` is given, output biases are set to
        the logit of those marginals; otherwise the old biases are kept.
        NOTE(review): the `Gaussian(std=0.01)` default is a shared mutable
        default object; harmless here since it is only read from.
        """
        n_visible, n_hidden, n_layers, nonlinearity = (small_NADE.n_visible, small_NADE.n_hidden, small_NADE.n_layers, small_NADE.parameters["nonlinearity"].get_name())
        model = cls(n_visible, n_hidden, n_layers + add_n_hiddens, nonlinearity)
        # Copy first layer
        model.Wflags.set_value(small_NADE.Wflags.get_value())
        model.W1.set_value(small_NADE.W1.get_value())
        model.b1.set_value(small_NADE.b1.get_value())
        # Copy the hidden layers from the smaller NADE and initialise the rest
        Ws = W_initialiser.get_tensor(model.Ws.get_value().shape)
        bs = W_initialiser.get_tensor(model.bs.get_value().shape)
        if n_layers > 1:
            Ws[0:n_layers - 1, :, :] = small_NADE.Ws.get_value()[0:n_layers - 1, :, :]
            bs[0:n_layers - 1, :] = small_NADE.bs.get_value()[0:n_layers - 1, :]
        model.Ws.set_value(Ws)
        model.bs.set_value(bs)
        model.V.set_value(W_initialiser.get_tensor(model.V.get_value().shape))
        if marginal is None:
            model.c.set_value(small_NADE.c.get_value())
        else:
            # Choose c so that sigmoid(c) equals the requested marginals.
            model.c.set_value(-np.log((1 - marginal) / marginal).astype(floatX))
        return model
    def recompile(self):
        """(Re)build the compiled Theano mask-logdensity estimator."""
        x = T.matrix('x', dtype=floatX)
        m = T.matrix('m', dtype=floatX)
        logdensity = self.sym_mask_logdensity_estimator(x, m)
        self.compiled_mask_logdensity_estimator = theano.function([x, m], logdensity, allow_input_downcast=True)
    def setup_n_orderings(self, n=None, orderings=None):
        """Fix the set of input orderings used when evaluating densities.

        Pass either an explicit list of `orderings` (each a permutation of
        range(n_visible)) or a count `n` of random orderings to draw.
        """
        assert(not (n is None and orderings is None))
        self.orderings = list()
        if orderings is not None:
            self.orderings = orderings
            self.n_orderings = len(orderings)
        else:
            self.n_orderings = n
            from copy import copy
            for _ in xrange(self.n_orderings):
                o = range(self.n_visible)
                np.random.shuffle(o)
                self.orderings.append(copy(o))
    def set_ordering(self, ordering):
        """Use a single fixed input ordering."""
        self.setup_n_orderings(orderings=[ordering])
    def initialize_parameters(self, marginal, W_initialiser=Gaussian(std=0.01)):
        """Randomly initialise weights; set output biases from `marginal`."""
        self.Wflags.set_value(W_initialiser.get_tensor(self.Wflags.get_value().shape))
        self.W1.set_value(W_initialiser.get_tensor(self.W1.get_value().shape))
        self.b1.set_value(W_initialiser.get_tensor(self.b1.get_value().shape))
        if self.n_layers > 1:
            self.Ws.set_value(W_initialiser.get_tensor(self.Ws.get_value().shape))
            self.bs.set_value(W_initialiser.get_tensor(self.bs.get_value().shape))
        self.V.set_value(W_initialiser.get_tensor(self.V.get_value().shape))
        # Choose c so that sigmoid(c) equals the given per-dimension marginals.
        self.c.set_value(-np.log((1 - marginal) / marginal).astype(floatX))
    def initialize_parameters_from_dataset(self, dataset, W_initialiser=Gaussian(std=0.01), sample_size=1000):
        """Randomly initialise weights; estimate output biases from data."""
        self.Wflags.set_value(W_initialiser.get_tensor(self.Wflags.get_value().shape))
        self.W1.set_value(W_initialiser.get_tensor(self.W1.get_value().shape))
        self.b1.set_value(W_initialiser.get_tensor(self.b1.get_value().shape))
        if self.n_layers > 1:
            self.Ws.set_value(W_initialiser.get_tensor(self.Ws.get_value().shape))
            self.bs.set_value(W_initialiser.get_tensor(self.bs.get_value().shape))
        self.V.set_value(W_initialiser.get_tensor(self.V.get_value().shape))
        # Estimate per-dimension marginals from a data sample and bias the
        # outputs so that sigmoid(c) matches them.
        data_sample = dataset.sample_data(sample_size)[0].astype(floatX)
        marginal = data_sample.mean(axis=0)
        self.c.set_value(-np.log((1 - marginal) / marginal).astype(floatX))
    def logdensity(self, x):
        """Numpy log-density, averaged over the configured orderings.

        x is a matrix of column datapoints (VxB) V = n_visible, B = batch size
        """
        B = x.shape[1]
        nl = self.parameters["nonlinearity"].get_numpy_f()
        # One log-probability per (datapoint, ordering); averaged at the end.
        lp = np.zeros((B, self.n_orderings))
        W1 = self.W1.get_value()
        b1 = self.b1.get_value()
        Wflags = self.Wflags.get_value()
        if self.n_layers > 1:
            Ws = self.Ws.get_value()
            bs = self.bs.get_value()
        V = self.V.get_value()
        c = self.c.get_value()
        for o_index, o in enumerate(self.orderings):
            # `a` accumulates first-layer activation from conditioned-on
            # values; `input_mask_contribution` accumulates the flag weights
            # of dimensions already visited.
            a = np.zeros((B, self.n_hidden))
            input_mask_contribution = np.zeros((B, self.n_hidden))
            for j in xrange(self.n_visible):
                i = o[j]
                x_i = x[i]
                h = nl(input_mask_contribution + a + b1)
                for l in xrange(self.n_layers - 1):
                    h = nl(np.dot(h, Ws[l]) + bs[l])
                t = np.dot(h, V[i]) + c[i]
                # Squash probabilities away from exactly 0/1 for stability.
                p_xi_is_one = sigmoid(t) * 0.9999 + 0.0001 * 0.5
                lp[:, o_index] += x_i * np.log(p_xi_is_one) + (1 - x_i) * np.log(1 - p_xi_is_one)
                # Condition the remaining dimensions on x_i.
                a += np.dot(x[i][:, np.newaxis], W1[i][np.newaxis, :])
                input_mask_contribution += Wflags[i]
        # Average over orderings in log space.
        return logsumexp(lp + np.log(1 / self.n_orderings))
    def estimate_average_loglikelihood_for_dataset_using_masks(self, x_dataset, masks_dataset, minibatch_size=20000, loops=1):
        """Estimate the mean masked loglikelihood of a dataset.

        Returns an Estimation holding the sample mean and its uncertainty.
        """
        loglikelihood = 0.0
        loglikelihood_sq = 0.0
        n = 0
        x_iterator = x_dataset.iterator(batch_size=minibatch_size, get_smaller_final_batch=True)
        m_iterator = masks_dataset.iterator(batch_size=minibatch_size)
        for _ in xrange(loops):
            for x, m in izip(x_iterator, m_iterator):
                x = x.T  # VxB
                batch_size = x.shape[1]
                # Trim the masks to match a possibly-smaller final data batch.
                m = m.T[:, :batch_size]
                n += batch_size
                lls = self.compiled_mask_logdensity_estimator(x, m)
                loglikelihood += np.sum(lls)
                loglikelihood_sq += np.sum(lls ** 2)
        return Estimation.sample_mean_from_sum_and_sum_sq(loglikelihood, loglikelihood_sq, n)
    def sym_mask_logdensity_estimator(self, x, mask):
        """Symbolic (Theano) masked log-density estimator.

        x is a matrix of column datapoints (DxB) D = n_visible, B = batch size
        """
        # non_linearity_name = self.parameters["nonlinearity"].get_name()
        # assert(non_linearity_name == "sigmoid" or non_linearity_name=="RLU")
        x = x.T  # BxD
        mask = mask.T  # BxD
        output_mask = constantX(1) - mask  # BxD
        D = constantX(self.n_visible)
        d = mask.sum(1)  # d is the 1-based index of the dimension whose value to infer (not the size of the context)
        masked_input = x * mask  # BxD
        h = self.nonlinearity(T.dot(masked_input, self.W1) + T.dot(mask, self.Wflags) + self.b1)  # BxH
        for l in xrange(self.n_layers - 1):
            h = self.nonlinearity(T.dot(h, self.Ws[l]) + self.bs[l])  # BxH
        t = T.dot(h, self.V.T) + self.c  # BxD
        p_x_is_one = T.nnet.sigmoid(t) * constantX(0.9999) + constantX(0.0001 * 0.5)  # BxD
        # Sum Bernoulli log-likelihoods over the *unobserved* dimensions and
        # rescale by D/(D-d) to estimate the full log-density.
        lp = ((x * T.log(p_x_is_one) + (constantX(1) - x) * T.log(constantX(1) - p_x_is_one)) * output_mask).sum(1) * D / (D - d)  # B
        return lp
    def sym_masked_neg_loglikelihood_gradient(self, x, mask):
        """Return (mean negative loglikelihood, dict of symbolic gradients)."""
        loglikelihood = self.sym_mask_logdensity_estimator(x, mask)
        mean_loglikelihood = -loglikelihood.mean()
        # Gradients
        gradients = {}
        for param in self.parameters_to_optimise:
            gradients[param] = T.grad(mean_loglikelihood, self.__getattribute__(param))
        return (mean_loglikelihood, gradients)
    def sample(self, n=1):
        """Draw `n` samples, each under a randomly chosen input ordering.

        Returns
        -------
        samples : np.array of shape (n, n_visible)
        """
        W1 = self.W1.get_value()
        b1 = self.b1.get_value()
        Wflags = self.Wflags.get_value()
        if self.n_layers > 1:
            Ws = self.Ws.get_value()
            bs = self.bs.get_value()
        V = self.V.get_value()
        c = self.c.get_value()
        nl = self.parameters["nonlinearity"].get_numpy_f()
        samples = np.zeros((n,self.n_visible))
        for s in xrange(n):
            # Sample an ordering
            ordering = self.orderings[np.random.randint(len(self.orderings))]
            a = np.zeros((1,self.n_hidden,))  # H
            input_mask_contribution = np.zeros((self.n_hidden))
            for j in xrange(self.n_visible):
                i = ordering[j]
                h = nl(input_mask_contribution + a + b1)
                for l in xrange(self.n_layers - 1):
                    h = nl(np.dot(h, Ws[l]) + bs[l])
                t = np.dot(h, V[i]) + c[i]
                p_xi_is_one = sigmoid(t) * 0.9999 + 0.0001 * 0.5  # B
                # BUGFIX: draw the value for dimension i *before* folding it
                # into `a`. The original code updated `a` with samples[s, i]
                # while it was still 0, so sampled values never conditioned
                # the later dimensions (compare the update order in
                # logdensity(), which conditions on the actual x[i]).
                samples[s, i] = np.random.random() < p_xi_is_one
                input_mask_contribution += Wflags[i]
                a += np.dot(samples[s, i][np.newaxis, np.newaxis], W1[i][np.newaxis, :])
        return samples
| [
"numpy.sum",
"Utils.Estimation.Estimation.sample_mean_from_sum_and_sum_sq",
"ParameterInitialiser.Gaussian",
"theano.tensor.nnet.sigmoid",
"theano.tensor.log",
"numpy.random.shuffle",
"theano.tensor.dot",
"Model.Model.TensorParameter",
"numpy.dot",
"NADE.NADE.__init__",
"theano.tensor.matrix",
... | [((544, 598), 'NADE.NADE.__init__', 'NADE.__init__', (['self', 'n_visible', 'n_hidden', 'nonlinearity'], {}), '(self, n_visible, n_hidden, nonlinearity)\n', (557, 598), False, 'from NADE import NADE\n'), ((1994, 2012), 'ParameterInitialiser.Gaussian', 'Gaussian', ([], {'std': '(0.01)'}), '(std=0.01)\n', (2002, 2012), False, 'from ParameterInitialiser import Gaussian\n'), ((3261, 3288), 'theano.tensor.matrix', 'T.matrix', (['"""x"""'], {'dtype': 'floatX'}), "('x', dtype=floatX)\n", (3269, 3288), True, 'import theano.tensor as T\n'), ((3301, 3328), 'theano.tensor.matrix', 'T.matrix', (['"""m"""'], {'dtype': 'floatX'}), "('m', dtype=floatX)\n", (3309, 3328), True, 'import theano.tensor as T\n'), ((3441, 3503), 'theano.function', 'theano.function', (['[x, m]', 'logdensity'], {'allow_input_downcast': '(True)'}), '([x, m], logdensity, allow_input_downcast=True)\n', (3456, 3503), False, 'import theano\n'), ((4174, 4192), 'ParameterInitialiser.Gaussian', 'Gaussian', ([], {'std': '(0.01)'}), '(std=0.01)\n', (4182, 4192), False, 'from ParameterInitialiser import Gaussian\n'), ((4862, 4880), 'ParameterInitialiser.Gaussian', 'Gaussian', ([], {'std': '(0.01)'}), '(std=0.01)\n', (4870, 4880), False, 'from ParameterInitialiser import Gaussian\n'), ((5824, 5855), 'numpy.zeros', 'np.zeros', (['(B, self.n_orderings)'], {}), '((B, self.n_orderings))\n', (5832, 5855), True, 'import numpy as np\n'), ((7724, 7802), 'Utils.Estimation.Estimation.sample_mean_from_sum_and_sum_sq', 'Estimation.sample_mean_from_sum_and_sum_sq', (['loglikelihood', 'loglikelihood_sq', 'n'], {}), '(loglikelihood, loglikelihood_sq, n)\n', (7766, 7802), False, 'from Utils.Estimation import Estimation\n'), ((8211, 8236), 'Utils.theano_helpers.constantX', 'constantX', (['self.n_visible'], {}), '(self.n_visible)\n', (8220, 8236), False, 'from Utils.theano_helpers import constantX, floatX\n'), ((9698, 9727), 'numpy.zeros', 'np.zeros', (['(n, self.n_visible)'], {}), '((n, self.n_visible))\n', (9706, 9727), True, 
'import numpy as np\n'), ((626, 651), 'Model.Model.SizeParameter', 'SizeParameter', (['"""n_layers"""'], {}), "('n_layers')\n", (639, 651), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((713, 774), 'Model.Model.TensorParameter', 'TensorParameter', (['"""Wflags"""', '(n_visible, n_hidden)'], {'theano': '(True)'}), "('Wflags', (n_visible, n_hidden), theano=True)\n", (728, 774), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((835, 892), 'Model.Model.TensorParameter', 'TensorParameter', (['"""W1"""', '(n_visible, n_hidden)'], {'theano': '(True)'}), "('W1', (n_visible, n_hidden), theano=True)\n", (850, 892), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((953, 997), 'Model.Model.TensorParameter', 'TensorParameter', (['"""b1"""', 'n_hidden'], {'theano': '(True)'}), "('b1', n_hidden, theano=True)\n", (968, 997), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((1344, 1400), 'Model.Model.TensorParameter', 'TensorParameter', (['"""V"""', '(n_visible, n_hidden)'], {'theano': '(True)'}), "('V', (n_visible, n_hidden), theano=True)\n", (1359, 1400), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((1461, 1505), 'Model.Model.TensorParameter', 'TensorParameter', (['"""c"""', 'n_visible'], {'theano': '(True)'}), "('c', n_visible, theano=True)\n", (1476, 1505), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((6200, 6228), 'numpy.zeros', 'np.zeros', (['(B, self.n_hidden)'], {}), '((B, self.n_hidden))\n', (6208, 6228), True, 'import numpy as np\n'), ((6267, 6295), 'numpy.zeros', 'np.zeros', (['(B, self.n_hidden)'], {}), '((B, self.n_hidden))\n', (6275, 6295), True, 'import numpy as np\n'), ((7370, 7398), 'itertools.izip', 'izip', (['x_iterator', 'm_iterator'], {}), '(x_iterator, m_iterator)\n', (7374, 7398), False, 'from itertools import izip\n'), ((8172, 8184), 'Utils.theano_helpers.constantX', 'constantX', (['(1)'], {}), '(1)\n', (8181, 8184), False, 'from 
Utils.theano_helpers import constantX, floatX\n'), ((8630, 8648), 'theano.tensor.dot', 'T.dot', (['h', 'self.V.T'], {}), '(h, self.V.T)\n', (8635, 8648), True, 'import theano.tensor as T\n'), ((8726, 8749), 'Utils.theano_helpers.constantX', 'constantX', (['(0.0001 * 0.5)'], {}), '(0.0001 * 0.5)\n', (8735, 8749), False, 'from Utils.theano_helpers import constantX, floatX\n'), ((9882, 9910), 'numpy.zeros', 'np.zeros', (['(1, self.n_hidden)'], {}), '((1, self.n_hidden))\n', (9890, 9910), True, 'import numpy as np\n'), ((9954, 9977), 'numpy.zeros', 'np.zeros', (['self.n_hidden'], {}), '(self.n_hidden)\n', (9962, 9977), True, 'import numpy as np\n'), ((1095, 1161), 'Model.Model.TensorParameter', 'TensorParameter', (['"""Ws"""', '(n_layers, n_hidden, n_hidden)'], {'theano': '(True)'}), "('Ws', (n_layers, n_hidden, n_hidden), theano=True)\n", (1110, 1161), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((1226, 1282), 'Model.Model.TensorParameter', 'TensorParameter', (['"""bs"""', '(n_layers, n_hidden)'], {'theano': '(True)'}), "('bs', (n_layers, n_hidden), theano=True)\n", (1241, 1282), False, 'from Model.Model import SizeParameter, TensorParameter\n'), ((3953, 3973), 'numpy.random.shuffle', 'np.random.shuffle', (['o'], {}), '(o)\n', (3970, 3973), True, 'import numpy as np\n'), ((6782, 6831), 'numpy.dot', 'np.dot', (['x[i][:, np.newaxis]', 'W1[i][np.newaxis, :]'], {}), '(x[i][:, np.newaxis], W1[i][np.newaxis, :])\n', (6788, 6831), True, 'import numpy as np\n'), ((6915, 6943), 'numpy.log', 'np.log', (['(1 / self.n_orderings)'], {}), '(1 / self.n_orderings)\n', (6921, 6943), True, 'import numpy as np\n'), ((7644, 7655), 'numpy.sum', 'np.sum', (['lls'], {}), '(lls)\n', (7650, 7655), True, 'import numpy as np\n'), ((7692, 7708), 'numpy.sum', 'np.sum', (['(lls ** 2)'], {}), '(lls ** 2)\n', (7698, 7708), True, 'import numpy as np\n'), ((8686, 8703), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['t'], {}), '(t)\n', (8700, 8703), True, 'import 
theano.tensor as T\n'), ((8706, 8723), 'Utils.theano_helpers.constantX', 'constantX', (['(0.9999)'], {}), '(0.9999)\n', (8715, 8723), False, 'from Utils.theano_helpers import constantX, floatX\n'), ((10406, 10473), 'numpy.dot', 'np.dot', (['samples[s, i][np.newaxis, np.newaxis]', 'W1[i][np.newaxis, :]'], {}), '(samples[s, i][np.newaxis, np.newaxis], W1[i][np.newaxis, :])\n', (10412, 10473), True, 'import numpy as np\n'), ((4012, 4019), 'copy.copy', 'copy', (['o'], {}), '(o)\n', (4016, 4019), False, 'from copy import copy\n'), ((6575, 6590), 'numpy.dot', 'np.dot', (['h', 'V[i]'], {}), '(h, V[i])\n', (6581, 6590), True, 'import numpy as np\n'), ((8424, 8452), 'theano.tensor.dot', 'T.dot', (['masked_input', 'self.W1'], {}), '(masked_input, self.W1)\n', (8429, 8452), True, 'import theano.tensor as T\n'), ((8455, 8479), 'theano.tensor.dot', 'T.dot', (['mask', 'self.Wflags'], {}), '(mask, self.Wflags)\n', (8460, 8479), True, 'import theano.tensor as T\n'), ((8576, 8596), 'theano.tensor.dot', 'T.dot', (['h', 'self.Ws[l]'], {}), '(h, self.Ws[l])\n', (8581, 8596), True, 'import theano.tensor as T\n'), ((10239, 10254), 'numpy.dot', 'np.dot', (['h', 'V[i]'], {}), '(h, V[i])\n', (10245, 10254), True, 'import numpy as np\n'), ((10506, 10524), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10522, 10524), True, 'import numpy as np\n'), ((4739, 4772), 'numpy.log', 'np.log', (['((1 - marginal) / marginal)'], {}), '((1 - marginal) / marginal)\n', (4745, 4772), True, 'import numpy as np\n'), ((5562, 5595), 'numpy.log', 'np.log', (['((1 - marginal) / marginal)'], {}), '((1 - marginal) / marginal)\n', (5568, 5595), True, 'import numpy as np\n'), ((6628, 6638), 'Utils.nnet.sigmoid', 'sigmoid', (['t'], {}), '(t)\n', (6635, 6638), False, 'from Utils.nnet import sigmoid, logsumexp\n'), ((6703, 6722), 'numpy.log', 'np.log', (['p_xi_is_one'], {}), '(p_xi_is_one)\n', (6709, 6722), True, 'import numpy as np\n'), ((6737, 6760), 'numpy.log', 'np.log', (['(1 - p_xi_is_one)'], {}), 
'(1 - p_xi_is_one)\n', (6743, 6760), True, 'import numpy as np\n'), ((10292, 10302), 'Utils.nnet.sigmoid', 'sigmoid', (['t'], {}), '(t)\n', (10299, 10302), False, 'from Utils.nnet import sigmoid, logsumexp\n'), ((3152, 3185), 'numpy.log', 'np.log', (['((1 - marginal) / marginal)'], {}), '((1 - marginal) / marginal)\n', (3158, 3185), True, 'import numpy as np\n'), ((6529, 6545), 'numpy.dot', 'np.dot', (['h', 'Ws[l]'], {}), '(h, Ws[l])\n', (6535, 6545), True, 'import numpy as np\n'), ((10193, 10209), 'numpy.dot', 'np.dot', (['h', 'Ws[l]'], {}), '(h, Ws[l])\n', (10199, 10209), True, 'import numpy as np\n'), ((8776, 8793), 'theano.tensor.log', 'T.log', (['p_x_is_one'], {}), '(p_x_is_one)\n', (8781, 8793), True, 'import theano.tensor as T\n'), ((8797, 8809), 'Utils.theano_helpers.constantX', 'constantX', (['(1)'], {}), '(1)\n', (8806, 8809), False, 'from Utils.theano_helpers import constantX, floatX\n'), ((8823, 8835), 'Utils.theano_helpers.constantX', 'constantX', (['(1)'], {}), '(1)\n', (8832, 8835), False, 'from Utils.theano_helpers import constantX, floatX\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 24 15:18:38 2019

@author: lrreid

Quick script to test the reading and plotting of the history data file
Plotting is now complete and moved to main script
"""

import numpy as np
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import DateFormatter

fsize = 14 # font size for axis labels, ticks and annotations

# History file columns (after the header row): 0 = date as YYYYMMDD,
# 2 = regen energy (mJ), 3-5 = Powerlite 1, Powerlite 2 and full-power
# energies stored in units of 100 mJ.
data = np.loadtxt(open("LATTE_Laser_Power_History_big.txt"), skiprows=1)

# Convert the YYYYMMDD integer dates into numpy datetime64 values.
num2date = [datetime.datetime.strptime(str(int(data[k,0])), '%Y%m%d').date() for k in range(len(data[:,0]))]
dates = np.array(num2date, dtype='datetime64')

Regen_target = np.array([2.0, 2.0]) # target regen pulse energy (mJ), drawn as a horizontal line
Regen_Ene = data[:,2]
PL1_Ene = np.transpose(np.array([data[:,3]*100])) # Pulse energy in mJ for Powerlite 1
PL2_Ene = np.transpose(np.array([data[:,4]*100])) # Pulse energy in mJ for Powerlite 2
Full_Ene = np.transpose(np.array([data[:,5]*100])) # Pulse energy in mJ for Full power

if len(dates) > 30:
    # Keep only the 30 most recent entries of every series.
    # BUG FIX: all arrays must be cut to the same window. Previously `dates`
    # was truncated first, so the later slices used its new length (30) and
    # selected the FIRST 30 energy samples, which were then plotted against
    # the LAST 30 dates.
    Regen_Ene = Regen_Ene[-30:]
    PL1_Ene = PL1_Ene[-30:]
    PL2_Ene = PL2_Ene[-30:]
    Full_Ene = Full_Ene[-30:]
    dates = dates[-30:]

D_max = np.amax(dates)+1 # most recent date (+1 day padding): x-axis maximum
D_min = np.amin(dates) # earliest date in the window: x-axis minimum
timespan = np.array([D_min, D_max])

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,6), sharey=False)

# Left panel: regen pulse energy vs. date, with the target level overlaid.
plt.subplot(121)
plt.plot(dates,Regen_Ene,'b-s',label="Regen energy")
plt.plot(timespan, Regen_target, 'r--',label="Target")
plt.xticks(np.arange(min(dates)-5, max(dates)+5, step=5))
plt.yticks(np.arange(0, 4, step=0.5))
plt.axis([D_min, D_max, 1, 3]) # set axes [xmin, xmax, ymin, ymax]
plt.tick_params(labelsize=fsize)
ax1 = plt.gca() # Required for axis labels to appear
ax1.set_xlabel('Date', fontsize=fsize)
ax1.set_ylabel('Pulse energy (mJ)', fontsize=fsize)
plt.title('Regen energy', fontsize=fsize)
plt.grid(True)
fig.autofmt_xdate() # rotate and align the tick labels so they look better
ax1.xaxis.set_major_formatter(DateFormatter("%d/%m"))
plt.legend(bbox_to_anchor=(0.01, 0.20), loc='upper left', borderaxespad=0.)

# Right panel: the three multipass amplifier energies vs. date.
plt.subplot(122)
plt.plot(dates,PL1_Ene,'b-s', label="Powerlite 1")
plt.plot(dates,PL2_Ene,'g-s', label="Powerlite 2")
plt.plot(dates,Full_Ene,'r-s', label="Full power")
plt.xticks(np.arange(min(dates)-5, max(dates)+5, step=5))
plt.yticks(np.arange(0, 1100, step=200))
plt.axis([D_min, D_max, 0, 800]) # set axes [xmin, xmax, ymin, ymax]
plt.tick_params(labelsize=fsize)
ax2 = plt.gca() # Required for axis labels to appear
ax2.set_xlabel('Date', fontsize=fsize)
ax2.set_ylabel('Pulse energy (mJ)', fontsize=fsize)
plt.title('Multipass energy', fontsize=fsize)
plt.grid(True)
fig.autofmt_xdate() # rotate and align the tick labels so they look better
ax2.xaxis.set_major_formatter(DateFormatter("%d/%m"))
plt.legend(bbox_to_anchor=(0.01, 0.40), loc='upper left', borderaxespad=0.)
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.amin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"numpy.amax",
"matplotlib.dates.DateFormatter",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tick_params",
"matp... | [((612, 650), 'numpy.array', 'np.array', (['num2date'], {'dtype': '"""datetime64"""'}), "(num2date, dtype='datetime64')\n", (620, 650), True, 'import numpy as np\n'), ((667, 687), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (675, 687), True, 'import numpy as np\n'), ((1328, 1342), 'numpy.amin', 'np.amin', (['dates'], {}), '(dates)\n', (1335, 1342), True, 'import numpy as np\n'), ((1398, 1422), 'numpy.array', 'np.array', (['[D_min, D_max]'], {}), '([D_min, D_max])\n', (1406, 1422), True, 'import numpy as np\n'), ((1443, 1492), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 6)', 'sharey': '(False)'}), '(1, 2, figsize=(12, 6), sharey=False)\n', (1455, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1492, 1508), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1503, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1564), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'Regen_Ene', '"""b-s"""'], {'label': '"""Regen energy"""'}), "(dates, Regen_Ene, 'b-s', label='Regen energy')\n", (1517, 1564), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1617), 'matplotlib.pyplot.plot', 'plt.plot', (['timespan', 'Regen_target', '"""r--"""'], {'label': '"""Target"""'}), "(timespan, Regen_target, 'r--', label='Target')\n", (1570, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1743), 'matplotlib.pyplot.axis', 'plt.axis', (['[D_min, D_max, 1, 3]'], {}), '([D_min, D_max, 1, 3])\n', (1721, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1820), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fsize'}), '(labelsize=fsize)\n', (1803, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1836), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1834, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2034), 'matplotlib.pyplot.title', 'plt.title', (['"""Regen energy"""'], {'fontsize': 'fsize'}), "('Regen energy', 
fontsize=fsize)\n", (2002, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2049), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2043, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2181, 2256), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.01, 0.2)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.01, 0.2), loc='upper left', borderaxespad=0.0)\n", (2191, 2256), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2275), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2270, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2276, 2328), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'PL1_Ene', '"""b-s"""'], {'label': '"""Powerlite 1"""'}), "(dates, PL1_Ene, 'b-s', label='Powerlite 1')\n", (2284, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2379), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'PL2_Ene', '"""g-s"""'], {'label': '"""Powerlite 2"""'}), "(dates, PL2_Ene, 'g-s', label='Powerlite 2')\n", (2335, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2430), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'Full_Ene', '"""r-s"""'], {'label': '"""Full power"""'}), "(dates, Full_Ene, 'r-s', label='Full power')\n", (2386, 2430), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2560), 'matplotlib.pyplot.axis', 'plt.axis', (['[D_min, D_max, 0, 800]'], {}), '([D_min, D_max, 0, 800])\n', (2536, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2637), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fsize'}), '(labelsize=fsize)\n', (2620, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2644, 2653), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2651, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2855), 'matplotlib.pyplot.title', 'plt.title', (['"""Multipass energy"""'], {'fontsize': 'fsize'}), "('Multipass energy', fontsize=fsize)\n", (2819, 2855), True, 'import 
matplotlib.pyplot as plt\n'), ((2856, 2870), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2864, 2870), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3077), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.01, 0.4)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.01, 0.4), loc='upper left', borderaxespad=0.0)\n", (3012, 3077), True, 'import matplotlib.pyplot as plt\n'), ((736, 764), 'numpy.array', 'np.array', (['[data[:, 3] * 100]'], {}), '([data[:, 3] * 100])\n', (744, 764), True, 'import numpy as np\n'), ((828, 856), 'numpy.array', 'np.array', (['[data[:, 4] * 100]'], {}), '([data[:, 4] * 100])\n', (836, 856), True, 'import numpy as np\n'), ((920, 948), 'numpy.array', 'np.array', (['[data[:, 5] * 100]'], {}), '([data[:, 5] * 100])\n', (928, 948), True, 'import numpy as np\n'), ((1267, 1281), 'numpy.amax', 'np.amax', (['dates'], {}), '(dates)\n', (1274, 1281), True, 'import numpy as np\n'), ((1686, 1711), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {'step': '(0.5)'}), '(0, 4, step=0.5)\n', (1695, 1711), True, 'import numpy as np\n'), ((2157, 2179), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d/%m"""'], {}), "('%d/%m')\n", (2170, 2179), False, 'from matplotlib.dates import DateFormatter\n'), ((2498, 2526), 'numpy.arange', 'np.arange', (['(0)', '(1100)'], {'step': '(200)'}), '(0, 1100, step=200)\n', (2507, 2526), True, 'import numpy as np\n'), ((2978, 3000), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d/%m"""'], {}), "('%d/%m')\n", (2991, 3000), False, 'from matplotlib.dates import DateFormatter\n')] |
from typing import List
import numpy as np
from opendp.meas import make_base_geometric
from opendp.mod import enable_features
# Opt in to OpenDP's "contrib" feature set — presumably required by the
# make_base_geometric constructor used below; confirm against the opendp docs.
enable_features("contrib")
def histogramdd_indexes(x: np.ndarray, category_lengths: List[int]) -> np.ndarray:
    """Count every combination of category indexes across d dimensions.

    Discrete analogue of ``np.histogramdd``.

    :param x: array of shape [n, len(`category_lengths`)] holding
        non-negative category indexes
    :param category_lengths: number of distinct categories in each column
    """
    assert x.ndim == 2
    assert x.shape[1] == len(category_lengths)

    # With zero dimensions the only "cell" is the total row count.
    if not category_lengths:
        return np.array(x.shape[0])

    # Treat each row as a coordinate into a d-dimensional grid and collapse
    # it to the equivalent flat offset; offsets uniquely label the cells.
    flat_offsets = np.ravel_multi_index(x.T, category_lengths)

    # Tally the occurrences of every offset, then restore the d-dim shape.
    counts = np.bincount(flat_offsets, minlength=np.prod(category_lengths))
    return counts.reshape(category_lengths)
def release_histogramdd_indexes(
    x: np.ndarray, category_lengths: List[int], scale
) -> np.ndarray:
    """Release a d-dimensional histogram privatized with geometric noise `scale`.

    The ith column of x must range from 0 to category_lengths[i].

    :param x: data of shape [n, len(`category_lengths`)] of non-negative category indexes
    :param category_lengths: the number of unique categories per column
    """
    exact_counts = histogramdd_indexes(x, category_lengths)
    mechanism = make_base_geometric(scale, D="VectorDomain<AllDomain<i64>>")
    # The mechanism consumes a flat vector; restore the d-dim shape afterwards.
    noisy_flat = mechanism(exact_counts.flatten())
    return np.reshape(noisy_flat, exact_counts.shape)
# TESTS
def test_histogramdd_discrete():
    """histogramdd_indexes matches hand-computed counts on a 2x3 grid."""
    data = np.array(
        [[0, 2], [0, 0], [1, 1], [1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [0, 2], [1, 0]]
    )
    expected = [[1, 0, 2], [4, 3, 0]]
    assert np.array_equal(histogramdd_indexes(data, [2, 3]), expected)
def test_histogram0d_discrete():
    """With zero category columns the histogram collapses to the row count."""
    no_columns = np.empty(shape=(100, 0))
    print(histogramdd_indexes(no_columns, []))
| [
"opendp.meas.make_base_geometric",
"opendp.mod.enable_features",
"numpy.empty",
"numpy.array",
"numpy.ravel_multi_index",
"numpy.prod"
] | [((127, 153), 'opendp.mod.enable_features', 'enable_features', (['"""contrib"""'], {}), "('contrib')\n", (142, 153), False, 'from opendp.mod import enable_features\n'), ((879, 922), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['x.T', 'category_lengths'], {}), '(x.T, category_lengths)\n', (899, 922), True, 'import numpy as np\n'), ((1604, 1664), 'opendp.meas.make_base_geometric', 'make_base_geometric', (['scale'], {'D': '"""VectorDomain<AllDomain<i64>>"""'}), "(scale, D='VectorDomain<AllDomain<i64>>')\n", (1623, 1664), False, 'from opendp.meas import make_base_geometric\n'), ((1797, 1892), 'numpy.array', 'np.array', (['[[0, 2], [0, 0], [1, 1], [1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [0, 2], [1, 0]\n ]'], {}), '([[0, 2], [0, 0], [1, 1], [1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [\n 0, 2], [1, 0]])\n', (1805, 1892), True, 'import numpy as np\n'), ((2131, 2155), 'numpy.empty', 'np.empty', ([], {'shape': '(100, 0)'}), '(shape=(100, 0))\n', (2139, 2155), True, 'import numpy as np\n'), ((643, 663), 'numpy.array', 'np.array', (['x.shape[0]'], {}), '(x.shape[0])\n', (651, 663), True, 'import numpy as np\n'), ((1021, 1046), 'numpy.prod', 'np.prod', (['category_lengths'], {}), '(category_lengths)\n', (1028, 1046), True, 'import numpy as np\n')] |
import numpy as np

# Solution 1: assemble the matrix row by row, then take the determinant.
n_rows = int(input())
matrix = []
for _ in range(n_rows):
    matrix.append([float(value) for value in input().split()])
print(round(np.linalg.det(matrix), 2))

# -- another answer
# Solution 2: parse all rows straight into a float ndarray.
n_rows = int(input())
matrix = np.array([input().split() for _ in range(n_rows)], float)
print(round(np.linalg.det(matrix), 2))
| [
"numpy.linalg.det"
] | [((141, 157), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (154, 157), True, 'import numpy as np\n'), ((271, 287), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (284, 287), True, 'import numpy as np\n')] |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that implements concatenation of observation fields."""
from typing import Sequence, Optional
from acme import types
from acme.wrappers import base
import dm_env
import numpy as np
import tree
def _concat(values: types.NestedArray) -> np.ndarray:
  """Concatenates the leaves of `values` along the leading dimension.

  Scalars are promoted to 1d arrays. All leaves are expected to share
  their shape except for the leading dimension.

  Args:
    values: the nested arrays to concatenate.

  Returns:
    The concatenated array.
  """
  flat = [np.atleast_1d(leaf) for leaf in tree.flatten(values)]
  return np.concatenate(flat)
def _zeros_like(nest, dtype=None):
  """Builds a nest of zero-filled NumPy arrays mirroring `nest`'s specs."""
  return tree.map_structure(
      lambda spec: np.zeros(spec.shape, dtype or spec.dtype), nest)
class ConcatObservationWrapper(base.EnvironmentWrapper):
  """Wrapper that concatenates observation fields.

  It takes an environment with nested observations and concatenates the fields
  in a single tensor. The orginial fields should be 1-dimensional.
  Observation fields that are not in name_filter are dropped.
  """

  def __init__(self, environment: dm_env.Environment,
               name_filter: Optional[Sequence[str]] = None):
    """Initializes a new ConcatObservationWrapper.

    Args:
      environment: Environment to wrap.
      name_filter: Sequence of observation names to keep. None keeps them all.
    """
    super().__init__(environment)
    obs_spec = environment.observation_spec()
    if name_filter is None:
      name_filter = list(obs_spec.keys())
    # Keep only requested names that actually exist, preserving filter order.
    self._obs_names = [name for name in name_filter if name in obs_spec.keys()]

    # Probe the converter with an all-zeros observation to discover the
    # flattened shape/dtype for the wrapped spec.
    probe = self._convert_observation(_zeros_like(obs_spec))
    self._observation_spec = dm_env.specs.BoundedArray(
        shape=probe.shape,
        dtype=probe.dtype,
        minimum=-np.inf,
        maximum=np.inf,
        name='state')

  def _convert_observation(self, observation):
    selected = {name: observation[name] for name in self._obs_names}
    return _concat(selected)

  def step(self, action) -> dm_env.TimeStep:
    timestep = self._environment.step(action)
    return timestep._replace(
        observation=self._convert_observation(timestep.observation))

  def reset(self) -> dm_env.TimeStep:
    timestep = self._environment.reset()
    return timestep._replace(
        observation=self._convert_observation(timestep.observation))

  def observation_spec(self) -> types.NestedSpec:
    return self._observation_spec
| [
"numpy.zeros",
"tree.flatten",
"dm_env.specs.BoundedArray",
"numpy.concatenate"
] | [((1237, 1259), 'numpy.concatenate', 'np.concatenate', (['leaves'], {}), '(leaves)\n', (1251, 1259), True, 'import numpy as np\n'), ((2439, 2561), 'dm_env.specs.BoundedArray', 'dm_env.specs.BoundedArray', ([], {'shape': 'dummy_obs.shape', 'dtype': 'dummy_obs.dtype', 'minimum': '(-np.inf)', 'maximum': 'np.inf', 'name': '"""state"""'}), "(shape=dummy_obs.shape, dtype=dummy_obs.dtype,\n minimum=-np.inf, maximum=np.inf, name='state')\n", (2464, 2561), False, 'import dm_env\n'), ((1205, 1225), 'tree.flatten', 'tree.flatten', (['values'], {}), '(values)\n', (1217, 1225), False, 'import tree\n'), ((1392, 1427), 'numpy.zeros', 'np.zeros', (['x.shape', '(dtype or x.dtype)'], {}), '(x.shape, dtype or x.dtype)\n', (1400, 1427), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import json
#Get the stating phase information
def getStartingPhases():
    """Return the first line of the optimization results file (the two starting phases)."""
    with open('/nojournal/bin/OptimizationResults.txt') as results:
        return results.readline()
# Build the ring's planned phase sequence from its starting phase (SP):
# first the phases numbered above SP, then the phases numbered below SP
# (SP itself is not appended). The resulting sequence is then duplicated
# so that it covers a second cycle.
def phaseGroupInRing(SP, ring_phases, phasesInRing):
    """Fill `phasesInRing` with the ring's phase order after `SP`, doubled.

    Phases numbered above SP come first, then phases numbered below SP;
    SP itself is excluded. The sequence is then duplicated in place so it
    spans a second cycle. The mutated list is also returned.

    :param SP: starting phase number for this ring
    :param ring_phases: all phase numbers belonging to the ring
    :param phasesInRing: output list, mutated in place
    """
    for phase in ring_phases:
        if phase > SP:
            phasesInRing.append(phase)
    for phase in ring_phases:
        if phase < SP:
            phasesInRing.append(phase)
    # Append a copy of the whole sequence so it covers the 2nd cycle too.
    phasesInRing.extend(list(phasesInRing))
    return phasesInRing
def getInitToPhasesAndElaspedGreenTime():
    """Return the second line of the results file: init times and elapsed green times."""
    with open('/nojournal/bin/OptimizationResults.txt') as results:
        for line_no, contents in enumerate(results):
            if line_no == 1:
                break
    # `contents` now holds the second line of the file.
    return contents
#Find the phase duration for all the planned phases.
def getPhaseTimesForCycle1(phase_Times, SP, CP, RingNo):
    """Collect per-phase green durations for one ring/critical point over 3 cycles.

    Reads one results-file line of 8 durations per cycle (line indexes 2-4 for
    the 'Left' critical point, 5-7 for 'Right'), keeps the four durations for
    the requested ring, drops the cycle-1 phases before the starting phase
    `SP`, concatenates cycles 1-3, truncates to 8 entries, and finally removes
    zero-length phases.

    :param phase_Times: fallback result if CP/RingNo match no branch
        (callers pass an empty list)
    :param SP: starting phase number (1-4 for Ring1, 5-8 for Ring2)
    :param CP: critical point, 'Left' or 'Right'
    :param RingNo: 'Ring1' or 'Ring2'
    """
    if(CP == 'Left'):
        # Line index 2: cycle-1 durations for the left critical point.
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 2:
                    break
        durationOfP1K1R1, durationOfP2K1R1, durationOfP3K1R1, durationOfP4K1R1, durationOfP5K1R1, durationOfP6K1R1, durationOfP7K1R1, durationOfP8K1R1 = line.split()
        # print(line)
        if(RingNo == 'Ring1'):
            left_r1_k1_Phase_Times = [float(durationOfP1K1R1), float(durationOfP2K1R1), float(durationOfP3K1R1), float(durationOfP4K1R1)]
            if SP > 1:
                # Drop the cycle-1 phases that precede the starting phase.
                left_r1_k1_Phase_Times = left_r1_k1_Phase_Times[SP-1:]
        elif(RingNo == 'Ring2'):
            left_r2_k1_Phase_Times = [float(durationOfP5K1R1), float(durationOfP6K1R1), float(durationOfP7K1R1), float(durationOfP8K1R1)]
            if SP > 4:
                left_r2_k1_Phase_Times = left_r2_k1_Phase_Times[SP-5:]
        # For cycle2 Left CP
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 3:
                    break
        durationOfP1K2R1, durationOfP2K2R1, durationOfP3K2R1, durationOfP4K2R1, durationOfP5K2R1, durationOfP6K2R1, durationOfP7K2R1, durationOfP8K2R1 = line.split()
        if(RingNo == 'Ring1'):
            left_r1_k2_Phase_Times = [float(durationOfP1K2R1), float(durationOfP2K2R1), float(durationOfP3K2R1), float(durationOfP4K2R1)]
            left_r1_k1_Phase_Times.extend(left_r1_k2_Phase_Times)
        elif(RingNo == 'Ring2'):
            left_r2_k2_Phase_Times = [float(durationOfP5K2R1), float(durationOfP6K2R1), float(durationOfP7K2R1), float(durationOfP8K2R1)]
            left_r2_k1_Phase_Times.extend(left_r2_k2_Phase_Times)
        # For cycle3 Left CP
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 4:
                    break
        durationOfP1K3R1, durationOfP2K3R1, durationOfP3K3R1, durationOfP4K3R1, durationOfP5K3R1, durationOfP6K3R1, durationOfP7K3R1, durationOfP8K3R1 = line.split()
        if(RingNo == 'Ring1'):
            left_r1_k3_Phase_Times = [float(durationOfP1K3R1), float(durationOfP2K3R1), float(durationOfP3K3R1), float(durationOfP4K3R1)]
            left_r1_k1_Phase_Times.extend(left_r1_k3_Phase_Times)
            # Keep at most 8 phase entries (two full cycles from the start).
            del left_r1_k1_Phase_Times[8:]
            phase_Times = left_r1_k1_Phase_Times
        elif(RingNo == 'Ring2'):
            left_r2_k3_Phase_Times = [float(durationOfP5K3R1), float(durationOfP6K3R1), float(durationOfP7K3R1), float(durationOfP8K3R1)]
            left_r2_k1_Phase_Times.extend(left_r2_k3_Phase_Times)
            del left_r2_k1_Phase_Times[8:]
            phase_Times = left_r2_k1_Phase_Times
    # # # For cycle1 Right CP
    if(CP == 'Right'):
        # Line index 5: cycle-1 durations for the right critical point.
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 5:
                    break
        durationOfP1K1R2, durationOfP2K1R2, durationOfP3K1R2, durationOfP4K1R2, durationOfP5K1R2, durationOfP6K1R2, durationOfP7K1R2, durationOfP8K1R2 = line.split()
        # print(line)
        if(RingNo == 'Ring1'):
            right_r1_k1_Phase_Times = [float(durationOfP1K1R2), float(durationOfP2K1R2), float(durationOfP3K1R2), float(durationOfP4K1R2)]
            if SP > 1:
                right_r1_k1_Phase_Times = right_r1_k1_Phase_Times[SP-1:]
        elif(RingNo == 'Ring2'):
            right_r2_k1_Phase_Times = [float(durationOfP5K1R2), float(durationOfP6K1R2), float(durationOfP7K1R2), float(durationOfP8K1R2)]
            if SP > 4:
                right_r2_k1_Phase_Times = right_r2_k1_Phase_Times[SP-5:]
        # For cycle2 Right CP
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 6:
                    break
        durationOfP1K2R2, durationOfP2K2R2, durationOfP3K2R2, durationOfP4K2R2, durationOfP5K2R2, durationOfP6K2R2, durationOfP7K2R2, durationOfP8K2R2 = line.split()
        if(RingNo == 'Ring1'):
            right_r1_k2_Phase_Times = [float(durationOfP1K2R2), float(durationOfP2K2R2), float(durationOfP3K2R2), float(durationOfP4K2R2)]
            right_r1_k1_Phase_Times.extend(right_r1_k2_Phase_Times)
        elif(RingNo == 'Ring2'):
            right_r2_k2_Phase_Times = [float(durationOfP5K2R2), float(durationOfP6K2R2), float(durationOfP7K2R2), float(durationOfP8K2R2)]
            right_r2_k1_Phase_Times.extend(right_r2_k2_Phase_Times)
        # For cycle3 Right CP
        with open('/nojournal/bin/OptimizationResults.txt') as f:
            for i, line in enumerate(f):
                if i == 7:
                    break
        durationOfP1K3R2, durationOfP2K3R2, durationOfP3K3R2, durationOfP4K3R2, durationOfP5K3R2, durationOfP6K3R2, durationOfP7K3R2, durationOfP8K3R2 = line.split()
        if(RingNo == 'Ring1'):
            right_r1_k3_Phase_Times = [float(durationOfP1K3R2), float(durationOfP2K3R2), float(durationOfP3K3R2), float(durationOfP4K3R2)]
            right_r1_k1_Phase_Times.extend(right_r1_k3_Phase_Times)
            del right_r1_k1_Phase_Times[8:]
            phase_Times = right_r1_k1_Phase_Times
        elif(RingNo == 'Ring2'):
            right_r2_k3_Phase_Times = [float(durationOfP5K3R2), float(durationOfP6K3R2), float(durationOfP7K3R2), float(durationOfP8K3R2)]
            right_r2_k1_Phase_Times.extend(right_r2_k3_Phase_Times)
            del right_r2_k1_Phase_Times[8:]
            phase_Times = right_r2_k1_Phase_Times
    # Drop zero-length (skipped) phases before returning.
    phase_Times = [x for x in phase_Times if x != 0]
    return phase_Times
def getCummulativePhaseTimes(ring_Phase_Times):
    """Return the running totals of the phase durations, prefixed with 0."""
    running_totals = np.cumsum(ring_Phase_Times)
    # Prepend 0 so the series starts at the cycle origin.
    return np.insert(running_totals, 0, 0)
def getPriorityRequest():
    """Read the priority requests' lower ETA bounds (Rl) from the results file.

    Line index 14 holds the request count; the following lines each hold one
    request as five whitespace-separated fields, of which the second (Rl) is
    collected.
    """
    with open('/nojournal/bin/OptimizationResults.txt') as results:
        for line_no, line in enumerate(results):
            if line_no == 14:
                break
    request_count = int(line)
    print("No of Request", request_count)

    last_request_line = 15 + request_count
    eta = []
    with open('/nojournal/bin/OptimizationResults.txt') as results:
        for line_no, line in enumerate(results):
            if line_no < 15:
                continue
            if line_no < last_request_line:
                _, lower_eta, _, _, _ = line.split()
                eta.append(float(lower_eta))
            else:
                break
    return eta
# Plotting time-phase diagram
def timePhaseDiagram(SP1, SP2, cum_Left_Ring1_Phase_Times, cum_Right_Ring1_Phase_Times, cum_phaseInRing1, cum_Left_Ring2_Phase_Times, cum_Right_Ring2_Phase_Times, cum_phaseInRing2, phasesInRing1, phasesInRing2, ETA, req_phase, dilemmaZone_phases, dilemmaZone_ETA, ringNo):
    """Plot the time-phase diagram for the planned signal timing.

    Draws the left/right critical-point trajectories for ring 1 and/or ring 2
    (selected by `ringNo`: 'Ring1&2', 'Ring1' or 'Ring2'), then overlays red
    rectangles for EV priority requests (at `ETA` x `req_phase` position) and
    green rectangles for dilemma-zone requests. Shows the figure on screen.

    Cumulative y positions step by 10 per phase; the y tick labels are the
    phase numbers themselves.
    """
    fig, ax1 = plt.subplots()
    if ringNo == 'Ring1&2':
        #Ring1
        color = 'tab:orange'
        ax1.set_xlabel('Time (s)',fontsize=24, fontweight = 'bold')
        ax1.set_ylabel('Ring 1 Phases', color=color, fontsize=28,fontweight = 'bold')
        ax1.plot(cum_Left_Ring1_Phase_Times, cum_phaseInRing1, color=color,linewidth = 2)
        ax1.plot(cum_Right_Ring1_Phase_Times, cum_phaseInRing1, color=color,linewidth = 2)
        plt.xticks(np.arange(cum_Right_Ring1_Phase_Times[0], cum_Right_Ring1_Phase_Times[-1], 10),fontsize = 24)
        ax1.set_yticks(ticks=np.arange(cum_phaseInRing1[0], cum_phaseInRing1[-1], 10))
        ax1.set_yticklabels(phasesInRing1)
        ax1.tick_params(axis='y', labelcolor=color, labelsize=24)
        for axis in ['top','bottom','left','right']:
            ax1.spines[axis].set_linewidth(4)
        #Ring2
        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax2.set_ylabel('Ring 2 Phases', color=color, fontsize=28, fontweight = 'bold')
        ax2.plot(cum_Left_Ring2_Phase_Times, cum_phaseInRing2, color=color,linewidth = 4)
        ax2.plot(cum_Right_Ring2_Phase_Times, cum_phaseInRing2, color=color, linewidth = 4)
        ax2.set_yticks(ticks=np.arange(cum_phaseInRing2[0], cum_phaseInRing2[-1], 10))
        ax2.set_yticklabels(phasesInRing2)
        ax2.tick_params(axis='y', labelcolor=color,labelsize=24)
    elif ringNo == 'Ring1':
        color = 'tab:red'
        ax1.set_xlabel('time (s)', fontsize=20)
        ax1.set_ylabel('Ring 1', color=color, fontsize=20)
        ax1.plot(cum_Left_Ring1_Phase_Times, cum_phaseInRing1, color=color)
        ax1.plot(cum_Right_Ring1_Phase_Times, cum_phaseInRing1, color=color)
        plt.xticks(np.arange(cum_Right_Ring1_Phase_Times[0], cum_Right_Ring1_Phase_Times[-1], 10),fontsize = 18)
        ax1.set_yticks(ticks=np.arange(cum_phaseInRing1[0], cum_phaseInRing1[-1], 10))
        ax1.set_yticklabels(phasesInRing1)
        ax1.tick_params(axis='y', labelcolor=color, labelsize=18)
    elif ringNo == 'Ring2':
        color = 'tab:blue'
        ax1.set_xlabel('time (s)', fontsize=20)
        ax1.set_ylabel('Ring 2', color=color, fontsize=20)
        ax1.plot(cum_Left_Ring2_Phase_Times, cum_phaseInRing2, color=color)
        ax1.plot(cum_Right_Ring2_Phase_Times, cum_phaseInRing2, color=color)
        plt.xticks(np.arange(cum_Right_Ring2_Phase_Times[0], cum_Right_Ring2_Phase_Times[-1], 10),fontsize = 18)
        ax1.set_yticks(ticks=np.arange(cum_phaseInRing2[0], cum_phaseInRing2[-1], 10))
        ax1.set_yticklabels(phasesInRing2)
        ax1.tick_params(axis='y', labelcolor=color,labelsize=18)
    # EV Requested phase
    requestedPhasePosition =[]
    indexPosList =[]
    indexPos = 0
    # Map every requested phase number to its index position(s) on the y-axis.
    for i in req_phase:
        if i<5:
            for j in range(len(phasesInRing1)):
                if phasesInRing1[j] == i:
                    indexPosList.append(j)
        elif i>4:
            for j in range(len(phasesInRing2)):
                if phasesInRing2[j] == i:
                    indexPosList.append(j)
    # NOTE(review): `i` below is whatever value was left over from the loop
    # above, and the inner `for i in indexPosList` rebinds it again — this
    # looks unintended when req_phase mixes ring-1 and ring-2 phases; confirm.
    if i<5:
        for i in indexPosList:
            pos = cum_phaseInRing1[i]
            requestedPhasePosition.append(pos)
    elif i>4:
        for i in indexPosList:
            pos = cum_phaseInRing2[i]
            requestedPhasePosition.append(pos)
    patches =[]
    req_phase_length = len(requestedPhasePosition)
    # Draw one red rectangle per EV request; only the first carries the label.
    for i in range(0,req_phase_length):
        x = ETA[i]
        y = requestedPhasePosition[i]
        if i == 0:
            ax1.add_patch(matplotlib.patches.Rectangle((x, y),2,10,angle=0.0,color = 'red',linewidth = 2, label = 'EV Priority Request'))
        else:
            ax1.add_patch(matplotlib.patches.Rectangle((x, y),2,10,angle=0.0,color = 'red',linewidth = 2))
    # patches.append(matplotlib.patches.Rectangle((x, y),25,10,angle=0.0,color = 'red'))
    # ax1.add_collection(PatchCollection(patches))
    # Dilemma Requested phase
    requestedPhasePosition =[]
    indexPosList =[]
    indexPos = 0
    for i in dilemmaZone_phases:
        if i<5:
            for j in range(len(phasesInRing1)):
                if phasesInRing1[j] == i:
                    indexPosList.append(j)
        elif i>4:
            for j in range(len(phasesInRing2)):
                if phasesInRing2[j] == i:
                    indexPosList.append(j)
    # NOTE(review): same loop-variable reuse pattern as above — confirm intent.
    if i<5:
        for i in indexPosList:
            pos = cum_phaseInRing1[i]
            requestedPhasePosition.append(pos)
    elif i>4:
        for i in indexPosList:
            pos = cum_phaseInRing2[i]
            requestedPhasePosition.append(pos)
    patches =[]
    req_phase_length = len(requestedPhasePosition)
    # Draw one green rectangle per dilemma-zone request.
    for i in range(0,req_phase_length):
        x = dilemmaZone_ETA[i]
        y = requestedPhasePosition[i]
        if i == 0:
            ax1.add_patch(matplotlib.patches.Rectangle((x, y),2,10,angle=0.0,color = 'green',linewidth = 4, label = 'DilemmaZone Request'))
        else:
            ax1.add_patch(matplotlib.patches.Rectangle((x, y),2,10,angle=0.0,color = 'green', linewidth = 4))
    ax1.legend(loc='upper right', bbox_to_anchor=(.9, 1), prop={"size":18})
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.grid(color='black', linestyle='-', linewidth=2)
    # plt.legend(loc='best', bbox_to_anchor=(1.1, 1.1))
    # plt.legend()
    plt.show()
def main():
configFile = open("configuration.json", 'r')
config = (json.load(configFile))
# Close the config file:
configFile.close()
r1_phases = []
r2_phases = []
left_R1_CP_phase_times = []
right_R1_CP_phase_times = []
cum_Left_Ring1_Phase_Times = []
cum_Right_Ring1_Phase_Times = []
cum_phaseInRing1 = []
left_R2_CP_phase_times = []
right_R2_CP_phase_times = []
cum_Left_Ring2_Phase_Times = []
cum_Right_Ring2_Phase_Times = []
cum_phaseInRing2 = []
dilemmaZone_phases = []
dilemmaZone_ETA = []
ETA = []
req_phase = []
phasesInRing1 = []
phasesInRing2 = []
count = 0
noOfIteration = config["NoOfRequest"]
while (count < noOfIteration):
ETA_Val = config["ETA"][count]
#Append the same ETA value twice for draw two rectangles for two cycle
ETA.append(ETA_Val)
ETA.append(ETA_Val)
count = count + 1
print("ETA", ETA)
count = 0
noOfIteration = config["NoOfRequiredPhase"]
while (count < noOfIteration):
phaseVal = config["RequestedPhase"][count]
req_phase.append(phaseVal)
count = count + 1
print("Requested Phase", req_phase)
#Dilemma-Zone Information
count = 0
noOfIteration = config["NoOfDilemmaZoneRequest"]
while (count < noOfIteration):
ETA_Val = config["DilemmaZoneETA"][count]
#Append the same ETA value twice for draw two rectangles for two cycle
dilemmaZone_ETA.append(ETA_Val)
dilemmaZone_ETA.append(ETA_Val)
count = count + 1
print("DilemmaZone ETA", dilemmaZone_ETA)
count = 0
noOfIteration = config["NoOfRequiredDilemmaZonePhase"]
while (count < noOfIteration):
phaseVal = config["DilemmaZonePhases"][count]
dilemmaZone_phases.append(phaseVal)
count = count + 1
print("DilemmaZone Requested Phase", dilemmaZone_phases)
SP1, SP2 = getStartingPhases().split() #Get the stating phase information
print("SP1 =", SP1)
print("SP2 =", SP2)
SP1 = int(SP1) #Converting starting phase into integar value
SP2 = int(SP2) #Converting starting phase into integar value
#Obtained planned signal phase of cycle1,2,3 for ring 1. There will be 8 phases.
#phasesInRing1 = []
# n = int(input("Enter no of Phases in Ring1: "))
# phasesInRing1 = list(map(int,input("\nEnter the phase numbers following by space : ").strip().split()))[:n]
#Obtained planned signal phase of cycle1,2,3 for ring 2. There will be 8 phases
# phasesInRing2 = []
# n = int(input("Enter no of Phases in Ring2: "))
# phasesInRing2 = list(map(int,input("\nEnter the phase numbers following by space : ").strip().split()))[:n]
count = 0
noOfIteration = config["NoOfPhasesInRing1"]
while (count < noOfIteration):
phaseVal = config["PhasesInRing1"][count]
phasesInRing1.append(phaseVal)
count = count + 1
print("Phases In Ring1", phasesInRing1)
count = 0
noOfIteration = config["NoOfPhasesInRing2"]
while (count < noOfIteration):
phaseVal = config["PhasesInRing2"][count]
phasesInRing2.append(phaseVal)
count = count + 1
print("Phases In Ring2", phasesInRing2)
#obtained init time and green elapssed time
init1, init2, grn1, grn2 = getInitToPhasesAndElaspedGreenTime().split()
print("ini1 =", init1)
print("ini2 =", init2)
print("Elapesd Green1 =", grn1)
print("Elapesd Green2 =", grn2)
################## For Ring1##################
#Obatined ring wise phase duration for left and right critical points
left_R1_CP_phase_times = getPhaseTimesForCycle1(
left_R1_CP_phase_times, SP1, 'Left','Ring1')
right_R1_CP_phase_times = getPhaseTimesForCycle1(
right_R1_CP_phase_times, SP1, 'Right', 'Ring1')
print("Left Critical Points Phase times for Ring1 =", left_R1_CP_phase_times)
print("Right Critical Points Phase times for Ring1 =", right_R1_CP_phase_times)
# #creating cumulative list
# if SP2-SP1 == 3: ##starting phase 4,7
# cum_Left_Ring1_Phase_Times = getCummulativePhaseTimes(
# left_R1_CP_phase_times)
# cum_Right_Ring1_Phase_Times = getCummulativePhaseTimes(
# right_R1_CP_phase_times)
# cum_Left_Ring1_Phase_Times = np.insert(cum_Left_Ring1_Phase_Times,0,0.0)
# cum_Right_Ring1_Phase_Times = np.insert(cum_Right_Ring1_Phase_Times,0,0.0)
# phasesInRing1 = np.insert(phasesInRing1, 0, SP1-1)
# x = 0
# cum_phaseInRing1= [x]
# length = len(cum_Left_Ring1_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing1.append(x)
# elif SP2-SP1 == 5: ##starting phase 3,8
# cum_Left_Ring1_Phase_Times = getCummulativePhaseTimes(
# left_R1_CP_phase_times)
# cum_Right_Ring1_Phase_Times = getCummulativePhaseTimes(
# right_R1_CP_phase_times)
# cum_Left_Ring1_Phase_Times = np.insert(cum_Left_Ring2_Phase_Times,len(cum_Left_Ring2_Phase_Times),cum_Left_Ring2_Phase_Times[-1]+10)
# cum_Right_Ring1_Phase_Times = np.insert(cum_Right_Ring2_Phase_Times,len(cum_Right_Ring2_Phase_Times),cum_Right_Ring2_Phase_Times[-1]+10)
# phasesInRing1 = np.insert(phasesInRing1, len(phasesInRing1), SP1)
# x = 0
# cum_phaseInRing1= [x]
# length = len(cum_Left_Ring1_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing1.append(x)
# else:
# cum_Left_Ring1_Phase_Times = getCummulativePhaseTimes(left_R1_CP_phase_times)
# cum_Right_Ring1_Phase_Times = getCummulativePhaseTimes(right_R1_CP_phase_times)
# x = 0
# cum_phaseInRing1= [x]
# length = len(cum_Left_Ring1_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing1.append(x)
cum_Left_Ring1_Phase_Times = getCummulativePhaseTimes(left_R1_CP_phase_times)
cum_Right_Ring1_Phase_Times = getCummulativePhaseTimes(right_R1_CP_phase_times)
x = 0
cum_phaseInRing1= [x]
length = len(cum_Left_Ring1_Phase_Times)-1
for i in range(length):
x = x+10
cum_phaseInRing1.append(x)
print("Phases In Ring1", phasesInRing1)
print("Cumulative Left Critical Points Phase times for Ring1 =",
cum_Left_Ring1_Phase_Times)
print("Cumulative Right Critical Points Phase times for Ring1 =",
cum_Right_Ring1_Phase_Times)
print("Cumulative Phases in Ring1 =", cum_phaseInRing1)
################## For Ring2##################
left_R2_CP_phase_times = getPhaseTimesForCycle1(
left_R2_CP_phase_times, SP2, 'Left','Ring2')
right_R2_CP_phase_times = getPhaseTimesForCycle1(
right_R2_CP_phase_times, SP2, 'Right', 'Ring2')
print("Left Critical Points Phase times for Ring2 =", left_R2_CP_phase_times)
print("Right Critical Points Phase times for Ring2 =", right_R2_CP_phase_times)
# # #creating cumulative list
# if SP2-SP1 == 3:
# cum_Left_Ring2_Phase_Times = getCummulativePhaseTimes(
# left_R2_CP_phase_times)
# cum_Right_Ring2_Phase_Times = getCummulativePhaseTimes(
# right_R2_CP_phase_times)
# cum_Left_Ring2_Phase_Times = np.insert(cum_Left_Ring2_Phase_Times,len(cum_Left_Ring2_Phase_Times),cum_Left_Ring2_Phase_Times[-1]+10)
# cum_Right_Ring2_Phase_Times = np.insert(cum_Right_Ring2_Phase_Times,len(cum_Right_Ring2_Phase_Times),cum_Right_Ring2_Phase_Times[-1]+10)
# phasesInRing2 = np.insert(phasesInRing2, len(phasesInRing2), SP2)
# x = 0
# cum_phaseInRing2= [x]
# length = len(cum_Left_Ring2_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing2.append(x)
# elif SP2-SP1 == 5:
# cum_Left_Ring2_Phase_Times = getCummulativePhaseTimes(
# left_R2_CP_phase_times)
# cum_Right_Ring2_Phase_Times = getCummulativePhaseTimes(
# right_R2_CP_phase_times)
# cum_Left_Ring2_Phase_Times = np.insert(cum_Left_Ring2_Phase_Times,0,0.0)
# cum_Right_Ring2_Phase_Times = np.insert(cum_Right_Ring2_Phase_Times,0,0.0)
# phasesInRing2 = np.insert(phasesInRing2, 0, SP2-1)
# x = 0
# cum_phaseInRing2= [x]
# length = len(cum_Left_Ring2_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing2.append(x)
# else:
# cum_Left_Ring2_Phase_Times = getCummulativePhaseTimes(left_R2_CP_phase_times)
# cum_Right_Ring2_Phase_Times = getCummulativePhaseTimes(right_R2_CP_phase_times)
# x = 0
# cum_phaseInRing2= [x]
# length = len(cum_Left_Ring2_Phase_Times)-1
# for i in range(length):
# x = x+10
# cum_phaseInRing2.append(x)
cum_Left_Ring2_Phase_Times = getCummulativePhaseTimes(left_R2_CP_phase_times)
cum_Right_Ring2_Phase_Times = getCummulativePhaseTimes(right_R2_CP_phase_times)
x = 0
cum_phaseInRing2= [x]
length = len(cum_Left_Ring2_Phase_Times)-1
for i in range(length):
x = x+10
cum_phaseInRing2.append(x)
print("Phases In Ring2", phasesInRing2)
print("Cumulative Left Critical Points Phase times for Ring2 =",
cum_Left_Ring2_Phase_Times)
print("Cumulative Right Critical Points Phase times for Ring2 =",
cum_Right_Ring2_Phase_Times)
print("Cumulative Phases in Ring2 =", cum_phaseInRing2)
timePhaseDiagram(SP1, SP2,cum_Left_Ring1_Phase_Times, cum_Right_Ring1_Phase_Times,
cum_phaseInRing1,cum_Left_Ring2_Phase_Times, cum_Right_Ring2_Phase_Times,
cum_phaseInRing2, phasesInRing1, phasesInRing2, ETA, req_phase, dilemmaZone_phases, dilemmaZone_ETA, 'Ring1&2')
if __name__ == '__main__':
main() | [
"json.load",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"numpy.insert",
"numpy.cumsum",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] | [((7260, 7287), 'numpy.cumsum', 'np.cumsum', (['ring_Phase_Times'], {}), '(ring_Phase_Times)\n', (7269, 7287), True, 'import numpy as np\n'), ((7366, 7403), 'numpy.insert', 'np.insert', (['cum_Ring_Phase_Times', '(0)', '(0)'], {}), '(cum_Ring_Phase_Times, 0, 0)\n', (7375, 7403), True, 'import numpy as np\n'), ((8550, 8564), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8562, 8564), True, 'import matplotlib.pyplot as plt\n'), ((13911, 13962), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(2)'}), "(color='black', linestyle='-', linewidth=2)\n", (13919, 13962), True, 'import matplotlib.pyplot as plt\n'), ((14042, 14052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14050, 14052), True, 'import matplotlib.pyplot as plt\n'), ((14131, 14152), 'json.load', 'json.load', (['configFile'], {}), '(configFile)\n', (14140, 14152), False, 'import json\n'), ((8991, 9069), 'numpy.arange', 'np.arange', (['cum_Right_Ring1_Phase_Times[0]', 'cum_Right_Ring1_Phase_Times[-1]', '(10)'], {}), '(cum_Right_Ring1_Phase_Times[0], cum_Right_Ring1_Phase_Times[-1], 10)\n', (9000, 9069), True, 'import numpy as np\n'), ((9115, 9171), 'numpy.arange', 'np.arange', (['cum_phaseInRing1[0]', 'cum_phaseInRing1[-1]', '(10)'], {}), '(cum_phaseInRing1[0], cum_phaseInRing1[-1], 10)\n', (9124, 9171), True, 'import numpy as np\n'), ((9818, 9874), 'numpy.arange', 'np.arange', (['cum_phaseInRing2[0]', 'cum_phaseInRing2[-1]', '(10)'], {}), '(cum_phaseInRing2[0], cum_phaseInRing2[-1], 10)\n', (9827, 9874), True, 'import numpy as np\n'), ((10324, 10402), 'numpy.arange', 'np.arange', (['cum_Right_Ring1_Phase_Times[0]', 'cum_Right_Ring1_Phase_Times[-1]', '(10)'], {}), '(cum_Right_Ring1_Phase_Times[0], cum_Right_Ring1_Phase_Times[-1], 10)\n', (10333, 10402), True, 'import numpy as np\n'), ((12188, 12301), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(x, y)', '(2)', '(10)'], {'angle': '(0.0)', 'color': 
'"""red"""', 'linewidth': '(2)', 'label': '"""EV Priority Request"""'}), "((x, y), 2, 10, angle=0.0, color='red',\n linewidth=2, label='EV Priority Request')\n", (12216, 12301), False, 'import matplotlib\n'), ((12340, 12425), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(x, y)', '(2)', '(10)'], {'angle': '(0.0)', 'color': '"""red"""', 'linewidth': '(2)'}), "((x, y), 2, 10, angle=0.0, color='red', linewidth=2\n )\n", (12368, 12425), False, 'import matplotlib\n'), ((13514, 13629), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(x, y)', '(2)', '(10)'], {'angle': '(0.0)', 'color': '"""green"""', 'linewidth': '(4)', 'label': '"""DilemmaZone Request"""'}), "((x, y), 2, 10, angle=0.0, color='green',\n linewidth=4, label='DilemmaZone Request')\n", (13542, 13629), False, 'import matplotlib\n'), ((13668, 13754), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(x, y)', '(2)', '(10)'], {'angle': '(0.0)', 'color': '"""green"""', 'linewidth': '(4)'}), "((x, y), 2, 10, angle=0.0, color='green',\n linewidth=4)\n", (13696, 13754), False, 'import matplotlib\n'), ((10448, 10504), 'numpy.arange', 'np.arange', (['cum_phaseInRing1[0]', 'cum_phaseInRing1[-1]', '(10)'], {}), '(cum_phaseInRing1[0], cum_phaseInRing1[-1], 10)\n', (10457, 10504), True, 'import numpy as np\n'), ((10967, 11045), 'numpy.arange', 'np.arange', (['cum_Right_Ring2_Phase_Times[0]', 'cum_Right_Ring2_Phase_Times[-1]', '(10)'], {}), '(cum_Right_Ring2_Phase_Times[0], cum_Right_Ring2_Phase_Times[-1], 10)\n', (10976, 11045), True, 'import numpy as np\n'), ((11091, 11147), 'numpy.arange', 'np.arange', (['cum_phaseInRing2[0]', 'cum_phaseInRing2[-1]', '(10)'], {}), '(cum_phaseInRing2[0], cum_phaseInRing2[-1], 10)\n', (11100, 11147), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from scipy.integrate._ivp import rk
from probnum import diffeq
import probnum.problems.zoo.diffeq as diffeq_zoo
# Step-size rules shared by every solver case below: one adaptive, one fixed-step.
_ADAPTIVE_STEPS = diffeq.stepsize.AdaptiveSteps(atol=1e-4, rtol=1e-4, firststep=0.1)
_CONSTANT_STEPS = diffeq.stepsize.ConstantSteps(0.1)
def setup_solver(y0, ode, steprule):
    """Build a wrapped SciPy RK45 solver plus a plain SciPy RK45 reference solver.

    Returns the tuple ``(wrapped_solver, scipy_solver, ode)`` so tests can
    compare the wrapper against vanilla SciPy on the same problem.
    """
    reference = rk.RK45(ode.f, ode.t0, y0, ode.tmax)
    wrapped = diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(
        solver_type=rk.RK45, steprule=steprule
    )
    return wrapped, reference, ode
@pytest.mark.parametrize("steprule", [_ADAPTIVE_STEPS, _CONSTANT_STEPS])
def case_lorenz63(steprule):
    """Lorenz63 system on [0, 1] as a solver test case."""
    initial_state = np.array([0.0, 1.0, 1.05])
    problem = diffeq_zoo.lorenz63(t0=0.0, tmax=1.0, y0=initial_state)
    return setup_solver(initial_state, problem, steprule=steprule)
@pytest.mark.parametrize("steprule", [_ADAPTIVE_STEPS, _CONSTANT_STEPS])
def case_logistic(steprule):
    """Scalar logistic ODE on [0, 1] as a solver test case."""
    initial_state = np.array([0.1])
    problem = diffeq_zoo.logistic(t0=0.0, tmax=1.0, y0=initial_state)
    return setup_solver(initial_state, problem, steprule=steprule)
@pytest.mark.parametrize("steprule", [_ADAPTIVE_STEPS, _CONSTANT_STEPS])
def case_lotkavolterra(steprule):
    """Lotka-Volterra predator-prey system on [0, 1] as a solver test case."""
    initial_state = np.array([0.1, 0.1])
    problem = diffeq_zoo.lotkavolterra(t0=0.0, tmax=1.0, y0=initial_state)
    return setup_solver(initial_state, problem, steprule=steprule)
| [
"probnum.diffeq.stepsize.ConstantSteps",
"probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta",
"probnum.problems.zoo.diffeq.lotkavolterra",
"probnum.diffeq.stepsize.AdaptiveSteps",
"numpy.array",
"pytest.mark.parametrize",
"scipy.integrate._ivp.rk.RK45",
"probnum.problems.zoo.diffeq.lorenz6... | [((165, 235), 'probnum.diffeq.stepsize.AdaptiveSteps', 'diffeq.stepsize.AdaptiveSteps', ([], {'atol': '(0.0001)', 'rtol': '(0.0001)', 'firststep': '(0.1)'}), '(atol=0.0001, rtol=0.0001, firststep=0.1)\n', (194, 235), False, 'from probnum import diffeq\n'), ((250, 284), 'probnum.diffeq.stepsize.ConstantSteps', 'diffeq.stepsize.ConstantSteps', (['(0.1)'], {}), '(0.1)\n', (279, 284), False, 'from probnum import diffeq\n'), ((547, 618), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""steprule"""', '[_ADAPTIVE_STEPS, _CONSTANT_STEPS]'], {}), "('steprule', [_ADAPTIVE_STEPS, _CONSTANT_STEPS])\n", (570, 618), False, 'import pytest\n'), ((794, 865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""steprule"""', '[_ADAPTIVE_STEPS, _CONSTANT_STEPS]'], {}), "('steprule', [_ADAPTIVE_STEPS, _CONSTANT_STEPS])\n", (817, 865), False, 'import pytest\n'), ((1030, 1101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""steprule"""', '[_ADAPTIVE_STEPS, _CONSTANT_STEPS]'], {}), "('steprule', [_ADAPTIVE_STEPS, _CONSTANT_STEPS])\n", (1053, 1101), False, 'import pytest\n'), ((342, 378), 'scipy.integrate._ivp.rk.RK45', 'rk.RK45', (['ode.f', 'ode.t0', 'y0', 'ode.tmax'], {}), '(ode.f, ode.t0, y0, ode.tmax)\n', (349, 378), False, 'from scipy.integrate._ivp import rk\n'), ((396, 493), 'probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta', 'diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta', ([], {'solver_type': 'rk.RK45', 'steprule': 'steprule'}), '(solver_type=rk.RK45,\n steprule=steprule)\n', (449, 493), False, 'from probnum import diffeq\n'), ((657, 683), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.05]'], {}), '([0.0, 1.0, 1.05])\n', (665, 683), True, 'import numpy as np\n'), ((694, 738), 'probnum.problems.zoo.diffeq.lorenz63', 'diffeq_zoo.lorenz63', ([], {'t0': '(0.0)', 'tmax': '(1.0)', 'y0': 'y0'}), '(t0=0.0, tmax=1.0, y0=y0)\n', (713, 738), True, 'import probnum.problems.zoo.diffeq as diffeq_zoo\n'), 
((904, 919), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (912, 919), True, 'import numpy as np\n'), ((930, 974), 'probnum.problems.zoo.diffeq.logistic', 'diffeq_zoo.logistic', ([], {'t0': '(0.0)', 'tmax': '(1.0)', 'y0': 'y0'}), '(t0=0.0, tmax=1.0, y0=y0)\n', (949, 974), True, 'import probnum.problems.zoo.diffeq as diffeq_zoo\n'), ((1145, 1165), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (1153, 1165), True, 'import numpy as np\n'), ((1176, 1225), 'probnum.problems.zoo.diffeq.lotkavolterra', 'diffeq_zoo.lotkavolterra', ([], {'t0': '(0.0)', 'tmax': '(1.0)', 'y0': 'y0'}), '(t0=0.0, tmax=1.0, y0=y0)\n', (1200, 1225), True, 'import probnum.problems.zoo.diffeq as diffeq_zoo\n')] |
'''
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Buddha Bless, No Bug !
'''
'''
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Buddha Bless, No Bug !
'''
import numpy as np
import data_loader
import torch
import torch.nn.modules as nn
import jindutiao
from torch.utils.tensorboard import SummaryWriter
LR = 0.001        # learning rate for the Adam optimizer
Batch_Size = 128  # mini-batch size used by both the training and test loaders
Epoch = 30        # number of full passes over the training set
# Train on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Resnet_model(nn.Module):
    """Small LeNet-sized residual network for 28x28 single-channel digits.

    Stem: 5x5 conv (pad 2) + BatchNorm + 2x2 average pooling.
    Body: four residual blocks that all reuse the same 3x3 conv and the same
    BatchNorm module (weights are shared across blocks).
    Head: 5x5 conv, average pooling, two linear layers, softmax over 10 classes.
    """

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.avgpool2d = nn.AvgPool2d(2, stride=2)
        # Stem
        self.conv2d_1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
        self.batchnorm2d = nn.BatchNorm2d(6)
        # Residual body (single conv reused by every block)
        self.conv2d_2 = nn.Conv2d(6, 6, kernel_size=3, padding=1)
        # Head
        self.conv2d_3 = nn.Conv2d(6, 6, 5)
        self.flatten = nn.Flatten()
        self.sig = nn.Sigmoid()
        self.linear_1 = nn.Linear(6 * 5 * 5, 64)
        self.linear_2 = nn.Linear(64, 10)

    def forward(self, input):
        """Return class probabilities of shape (batch, 10)."""
        out = self.avgpool2d(self.batchnorm2d(self.conv2d_1(input)))
        for _ in range(4):
            out = self.res_forward(out)
        out = self.avgpool2d(self.conv2d_3(out))
        out = self.sig(self.linear_1(self.flatten(out)))
        return torch.softmax(self.linear_2(out), dim=1)

    def res_forward(self, input):
        """One residual block: conv-BN-ReLU-conv-BN, skip connection, ReLU."""
        residual = input
        out = self.relu(self.batchnorm2d(self.conv2d_2(input)))
        out = self.batchnorm2d(self.conv2d_2(out))
        out = out + residual  # skip connection
        return self.relu(out)
def train():
    """Train the ResNet model on the MNIST training split.

    Loads 60000 training samples via ``data_loader``, optimizes with Adam at
    learning rate ``LR`` for ``Epoch`` epochs, and prints the average loss of
    each epoch.

    :return: the trained ``Resnet_model`` instance (still on ``device``).
    """
    print("-----------------------Training-----------------------\n")
    train_datas = data_loader.loadDataSet("../database/HandwrittenDatas/train-images.idx3-ubyte","../database/HandwrittenDatas/train-labels.idx1-ubyte", 60000, Batch_Size)
    cnn = Resnet_model().to(device)
    # NOTE(review): BCELoss over softmax outputs is unusual for 10-class
    # classification; CrossEntropyLoss is the conventional choice — confirm
    # before changing, since it would alter training behavior.
    loss_fn = nn.loss.BCELoss()
    opt = torch.optim.Adam(cnn.parameters(), lr=LR)
    for epoch in range(Epoch):
        aver_loss = 0
        counter = 0
        jindutiao_index = 0
        print("---epoch:{}---".format(epoch))
        for x, y in train_datas:
            x = x.to(device)
            y = y.to(device)
            x = x[:,np.newaxis]  # add channel dimension: (N, H, W) -> (N, 1, H, W)
            counter += 1
            y_pred = cnn(x)
            loss = loss_fn(y_pred, y)
            # BUGFIX: accumulate the scalar value, not the loss tensor.
            # Accumulating the tensor kept every batch's autograd graph alive,
            # growing memory over the whole epoch.
            aver_loss += loss.item()
            opt.zero_grad()
            loss.backward()
            opt.step()
            jindutiao.progress(jindutiao_index,len(train_datas.dataset)/128-1)
            jindutiao_index += 1
        print("\nloss:{}".format(aver_loss / counter))
    return cnn
def Test(model):
    """Evaluate a trained model on the MNIST test split and print its accuracy.

    :param model: trained ``Resnet_model`` expected to live on the CPU
                  (inputs are not moved to ``device`` here).
    """
    print("------------------------Testing------------------------\n")
    test_datas = data_loader.loadDataSet("../database/HandwrittenDatas/t10k-images.idx3-ubyte","../database/HandwrittenDatas/t10k-labels.idx1-ubyte",10000,Batch_Size)
    count = 0
    length = float(len(test_datas.dataset))
    jindutiao_index = 0
    # BUGFIX: switch BatchNorm layers to their running statistics and disable
    # gradient tracking during evaluation. Without eval(), BatchNorm used the
    # per-batch statistics, making reported accuracy batch-size dependent.
    model.eval()
    with torch.no_grad():
        for x, y in test_datas:
            x = x[:, np.newaxis]  # add channel dimension
            y_pred = model(x)
            y_pred = y_pred.detach().numpy()
            y = y.detach().numpy()
            y = np.argmax(y, axis=1)
            y_pred = np.argmax(y_pred, axis=1)
            # Vectorized comparison instead of a per-sample Python loop.
            count += int(np.sum(y == y_pred))
            jindutiao.progress(jindutiao_index, len(test_datas.dataset)/128)
            jindutiao_index += 1
    print("\n此Resnet模型准确率为:{}".format(count/length))
def main():
    """Train the model, persist it to disk, then reload it on CPU and evaluate."""
    model_path = './model_save/resnet_model.pth'
    trained = train()
    torch.save(trained, model_path)
    reloaded = torch.load(model_path).to('cpu')
    Test(reloaded)
if __name__ == '__main__':
print("-------------------------resnet-------------------------\n")
main() | [
"torch.nn.modules.Sigmoid",
"torch.nn.modules.loss.BCELoss",
"torch.nn.modules.Linear",
"numpy.argmax",
"data_loader.loadDataSet",
"torch.nn.modules.ReLU",
"torch.nn.modules.AvgPool2d",
"torch.nn.modules.Conv2d",
"torch.load",
"torch.softmax",
"torch.save",
"torch.cuda.is_available",
"torch.... | [((3379, 3537), 'data_loader.loadDataSet', 'data_loader.loadDataSet', (['"""../database/HandwrittenDatas/train-images.idx3-ubyte"""', '"""../database/HandwrittenDatas/train-labels.idx1-ubyte"""', '(60000)', 'Batch_Size'], {}), "('../database/HandwrittenDatas/train-images.idx3-ubyte',\n '../database/HandwrittenDatas/train-labels.idx1-ubyte', 60000, Batch_Size)\n", (3402, 3537), False, 'import data_loader\n'), ((3585, 3602), 'torch.nn.modules.loss.BCELoss', 'nn.loss.BCELoss', ([], {}), '()\n', (3600, 3602), True, 'import torch.nn.modules as nn\n'), ((4415, 4571), 'data_loader.loadDataSet', 'data_loader.loadDataSet', (['"""../database/HandwrittenDatas/t10k-images.idx3-ubyte"""', '"""../database/HandwrittenDatas/t10k-labels.idx1-ubyte"""', '(10000)', 'Batch_Size'], {}), "('../database/HandwrittenDatas/t10k-images.idx3-ubyte',\n '../database/HandwrittenDatas/t10k-labels.idx1-ubyte', 10000, Batch_Size)\n", (4438, 4571), False, 'import data_loader\n'), ((5155, 5205), 'torch.save', 'torch.save', (['model', '"""./model_save/resnet_model.pth"""'], {}), "(model, './model_save/resnet_model.pth')\n", (5165, 5205), False, 'import torch\n'), ((1975, 2000), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1998, 2000), False, 'import torch\n'), ((2117, 2126), 'torch.nn.modules.ReLU', 'nn.ReLU', ([], {}), '()\n', (2124, 2126), True, 'import torch.nn.modules as nn\n'), ((2152, 2177), 'torch.nn.modules.AvgPool2d', 'nn.AvgPool2d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (2164, 2177), True, 'import torch.nn.modules as nn\n'), ((2217, 2258), 'torch.nn.modules.Conv2d', 'nn.Conv2d', (['(1)', '(6)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(1, 6, kernel_size=5, padding=2)\n', (2226, 2258), True, 'import torch.nn.modules as nn\n'), ((2283, 2300), 'torch.nn.modules.BatchNorm2d', 'nn.BatchNorm2d', (['(6)'], {}), '(6)\n', (2297, 2300), True, 'import torch.nn.modules as nn\n'), ((2341, 2382), 'torch.nn.modules.Conv2d', 'nn.Conv2d', (['(6)', 
'(6)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(6, 6, kernel_size=3, padding=1)\n', (2350, 2382), True, 'import torch.nn.modules as nn\n'), ((2419, 2437), 'torch.nn.modules.Conv2d', 'nn.Conv2d', (['(6)', '(6)', '(5)'], {}), '(6, 6, 5)\n', (2428, 2437), True, 'import torch.nn.modules as nn\n'), ((2461, 2473), 'torch.nn.modules.Flatten', 'nn.Flatten', ([], {}), '()\n', (2471, 2473), True, 'import torch.nn.modules as nn\n'), ((2493, 2505), 'torch.nn.modules.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2503, 2505), True, 'import torch.nn.modules as nn\n'), ((2530, 2554), 'torch.nn.modules.Linear', 'nn.Linear', (['(6 * 5 * 5)', '(64)'], {}), '(6 * 5 * 5, 64)\n', (2539, 2554), True, 'import torch.nn.modules as nn\n'), ((2575, 2592), 'torch.nn.modules.Linear', 'nn.Linear', (['(64)', '(10)'], {}), '(64, 10)\n', (2584, 2592), True, 'import torch.nn.modules as nn\n'), ((2991, 3014), 'torch.softmax', 'torch.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (3004, 3014), False, 'import torch\n'), ((4814, 4834), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4823, 4834), True, 'import numpy as np\n'), ((4851, 4876), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (4860, 4876), True, 'import numpy as np\n'), ((5218, 5261), 'torch.load', 'torch.load', (['"""./model_save/resnet_model.pth"""'], {}), "('./model_save/resnet_model.pth')\n", (5228, 5261), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
#############################################################################
# @package ad_hmi
# @Config file generation.
#############################################################################
# @author <NAME>
# @copyright (c) All rights reserved.
#############################################################################
import socket
import logging
import os
import configparser
import sys
import threading
import struct
import copy
import numpy
import time
import json
from collections import deque
from asammdf import MDF, Signal
from Commands import *
class Service:
    """Bookkeeping around an XCP session: logging, ODT layout and packet sniffing."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # --- XCP related state --- #
        self.odt_data = {}      # ODT layout merged in from JSON files
        self.sniffer = None     # created lazily by init_sniffer()
        # self.sniffer = MessageAnalyzer(self.logger) # For developing phase
        self.code_command = 0   # last command code seen from the master
        self.sub_command = 0    # last sub-command byte seen from the master

    def start_log(self):
        """Start a fresh INFO-level file log, deleting any previous log file."""
        self.logger.setLevel(level=logging.INFO)
        log_name = 'XCP_Service.log'
        if os.path.exists(log_name):
            os.remove(log_name)
        file_handler = logging.FileHandler(log_name)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(file_handler)
        self.logger.info('======================Log start======================')

    def read_json_file(self, file_path):
        """Merge the JSON content of *file_path* into ``self.odt_data``.

        Missing files and read errors are logged as warnings, not raised.
        """
        if not os.path.exists(file_path):
            self.logger.warning('Json file does not exist. Please check.')
            return
        try:
            with open(file_path) as handle:
                self.odt_data.update(json.loads(handle.read()))
        except IOError as err:
            self.logger.warning('Error when reading json file: ' + str(err))

    def init_sniffer(self):
        """
        Used for Ethernet packet sniffer. (Get packet between CANape & VX box)
        :return: None
        """
        self.sniffer = MessageAnalyzer(self.logger)

    def analyze_packet(self, message, cto=False):
        """
        After received the message, check the content and abstract data we need.
        :param message: Original message data in TCP packet
        :param cto: Show if message is from master (CANape)
        :return: None
        """
        if not cto:
            # Response / DTO direction: hand off with the last seen command context.
            self.sniffer.check_resp(self.code_command, message, self.sub_command)
            return
        # CTO from the master: remember the command code (and optional sub-command).
        self.code_command = message[4]
        if len(message) > 5:
            self.sub_command = message[5]
        self.sniffer.check_cto(message)
class MessageSender:
def __init__(self, logger):
self.target_host = '192.168.50.2'
self.target_port = 5555
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.settimeout(1)
# self.client.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)
# self.client.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 60000, 30000))
self.logger = logger
self.byte_order = 0
self.resp_queue = list()
self.listener = None
self.end_thread = False
# self.message_analyzer = MessageAnalyzer(logger)
def connect(self):
"""
Establish TCP link to VX box.
:return: True if TCP link has been established.
"""
print('>>>> connect')
result = False
try:
self.client.connect((self.target_host, self.target_port))
result = True
if not self.listener:
self.listener = threading.Thread(target=self.listen)
else:
self.end_thread = True
time.sleep(1)
self.client.send(b'\x02\x00\x00\x00\xff\x01')
# recv = self.client.recv(9014)
# print(recv)
self.listener.start()
except socket.gaierror:
self.logger.warning('Address-related error connecting to server. No connection was established.')
except socket.error:
self.logger.warning('Connection error. No connection was established.')
return result
    def disconnect(self):
        """
        Terminate TCP link.

        Signals the listener thread to stop, waits one second for it to notice
        (its recv timeout is 1 s), then closes the socket.
        :return: None
        """
        self.end_thread = True
        time.sleep(1)
        self.client.close()
def listen(self):
print('>>>> listen')
while not self.end_thread:
try:
data = self.client.recv(9014)
if data:
print(data)
self.resp_queue.append(data)
except:
print("woshibaile")
self.end_thread = False
print('>>>> listen end')
def get_resp(self):
ret = None
if self.resp_queue:
ret = self.resp_queue.pop(0)
return ret
    def tcp_send(self, data, retry=0, *params):
        """
        Send CTO to VX box and analyze response
        :param data: command code of XCP
        :param retry: retry times. If value is over 5, return false
        :param params: parameters for command code (optional)
        :return: True if positive response from VX box is received

        NOTE(review): the retry/error-code handling below is entirely commented
        out, so data_recv is fetched but never inspected and 'retry' is unused;
        the method effectively returns True unless the socket send fails.
        """
        result = True
        if params:
            data.extend(params)
        # NOTE(review): header/length framing is disabled — the caller is
        # expected to pass a fully framed packet. Confirm against the
        # transport-layer framing the VX box expects.
        # data.insert(0, len(data))
        # data.insert(1, 0)
        # data.insert(1, 0)
        # data.insert(1, 0)
        try:
            print('start sending data')
            self.client.send(data)
            data_recv = self.xcp_get_response()
            # if data_recv[0] == 0xFC:
            #     retry += 1
            #     if retry < 6:
            #         result = self.tcp_send(data[4:len(data)], retry)
            # elif data_recv[0] == 0xFD:
            #     retry += 1
            #     if retry < 6:
            #         result = self.tcp_send(data[4:len(data)], retry)
            # elif data_recv[0] == 0xFE:
            #     if not data_recv[1]:
            #         self.logger.info('Error code 0x00: Command processor synchronization.')
            #         result = True
            #     elif self.message_analyzer.check_error_code(data_recv[1]):
            #         retry += 1
            #         if retry < 6:
            #             result = self.tcp_send(data[4:len(data)], retry)
            #     else:
            #         result = True
            #     self.message_analyzer.analyze_pos_resp(data[4], data_recv, data[5])
        except socket.error:
            self.logger.info('Connection lost. Reconnect.')
            self.connect()
            result = False
        finally:
            # NOTE(review): 'return' inside 'finally' swallows any in-flight
            # exception; consider returning after the try block instead.
            return result
    def xcp_get_response(self):
        """
        Get response from VX box and check result

        Blocks until a non-empty packet arrives. DAQ, service (0xFC) and most
        event (0xFD) packets are consumed by recursing until a command response
        remains; the first four (transport header) bytes are stripped.
        :return: raw data of response from VX box (removed 1st~4th bytes)
        """
        data = self.client.recv(9014)
        while not data:
            data = self.client.recv(9014)
        if data[4] <= 0xFB:  # DAQ packet
            # self.message_analyzer.split_daq(data)
            # NOTE(review): the recursive call already returns header-stripped
            # data, yet data[4:] is sliced again below — looks like a
            # double-strip on this path; confirm.
            data = self.xcp_get_response()
        data_new = data[4:len(data)]
        if data_new[0] == 0xFC:  # Service packet
            if data_new[1]:  # Service text
                self.logger.info('XCP slave sends notification: ' + data_new[2:len(data_new) - 2].decode())
                data_new = self.xcp_get_response()
            else:
                self.logger.info('XCP slave requests to be reset.')
        elif data_new[0] == 0xFD:  # Event packet
            # Each event is logged; most are transparent and trigger another
            # recv via recursion, a few (marked 'needs more action') fall through.
            if data_new[1] == 0x00:
                self.logger.info('Slave starting in RESUME mode.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x01:
                self.logger.info('The DAQ configuration in non-volatile memory has been cleared.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x02:
                self.logger.info('The DAQ configuration has been stored into non-volatile memory.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x03:
                self.logger.info('The calibration data has been stored into non-volatile memory.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x05:
                self.logger.info('Slave requesting to restart timeout.')  # needs more action
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x06:
                self.logger.info('DAQ processor overload.')  # needs more action
            elif data_new[1] == 0x07:
                self.logger.info('Session terminated by slave device.')  # needs more action
            elif data_new[1] == 0x08:
                self.logger.info('Transfer of externally triggered timestamp.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x09:
                self.logger.info('Indication of a STIM timeout.')  # needs more action
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x0A:
                self.logger.info('Slave entering SLEEP mode.')  # needs more action
            elif data_new[1] == 0x0B:
                self.logger.info('Slave leaving SLEEP mode.')  # needs more action
                data_new = self.xcp_get_response()
            elif data_new[1] == 0x0C:
                self.logger.info('ECU state changed.')  # needs more action
                data_new = self.xcp_get_response()
            elif data_new[1] == 0xFC:
                self.logger.info('ASAM MCD-1-XCP AS SW-DBG-over-XCP related events.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0xFD:
                self.logger.info('ASAM MCD-1 POD related events.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0xFE:
                self.logger.info('User-defined event.')
                data_new = self.xcp_get_response()
            elif data_new[1] == 0xFF:
                self.logger.info('Transport layer specific event.')
                data_new = self.xcp_get_response()
        return data_new
    def xcp_connect(self):
        """
        Send connect command and wait for response.

        The single 0x00 parameter byte is the connection mode (presumably the
        XCP "normal" mode — confirm against the protocol spec in use).
        :return: True if positive response is received
        """
        return self.tcp_send(XCP_CONNECT, 0, *bytearray([0x00]))

    def xcp_disconnect(self):
        """
        Send disconnect command and wait for response.
        :return: True if positive response is received
        """
        return self.tcp_send(XCP_DISCONNECT)

    def xcp_get_status(self):
        """
        Send get status command and wait for response.
        :return: True if positive response is received
        """
        return self.tcp_send(XCP_GET_STATUS)
    # ------------------------------------------------------------------ #
    # Placeholders for XCP commands that are not implemented yet (upload/
    # download, seed & key, calibration paging). Each should eventually
    # build its parameter bytes and delegate to tcp_send().
    # ------------------------------------------------------------------ #
    def xcp_get_id(self):
        pass

    def xcp_set_request(self):
        pass

    def xcp_get_seed(self):
        pass

    def xcp_unlock(self):
        pass

    def xcp_set_mta(self):
        pass

    def xcp_upload(self):
        pass

    def xcp_short_upload(self):
        pass

    def xcp_build_checksum(self):
        pass

    def xcp_transport_layer_cmd(self):
        pass

    def xcp_user_cmd(self):
        pass

    def xcp_get_version(self):
        pass

    def xcp_download(self):
        pass

    def xcp_download_next(self):
        pass

    def xcp_download_max(self):
        pass

    def xcp_short_download(self):
        pass

    def xcp_modify_bits(self):
        pass

    def xcp_set_cal_page(self):
        pass

    def xcp_get_cal_page(self):
        pass

    def xcp_get_pag_processor_info(self):
        pass

    def xcp_get_segment_info(self):
        pass

    def xcp_get_page_info(self):
        pass

    def xcp_set_segment_mode(self):
        pass

    def xcp_get_segment_mode(self):
        pass

    def xcp_copy_cal_page(self):
        pass
def xcp_set_daq_ptr(self, daq_list_number, odt_number, odt_entry_number):
"""
Send set DAQ pointer command and wait for response.
:param daq_list_number: parameter in this command
:param odt_number: parameter in this command
:param odt_entry_number: parameter in this command
:return: True if positive response is received.
"""
if self.byte_order:
params = bytearray([0x00, (daq_list_number & 0xff00) >> 8, daq_list_number & 0xff, odt_number,
odt_entry_number])
else:
params = bytearray([0x00, daq_list_number & 0xff, (daq_list_number & 0xff00) >> 8, odt_number,
odt_entry_number])
return self.tcp_send(XCP_SET_DAQ_PTR, 0, *params)
    def xcp_write_daq(self):
        # TODO: WRITE_DAQ is not implemented yet (see xcp_write_daq_multiple).
        pass
def xcp_set_daq_list_mode(self, mode, daq_list_number, event_channel_number, translate_rate_prescaler, priority):
"""
Send set DAQ list mode command and wait for response.
:param mode: parameter in this command
:param daq_list_number: parameter in this command
:param event_channel_number: parameter in this command
:param translate_rate_prescaler: parameter in this command
:param priority: parameter in this command
:return: True if positive response is received.
"""
mode_byte = mode['pid_off'] << 5 + mode['timestamp'] << 4 + mode['dto_ctr'] << 3 + mode['direction'] << 1 + \
mode['alternating']
if self.byte_order:
params = bytearray([mode_byte, (daq_list_number & 0xff00) >> 8, daq_list_number & 0xff,
(event_channel_number & 0xff00) >> 8, event_channel_number & 0xff,
translate_rate_prescaler, priority])
else:
params = bytearray([mode_byte, daq_list_number & 0xff, (daq_list_number & 0xff00) >> 8,
event_channel_number & 0xff, (event_channel_number & 0xff00) >> 8,
translate_rate_prescaler, priority])
return self.tcp_send(XCP_SET_DAQ_LIST_MODE, 0, *params)
def xcp_start_stop_daq_list(self, mode, daq_list_number):
"""
Send start/stop DAQ list command and wait for response.
:param mode: parameter in this command
:param daq_list_number: parameter in this command
:return: True if positive response is received.
"""
if self.byte_order:
params = bytearray([mode, (daq_list_number & 0xff00) >> 8, daq_list_number & 0xff])
else:
params = bytearray([mode, daq_list_number & 0xff, (daq_list_number & 0xff00) >> 8])
return self.tcp_send(XCP_START_STOP_DAQ_LIST, 0, *params)
    def xcp_start_stop_synch(self, mode):
        """
        Send the START_STOP_SYNCH command (synchronized start/stop of
        multiple DAQ lists) and wait for the response.
        :param mode: mode byte of the command
        :return: True if positive response is received.
        """
        return self.tcp_send(XCP_START_STOP_SYNCH, 0, *bytearray([mode]))
def xcp_write_daq_multiple(self, daq_element_number, daq_element_list):
"""
Send write multiple DAQ command and wait for response.
:param daq_element_number: parameter in this command
:param daq_element_list: parameter in this command
:return: True if positive response is received.
"""
params = [daq_element_number]
for i in range(0, daq_element_number):
params.append(daq_element_list[i]['bit_offset'])
params.append(daq_element_list[i]['size'])
if self.byte_order:
params.append((daq_element_list[i]['address'] & 0xff000000) >> 24)
params.append((daq_element_list[i]['address'] & 0xff0000) >> 16)
params.append((daq_element_list[i]['address'] & 0xff00) >> 8)
params.append(daq_element_list[i]['address'] & 0xff)
else:
params.append(daq_element_list[i]['address'] & 0xff)
params.append((daq_element_list[i]['address'] & 0xff00) >> 8)
params.append((daq_element_list[i]['address'] & 0xff0000) >> 16)
params.append((daq_element_list[i]['address'] & 0xff000000) >> 24)
params.append(daq_element_list[i]['dummy'])
return self.tcp_send(XCP_WRITE_DAQ_MULTIPLE, 0, *bytearray(params))
    def xcp_read_daq(self):
        """Placeholder for the XCP READ_DAQ command (not implemented)."""
        pass
    def xcp_get_daq_clock(self):
        """
        Send the GET_DAQ_CLOCK command (read the slave's DAQ timestamp clock)
        and wait for the response.
        :return: True if positive response is received.
        """
        return self.tcp_send(XCP_GET_DAQ_CLOCK)
    def xcp_get_daq_processor_info(self):
        """
        Send the GET_DAQ_PROCESSOR_INFO command and wait for the response.
        :return: True if positive response is received.
        """
        return self.tcp_send(XCP_GET_DAQ_PROCESSOR_INFO)
def xcp_get_daq_resolution_info(self):
"""
Send get DAQ resolution info command and wait for response.
:return: True if positive response is received.
"""
return self.tcp_send(XCP_GET_DAQ_PROCESSOR_INFO)
    def xcp_get_daq_list_mode(self):
        """Placeholder for the XCP GET_DAQ_LIST_MODE command (not implemented)."""
        pass
    def xcp_get_daq_event_info(self):
        """Placeholder for the XCP GET_DAQ_EVENT_INFO command (not implemented)."""
        pass
    def xcp_dto_ctr_properties(self):
        """Placeholder for the XCP DTO_CTR_PROPERTIES command (not implemented)."""
        pass
    def xcp_set_daq_packed_mode(self):
        """Placeholder for the XCP SET_DAQ_PACKED_MODE command (not implemented)."""
        pass
    def xcp_get_daq_packed_mode(self):
        """Placeholder for the XCP GET_DAQ_PACKED_MODE command (not implemented)."""
        pass
    def xcp_clear_daq_list(self):
        """Placeholder for the XCP CLEAR_DAQ_LIST command (not implemented)."""
        pass
    def xcp_get_daq_list_info(self):
        """Placeholder for the XCP GET_DAQ_LIST_INFO command (not implemented)."""
        pass
    def xcp_free_daq(self):
        """
        Send the FREE_DAQ command (release all dynamically allocated DAQ
        resources in the slave) and wait for the response.
        :return: True if positive response is received.
        """
        return self.tcp_send(XCP_FREE_DAQ)
def xcp_alloc_daq(self, daq_count):
"""
Send allocate DAQ command and wait for response.
:param daq_count: parameter in this command
:return: True if positive response is received.
"""
if self.byte_order:
params = bytearray([0x00, (daq_count & 0xff00) >> 8, daq_count & 0xff])
else:
params = bytearray([0x00, daq_count & 0xff, (daq_count & 0xff00) >> 8])
return self.tcp_send(XCP_ALLOC_DAQ, 0, *params)
def xcp_alloc_odt(self, daq_list_number, odt_count):
"""
Send allocate ODT command and wait for response.
:param daq_list_number: parameter in this command
:param odt_count: parameter in this command
:return: True if positive response is received.
"""
if self.byte_order:
params = bytearray([0x00, (daq_list_number & 0xff00) >> 8, daq_list_number & 0xff, odt_count])
else:
params = bytearray([0x00, daq_list_number & 0xff, (daq_list_number & 0xff00) >> 8, odt_count])
return self.tcp_send(XCP_ALLOC_ODT, 0, *params)
def xcp_alloc_odt_entry(self, daq_list_number, odt_number, odt_entries_count):
"""
Send allocate ODT entry command and wait for response.
:param daq_list_number: parameter in this command
:param odt_number: parameter in this command
:param odt_entries_count: parameter in this command
:return: True if positive response is received.
"""
if self.byte_order:
params = bytearray([0x00, (daq_list_number & 0xff00) >> 8, daq_list_number & 0xff, odt_number,
odt_entries_count])
else:
params = bytearray([0x00, daq_list_number & 0xff, (daq_list_number & 0xff00) >> 8, odt_number,
odt_entries_count])
return self.tcp_send(XCP_ALLOC_ODT_ENTRY, 0, *params)
    def xcp_program_start(self):
        """Placeholder for the XCP PROGRAM_START command (not implemented)."""
        pass
    def xcp_program_clear(self):
        """Placeholder for the XCP PROGRAM_CLEAR command (not implemented)."""
        pass
    def xcp_program(self):
        """Placeholder for the XCP PROGRAM command (not implemented)."""
        pass
    def xcp_program_reset(self):
        """Placeholder for the XCP PROGRAM_RESET command (not implemented)."""
        pass
    def xcp_get_pgm_processor_info(self):
        """Placeholder for the XCP GET_PGM_PROCESSOR_INFO command (not implemented)."""
        pass
    def xcp_get_sector_info(self):
        """Placeholder for the XCP GET_SECTOR_INFO command (not implemented)."""
        pass
    def xcp_program_prepare(self):
        """Placeholder for the XCP PROGRAM_PREPARE command (not implemented)."""
        pass
    def xcp_program_format(self):
        """Placeholder for the XCP PROGRAM_FORMAT command (not implemented)."""
        pass
    def xcp_program_next(self):
        """Placeholder for the XCP PROGRAM_NEXT command (not implemented)."""
        pass
    def xcp_program_max(self):
        """Placeholder for the XCP PROGRAM_MAX command (not implemented)."""
        pass
    def xcp_program_verify(self):
        """Placeholder for the XCP PROGRAM_VERIFY command (not implemented)."""
        pass
    def xcp_time_correlation_properties(self):
        """Placeholder for the XCP TIME_CORRELATION_PROPERTIES command (not implemented)."""
        pass
    def xcp_asam_ae_mcd_1_xcp_as_sw_dbg_over_xcp(self):
        """Placeholder for the ASAM AE MCD-1 XCP SW-DBG-over-XCP command (not implemented)."""
        pass
    def xcp_asam_ae_mcd_1_pod_bs(self):
        """Placeholder for the ASAM AE MCD-1 POD BS command (not implemented)."""
        pass
class MessageAnalyzer:
    def __init__(self, logger):
        """
        Hold all decoded XCP command (CTO) and response state for one
        connection, plus the DAQ/event bookkeeping used to decode DTO packets.
        :param logger: logger instance used for all diagnostics
        """
        self.logger = logger
        # --- Start initialization of CTO parameters --- #
        # Store command code and sub command
        self.command_code = 0
        self.sub_command = 0
        # Parameters of connect command
        self.current_mode = 0
        # Parameters of SET_DAQ_LIST_MODE command
        self.alternating = 0
        self.direction = 0
        self.dto_ctr = 0
        # NOTE(review): self.timestamp is assigned again at the end of the
        # response section below (GET_DAQ_CLOCK); the later assignment wins.
        self.timestamp = 0
        self.pid_off = 0
        # Parameters of DAQ & event
        self.event_structure = {}
        self.event_selected = 0
        self.event_daq_selected = 0
        self.daq_structure = {}
        self.daq_selected = 0
        self.odt_selected = 0
        self.odt_entry_selected = 0
        self.daq_variables = {}
        self.daq_data_tmp = {}
        self.var_to_rec_pre = {}
        self.var_to_rec = {}
        # --- End initialization of CTO parameters --- #
        # --- Start initialization of response parameters --- #
        # Parameters of connect command
        self.connect_mode = 0  # Parameter of CTO
        self.cal_pag_available = 0
        self.daq_available = 0
        self.stim_available = 0
        self.pgm_available = 0
        self.dbg_available = 0
        self.byte_order = 0  # 0 = Intel (little-endian) format
        self.address_granularity = 0  # byte in minimum
        self.slave_block_mode = 0
        self.conn_optional = 0
        self.max_cto = 0
        self.max_dto = 0
        self.xcp_protocol_version = 0
        self.xcp_trans_layer_version = 0
        # Parameters of get communication mode info command
        self.master_block_mode = 0
        self.interleaved_mode = 0
        self.max_bs = 0
        self.min_st = 0
        self.queue_size = 0
        self.xcp_driver_ver = 0.0
        # Parameters of get status command
        self.store_cal_req = 0
        self.cal_pag_cfg_lost = 0
        self.store_daq_req = 0
        self.clear_daq_req = 0
        self.daq_cfg_lost = 0
        self.daq_running = 0
        self.resume = 0
        self.cal_pag_protected = 0
        self.daq_protected = 0
        self.stim_protected = 0
        self.pgm_protected = 0
        self.dbg_protected = 0
        self.state_number = 0
        self.session_configuration_id = 0
        # Parameters of get DAQ processor info
        self.daq_config_type = 0
        self.prescaler_supported = 0
        self.resume_supported = 0
        self.bit_stim_supported = 0
        self.timestamp_supported = 0
        self.pid_off_supported = 0
        self.overload_indication_type = 0
        self.max_daq = 0
        self.max_event_channel = 0
        self.min_daq = 0
        self.optimisation_type = 0
        self.address_extension = 0
        self.identification_field_type = 0
        # DAQ Processing Resolution parameters
        self.granularity_odt_entry_size_daq = 0
        self.max_odt_entry_size_daq = 0
        self.granularity_odt_entry_size_stim = 0
        self.max_odt_entry_size_stim = 0
        self.timestamp_size = 0
        self.timestamp_fixed = 0
        self.timestamp_unit = 0
        self.timestamp_ticks = 0
        # Get DAQ clock parameters
        self.trigger_initiator = 0
        self.time_of_ts_sampling = 0
        self.fmt_xcp_slv = 0
        self.fmt_grandm = 0
        self.fmt_ecu = 0
        self.cluster_identifier = 0
        self.timestamp = 0
        # --- End initialization of response parameters --- #
    def generate_mf4(self):
        """
        Dump every recorded variable (name, values, timestamps, unit,
        conversion and comment) into the file 'demo.mf4', overwriting any
        previous file.

        NOTE(review): relies on ``MDF`` and ``Signal`` (presumably asammdf)
        being imported elsewhere in this module — confirm before reuse.
        :return: None
        """
        mdf = MDF()
        sigs = []
        if self.var_to_rec:
            for each_key in self.var_to_rec.keys():
                sigs.append(Signal(self.var_to_rec[each_key]['value'],
                                   self.var_to_rec[each_key]['time'],
                                   name=each_key,
                                   unit=self.var_to_rec[each_key]['unit'],
                                   conversion=self.var_to_rec[each_key]['conversion'],
                                   comment=self.var_to_rec[each_key]['comment']))
        mdf.append(sigs, 'arrays', common_timebase=True)
        mdf.save('demo.mf4', overwrite=True)
def update_connect_params(self, params):
"""
Update parameters in response of connect command
:param params: The original response of connect command
:return: None
"""
self.cal_pag_available = params[1] & 1
self.daq_available = (params[1] & 4) >> 2
self.stim_available = (params[1] & 8) >> 3
self.pgm_available = (params[1] & 16) >> 4
self.dbg_available = (params[1] & 32) >> 5
self.byte_order = params[2] & 1
self.address_granularity = (params[2] & 6) >> 1
self.slave_block_mode = (params[2] & 64) >> 6
self.conn_optional = (params[2] & 128) >> 7
self.max_cto = params[3]
if self.byte_order:
self.max_dto = (params[4] << 8) + params[5]
else:
self.max_dto = (params[5] << 8) + params[4]
self.xcp_protocol_version = params[6]
self.xcp_trans_layer_version = params[7]
def update_status(self, params):
"""
Update parameters in response of get status command
:param params: The original response of get status command
:return: None
"""
self.store_cal_req = params[1] & 1
self.cal_pag_cfg_lost = (params[1] & 2) >> 1
self.store_daq_req = (params[1] & 4) >> 2
self.clear_daq_req = (params[1] & 8) >> 3
self.daq_cfg_lost = (params[1] & 16) >> 4
self.daq_running = (params[1] & 64) >> 6
self.resume = (params[1] & 128) >> 7
self.cal_pag_protected = params[2] & 1
self.daq_protected = (params[2] & 4) >> 2
self.stim_protected = (params[2] & 8) >> 3
self.pgm_protected = (params[2] & 16) >> 4
self.dbg_protected = (params[2] & 32) >> 5
self.state_number = params[3]
if self.byte_order:
self.session_configuration_id = (params[4] << 8) + params[5]
else:
self.session_configuration_id = (params[5] << 8) + params[4]
def update_comm_mode_info(self, params):
"""
Update parameters in response of get communication mode info command
:param params: The original response of get status command
:return: None
"""
self.master_block_mode = params[2] & 1
self.interleaved_mode = (params[2] & 2) >> 1
self.max_bs = params[4]
self.min_st = params[5]
self.queue_size = params[6]
self.xcp_driver_ver = float(params[7]) / 10
    def update_pid(self, params):
        """
        Store the PID reported for the currently selected event/DAQ pair.
        :param params: original response message (params[1] is the PID byte)
        :return: None
        """
        self.event_structure[str(self.event_selected)][str(self.event_daq_selected)]['pid'] = params[1]
def update_daq_clock(self, params):
"""
Update parameters in response of get DAQ clock command
:param params: The original response of get DAQ clock command
:return: None
"""
self.trigger_initiator = params[2] & 7
self.time_of_ts_sampling = (params[2] & 24) >> 3
self.fmt_xcp_slv = params[3] & 3
self.fmt_grandm = (params[3] & 12) >> 2
self.fmt_ecu = (params[3] & 48) >> 4
self.cluster_identifier = (params[3] & 64) >> 6
if self.byte_order:
self.timestamp = (params[4] >> 24) + (params[5] >> 16) + (params[6] >> 8) + params[7]
else:
self.timestamp = (params[7] >> 24) + (params[6] >> 16) + (params[5] >> 8) + params[4]
def update_daq_processor_info(self, params):
"""
Update parameters in response of get DAQ processor info command
:param params: The original response of get DAQ processor info command
:return: None
"""
self.daq_config_type = params[1] & 1
self.prescaler_supported = (params[1] & 2) >> 1
self.resume_supported = (params[1] & 4) >> 2
self.bit_stim_supported = (params[1] & 8) >> 3
self.timestamp_supported = (params[1] & 16) >> 4
self.pid_off_supported = (params[1] & 32) >> 5
self.overload_indication_type = (params[1] & 192) >> 6
if self.byte_order:
self.max_daq = (params[2] << 8) + params[3]
self.max_event_channel = (params[4] << 8) + params[5]
else:
self.max_daq = (params[3] << 8) + params[2]
self.max_event_channel = (params[5] << 8) + params[4]
self.min_daq = params[6]
self.optimisation_type = params[7] & 15
self.address_extension = (params[7] & 48) >> 4
self.identification_field_type = (params[7] & 192) >> 6
def update_daq_resolution_info(self, params):
"""
Update parameters in response of get DAQ resolution info command
:param params: The original response of get DAQ resolution info command
:return: None
"""
self.granularity_odt_entry_size_daq = params[1]
self.max_odt_entry_size_daq = params[2]
self.granularity_odt_entry_size_stim = params[3]
self.max_odt_entry_size_stim = params[4]
self.timestamp_size = params[5] & 7
self.timestamp_fixed = (params[5] & 8) >> 3
self.timestamp_unit = (params[5] & 240) >> 4
if self.byte_order:
self.timestamp_ticks = (params[6] << 8) + params[7]
else:
self.timestamp_ticks = (params[7] << 8) + params[6]
def split_daq(self, data):
"""
Split ODT packets in 1 TCP packet according to length of each ODT packet.
:param data: raw data in TCP packet
:return: None
"""
i = 0
print(data)
while i < len(data):
ptr = data[i]
print('pointer: ' + str(ptr))
if str(data[i+1]) in self.daq_structure.keys():
self.analyze_daq(data[i+4: i+4+ptr])
i += 4 + ptr
    def analyze_daq(self, data):
        """
        Dispatch one DTO packet according to the identification field type
        negotiated via GET_DAQ_PROCESSOR_INFO. Only type 1 (relative ODT
        number + absolute one-byte DAQ list number) is implemented.
        :param data: original bytes of the packet
        :return: None
        """
        if self.identification_field_type == 0:  # Absolute ODT number
            pass  # TBD
        elif self.identification_field_type == 1:  # Relative ODT number, absolute DAQ list number (byte)
            # Ignore first PID
            self.check_daq_length(data)
        elif self.identification_field_type == 2:  # Relative ODT number, absolute DAQ list number (word)
            pass  # TBD
        else:  # Relative ODT number, absolute DAQ list number (word, aligned)
            pass  # TBD
def check_daq_length(self, data):
"""
Check if all ODT packets in 1 DAQ have been received.
If yes, start analyzing the DAQ data.
:param data: Original data of the ODT packet
:return: None
"""
print(data)
if not data[0]:
self.daq_data_tmp[str(data[1])] = b'' # Clear data at last time
self.daq_data_tmp[str(data[1])] += data[6: len(data)]
else:
self.daq_data_tmp[str(data[1])] += data[2: len(data)]
if len(self.daq_data_tmp[str(data[1])]) == self.daq_structure[str(data[1])]['length']:
self.go_through_daq_data(str(data[1]))
def go_through_daq_data(self, daq):
"""
Check structure of DAQ to decide what data we need to abstract as variable values.
:param daq: number of DAQ
:return: None
"""
for i in range(0, self.daq_structure[daq]['ODT']):
for j in range(0, self.daq_structure[daq][str(i)]['ODT_Entry']):
if not self.daq_structure[daq][str(i)][str(j)]['var'].keys():
for each_key in self.daq_structure[daq][str(i)][str(j)]['var'].keys():
var_pos = self.daq_structure[daq][str(i)]['start_pos'] + \
self.daq_structure[daq][str(i)][str(j)]['start_pos'] + \
self.daq_structure[daq][str(i)][str(j)]['var'][each_key]['offset']
if 'part' in self.daq_structure[daq][str(i)][str(j)]['var'][each_key].keys():
self.update_variable_value(daq, each_key, var_pos,
self.daq_structure[daq][str(i)][str(j)]['var'][each_key]['raw_len'],
self.daq_structure[daq][str(i)][str(j)]['var'][each_key]['part'],
self.daq_structure[daq][str(i)][str(j)]['var'][each_key]['length'])
else:
self.update_variable_value(daq, each_key, var_pos,
self.daq_structure[daq][str(i)][str(j)]['var'][each_key]['raw_len'])
self.update_daq_vars(daq)
def update_variable_value(self, daq, variable_name, offset, var_len, part=0, length=0):
"""
Update value of variable to be measured in the DAQ
:param daq: DAQ number
:param variable_name: name of variable
:param offset: offset in DAQ packet
:param var_len: length(bytes) of variable
:param part: to show if value of the variable is transmitted separately
:param length: length of variable part if it is transmitted separately
:return: None
"""
if not part:
self.var_to_rec_pre[daq][variable_name]['time'] = time.time()
self.var_to_rec_pre[daq][variable_name]['value'] = \
self.get_value(self.daq_data_tmp[daq][offset: offset + var_len], var_len)
else:
if part == 1:
self.var_to_rec_pre[daq][variable_name] = {'raw_len': var_len, 'time': time.time(), 'value': {}}
self.var_to_rec_pre[daq][variable_name]['value'][str(part)] = \
self.daq_data_tmp[daq][offset: offset + length]
    def update_daq_vars(self, daq):
        """
        Move values from the pre-delivery dict into the recording dict,
        reassembling variables that were transmitted in two fragments.
        :param daq: DAQ list number as a string key
        :return: None
        """
        if daq in self.var_to_rec_pre.keys():
            for each_key in self.var_to_rec_pre[daq]:
                # A dict 'value' means the variable arrived as two fragments
                # ('1' and '2'); each fragment may be bytes or an int,
                # hence the four isinstance combinations below.
                # NOTE(review): these four branches overwrite
                # self.var_to_rec[each_key] with a scalar, discarding the
                # {'time': ..., 'value': ...} record used by the non-split
                # path below — confirm this is intended.
                if isinstance(self.var_to_rec_pre[daq][each_key]['value'], dict):
                    if isinstance(self.var_to_rec_pre[daq][each_key]['value']['1'], bytes) and \
                            isinstance(self.var_to_rec_pre[daq][each_key]['value']['2'], bytes):
                        self.var_to_rec[each_key] = self.get_value(self.var_to_rec_pre[daq][each_key]['value']['1'] +
                                                                   self.var_to_rec_pre[daq][each_key]['value']['2'],
                                                                   self.var_to_rec_pre[daq][each_key]['raw_len'])
                    elif isinstance(self.var_to_rec_pre[daq][each_key]['value']['1'], bytes):
                        self.var_to_rec[each_key] = self.get_value(self.var_to_rec_pre[daq][each_key]['value']['1'] +
                                                                   self.var_to_rec_pre[daq][each_key]['value']['2'].
                                                                   to_bytes(1, 'little'),
                                                                   self.var_to_rec_pre[daq][each_key]['raw_len'])
                    elif isinstance(self.var_to_rec_pre[daq][each_key]['value']['2'], bytes):
                        self.var_to_rec[each_key] = self.get_value(self.var_to_rec_pre[daq][each_key]['value']['1'].
                                                                   to_bytes(1, 'little') +
                                                                   self.var_to_rec_pre[daq][each_key]['value']['2'],
                                                                   self.var_to_rec_pre[daq][each_key]['raw_len'])
                    else:
                        self.var_to_rec[each_key] = self.get_value(self.var_to_rec_pre[daq][each_key]['value']['1'].
                                                                   to_bytes(1, 'little') +
                                                                   self.var_to_rec_pre[daq][each_key]['value']['2'].
                                                                   to_bytes(1, 'little'),
                                                                   self.var_to_rec_pre[daq][each_key]['raw_len'])
                else:
                    # Normal case: append the scalar value and its timestamp.
                    self.var_to_rec[each_key]['value'].append(self.var_to_rec_pre[daq][each_key]['value'])
                    self.var_to_rec[each_key]['time'].append(self.var_to_rec_pre[daq][each_key]['time'])
        # For testing
        # NOTE(review): the inner loop variable tmp_key_1 is unused — the body
        # clears the same tmp_key on every iteration; confirm the intent.
        for tmp_key in self.var_to_rec.keys():
            if len(self.var_to_rec[tmp_key]['value']) == 1000:
                self.generate_mf4()
                for tmp_key_1 in self.var_to_rec.keys():
                    self.var_to_rec[tmp_key]['value'].clear()
                    self.var_to_rec[tmp_key]['time'].clear()
                break
def get_value(self, raw_value, var_len, byte_order=0):
"""
Translate value of variable from bytes to its target type
:param raw_value: Bytes type of value
:param var_len: length(bytes) of value
:param byte_order: Reserved to deal with variable whose format is Motorola
:return: Value in target variable type
"""
if byte_order:
pass # TBD for Motorola byte order
if var_len == 1:
true_value = numpy.ubyte(raw_value)
elif var_len == 2:
true_value = numpy.frombuffer(raw_value, numpy.uint16)[0]
elif var_len == 4:
true_value = numpy.frombuffer(raw_value, numpy.uint32)[0]
elif var_len == 8:
true_value = numpy.frombuffer(raw_value, numpy.uint64)[0]
else:
true_value = False
self.logger.error('Invalid length: ' + str(var_len))
return true_value
def update_daq_dict(self, daq_count):
"""
Update DAQ structure in use according to CTO received.
:param daq_count: Number of DAQ
:return: None
"""
for i in range(0, daq_count):
self.daq_structure[str(i)] = {'length': 0, 'ODT': 0}
def update_odt_dict(self, daq_number, odt_count):
"""
Update ODT structure in DAQ list according to CTO received
:param daq_number: DAQ number ODT is located in
:param odt_count: Number of ODT
:return: None
"""
self.daq_structure[str(daq_number)]['ODT'] = odt_count
for i in range(0, odt_count):
self.daq_structure[str(daq_number)][str(i)] = {'length': 0, 'ODT_Entry': 0}
def update_odt_entry_dict(self, daq_number, odt_number, odt_entry_count):
"""
Update ODT Entry structure in DAQ list according to CTO received
:param daq_number: DAQ number ODT is located in
:param odt_number: ODT number ODT Entry is located in
:param odt_entry_count: Number of ODT Entry
:return: None
"""
self.daq_structure[str(daq_number)][str(odt_number)]['ODT_Entry'] = odt_entry_count
for i in range(0, odt_entry_count):
self.daq_structure[str(daq_number)][str(odt_number)][str(i)] = {'var': {}}
def set_odt_entry(self, daq_number, odt_number, odt_entry_number):
"""
Save information of selected ODT Entry this time
:param daq_number: DAQ number
:param odt_number: ODT number
:param odt_entry_number: ODT Entry number
:return: None
"""
self.daq_selected = daq_number
self.odt_selected = odt_number
self.odt_entry_selected = odt_entry_number
def split_multi_daq(self, command, user_cmd=False):
"""
Split multiple DAQs and proceed
:param command: original command message
:param user_cmd: if command is USER_CMD
:return: None
"""
if user_cmd:
daq_number = command[0]
else:
daq_number = command[1]
for i in range(0, daq_number):
self.check_write_daq(command[2 + i * 8: 10 + i * 8], multi=True, user_cmd=user_cmd)
def check_write_daq(self, daq, multi=False, user_cmd=False):
"""
Check if there is any variable we need to monitor in variables of this ODT Entry
:param daq: Original data of write command
:param multi: if command is WRITE_DAQ_MULTIPLE
:param user_cmd: if command is USER_CMD
:return: None
"""
if multi: # Analyze format of WRITE_DAQ_MULTIPLE command
if user_cmd:
# Format: byte0~3: address, byte4: bit_offset, byte5: size, byte6: extension(not confirmed)
# byte7: padding (not confirmed)
address = struct.unpack('L', daq[0:4])[0]
bit_offset = daq[4]
size = daq[5]
extension = daq[6]
else:
# Format: byte0: bit_offset, byte1: size, byte2~5: address, byte6: extension, byte7: padding
bit_offset = daq[0]
size = daq[1]
address = struct.unpack('I', daq[2:6])[0]
extension = daq[6]
else: # Format: byte0: bit_offset, byte1: size, byte2: extension, byte3~6: address, byte7: padding
bit_offset = daq[0]
size = daq[1]
extension = daq[2]
address = struct.unpack('L', daq[3:7])[0]
if self.odt_entry_selected:
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]\
['start_pos'] = self.daq_structure[str(self.daq_selected)][str(self.odt_selected)]\
[str(self.odt_entry_selected - 1)]['start_pos'] + \
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)]\
[str(self.odt_entry_selected - 1)]['size']
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]\
['size'] = size
else:
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)]['start_pos'] = \
self.daq_structure[str(self.daq_selected)]['length']
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]\
['start_pos'] = 0
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]\
['size'] = size
self.daq_structure[str(self.daq_selected)]['length'] += size
self.daq_structure[str(self.daq_selected)][str(self.odt_selected)]['length'] += size
if extension: # Check if address extension is used
address = (address << 8) + extension
for each_item in self.daq_variables.keys():
# Check if any variable (part or full-size) to be recorded is in this ODT Entry
start_address = int(self.daq_variables[each_item]['address'], 16)
end_address = start_address + self.daq_variables[each_item]['raw_len']
if end_address >= address and start_address <= (address + size):
if start_address >= address and end_address <= (address + size): # Full size
self.update_daq_checklist(each_item, start_address - address, bit_offset)
elif address > start_address: # 2nd part
self.update_daq_checklist(each_item, address, bit_offset, part=2, length=end_address - address + 1)
else: # 1st part
self.update_daq_checklist(each_item, start_address - address, bit_offset, part=1,
length=address + size - start_address + 1)
self.odt_entry_selected += 1
    def update_daq_checklist(self, key, offset, bit_offset, part=0, length=0):
        """
        Move (or, for the first fragment of a split variable, copy) the
        descriptor of a monitored variable from the pending variable list
        into the currently selected ODT entry, recording where to read it.
        :param key: Name of variable
        :param offset: offset compared to starting address of ODT Entry
        :param bit_offset: Bit mask of variable
        :param part: Optional. Only for status that part of variable is in the ODT Entry. Default is 0 (full-size)
        :param length: Optional. Only for status that part of variable is in the ODT Entry. Default is 0 (full-size)
        :return: None
        """
        if not part:  # Full size (normal case). Add offset and bit-offset into base data
            # Bounded deques keep only the most recent 1000 samples.
            self.var_to_rec[key] = {'time': deque(maxlen=1000), 'value': deque(maxlen=1000),
                                    'unit': self.daq_variables[key]['unit'],
                                    'conversion': self.daq_variables[key]['conversion'],
                                    'comment': self.daq_variables[key]['comment']}
            # pop(): the variable is fully placed, so remove it from the pending list.
            self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var'][key]\
                = self.daq_variables.pop(key)
            self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var'][key]\
                ['offset'] = offset
            self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var'][key]\
                ['bit_offset'] = bit_offset
        else:
            if 'part' not in self.daq_variables[key].keys():
                # First fragment seen: keep the descriptor in daq_variables (deepcopy)
                # because the remaining fragment still has to be placed.
                self.daq_variables[key]['part'] = part
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key] = copy.deepcopy(self.daq_variables[key])
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['length'] = length
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['offset'] = offset
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['bit_offset'] = bit_offset
            else:
                # Second fragment: the variable is now fully placed, remove it
                # from the pending list.
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key] = self.daq_variables.pop(key)
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['part'] = part
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['length'] = length
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['offset'] = offset
                self.daq_structure[str(self.daq_selected)][str(self.odt_selected)][str(self.odt_entry_selected)]['var']\
                    [key]['bit_offset'] = bit_offset
def get_len(self, var_type):
"""
Return variable length according to variable type
:param var_type: type of variable
:return: length of variable
"""
length = 0
if var_type in ['UBYTE', 'SBYTE']:
length = 1
elif var_type in ['UWORD', 'SWORD', 'FLOAT16_IEEE']:
length = 2
elif var_type in ['ULONG', 'SLONG', 'FLOAT32_IEEE']:
length = 4
elif var_type in ['A_UINT64', 'A_INT64', 'FLOAT64_IEEE']:
length = 8
else:
self.logger.error('Unknown type: ' + var_type)
return length
def update_event_list(self, raw_data):
"""
Assign DAQ into target event channel
:param raw_data: data in command
:return: None
"""
self.alternating = raw_data[0] & 1
self.direction = (raw_data[0] & 2) >> 1
self.dto_ctr = (raw_data[0] & 8) >> 3
self.timestamp = (raw_data[0] & 16) >> 4
self.pid_off = (raw_data[0] & 32) >> 5
self.event_selected = struct.unpack('H', raw_data[3:5])[0]
self.event_daq_selected = struct.unpack('H', raw_data[1:3])[0]
self.daq_structure[str(self.event_daq_selected)]['prescaler'] = raw_data[5]
self.daq_structure[str(self.event_daq_selected)]['priority'] = raw_data[6]
if str(self.event_selected) not in self.event_structure:
self.event_structure[str(self.event_selected)] = dict()
self.event_structure[str(self.event_selected)][str(self.event_daq_selected)] = \
self.daq_structure[str(self.event_daq_selected)]
def check_error_code(self, code):
"""
Check which error code is received and whether it is necessary to resend command or wait for another response
:param code: Error code
:return: True if we need to resend command
"""
if code == 0x10:
self.logger.info('Error code 0x10: Command was not executed.')
result = True
elif code == 0x11:
self.logger.info('Error code 0x11: Command rejected because DAQ is running.')
result = False
elif code == 0x12:
self.logger.info('Error code 0x12: Command rejected because PGM is running.')
result = False
elif code == 0x20:
self.logger.info('Error code 0x20: Unknown command or not implemented optional command.')
result = False
elif code == 0x21:
self.logger.info('Error code 0x21: Command syntax invalid.')
result = False
elif code == 0x22:
self.logger.info('Error code 0x22: Command syntax valid but command parameter(s) out of range.')
result = False
elif code == 0x23:
self.logger.info('Error code 0x23: Te memory location is write protected.')
result = False
elif code == 0x24:
self.logger.info('Error code 0x24: The memory location is not accessible.')
result = False
elif code == 0x25:
self.logger.info('Error code 0x25: Access denied, Seed & Key is required.')
result = False
elif code == 0x26:
self.logger.info('Error code 0x26: Selected page not available.')
result = False
elif code == 0x27:
self.logger.info('Error code 0x27: Selected mode not available.')
result = False
elif code == 0x28:
self.logger.info('Error code 0x28: Selected segment not valid.')
result = False
elif code == 0x29:
self.logger.info('Error code 0x29: Sequence error.')
result = False
elif code == 0x2A:
self.logger.info('Error code 0x2A: DAQ configuration not valid.')
result = False
elif code == 0x30:
self.logger.info('Error code 0x30: Memory overflow error.')
result = False
elif code == 0x31:
self.logger.info('Error code 0x31: Generic error.')
result = False
elif code == 0x32:
self.logger.info('Error code 0x32: The slave internal program verify routine detects an error.')
result = False
elif code == 0x33:
self.logger.info('Error code 0x33: Access to requested resource is temporary not possible.')
result = True
elif code == 0x34:
self.logger.info('Error code 0x34: Unknown sub command or not implemented optional sub command.')
result = False
elif code == 0x35:
self.logger.info('Error code 0x35: There was a change of the synchronization status inbetween the last'
'upload of clock information and start of measurement.')
result = False
elif code == 0xFC:
self.logger.info('Error code 0xFC: ASAM MCD-1-XCP AS SW-DBG-over-XCP related errors.')
result = False
else:
self.logger.error('Unknown error code.')
result = False
return result
    def check_cto(self, comm):
        """
        Once box has received a CTO, check if it needs to abstract any information.

        Byte 0 of the message holds the payload length, byte 4 the XCP command
        code and byte 5 (when present) the sub command; both codes are stored on
        the instance. DAQ-related commands additionally update the internal
        DAQ/ODT bookkeeping structures as a side effect.

        :param comm: Raw command message; indexable bytes, laid out as described above
        :return: [command code, sub command], used to analyze the response message
        """
        command_length = comm[0]
        self.command_code = comm[4]
        self.sub_command = 0
        # Only messages long enough to carry one provide a sub command
        if len(comm) > 5:
            self.sub_command = comm[5]
        if self.command_code == XCP_CONNECT[0]:
            # Remember the mode requested at connect time
            self.current_mode = comm[1]
        elif self.command_code == XCP_DISCONNECT[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_STATUS[0]:
            pass  # Do nothing
        elif self.command_code == XCP_SYNCH[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_COMM_MODE_INFO[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_ID[0]:
            pass  # TBD
        elif self.command_code == XCP_SET_REQUEST[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_SEED[0]:
            pass  # TBD
        elif self.command_code == XCP_UNLOCK[0]:
            pass  # TBD
        elif self.command_code == XCP_SET_MTA[0]:
            pass  # TBD
        elif self.command_code == XCP_UPLOAD[0]:
            pass  # TBD
        elif self.command_code == XCP_SHORT_UPLOAD[0]:
            pass  # TBD
        elif self.command_code == XCP_BUILD_CHECKSUM[0]:
            pass  # TBD
        elif self.command_code == XCP_TRANSPORT_LAYER_CMD[0]:
            pass  # TBD
        elif self.command_code == XCP_USER_CMD[0]:
            if self.sub_command == 0x81:  # Write multi DAQ with user cmd format
                # Payload starts after the 2-byte sub header; length excludes the 4-byte frame header
                self.split_multi_daq(comm[6: command_length + 4])
        elif self.command_code == XCP_GET_VERSION[0] and self.sub_command == XCP_GET_VERSION[1]:
            pass  # Do nothing
        elif self.command_code == XCP_DOWNLOAD[0]:
            pass  # TBD
        elif self.command_code == XCP_DOWNLOAD_NEXT[0]:
            pass  # TBD
        elif self.command_code == XCP_DOWNLOAD_MAX[0]:
            pass  # TBD
        elif self.command_code == XCP_SHORT_DOWNLOAD[0]:
            pass  # TBD
        elif self.command_code == XCP_MODIFY_BITS[0]:
            pass  # TBD
        elif self.command_code == XCP_SET_CAL_PAGE[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_CAL_PAGE[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_PAG_PROCESSOR_INFO[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_SEGMENT_INFO[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_PAGE_INFO[0]:
            pass  # TBD
        elif self.command_code == XCP_SET_SEGMENT_MODE[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_SEGMENT_MODE[0]:
            pass  # TBD
        elif self.command_code == XCP_COPY_CAL_PAGE[0]:
            pass  # TBD
        elif self.command_code == XCP_SET_DAQ_PTR[0]:
            # DAQ list number (little-endian u16), ODT number, ODT entry number
            self.set_odt_entry(struct.unpack('H', comm[6:8])[0], comm[8], comm[9])
        elif self.command_code == XCP_WRITE_DAQ[0]:
            self.check_write_daq(comm[4: 12])
        elif self.command_code == XCP_SET_DAQ_LIST_MODE[0]:
            self.update_event_list(comm[5: 12])
        elif self.command_code == XCP_START_STOP_DAQ_LIST[0]:
            self.logger.info('DAQ selected: parameter: ' + str(comm[5]) + ', DAQ: ' +
                             str(self.daq_structure[str(struct.unpack('H', comm[6:8])[0])]))
        elif self.command_code == XCP_START_STOP_SYNCH[0]:
            pass  # TBD
        elif self.command_code == XCP_WRITE_DAQ_MULTIPLE[0]:
            self.split_multi_daq(comm[4: command_length + 4])
        elif self.command_code == XCP_SET_DAQ_PACKED_MODE[0] and self.sub_command == XCP_SET_DAQ_PACKED_MODE[1]:
            pass  # TBD
        elif self.command_code == XCP_GET_DAQ_PACKED_MODE[0] and self.sub_command == XCP_GET_DAQ_PACKED_MODE[1]:
            pass  # TBD
        elif self.command_code == XCP_READ_DAQ[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_DAQ_CLOCK[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_DAQ_PROCESSOR_INFO[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_DAQ_RESOLUTION_INFO[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_DAQ_LIST_MODE[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_DAQ_EVENT_INFO[0]:
            pass  # TBD
        elif self.command_code == XCP_DTO_CTR_PROPERTIES[0]:
            pass  # TBD
        elif self.command_code == XCP_CLEAR_DAQ_LIST[0]:
            pass  # TBD
        elif self.command_code == XCP_GET_DAQ_LIST_INFO[0]:
            pass  # TBD
        elif self.command_code == XCP_FREE_DAQ[0]:
            pass  # TBD
        elif self.command_code == XCP_ALLOC_DAQ[0]:
            # Number of DAQ lists to allocate (little-endian u16)
            self.update_daq_dict(struct.unpack('H', comm[6:8])[0])
        elif self.command_code == XCP_ALLOC_ODT[0]:
            # DAQ list number and ODT count
            self.update_odt_dict(struct.unpack('H', comm[6:8])[0], comm[8])
        elif self.command_code == XCP_ALLOC_ODT_ENTRY[0]:
            # DAQ list number, ODT number and ODT entry count
            self.update_odt_entry_dict(struct.unpack('H', comm[6:8])[0], comm[8], comm[9])
        elif self.command_code == XCP_PROGRAM_START[0]:
            pass  # Do nothing
        elif self.command_code == XCP_PROGRAM_CLEAR[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_RESET[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_PGM_PROCESSOR_INFO[0]:
            pass  # Do nothing
        elif self.command_code == XCP_GET_SECTOR_INFO[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_PREPARE[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_FORMAT[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_NEXT[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_MAX[0]:
            pass  # TBD
        elif self.command_code == XCP_PROGRAM_VERIFY[0]:
            pass  # TBD
        elif self.command_code == XCP_TIME_CORRELATION_PROPERTIES[0]:
            pass  # TBD
        elif self.command_code == XCP_ASAM_AE_MCD_1_XCP_AS_SW_DBG_OVER_XCP[0] and \
                self.sub_command == XCP_ASAM_AE_MCD_1_XCP_AS_SW_DBG_OVER_XCP[1]:
            pass  # TBD
        elif self.command_code == XCP_ASAM_AE_MCD_1_POD_BS[0] and self.sub_command == XCP_ASAM_AE_MCD_1_POD_BS[1]:
            pass  # TBD
        else:
            self.logger.error('No such command: ' + hex(self.command_code))
        return [self.command_code, self.sub_command]
def check_resp(self, comm, resp, sub_comm=0x00):
"""
Check type of response message and take action
:param comm: command code
:param resp: response message
:param sub_comm: sub command
:return: None
"""
if resp[4] <= 0xFB: # DAQ packet
self.split_daq(resp)
elif resp[4] == 0xFC: # Service Request Code
if resp[5]: # Slave sends notification to master
self.logger.info('XCP slave sends information: ' + resp[6: len(resp) - 2].decode())
else: # Slave requests to be reset
self.logger.info('XCP slave requests to be reset')
elif resp[4] == 0xFD: # Event Code
pass # Event Code. Ignore this message as temporary strategy
elif resp[4] == 0xFE: # Error Code
if not resp[5]:
self.logger.info('Error code 0x00: Command processor synchronization.')
else:
self.check_error_code(resp[5])
else: # Positive response
self.analyze_pos_resp(comm, resp[4: len(resp)], sub_comm)
def analyze_pos_resp(self, comm, resp, sub_comm=0x00):
"""
Analyze positive response message from VX box according to different command codes.
:param comm: command code
:param resp: response message
:param sub_comm: sub command (optional)
:return: None
"""
# Standard commands
print(resp)
if comm == 0xFF: # connect
self.update_connect_params(resp)
elif comm == 0xFE: # disconnect
pass # Do nothing
elif comm == 0xFD: # get status
self.update_status(resp)
elif comm == 0xFC: # synchronize
pass
elif comm == 0xFB: # get comm mode info
self.update_comm_mode_info(resp)
elif comm == 0xFA: # get ID
pass
elif comm == 0xF9: # set request
pass
elif comm == 0xF8: # get seed
pass
elif comm == 0xF7: # unlock
pass
elif comm == 0xF6: # set MTA
pass
elif comm == 0xF5: # upload
pass
elif comm == 0xF4: # short upload
pass
elif comm == 0xF3: # build checksum
pass
elif comm == 0xF2: # transport layer cmd
pass
elif comm == 0xF1: # user cmd
pass
elif comm == 0xC0 and sub_comm == 0x00: # get version
pass
# Calibration commands
elif comm == 0xF0: # download
pass
elif comm == 0xEF: # download next
pass
elif comm == 0xEE: # download max
pass
elif comm == 0xED: # short download
pass
elif comm == 0xEC: # modify bits
pass
# Page switching commands
elif comm == 0xEB: # set cal page
pass
elif comm == 0xEA: # get cal page
pass
elif comm == 0xE9: # get page processor info
pass
elif comm == 0xE8: # get segment info
pass
elif comm == 0xE7: # get page info
pass
elif comm == 0xE6: # set segment mode
pass
elif comm == 0xE5: # get segment mode
pass
elif comm == 0xE4: # copy cal page
pass
# Basic data acquisition and stimulation commands
elif comm == 0xE3: # clear DAQ list
pass
elif comm == 0xE2: # set DAQ ptr
pass # Do nothing
elif comm == 0xE1: # write DAQ
pass
elif comm == 0xE0: # set DAQ list mode
pass # Do nothing
elif comm == 0xDF: # get DAQ list mode
pass
elif comm == 0xDE: # start/stop DAQ list
self.update_pid(resp)
elif comm == 0xDD: # start/stop sync
pass # Do nothing
elif comm == 0xDC: # get DAQ clock
self.update_daq_clock(resp)
elif comm == 0xDB: # read DAQ
pass
elif comm == 0xDA: # get DAQ processor info
self.update_daq_processor_info(resp)
elif comm == 0xD9: # get DAQ resolution info
self.update_daq_resolution_info(resp)
elif comm == 0xD8: # get DAQ list info
pass
elif comm == 0xD7: # get DAQ event info
pass
elif comm == 0xC7: # write DAQ multiple
pass
elif comm == 0xC5: # DTO counter properties
pass
elif comm == 0xC0 and sub_comm == 0x01: # set DAQ packed mode
pass
elif comm == 0xC0 and sub_comm == 0x02: # get DAQ packed mode
pass
# Dynamic data acquisition and stimulation commands
elif comm == 0xD6: # free DAQ
pass # Do nothing
elif comm == 0xD5: # allocate DAQ
pass # Do nothing
elif comm == 0xD4: # allocate ODT
pass # Do nothing
elif comm == 0xD3: # allocate ODT entry
pass # Do nothing
# Non-volatile memory programming commands
elif comm == 0xD2: # program start
pass
elif comm == 0xD1: # program clear
pass
elif comm == 0xD0: # program
pass
elif comm == 0xCF: # program reset
pass
elif comm == 0xCE: # get PGM processor info
pass
elif comm == 0xCD: # get sector info
pass
elif comm == 0xCC: # program prepare
pass
elif comm == 0xCB: # program format
pass
elif comm == 0xCA: # program next
pass
elif comm == 0xC9: # program max
pass
elif comm == 0xC8: # program verify
pass
# Time synchronization commands
elif comm == 0xC6: # time correlation properties
pass
# command spaces for related ASAM standards
elif comm == 0xC0 and sub_comm == 0xFC: # ASAM AE MCD-1-XCP AS SW-DBG-over-XCP
pass
elif comm == 0xC0 and sub_comm == 0xFD: # ASAM AE MCD-1 POD BS
pass
else: # unknown command
self.logger.error('Unknown command code')
| [
"threading.Thread",
"os.remove",
"copy.deepcopy",
"asammdf.Signal",
"logging.FileHandler",
"numpy.ubyte",
"numpy.frombuffer",
"socket.socket",
"os.path.exists",
"struct.unpack",
"collections.deque",
"time.sleep",
"logging.Formatter",
"time.time",
"asammdf.MDF",
"logging.getLogger"
] | [((657, 684), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (674, 684), False, 'import logging\n'), ((1060, 1093), 'os.path.exists', 'os.path.exists', (['"""XCP_Service.log"""'], {}), "('XCP_Service.log')\n", (1074, 1093), False, 'import os\n'), ((1154, 1192), 'logging.FileHandler', 'logging.FileHandler', (['"""XCP_Service.log"""'], {}), "('XCP_Service.log')\n", (1173, 1192), False, 'import logging\n'), ((1252, 1325), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1269, 1325), False, 'import logging\n'), ((1541, 1566), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1555, 1566), False, 'import os\n'), ((2831, 2880), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2844, 2880), False, 'import socket\n'), ((4354, 4367), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4364, 4367), False, 'import time\n'), ((23826, 23831), 'asammdf.MDF', 'MDF', ([], {}), '()\n', (23829, 23831), False, 'from asammdf import MDF, Signal\n'), ((1107, 1135), 'os.remove', 'os.remove', (['"""XCP_Service.log"""'], {}), "('XCP_Service.log')\n", (1116, 1135), False, 'import os\n'), ((33882, 33893), 'time.time', 'time.time', ([], {}), '()\n', (33891, 33893), False, 'import time\n'), ((38052, 38074), 'numpy.ubyte', 'numpy.ubyte', (['raw_value'], {}), '(raw_value)\n', (38063, 38074), False, 'import numpy\n'), ((48748, 48781), 'struct.unpack', 'struct.unpack', (['"""H"""', 'raw_data[3:5]'], {}), "('H', raw_data[3:5])\n", (48761, 48781), False, 'import struct\n'), ((48819, 48852), 'struct.unpack', 'struct.unpack', (['"""H"""', 'raw_data[1:3]'], {}), "('H', raw_data[1:3])\n", (48832, 48852), False, 'import struct\n'), ((3649, 3685), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.listen'}), '(target=self.listen)\n', 
(3665, 3685), False, 'import threading\n'), ((3759, 3772), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3769, 3772), False, 'import time\n'), ((42020, 42048), 'struct.unpack', 'struct.unpack', (['"""L"""', 'daq[3:7]'], {}), "('L', daq[3:7])\n", (42033, 42048), False, 'import struct\n'), ((45201, 45219), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (45206, 45219), False, 'from collections import deque\n'), ((45230, 45248), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (45235, 45248), False, 'from collections import deque\n'), ((46270, 46308), 'copy.deepcopy', 'copy.deepcopy', (['self.daq_variables[key]'], {}), '(self.daq_variables[key])\n', (46283, 46308), False, 'import copy\n'), ((23958, 24203), 'asammdf.Signal', 'Signal', (["self.var_to_rec[each_key]['value']", "self.var_to_rec[each_key]['time']"], {'name': 'each_key', 'unit': "self.var_to_rec[each_key]['unit']", 'conversion': "self.var_to_rec[each_key]['conversion']", 'comment': "self.var_to_rec[each_key]['comment']"}), "(self.var_to_rec[each_key]['value'], self.var_to_rec[each_key]['time'\n ], name=each_key, unit=self.var_to_rec[each_key]['unit'], conversion=\n self.var_to_rec[each_key]['conversion'], comment=self.var_to_rec[\n each_key]['comment'])\n", (23964, 24203), False, 'from asammdf import MDF, Signal\n'), ((34176, 34187), 'time.time', 'time.time', ([], {}), '()\n', (34185, 34187), False, 'import time\n'), ((38127, 38168), 'numpy.frombuffer', 'numpy.frombuffer', (['raw_value', 'numpy.uint16'], {}), '(raw_value, numpy.uint16)\n', (38143, 38168), False, 'import numpy\n'), ((41382, 41410), 'struct.unpack', 'struct.unpack', (['"""L"""', 'daq[0:4]'], {}), "('L', daq[0:4])\n", (41395, 41410), False, 'import struct\n'), ((41734, 41762), 'struct.unpack', 'struct.unpack', (['"""I"""', 'daq[2:6]'], {}), "('I', daq[2:6])\n", (41747, 41762), False, 'import struct\n'), ((38224, 38265), 'numpy.frombuffer', 'numpy.frombuffer', (['raw_value', 
'numpy.uint32'], {}), '(raw_value, numpy.uint32)\n', (38240, 38265), False, 'import numpy\n'), ((38321, 38362), 'numpy.frombuffer', 'numpy.frombuffer', (['raw_value', 'numpy.uint64'], {}), '(raw_value, numpy.uint64)\n', (38337, 38362), False, 'import numpy\n'), ((55759, 55788), 'struct.unpack', 'struct.unpack', (['"""H"""', 'comm[6:8]'], {}), "('H', comm[6:8])\n", (55772, 55788), False, 'import struct\n'), ((56221, 56250), 'struct.unpack', 'struct.unpack', (['"""H"""', 'comm[6:8]'], {}), "('H', comm[6:8])\n", (56234, 56250), False, 'import struct\n'), ((57679, 57708), 'struct.unpack', 'struct.unpack', (['"""H"""', 'comm[6:8]'], {}), "('H', comm[6:8])\n", (57692, 57708), False, 'import struct\n'), ((57798, 57827), 'struct.unpack', 'struct.unpack', (['"""H"""', 'comm[6:8]'], {}), "('H', comm[6:8])\n", (57811, 57827), False, 'import struct\n'), ((57938, 57967), 'struct.unpack', 'struct.unpack', (['"""H"""', 'comm[6:8]'], {}), "('H', comm[6:8])\n", (57951, 57967), False, 'import struct\n')] |
#! /usr/bin/python3
import click
import numpy as np
try:
    # The shebang targets Python 3, where the GUI module is spelled "tkinter";
    # the original "Tkinter" name only exists on Python 2, so importing it
    # under Python 3 crashed the script before the window ever opened.
    from tkinter import *
except ImportError:
    # Fall back to the Python 2 module name for backward compatibility.
    from Tkinter import *
@click.command()
def main():
    """ CoordSys :: Conv is a GUI application for conversion between the various coordinate systems.
        Steps involved in conversion :
        1) Enter the values of known coordinates separated by ','
        2) Click Calculate to obtain the necessary coordinates. """
    root = Tk()
    # Configure the main window
    root.title("CoordSys :: Converter")
    root.configure(bg="peach puff")
    # One input variable per conversion panel
    var_1 = StringVar()
    var_2 = StringVar()
    var_3 = StringVar()
    var_4 = StringVar()
    var_5 = StringVar()
    var_6 = StringVar()
    # Main heading
    label_head = Label(root, text="Coordinate System Conversions", font=("Times", 20), foreground="saddle brown", bg="peach puff")
    label_head.place(x=520, y=40)

    def make_panel(var, in_text, in_pos, entry_pos, out_text, out_pos, res_pos, btn_pos, convert):
        """Build the click handler for one conversion panel.

        The returned function draws the input label and entry at the given
        coordinates; its Calculate button parses three comma-separated floats
        from ``var``, converts them with ``convert`` and shows the results.
        Replaces six near-identical copy-pasted panel functions.
        """
        def show_panel():
            Label(root, text=in_text, bd=0, bg="thistle").place(x=in_pos[0], y=in_pos[1])
            Entry(root, textvariable=var).place(x=entry_pos[0], y=entry_pos[1])

            def calc():
                # Parse the three comma-separated input coordinates
                a, b, c = (float(s) for s in var.get().split(','))
                u, v, w = convert(a, b, c)
                Label(root, text=out_text, bd=0, bg="thistle").place(x=out_pos[0], y=out_pos[1])
                Label(root, text="%.3f, %.3f and %.3f" % (u, v, w), bd=0, bg="khaki").place(x=res_pos[0], y=res_pos[1])

            button_calc = Button(root, text="Calculate", command=calc, activebackground="olive drab", activeforeground="gray69", cursor="hand1", bg="light blue", bd=0, highlightbackground="black")
            button_calc.place(x=btn_pos[0], y=btn_pos[1])
        return show_panel

    # Conversion formulas. np.arctan2 replaces np.arctan(y/x) and the
    # arcsin-based azimuth so results are quadrant-correct and x == 0
    # (or phi == 0) no longer raises a division error.
    def cart_to_cy_conv(x, y, z):
        """Cartesian (x, y, z) -> cylindrical (rho, theta, z)."""
        return (x**2 + y**2)**0.5, np.arctan2(y, x), z

    def cart_to_sp_conv(x, y, z):
        """Cartesian (x, y, z) -> spherical (r, phi, theta)."""
        r = (x**2 + y**2 + z**2)**0.5
        return r, np.arccos(z / r), np.arctan2(y, x)

    def cy_to_cart_conv(rho, theta, z):
        """Cylindrical (rho, theta, z) -> Cartesian (x, y, z)."""
        return rho * np.cos(theta), rho * np.sin(theta), z

    def sp_to_cart_conv(r, phi, theta):
        """Spherical (r, phi, theta) -> Cartesian (x, y, z)."""
        return r * np.sin(phi) * np.cos(theta), r * np.sin(phi) * np.sin(theta), r * np.cos(phi)

    def cy_to_sp_conv(rho, theta, z):
        """Cylindrical (rho, theta, z) -> spherical (r, phi, theta); theta is shared."""
        r = (rho**2 + z**2)**0.5
        return r, np.arccos(z / r), theta

    def sp_to_cy_conv(r, phi, theta):
        """Spherical (r, phi, theta) -> cylindrical (rho, theta, z); theta is shared."""
        return r * np.sin(phi), theta, r * np.cos(phi)

    # Panel click handlers (same layout coordinates as before)
    cart_to_cy = make_panel(var_1, "x,y and z :", (180, 210), (250, 210), "rho,theta and z:", (180, 250), (300, 250), (210, 300), cart_to_cy_conv)
    cart_to_sp = make_panel(var_2, "x,y and z :", (560, 210), (670, 210), "r,phi and theta:", (560, 250), (670, 250), (640, 300), cart_to_sp_conv)
    cy_to_cart = make_panel(var_3, "rho,theta and z :", (1000, 210), (1120, 210), "x, y and z : ", (1000, 250), (1120, 250), (1080, 300), cy_to_cart_conv)
    sp_to_cart = make_panel(var_4, "r,phi and theta :", (180, 480), (300, 480), "x,y and z:", (180, 520), (300, 520), (210, 560), sp_to_cart_conv)
    cy_to_sp = make_panel(var_5, "rho,theta and z :", (560, 480), (700, 480), "r,phi and theta:", (560, 520), (700, 520), (640, 560), cy_to_sp_conv)
    sp_to_cy = make_panel(var_6, "r,phi and theta :", (1010, 480), (1130, 480), "rho,theta and z:", (1010, 520), (1130, 520), (1080, 560), sp_to_cy_conv)

    # Conversion selector buttons
    button_cart_cy = Button(root, text="Cartesian to Cylindrical", command=cart_to_cy, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_cart_cy.place(x=180, y=150)
    button_cart_sp = Button(root, text="Cartesian to Spherical", command=cart_to_sp, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_cart_sp.place(x=600, y=150)
    button_cy_cart = Button(root, text="Cylindrical to Cartesian", command=cy_to_cart, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_cy_cart.place(x=1030, y=150)
    button_sp_cart = Button(root, text="Spherical to Cartesian", command=sp_to_cart, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_sp_cart.place(x=180, y=420)
    button_cy_sp = Button(root, text="Cylindrical to Spherical", command=cy_to_sp, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_cy_sp.place(x=600, y=420)
    button_sp_cy = Button(root, text="Spherical to Cylindrical", command=sp_to_cy, activebackground="light yellow", activeforeground="red", cursor="hand1", bg="light pink", bd=0)
    button_sp_cy.place(x=1030, y=420)
    # Footer
    cr = Label(root, text="Copyright 2018 Amogh.A.Joshi. All rights reserved.", relief=SUNKEN, cursor="gumby")
    cr.pack(side=BOTTOM, fill=X)
    # Mainloop
    root.mainloop()
# Launch the converter GUI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"click.command",
"numpy.sin",
"numpy.cos",
"numpy.arctan",
"numpy.arccos"
] | [((78, 93), 'click.command', 'click.command', ([], {}), '()\n', (91, 93), False, 'import click\n'), ((1305, 1321), 'numpy.arctan', 'np.arctan', (['(y / x)'], {}), '(y / x)\n', (1314, 1321), True, 'import numpy as np\n'), ((2278, 2294), 'numpy.arccos', 'np.arccos', (['(z / r)'], {}), '(z / r)\n', (2287, 2294), True, 'import numpy as np\n'), ((5314, 5330), 'numpy.arccos', 'np.arccos', (['(z / r)'], {}), '(z / r)\n', (5323, 5330), True, 'import numpy as np\n'), ((3288, 3301), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3294, 3301), True, 'import numpy as np\n'), ((3322, 3335), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3328, 3335), True, 'import numpy as np\n'), ((4275, 4288), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4281, 4288), True, 'import numpy as np\n'), ((4319, 4332), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4325, 4332), True, 'import numpy as np\n'), ((4353, 4364), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4359, 4364), True, 'import numpy as np\n'), ((6262, 6273), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (6268, 6273), True, 'import numpy as np\n'), ((6294, 6305), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (6300, 6305), True, 'import numpy as np\n'), ((4263, 4274), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4269, 4274), True, 'import numpy as np\n'), ((4307, 4318), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4313, 4318), True, 'import numpy as np\n'), ((2329, 2340), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2335, 2340), True, 'import numpy as np\n')] |
import numpy as np

# Collect five random permutations of the digits 1..9 into one flat list.
perms = []
for _ in range(5):
    # permutation(9) shuffles 0..8; shift by one in a single vectorized add
    # and convert to plain Python ints so the printed lists read naturally.
    arr = (np.random.permutation(9) + 1).tolist()
    perms += arr
    print(arr)
print(perms)
| [
"numpy.random.permutation"
] | [((63, 87), 'numpy.random.permutation', 'np.random.permutation', (['(9)'], {}), '(9)\n', (84, 87), True, 'import numpy as np\n')] |
## Lindenmayer system functions and classes
# Imports
import itertools
import numpy
import pandas
from evolve_soft_2d import utility
from evolve_soft_2d.unit import rep_grid
################################################################################
class vocabulary:
    """The L-system vocabulary: the variable and constant symbols together
    with a human-readable description of each.
    """

    def __init__(
        self,
        var: list,
        con: list,
        var_descr: list,
        con_descr: list,
        ) -> None:
        """Vocabulary parameters

        Parameters
        ----------
        var : list
            The variable symbols of the vocabulary
        con : list
            The constant symbols of the vocabulary
        var_descr : list
            The description of each variable symbol (parallel to ``var``)
        con_descr : list
            The description of each constant symbol (parallel to ``con``)
        """
        self.var = var
        self.con = con
        self.var_descr = var_descr
        self.con_descr = con_descr

    def __repr__(self) -> str:
        """Format a representation of the L-system vocabulary for the log

        Returns
        -------
        str
            Formatted representation of the L-system vocabulary for the log
        """
        r = "L-System Variables:\n"
        # Pair each symbol with its description instead of indexing by position
        for symbol, descr in zip(self.var, self.var_descr):
            r += "{}: {}\n".format(symbol, descr)
        r += "L-System Constants:\n"
        for symbol, descr in zip(self.con, self.con_descr):
            r += "{}: {}\n".format(symbol, descr)
        return r
################################################################################
class lsystem:
    """A Lindenmayer (L-) system: vocabulary, grammar, axiom and the word
    obtained after a fixed number of rewriting iterations.
    """

    def __init__(
        self,
        vocab: vocabulary,
        gramm: list,
        axiom: str,
        n: int,
        seed: int = None,
        ) -> None:
        """Store the L-system parameters and generate the resulting word.

        Parameters
        ----------
        vocab : vocabulary
            The vocabulary of the L-system
        gramm : list
            The grammatical rules of the L-system, as (symbol, replacement) pairs
        axiom : str
            The initial axiom of the L-system
        n : int
            The number of iterations of the L-system
        seed : int, optional
            The seed for the random generation, by default None
        """
        self.seed = seed
        self.vocab = vocab
        self.gramm = gramm
        self.axiom = axiom
        self.n = n
        # The fully rewritten word of the L-system
        self.word = self.iterate()
        # Printable form of the grammar, one "lhs -> rhs" rule per line
        self.gramm_str = utility.list_to_str([i[0] + " -> " + i[1] for i in self.gramm], "\n")

    def __repr__(self) -> str:
        """Format a representation of the L-system for the log

        Returns
        -------
        str
            Formatted representation of the L-system for the log
        """
        parts = [
            "Grammar:\n{}\n".format(self.gramm_str),
            "Axiom: {}\n".format(self.axiom),
            "Number Of Iterations: {}\n".format(self.n),
            "Resulting Word: {}\n".format(self.word),
            "\n{}".format(self.vocab),
        ]
        if self.seed is not None:
            parts.append("\nSeed: {}\n".format(self.seed))
        return "".join(parts)

    def apply_gramm(
        self,
        c: str,
        ) -> str:
        """Return the replacement for a single symbol.

        The first grammar rule whose left-hand side equals the symbol wins;
        a symbol with no matching rule is returned unchanged.

        Parameters
        ----------
        c : str
            The character to be transformed

        Returns
        -------
        str
            The new string
        """
        return next((rule[1] for rule in self.gramm if rule[0] == c), c)

    def rewrite(
        self,
        w: str
        ) -> str:
        """Rewrite every symbol of a character string using the grammar.

        Parameters
        ----------
        w : str
            The character string

        Returns
        -------
        str
            The rewritten string
        """
        return "".join(map(self.apply_gramm, w))

    def iterate(self) -> str:
        """Generate the final L-system string.

        Starting from "F", the grammar is applied ``n`` times and the result
        is substituted for every "F" in the axiom.

        Returns
        -------
        str
            The final L-system string
        """
        word = "F"
        for _ in range(self.n):
            word = self.rewrite(word)
        return self.axiom.replace("F", word)
################################################################################
def interpret_word(
    template,
    w: str,
    ) -> list:
    """Interpret a word generated by an L-system as an element grid

    The word is read turtle-style: "F" adds an element and moves, "f" moves
    without adding, "+"/"-" turn, "[" pushes and "]" pops position/direction,
    and "(...)" spans are reflected before interpretation. The visited
    coordinates are normalised onto the template grid and converted to
    element ids.

    Parameters
    ----------
    template
        The unit template; must expose ``x_e``, ``y_e`` (element counts),
        ``b`` (boundary width) and ``e_internal`` (internal element ids)
    w : str
        The word generated by the L-system

    Returns
    -------
    list
        The ids of the internal elements to be removed — NOTE(review):
        computed via ``utility.unique_list(template.e_internal, keep)``,
        which presumably yields the internal elements NOT visited; confirm
    """

    # Initialisations
    c = []
    x = 0
    y = 0
    d = 0
    F1 = True
    x_stack = []
    y_stack = []
    d_stack = []
    x_stack.append(x)
    y_stack.append(y)
    d_stack.append(d)
    keep = []

    # Determine how many reflections are initiated
    reflections = w.count("(")

    # Loop through the number of reflections
    for _ in range(0, reflections):

        # Find the reflection boundary indices
        b1 = w.find("(")
        b2 = w.find(")")

        # Apply the reflection transformation (swap push/pop and turn symbols)
        s = utility.clean_str(w[b1:b2 + 1], ["(", ")", "+", "-", "x"], ["[", "]", "x", "+", "-"])

        # Replace the relevant substring with its reflection
        w = utility.clean_str(w, [w[b1:b2 + 1]], [s])

    # Loop through every character in the string
    for i in w:

        # Check if the current character is F
        if i == "F":

            # Check that the stack is not currently at its initial values and that the initial element flag is not set
            if len(x_stack) > 1 and F1 == False:

                # Determine the x and y coordinates of the current element
                x, y = determine_c(d, x, y)

            else:

                # Set the initial element coordinates
                x = 0
                y = 0

                # Unset the flag
                F1 = False

            # Add the element
            c.append([x, y])

        # Check if the current character is f (move without drawing)
        elif i == "f":

            # Check that the stack is not currently at its initial values and that the initial element flag is not set
            if len(x_stack) > 1 and F1 == False:

                # Determine the x and y coordinates of the current element
                x, y = determine_c(d, x, y)

            else:

                # Set the initial element coordinates
                x = 0
                y = 0

                # Unset the flag
                F1 = False

        # Check if the current character is +
        elif i == "+":

            # Update the direction
            d = update_d(i, d)

        # Check if the current character is -
        elif i == "-":

            # Update the direction
            d = update_d(i, d)

        # Check if the current character is [
        elif i == "[":

            # Push the current coordinates and direction
            x_stack.append(x)
            y_stack.append(y)
            d_stack.append(d)

        # Check if the current character is ]
        elif i == "]":

            # Check if the stacks have any coordinates pushed to them
            if len(x_stack) > 1:

                # Pop the last saved coordinates and direction
                x = x_stack.pop()
                y = y_stack.pop()
                d = d_stack.pop()

                # Check if the stack is at its initial values
                if len(x_stack) == 1:

                    # Reset the flag
                    F1 = True

            else:

                # Pop the original coordinates and direction
                x = x_stack[0]
                y = y_stack[0]
                d = d_stack[0]

    # Remove any duplicate element coordinates
    c.sort()
    c = list(c for c, _ in itertools.groupby(c))

    # Format the list of coordinates as a dataframe
    col = ["x", "y"]
    c = pandas.DataFrame(c, columns = col)

    # Normalise the coordinates
    c.x = utility.normalise_list(c.x, template.x_e/2 - 0.5)
    c.y = utility.normalise_list(c.y, template.y_e/2 - 0.5)

    # Remove any coordinates outside the bounds of the internal space
    c = c[c.x >= template.b]
    c = c[c.x < template.x_e - template.b]
    c = c[c.y >= template.b]
    c = c[c.y < template.y_e - template.b]

    # Reformat the dataframe
    c = c.reset_index(drop = True)
    c = c.astype(int)

    # Loop through the dataframe
    for i in range(0, len(c)):

        # Append the element coordinate to the list of coordinates
        # (1-based id, row-major over the x_e-wide grid)
        keep.append(1 + c.x[i] + c.y[i]*template.x_e)

    # Determine which elements should be removed
    rem = utility.unique_list(template.e_internal, keep)

    return rem
################################################################################
def interpret_word_raw(
w: str,
) -> pandas.DataFrame:
"""Interpret a word generated by an L-system as an element grid
Parameters
----------
w : str
The word generated by the L-system
Returns
-------
list
The element grid
"""
# Initialisations
c = []
x = 0
y = 0
d = 0
F1 = True
x_stack = []
y_stack = []
d_stack = []
x_stack.append(x)
y_stack.append(y)
d_stack.append(d)
# Determine how many reflections are initiated
reflections = w.count("(")
# Loop through the number of reflections
for _ in range(0, reflections):
# Find the reflection boundary indices
b1 = w.find("(")
b2 = w.find(")")
# Apply the reflection transformation
s = utility.clean_str(w[b1:b2 + 1], ["(", ")", "+", "-", "x"], ["[", "]", "x", "+", "-"])
# Replace the relevant substring with its reflection
w = utility.clean_str(w, [w[b1:b2 + 1]], [s])
# Loop through every character in the string
for i in w:
# Check if the current character is F
if i == "F":
# Check that the stack is not currently at its initial values and that the initial element flag is not set
if len(x_stack) > 1 and F1 == False:
# Determine the x and y coordinates of the current element
x, y = determine_c(d, x, y)
else:
# Set the initial element coordinates
x = 0
y = 0
# Unset the flag
F1 = False
# Add the element
c.append([x, y])
# Check if the current character is f
elif i == "f":
# Check that the stack is not currently at its initial values and that the initial element flag is not set
if len(x_stack) > 1 and F1 == False:
# Determine the x and y coordinates of the current element
x, y = determine_c(d, x, y)
else:
# Set the initial element coordinates
x = 0
y = 0
# Unset the flag
F1 = False
# Check if the current character is +
elif i == "+":
# Update the direction
d = update_d(i, d)
# Check if the current character is -
elif i == "-":
# Update the direction
d = update_d(i, d)
# Check if the current character is [
elif i == "[":
# Push the current coordinates and direction
x_stack.append(x)
y_stack.append(y)
d_stack.append(d)
# Check if the current character is ]
elif i == "]":
# Check if the stacks have any coordinates pushed to them
if len(x_stack) > 1:
# Pop the last saved coordinates and direction
x = x_stack.pop()
y = y_stack.pop()
d = d_stack.pop()
# Check if the stack is at its initial values
if len(x_stack) == 1:
# Reset the flag
F1 = True
else:
# Pop the original coordinates and direction
x = x_stack[0]
y = y_stack[0]
d = d_stack[0]
# Remove any duplicate element coordinates
c.sort()
c = list(c for c, _ in itertools.groupby(c))
# Format the list of coordinates as a dataframe
col = ["x", "y"]
c = pandas.DataFrame(c, columns = col)
# # Normalise the coordinates
# c.x = utility.normalise_list(c.x, max(c.x))
# c.y = utility.normalise_list(c.y, max(c.y))
# Reformat the dataframe
c = c.reset_index(drop = True)
c = c.astype(int)
return c
################################################################################
def update_d(
r: str,
d: int,
) -> int:
"""Update the direction
Parameters
----------
r : str
The direction of rotation
d : int
The current direction
Returns
-------
int
The updated direction
"""
# Check if the direction of rotation is 45 degrees positive
if r == "+":
d += 45
# Check if the direction of rotation is 45 degrees negative
elif r == "-":
d -= 45
# Check if the direction is more than 360 degrees
if d >= 360:
d -=360
# Check if the direction is less than 0 degrees
elif d < 0:
d += 360
return d
################################################################################
def determine_c(
d: int,
x: int,
y: int,
) -> (int, int):
"""Determine the coordinates of the new element
Parameters
----------
d : int
The current direction
x : int
The current x-coordinate
y : int
The current y-coordinate
Returns
-------
(int, int)
The x- and y-coordinates of the new element
"""
if d == 0:
y += 1
elif d == 45:
x += -1
y += 1
elif d == 90:
x += -1
elif d == 135:
x += -1
y += -1
elif d == 180:
y += -1
elif d == 225:
x += 1
y += -1
elif d == 270:
x += 1
elif d == 315:
x += 1
y += 1
return (x, y)
################################################################################
def gen_lsystem(
seed: int,
v: vocabulary,
a_i: int,
r_n: int,
r_l: int,
n: int,
) -> lsystem:
"""Generate a random L-system
Parameters
----------
seed : int
The seed for the random generation
v : vocabulary
The vocabulary of the L-system
a_i : int
The index of the axis of symmetry to use
r_n : int
The number of rules to generate
r_l : int
The length of the rules
n : int
The number of iterations for the L-System
Returns
-------
lsystem
The L-System
"""
# Initialisations
g = []
i = 0
j = 0
# Select the axiom from the list of axioms
aos = a_all[a_i]
# Loop until a rule including the command to draw an element is defined
while 1:
# Generate a random rule
g2 = utility.gen_random(l_c, r_l, seed + i)
# Check if the command to draw an element is included in the rule
if "F" in g2:
# Exit the loop
break
i += 1
# Save the first rule
g.append(["F", g2])
# Loop through the number of rules to generate
for i in range(1, r_n):
# Loop until a rule applied to a new character is generated
while 1:
numpy.random.seed(seed = seed + j)
# Select a character
g1 = numpy.random.choice(e_var[1:])
# Check if the character already has a rule applied to it
if g1 not in [i[0]for i in g]:
# Exit the loop
break
j += 1
# Generate the rule
g2 = utility.gen_random(l_c, r_l, seed = seed + i)
# Add the rule to the list of rules
g.append([g1, g2])
# Define the L-system
ls = lsystem(v, g, aos, n, seed = seed)
return ls
################################################################################
# The L-system vocabulary used to generate internal elements
e_var = ["F", "f", "+", "-"]
e_con = ["[", "]", "(", ")"]
e_var_descr = [
"Create an element at the current position and increment the position",
"Increment the position",
"Rotate the current direction by 45 degrees counterclockwise",
"Rotate the current direction by 45 degrees clockwise",
]
e_con_descr = [
"Push the current position",
"Pop to the previously pushed position",
"Push and reflect the current position",
"Pop and unreflect to the previously pushed and reflected position",
]
e_vocabulary = vocabulary(e_var, e_con, e_var_descr, e_con_descr)
# L-system axioms for symmetry
a_rot_hor = "[F]++++[F]"
a_rot_ver = "--[F]++++[F]"
a_rot_hor_ver = "[F]++[F]++[F]++[F]"
a_rot_dia = "+[F]++++[F]"
a_rot_ndi = "-[F]++++[F]"
a_rot_dia_ndi = "+[F]++[F]++[F]++[F]"
a_mir_hor = "[F]++++(F)"
a_mir_ver = "--[F]++++(F)"
a_mir_hor_ver = "[F]++(F)++[F]++(F)"
a_mir_dia = "+[F]++++(F)"
a_mir_ndi = "-[F]++++(F)"
a_mir_dia_ndi = "+[F]++(F)++[F]++(F)"
a_all = [a_rot_hor, a_rot_ver, a_rot_hor_ver, a_rot_dia, a_rot_ndi, a_rot_dia_ndi, a_mir_hor, a_mir_ver, a_mir_hor_ver, a_mir_dia, a_mir_ndi, a_mir_dia_ndi]
# L-System components for random generation
l_c = ["F", "f", "+", "-", "++", "--", "fF", "Ff", "[F]", "[f]", "[+F]", "[+fF]", "[-F]", "[-fF]"] | [
"pandas.DataFrame",
"numpy.random.choice",
"numpy.random.seed",
"evolve_soft_2d.utility.clean_str",
"evolve_soft_2d.utility.gen_random",
"evolve_soft_2d.utility.unique_list",
"evolve_soft_2d.utility.list_to_str",
"evolve_soft_2d.utility.normalise_list",
"itertools.groupby"
] | [((8353, 8385), 'pandas.DataFrame', 'pandas.DataFrame', (['c'], {'columns': 'col'}), '(c, columns=col)\n', (8369, 8385), False, 'import pandas\n'), ((8433, 8484), 'evolve_soft_2d.utility.normalise_list', 'utility.normalise_list', (['c.x', '(template.x_e / 2 - 0.5)'], {}), '(c.x, template.x_e / 2 - 0.5)\n', (8455, 8484), False, 'from evolve_soft_2d import utility\n'), ((8493, 8544), 'evolve_soft_2d.utility.normalise_list', 'utility.normalise_list', (['c.y', '(template.y_e / 2 - 0.5)'], {}), '(c.y, template.y_e / 2 - 0.5)\n', (8515, 8544), False, 'from evolve_soft_2d import utility\n'), ((9102, 9148), 'evolve_soft_2d.utility.unique_list', 'utility.unique_list', (['template.e_internal', 'keep'], {}), '(template.e_internal, keep)\n', (9121, 9148), False, 'from evolve_soft_2d import utility\n'), ((12887, 12919), 'pandas.DataFrame', 'pandas.DataFrame', (['c'], {'columns': 'col'}), '(c, columns=col)\n', (12903, 12919), False, 'import pandas\n'), ((2340, 2411), 'evolve_soft_2d.utility.list_to_str', 'utility.list_to_str', (["[(i[0] + ' -> ' + i[1]) for i in self.gramm]", '"""\n"""'], {}), "([(i[0] + ' -> ' + i[1]) for i in self.gramm], '\\n')\n", (2359, 2411), False, 'from evolve_soft_2d import utility\n'), ((5530, 5619), 'evolve_soft_2d.utility.clean_str', 'utility.clean_str', (['w[b1:b2 + 1]', "['(', ')', '+', '-', 'x']", "['[', ']', 'x', '+', '-']"], {}), "(w[b1:b2 + 1], ['(', ')', '+', '-', 'x'], ['[', ']', 'x',\n '+', '-'])\n", (5547, 5619), False, 'from evolve_soft_2d import utility\n'), ((5692, 5733), 'evolve_soft_2d.utility.clean_str', 'utility.clean_str', (['w', '[w[b1:b2 + 1]]', '[s]'], {}), '(w, [w[b1:b2 + 1]], [s])\n', (5709, 5733), False, 'from evolve_soft_2d import utility\n'), ((10064, 10153), 'evolve_soft_2d.utility.clean_str', 'utility.clean_str', (['w[b1:b2 + 1]', "['(', ')', '+', '-', 'x']", "['[', ']', 'x', '+', '-']"], {}), "(w[b1:b2 + 1], ['(', ')', '+', '-', 'x'], ['[', ']', 'x',\n '+', '-'])\n", (10081, 10153), False, 'from evolve_soft_2d import 
utility\n'), ((10226, 10267), 'evolve_soft_2d.utility.clean_str', 'utility.clean_str', (['w', '[w[b1:b2 + 1]]', '[s]'], {}), '(w, [w[b1:b2 + 1]], [s])\n', (10243, 10267), False, 'from evolve_soft_2d import utility\n'), ((15698, 15736), 'evolve_soft_2d.utility.gen_random', 'utility.gen_random', (['l_c', 'r_l', '(seed + i)'], {}), '(l_c, r_l, seed + i)\n', (15716, 15736), False, 'from evolve_soft_2d import utility\n'), ((16509, 16552), 'evolve_soft_2d.utility.gen_random', 'utility.gen_random', (['l_c', 'r_l'], {'seed': '(seed + i)'}), '(l_c, r_l, seed=seed + i)\n', (16527, 16552), False, 'from evolve_soft_2d import utility\n'), ((16137, 16169), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': '(seed + j)'}), '(seed=seed + j)\n', (16154, 16169), False, 'import numpy\n'), ((16225, 16255), 'numpy.random.choice', 'numpy.random.choice', (['e_var[1:]'], {}), '(e_var[1:])\n', (16244, 16255), False, 'import numpy\n'), ((8247, 8267), 'itertools.groupby', 'itertools.groupby', (['c'], {}), '(c)\n', (8264, 8267), False, 'import itertools\n'), ((12781, 12801), 'itertools.groupby', 'itertools.groupby', (['c'], {}), '(c)\n', (12798, 12801), False, 'import itertools\n')] |
# -*- coding: UTF-8 -*-
''' Data preprocessing for slot tagging and intent prediction.
Replace the unseen tokens in the test/dev set with <unk> for user intents, user slot tags and agent actions.
Author : <NAME>
Email : <EMAIL>
Created Date: Dec. 31, 2016
'''
from DataSetCSV import DataSetCSV
import numpy as np
from keras.preprocessing import sequence
from utils import to_categorical
def vectorizing_zeropad(sentences, maxlen, token2id, prefix=''):
''' encode utterance or slot tags into id sequence.
0s for padding, 1s for unk.
return a matrix with shape=(sample_nb, maxlen)
e.g. [[0, 0, 0, 1, 2, 4, 5, ...], [...]]
Inputs:
sentences: shape = (sample_nb,), a list of strings
Outputs:
zeropad: shape = (sample_nb, maxlen), pre-padded ids
token_txt: shape = (sample_nb,), new text sequences with unknown words replaced by '<unk>'
'''
encode = list()
token_txt = list()
for sent in sentences:
output = list()
output_txt = list()
for token in sent.strip().split():
token = '{}{}'.format(prefix, token.strip())
if token in token2id:
output.append(token2id[token])
output_txt.append(token)
else:
# we reserved 1 for <unk>, 0 for <pad>
output.append(token2id['<{}unk>'.format(prefix)])
output_txt.append('<{}unk>'.format(prefix))
encode.append(output)
token_txt.append(' '.join(output_txt))
zeropad = sequence.pad_sequences(
encode, maxlen, padding='pre', truncating='pre')
return zeropad, np.asarray(token_txt)
def vectorizing_binaryVec(intents, vocab_size, intent2id, prefix=''):
''' convert into binary vectors.
Inputs:
intents: shape = (sample_nb,)
vocab_size: scalar, vocabulary size
intent2id: dict, (token, id) pairs
Outputs:
vec: shape = (sample_nb, vocab_size), binary matrix with firing ones when token exists in specific position
intent_txt: shape = (sample_nb,), a list of text with unknown tokens replaced by '<unk>'
'''
vec = np.zeros((intents.shape[0], vocab_size))
intent_txt = list()
for intent_idx, intent in enumerate(intents):
output_txt = set() # duplicated intent may exist, need to unique
for token_idx, token in enumerate(intent.strip().split(';')):
if token == 'null': # null exists in agent acts, but is not considered as label; while user intent does not include it
output_txt.add(token)
continue
token = '{}{}'.format(prefix, token)
if token in intent2id:
vec[intent_idx, intent2id[token] - 1] = 1
output_txt.add(token)
else:
unk = '<{}unk>'.format(prefix)
vec[intent_idx, intent2id[unk] - 1] = 1
output_txt.add(unk)
intent_txt.append(';'.join(sorted(output_txt)))
return vec, np.asarray(intent_txt)
class DataSetCSVslotTagging(DataSetCSV):
def __init__(self, csv_file, train_data=None, flag='train'):
super(DataSetCSVslotTagging, self).__init__(
csv_file, train_data, flag)
def getUserUtterMaxlen(self):
maxlen = 0
for utter in self.userUtter_txt:
utter_len = len([x for x in utter.strip().split()])
if utter_len > maxlen:
maxlen = utter_len
return maxlen
def transform_data(self, maxlen):
self.maxlen_userUtter = maxlen
# replace unknown words with <unk> in user utterance, and encode it
# using word id.
self.userUtter_encodePad, self.userUtter_txt = vectorizing_zeropad(
self.userUtter_txt, self.maxlen_userUtter, self.word2id, prefix='')
# replace unknown tags with <tag-unk> in user slot tags, and encode it
# as 1hot matrix
userTag_encodePad, self.userTag_txt = vectorizing_zeropad(
self.userTag_txt, self.maxlen_userUtter, self.userTag2id, prefix='tag-')
self.userTag_1hotPad = to_categorical(
userTag_encodePad, self.userTag_vocab_size)
# replace unknown intents with <intent-unk> in user intents, and encode
# it as binary vec
self.userIntent_vecBin, self.userIntent_txt = vectorizing_binaryVec(
self.userIntent_txt, self.userIntent_vocab_size, self.userIntent2id, prefix='intent-')
if __name__ == '__main__':
csv_train = './data/csv/dstc4.all.w-intent.train.csv'
csv_test = './data/csv/dstc4.all.w-intent.test.csv'
csv_dev = './data/csv/dstc4.all.w-intent.dev.csv'
train_data = DataSetCSVslotTagging(csv_train, flag='train')
dev_data = DataSetCSVslotTagging(
csv_dev, train_data=train_data, flag='test')
test_data = DataSetCSVslotTagging(
csv_test, train_data=train_data, flag='test')
maxlen_userUtter_train = train_data.getUserUtterMaxlen()
maxlen_userUtter_dev = dev_data.getUserUtterMaxlen()
maxlen_userUtter_test = test_data.getUserUtterMaxlen()
maxlen_userUtter = max(maxlen_userUtter_train,
maxlen_userUtter_dev, maxlen_userUtter_test)
train_data.transform_data(maxlen_userUtter)
dev_data.transform_data(maxlen_userUtter)
test_data.transform_data(maxlen_userUtter)
import ipdb
ipdb.set_trace()
print('done')
| [
"ipdb.set_trace",
"keras.preprocessing.sequence.pad_sequences",
"numpy.asarray",
"utils.to_categorical",
"numpy.zeros"
] | [((1588, 1659), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['encode', 'maxlen'], {'padding': '"""pre"""', 'truncating': '"""pre"""'}), "(encode, maxlen, padding='pre', truncating='pre')\n", (1610, 1659), False, 'from keras.preprocessing import sequence\n'), ((2229, 2269), 'numpy.zeros', 'np.zeros', (['(intents.shape[0], vocab_size)'], {}), '((intents.shape[0], vocab_size))\n', (2237, 2269), True, 'import numpy as np\n'), ((5451, 5467), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (5465, 5467), False, 'import ipdb\n'), ((1689, 1710), 'numpy.asarray', 'np.asarray', (['token_txt'], {}), '(token_txt)\n', (1699, 1710), True, 'import numpy as np\n'), ((3092, 3114), 'numpy.asarray', 'np.asarray', (['intent_txt'], {}), '(intent_txt)\n', (3102, 3114), True, 'import numpy as np\n'), ((4190, 4248), 'utils.to_categorical', 'to_categorical', (['userTag_encodePad', 'self.userTag_vocab_size'], {}), '(userTag_encodePad, self.userTag_vocab_size)\n', (4204, 4248), False, 'from utils import to_categorical\n')] |
# Standard library
import argparse
import os
import pathlib
import shutil
import sys
import simulacra.star
import simulacra.tellurics
import simulacra.detector
import simulacra.gascell
# Third-party
import numpy as np
# from threadpoolctl import threadpool_limits
# Package
# from .helpers import get_parser
# from ..log import logger
import random
import astropy.coordinates as coord
import astropy.units as u
import astropy.time as at
random.seed(102102102)
np.random.seed(102102102)
def run_simulation(detector,transmission_models,exp_times,epoches,window):
# parser args into these constants and filename
tstart = at.Time('2020-01-01T08:10:00.123456789',format='isot',scale='utc')
tend = tstart + window * u.day
night_grid = simulacra.star.get_night_grid(detector.loc,tstart,tend,steps_per_night=5)
possible_times, airmass = simulacra.star.get_realistic_times(detector.stellar_model.target,detector.loc,night_grid)
obs_ints = random.sample(range(len(airmass)),epoches)
obs_times, obs_airmass = possible_times[obs_ints], airmass[obs_ints]
for model in transmission_models:
detector.add_model(model)
data = detector.simulate(obs_times,exp_times)
return data
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-e','--epoches',type=int,required=True,help='number of epoches of data to generate')
parser.add_argument('-d','--distance',type=float,required=True,help='distance to star in pcs')
parser.add_argument('-p','--period',type=float,default=40.3,help='period of the star wobble in days')
parser.add_argument('-a','--amp',type=float,default=2,help='amplitude of velocity wobble in km/s')
parser.add_argument('--alpha',type=float,default=0.4,help='The alpha ratio of the star being observed must be in the PHOENIX repository')
parser.add_argument('-z',type=float,default=-1.0,help='The z ratio of the star being observed must be in the PHOENIX repository')
parser.add_argument('-T','--temp',type=float,default=4600,help='The temperature in Kelvin of the star being observed must be in the PHOENIX repository')
parser.add_argument('--logg',type=float,default=1.0,help='The logg of the star being observed must be in the PHOENIX repository')
parser.add_argument('--amplitude',type=float,default=2.0,help='The amplitude of oscillation of the star in km/s being observed must be in the PHOENIX repository')
parser.add_argument('--epsilon',type=float,default=1.0,help='random property of the wave transformation')
parser.add_argument('-w',type=float,default=0.0, help='random property of the wave transformation')
parser.add_argument('--gamma',type=float,default=1.0, help='user set parameter to control SNR')
parser.add_argument('--ra',type=float,default=None,help='The right ascension of the star being observed if left empty it will be random set.')
parser.add_argument('--dec',type=float,default=None,help='The right ascension of the star being observed if left empty it will be random set.')
parser.add_argument('--window',type=float,default=180,help='Time in days to observe the star over')
parser.add_argument('--exp_time',type=float,default=8,help='Time in minutes of each exposure')
parser.add_argument('-o','--output',type=str,default='../../out',help='Output directory, default assumes you are in the home directory of this package.')
return parser
def add_tellurics_args(parser):
parser.add_argument('--pressure',type=float,default=1.0e6,help='The pressure at the observatory at the time of the observations in pascals')
parser.add_argument('--temperature',type=float,default=300,help='The temperature at the observatory at the time of the observations in Kelvin')
parser.add_argument('--humidity',type=float,default=50.0,help='The humidity at the observatory at the time of the observations in percentage')
return parser
def get_star(loc,args):
if args.ra is None:
args.ra = np.random.uniform(0,360) * u.degree
if args.dec is None:
args.dec = np.random.uniform(loc.lat.to(u.degree).value-30,loc.lat.to(u.degree).value+30) * u.degree
target = coord.SkyCoord(args.ra,args.dec,frame='icrs')
stellar_model = simulacra.star.PhoenixModel(args.distance * u.pc,args.alpha,args.z,\
args.temp,args.logg,target,\
args.amplitude * u.km/u.s,args.period * u.day)
return stellar_model
def get_tellurics(loc,wave_min,wave_max,args):
tellurics_model = simulacra.tellurics.TelFitModel(wave_min,wave_max,loc)
tellurics_model.pressure = args.pressure * u.Pa
tellurics_model.temperature = args.temperature * u.Kelvin
tellurics_model.humidity = args.humidity
return tellurics_model
class CLI:
"""To add a new subcommand, just add a new classmethod and a docstring!"""
_usage = None
def __init__(self):
parser = argparse.ArgumentParser(
description='A pipeline utility for running The Joker',
usage=self._usage.strip())
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print(f"Unsupported command '{args.command}'")
parser.print_help()
sys.exit(1)
getattr(self, args.command)()
def apogee(self):
"""Generate APOGEE data with a simple command."""
parser = get_parser()
parser = add_tellurics_args(parser)
args = parser.parse_args(sys.argv[2:])
obs = 'APO'
loc = coord.EarthLocation.of_site(obs)
# print(loc.lon,loc.latlat)
stellar_model = get_star(loc,args)
wave_min = 1.51*u.um
wave_max = 1.70*u.um
# Detector physical parameters
################################
det_dict = {'resolution':22_500.0,
'area': np.pi * (2.5*u.m/2)**2,
'dark_current': 100/u.s,
'read_noise': 100,
'ccd_eff':0.99,
'through_put':0.05,
'epsilon': args.epsilon,
'w':args.w,
'gamma':args.gamma}
tellurics_model = get_tellurics(loc,wave_min,wave_max,args)
exp_times = np.ones(args.epoches)*args.exp_time * u.minute
delta_x = simulacra.detector.spacing_from_res(4*det_dict['resolution'])
x_grid = np.arange(np.log(wave_min.to(u.Angstrom).value),np.log(wave_max.to(u.Angstrom).value),delta_x)
wave_grid = np.exp(x_grid) * u.Angstrom
detector = simulacra.detector.Detector(stellar_model,loc=loc,wave_grid=wave_grid,**det_dict)
data = run_simulation(detector,[tellurics_model],exp_times,args.epoches,args.window)
filename = os.path.join(args.output,'apogee_e{}_a{}_p{}'.format(args.epoches,stellar_model.amplitude.to(u.m/u.s).value,stellar_model.period.to(u.day).value))
print(filename)
data.to_h5(filename + '.h5')
def keckhires(self):
"""Generate Keck HIRES data with a simple command."""
# parser = argparse.ArgumentParser(sys.argv)
parser = get_parser()
parser = add_tellurics_args(parser)
args = parser.parse_args(sys.argv[2:])
obs = 'Keck Observatory'
loc = coord.EarthLocation.of_site(obs)
stellar_model = get_star(loc,args)
wave_min = 500*u.nm
wave_max = 630*u.nm
# Detector physical parameters
################################
det_dict = {'resolution':100_000.0,
'area': np.pi * (10*u.m/2)**2,
'dark_current': 100/u.s,
'read_noise': 100,
'ccd_eff':0.99,
'through_put':0.05,
'epsilon': args.epsilon,
'w':args.w,
'gamma':args.gamma}
tellurics_model = get_tellurics(loc,wave_min,wave_max,args)
gascell_model = simulacra.gascell.GasCellModel(filename='data/gascell/keck_fts_inUse.idl')
exp_times = np.ones(args.epoches)*args.exp_time * u.minute
delta_x = simulacra.detector.spacing_from_res(4*det_dict['resolution'])
x_grid = np.arange(np.log(wave_min.to(u.Angstrom).value),np.log(wave_max.to(u.Angstrom).value),delta_x)
wave_grid = np.exp(x_grid) * u.Angstrom
detector = simulacra.detector.Detector(stellar_model,loc=loc,wave_grid=wave_grid,**det_dict)
data = run_simulation(detector,[tellurics_model,gascell_model],exp_times,args.epoches,args.window)
filename = os.path.join(args.output,'keck_e{}_a{}_p{}'.format(args.epoches,stellar_model.amplitude.to(u.m/u.s).value,stellar_model.period.to(u.day).value))
print(filename)
data.to_h5(filename + '.h5')
def expres(self):
"""Generate EXPRES data with a simple command."""
parser = get_parser()
parser = add_tellurics_args(parser)
args = parser.parse_args(sys.argv[2:])
obs = 'Lowell Observatory'
loc = coord.EarthLocation.of_site(obs)
stellar_model = get_star(loc,args)
wave_min = 700*u.nm
wave_max = 950*u.nm
# Detector physical parameters
################################
det_dict = {'resolution':130_000.0,
'area': np.pi * (4.3*u.m/2)**2,
'dark_current': 100/u.s,
'read_noise': 100,
'ccd_eff':0.99,
'through_put':0.05,
'epsilon': args.epsilon,
'w':args.w,
'gamma':args.gamma}
tellurics_model = get_tellurics(loc,wave_min,wave_max,args)
exp_times = np.ones(args.epoches)*args.exp_time * u.minute
delta_x = simulacra.detector.spacing_from_res(4*det_dict['resolution'])
x_grid = np.arange(np.log(wave_min.to(u.Angstrom).value),np.log(wave_max.to(u.Angstrom).value),delta_x)
wave_grid = np.exp(x_grid) * u.Angstrom
detector = simulacra.detector.Detector(stellar_model,loc=loc,wave_grid=wave_grid,**det_dict)
data = run_simulation(detector,[tellurics_model],exp_times,args.epoches,args.window)
filename = os.path.join(args.output,'expres_e{}_a{}_p{}'.format(args.epoches,stellar_model.amplitude.to(u.m/u.s).value,stellar_model.period.to(u.day).value))
print(filename)
data.to_h5(filename + '.h5')
# Auto-generate the usage block:
cmds = []
maxlen = max([len(name) for name in CLI.__dict__.keys()])
for name, attr in CLI.__dict__.items():
if not name.startswith('_'):
cmds.append(f' {name.ljust(maxlen)} {attr.__doc__}\n')
CLI._usage = f"""
simulacra <command> [<args>]
Available commands:
{''.join(cmds)}
See more usage information about a given command by running:
simulacra <command> --help
"""
# keck hires, gaia, apogee, expres
def main():
CLI()
| [
"numpy.random.uniform",
"numpy.random.seed",
"argparse.ArgumentParser",
"astropy.time.Time",
"numpy.ones",
"random.seed",
"numpy.exp",
"astropy.coordinates.SkyCoord",
"astropy.coordinates.EarthLocation.of_site",
"sys.exit"
] | [((440, 462), 'random.seed', 'random.seed', (['(102102102)'], {}), '(102102102)\n', (451, 462), False, 'import random\n'), ((463, 488), 'numpy.random.seed', 'np.random.seed', (['(102102102)'], {}), '(102102102)\n', (477, 488), True, 'import numpy as np\n'), ((630, 698), 'astropy.time.Time', 'at.Time', (['"""2020-01-01T08:10:00.123456789"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2020-01-01T08:10:00.123456789', format='isot', scale='utc')\n", (637, 698), True, 'import astropy.time as at\n'), ((1250, 1275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1273, 1275), False, 'import argparse\n'), ((4171, 4218), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['args.ra', 'args.dec'], {'frame': '"""icrs"""'}), "(args.ra, args.dec, frame='icrs')\n", (4185, 4218), True, 'import astropy.coordinates as coord\n'), ((5621, 5653), 'astropy.coordinates.EarthLocation.of_site', 'coord.EarthLocation.of_site', (['obs'], {}), '(obs)\n', (5648, 5653), True, 'import astropy.coordinates as coord\n'), ((7358, 7390), 'astropy.coordinates.EarthLocation.of_site', 'coord.EarthLocation.of_site', (['obs'], {}), '(obs)\n', (7385, 7390), True, 'import astropy.coordinates as coord\n'), ((9110, 9142), 'astropy.coordinates.EarthLocation.of_site', 'coord.EarthLocation.of_site', (['obs'], {}), '(obs)\n', (9137, 9142), True, 'import astropy.coordinates as coord\n'), ((3988, 4013), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(360)'], {}), '(0, 360)\n', (4005, 4013), True, 'import numpy as np\n'), ((5332, 5343), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5340, 5343), False, 'import sys\n'), ((6595, 6609), 'numpy.exp', 'np.exp', (['x_grid'], {}), '(x_grid)\n', (6601, 6609), True, 'import numpy as np\n'), ((8395, 8409), 'numpy.exp', 'np.exp', (['x_grid'], {}), '(x_grid)\n', (8401, 8409), True, 'import numpy as np\n'), ((10048, 10062), 'numpy.exp', 'np.exp', (['x_grid'], {}), '(x_grid)\n', (10054, 10062), True, 'import numpy as np\n'), 
((6335, 6356), 'numpy.ones', 'np.ones', (['args.epoches'], {}), '(args.epoches)\n', (6342, 6356), True, 'import numpy as np\n'), ((8135, 8156), 'numpy.ones', 'np.ones', (['args.epoches'], {}), '(args.epoches)\n', (8142, 8156), True, 'import numpy as np\n'), ((9788, 9809), 'numpy.ones', 'np.ones', (['args.epoches'], {}), '(args.epoches)\n', (9795, 9809), True, 'import numpy as np\n')] |
import sys
import os
import base64
import dash
from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_html_components as html
from dash.exceptions import PreventUpdate
import torch
import numpy as np
import crepe
import scipy
from scipy.io import wavfile
import psola
import io
import nemo
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.tts.models import TalkNetSpectModel
from nemo.collections.tts.models import TalkNetPitchModel
from nemo.collections.tts.models import TalkNetDursModel
from talknet_singer import TalkNetSingerModel
import json
from tqdm import tqdm
import gdown
import zipfile
import resampy
import traceback
import ffmpeg
import time
import uuid
sys.path.append("hifi-gan")
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE
from models import Generator
from denoiser import Denoiser
app = JupyterDash(__name__)
UPLOAD_DIRECTORY = "/content"
torch.set_grad_enabled(False)
app.layout = html.Div(
children=[
html.H1(
id="header",
children="Controllable TalkNet",
style={
"font-family": "Georgia",
"color": "#000000",
"font-size": "4em",
"text-align": "center",
"margin-top": "0em",
"margin-bottom": "0em",
},
),
html.Label("Character selection", htmlFor="model-dropdown"),
dcc.Dropdown(
id="model-dropdown",
options=[
{
"label": "Custom model",
"value": "Custom",
},
{
"label": "--- ERROR LOADING MODEL LISTS ---",
"value": "",
"disabled": True,
},
],
value=None,
style={
"max-width": "90vw",
"width": "35em",
"margin-bottom": "0.7em",
},
),
html.Div(
children=[
dcc.Input(
id="drive-id",
type="text",
placeholder="Drive ID for custom model",
style={"width": "22em"},
),
],
id="custom-model",
style={
"display": "none",
},
),
html.Label(
"Upload reference audio to " + UPLOAD_DIRECTORY,
htmlFor="reference-dropdown",
),
dcc.Store(id="current-f0s"),
dcc.Store(id="current-f0s-nosilence"),
dcc.Store(id="current-filename"),
dcc.Loading(
id="audio-loading",
children=[
html.Div(
[
html.Button(
"Update file list",
id="update-button",
style={
"margin-right": "10px",
},
),
dcc.Dropdown(
id="reference-dropdown",
options=[],
value=None,
style={
"max-width": "80vw",
"width": "30em",
},
disabled=False,
),
dcc.Store(id="pitch-clicks"),
html.Button(
"Debug pitch",
id="pitch-button",
style={
"margin-left": "10px",
},
disabled=False,
),
],
style={
"width": "100%",
"display": "flex",
"align-items": "center",
"justify-content": "center",
"flex-direction": "row",
"margin-left": "50px",
"vertical-align": "middle",
},
),
html.Audio(
id="pitch-out",
controls=True,
style={"display": "none"},
),
html.Div(
id="audio-loading-output",
style={
"font-style": "italic",
"margin-bottom": "0.7em",
"text-align": "center",
},
),
],
type="default",
),
html.Div(
[
dcc.Checklist(
id="pitch-options",
options=[
{"label": "Change input pitch", "value": "pf"},
{"label": "Auto-tune output", "value": "pc"},
{"label": "Disable reference audio", "value": "dra"},
],
value=[],
),
html.Div(
[
html.Label("Semitones", htmlFor="pitch-factor"),
dcc.Input(
id="pitch-factor",
type="number",
value="0",
style={"width": "7em"},
min=-11,
max=11,
step=1,
disabled=True,
),
],
style={
"flex-direction": "column",
"margin-left": "10px",
"margin-bottom": "0.7em",
},
),
],
style={
"width": "100%",
"display": "flex",
"align-items": "center",
"justify-content": "center",
"flex-direction": "row",
"margin-left": "50px",
"margin-bottom": "0.7em",
},
),
html.Label("Transcript", htmlFor="transcript-input"),
dcc.Textarea(
id="transcript-input",
value="",
style={
"max-width": "90vw",
"width": "50em",
"height": "8em",
"margin-bottom": "0.7em",
},
),
dcc.Loading(
html.Div(
[
html.Button(
"Generate",
id="gen-button",
),
html.Audio(
id="audio-out",
controls=True,
style={
"display": "none",
},
),
html.Div(
id="generated-info",
style={
"font-style": "italic",
},
),
],
style={
"width": "100%",
"display": "flex",
"align-items": "center",
"justify-content": "center",
"flex-direction": "column",
},
)
),
html.Footer(
children="""
Presented by the Minerman Groupie Association.
""",
style={"margin-top": "2em", "font-size": "0.7em"},
),
],
style={
"width": "100%",
"display": "flex",
"align-items": "center",
"justify-content": "center",
"flex-direction": "column",
"background-color": "#FFF",
},
)
# Populate the character dropdown once at page load (the static header is
# used as a dummy Input so the callback fires on startup).
@app.callback(
    dash.dependencies.Output("model-dropdown", "options"),
    dash.dependencies.Input("header", "children"),
)
def init_dropdown(value):
    """Build dropdown options from every JSON file in model_lists/.

    Each source gets a disabled separator row followed by its characters,
    with talking and singing models listed under separate separators.
    Duplicate drive IDs are skipped; duplicate character names get a
    "[source_file]" suffix to disambiguate.

    Args:
        value: contents of the header element; unused trigger input.

    Returns:
        List of dash dropdown option dicts; option values have the form
        "<drive_id>|default" or "<drive_id>|singing".
    """
    dropdown = [
        {
            "label": "Custom model",
            "value": "Custom|default",
        }
    ]
    prev_values = ["Custom|default"]
    # Append an entry unless an option with the same value already exists.
    def add_to_dropdown(entry):
        if entry["value"] in prev_values:
            return
        dropdown.append(entry)
        prev_values.append(entry["value"])
    # Merge all list files into {source name: [character dicts]}.
    all_dict = {}
    for filename in os.listdir("model_lists"):
        if len(filename) < 5 or filename[-5:].lower() != ".json":
            continue
        with open(os.path.join("model_lists", filename)) as f:
            j = json.load(f)
            for s in j:
                for c in s["characters"]:
                    # Remember which list file each character came from.
                    c["source_file"] = filename[:-5]
                if s["source"] not in all_dict:
                    all_dict[s["source"]] = s["characters"]
                else:
                    all_dict[s["source"]].extend(s["characters"])
    for k in sorted(all_dict):
        seen_chars = []
        seen_ids = []
        characters = {}
        characters_sing = {}
        has_singers = False
        for c in all_dict[k]:
            # Skip exact duplicates (same Drive ID listed twice).
            if c["drive_id"] in seen_ids:
                continue
            seen_ids.append(c["drive_id"])
            # Handle duplicate names
            if c["name"] in seen_chars:
                if c["name"] in characters:
                    # Retroactively rename the first holder of this name too.
                    rename = (
                        c["name"] + " [" + characters[c["name"]]["source_file"] + "]"
                    )
                    characters[rename] = characters[c["name"]]
                    del characters[c["name"]]
                c["name"] = c["name"] + " [" + c["source_file"] + "]"
            else:
                seen_chars.append(c["name"])
            characters[c["name"]] = {
                "drive_id": c["drive_id"],
                "is_singing": c["is_singing"],
                "source_file": c["source_file"],
            }
            if c["is_singing"]:
                has_singers = True
        if has_singers:
            # Move singing models into their own dict so they can be listed
            # under a separate separator.
            for ck in sorted(characters):
                if characters[ck]["is_singing"]:
                    characters_sing[ck] = characters[ck]
                    del characters[ck]
            separator = "--- " + k.strip().upper() + " MODELS (TALKING) ---"
        else:
            separator = "--- " + k.strip().upper() + " MODELS ---"
        if len(characters) > 0:
            add_to_dropdown(
                {
                    "label": separator,
                    # Random value keeps disabled separator rows unique.
                    "value": str(uuid.uuid4()) + "|default",
                    "disabled": True,
                }
            )
        for ck in sorted(characters):
            add_to_dropdown(
                {
                    "label": ck,
                    "value": characters[ck]["drive_id"] + "|default",
                }
            )
        if has_singers:
            separator = "--- " + k.strip().upper() + " MODELS (SINGING) ---"
            add_to_dropdown(
                {
                    "label": separator,
                    "value": str(uuid.uuid4()) + "|default",
                    "disabled": True,
                }
            )
            for ck in sorted(characters_sing):
                add_to_dropdown(
                    {
                        "label": ck,
                        "value": characters_sing[ck]["drive_id"] + "|singing",
                    }
                )
    if len(all_dict) == 0:
        add_to_dropdown(
            {
                "label": "--- NO MODEL LISTS FOUND ---",
                "value": str(uuid.uuid4()) + "|default",
                "disabled": True,
            }
        )
    return dropdown
def load_hifigan(model_name, conf_name):
    """Load a HiFi-GAN vocoder checkpoint together with its config and denoiser.

    Args:
        model_name: path to the generator checkpoint file.
        conf_name: basename (without .json) of a config in the hifi-gan dir.

    Returns:
        (generator on CUDA in eval mode, config AttrDict, Denoiser) triple.
    """
    config_path = os.path.join("hifi-gan", conf_name + ".json")
    with open(config_path) as cfg_file:
        h = AttrDict(json.loads(cfg_file.read()))
    # Seed before building the generator so weight init is reproducible.
    torch.manual_seed(h.seed)
    device = torch.device("cuda")
    hifigan = Generator(h).to(device)
    checkpoint = torch.load(model_name, map_location=device)
    hifigan.load_state_dict(checkpoint["generator"])
    hifigan.eval()
    hifigan.remove_weight_norm()
    return hifigan, h, Denoiser(hifigan, mode="normal")
def generate_json(input, outpath):
    """Write a one-line NeMo-style manifest for a "path|transcript" pair.

    Duration is estimated from file size assuming 22050 Hz 16-bit samples
    (2 bytes per sample).

    Args:
        input: string of the form "<audio path>|<transcript>".
        outpath: manifest file to (over)write.
    """
    sample_rate = 22050
    parts = input.split("|")
    lpath = parts[0].strip()
    byte_size = os.stat(lpath).st_size
    record = {
        "audio_filepath": lpath,
        "duration": byte_size / (sample_rate * 2),
        "text": parts[1].strip(),
    }
    with open(outpath, "w", encoding="utf8") as w:
        w.write(json.dumps(record) + "\n")
# Pre-trained TalkNet aligner (CTC ASR model) used to extract per-token
# durations from the reference audio; kept on CPU in eval mode.
asr_model = (
    EncDecCTCModel.from_pretrained(model_name="asr_talknet_aligner").cpu().eval()
)
def forward_extractor(tokens, log_probs, blank):
    """Forward pass of the monotonic alignment dynamic program.

    Args:
        tokens: blank-interleaved token id sequence (length n).
        log_probs: (m, num_classes) array of per-frame log probabilities.
        blank: id of the CTC blank token.

    Returns:
        (f, p): `f[s, t]` is the max sum of log probs for the `s` first codes
        over the `t` first timesteps ending in `tokens[s]`; `p[s, t]` is the
        backpointer (how many states to step back from s at frame t), consumed
        by `backward_extractor`.
    """
    n, m = len(tokens), log_probs.shape[0]
    # `f[s, t]` -- max sum of log probs for `s` first codes
    # with `t` first timesteps with ending in `tokens[s]`.
    f = np.empty((n + 1, m + 1), dtype=float)
    f.fill(-(10 ** 9))  # acts as -inf for unreachable states
    p = np.empty((n + 1, m + 1), dtype=int)
    f[0, 0] = 0.0  # Start
    for s in range(1, n + 1):
        c = tokens[s - 1]
        # Frames earlier than (s + 1) // 2 cannot reach state s, so skip them.
        for t in range((s + 1) // 2, m + 1):
            f[s, t] = log_probs[t - 1, c]
            # Option #1: prev char is equal to current one.
            # Then the path may stay (s), or come from s-1 — but not skip
            # the intervening blank (slice covers states s down to s-1).
            if s == 1 or c == blank or c == tokens[s - 3]:
                options = f[s : (s - 2 if s > 1 else None) : -1, t - 1]
            else:  # Is not equal to current one: skipping the blank is legal,
                # so the slice also includes state s-2.
                options = f[s : (s - 3 if s > 2 else None) : -1, t - 1]
            f[s, t] += np.max(options)
            p[s, t] = np.argmax(options)
    return f, p
def backward_extractor(f, p):
    """Backtrack through the alignment lattice to recover token durations.

    Starting from whichever of the last two states scores higher in `f`,
    follow the backpointers in `p` backwards through time, counting how
    many frames each interleaved token occupied.

    Args:
        f: (n + 1, m + 1) score matrix from forward_extractor.
        p: (n + 1, m + 1) backpointer matrix from forward_extractor.

    Returns:
        Integer array of n durations summing to m frames.
    """
    num_states, num_frames = f.shape[0] - 1, f.shape[1] - 1
    durs = np.zeros(num_states, dtype=int)
    # The best path may end on the final state or the one just before it.
    state = num_states if f[-1, -1] >= f[-2, -1] else num_states - 1
    frame = num_frames
    while state > 0:
        durs[state - 1] += 1
        state -= p[state, frame]
        frame -= 1
    assert durs.shape[0] == num_states
    assert np.sum(durs) == num_frames
    # Every non-blank (odd-indexed) state must have consumed a frame.
    assert np.all(durs[1::2] > 0)
    return durs
def preprocess_tokens(tokens, blank):
    """Interleave a blank symbol around every token (CTC-style expansion).

    [t1, t2] -> [blank, t1, blank, t2, blank]
    """
    interleaved = [blank]
    for tok in tokens:
        interleaved.extend([tok, blank])
    return interleaved
def get_duration(wav_name, transcript):
    """Align a converted reference clip with its transcript via the aligner.

    Writes a one-entry manifest, runs the CTC aligner on it, and extracts
    per-token durations with the forward/backward DP.

    Args:
        wav_name: original reference filename; the converted audio is read
            from <UPLOAD_DIRECTORY>/output/<wav_name>_conv.wav.
        transcript: text to align. If it contains "_" the manifest gets a
            "dummy" text instead (see the disabled ARPAbet branch below).

    Returns:
        (durations, space-separated label string, token id array) for the
        first sample, or (None, None, None) if the data loader is empty.
    """
    if not os.path.exists(os.path.join(UPLOAD_DIRECTORY, "output")):
        os.mkdir(os.path.join(UPLOAD_DIRECTORY, "output"))
    if "_" not in transcript:
        generate_json(
            os.path.join(UPLOAD_DIRECTORY, "output", wav_name + "_conv.wav")
            + "|"
            + transcript.strip(),
            os.path.join(UPLOAD_DIRECTORY, "output", wav_name + ".json"),
        )
    else:
        # Placeholder text; the token ids still come from the dataset below.
        generate_json(
            os.path.join(UPLOAD_DIRECTORY, "output", wav_name + "_conv.wav")
            + "|"
            + "dummy",
            os.path.join(UPLOAD_DIRECTORY, "output", wav_name + ".json"),
        )
    data_config = {
        "manifest_filepath": os.path.join(
            UPLOAD_DIRECTORY, "output", wav_name + ".json"
        ),
        "sample_rate": 22050,
        "batch_size": 1,
    }
    # Phoneme vocabulary with blank appended last, matching blank_id below.
    parser = (
        nemo.collections.asr.data.audio_to_text.AudioToCharWithDursF0Dataset.make_vocab(
            notation="phonemes",
            punct=True,
            spaces=True,
            stresses=False,
            add_blank_at="last",
        )
    )
    dataset = nemo.collections.asr.data.audio_to_text._AudioTextDataset(
        manifest_filepath=data_config["manifest_filepath"],
        sample_rate=data_config["sample_rate"],
        parser=parser,
    )
    dl = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=data_config["batch_size"],
        collate_fn=dataset.collate_fn,
        shuffle=False,
    )
    blank_id = asr_model.decoder.num_classes_with_blank - 1
    for sample_idx, test_sample in tqdm(enumerate(dl), total=len(dl)):
        log_probs, _, greedy_predictions = asr_model(
            input_signal=test_sample[0], input_signal_length=test_sample[1]
        )
        log_probs = log_probs[0].cpu().detach().numpy()
        if "_" not in transcript:
            seq_ids = test_sample[2][0].cpu().detach().numpy()
        else:
            pass
            # NOTE(review): the string literal below is disabled code for
            # parsing an ARPAbet transcript directly; as written, both
            # branches take the same token ids from the batch.
            """arpa_input = (
                transcript.replace("0", "")
                .replace("1", "")
                .replace("2", "")
                .replace("_", " _ ")
                .strip()
                .split(" ")
            )
            seq_ids = []
            for x in arpa_input:
                if x == "":
                    continue
                if x.replace("_", " ") not in parser.labels:
                    continue
                seq_ids.append(parser.labels.index(x.replace("_", " ")))"""
            seq_ids = test_sample[2][0].cpu().detach().numpy()
        # Monotonic alignment over the blank-interleaved token sequence.
        target_tokens = preprocess_tokens(seq_ids, blank_id)
        f, p = forward_extractor(target_tokens, log_probs, blank_id)
        durs = backward_extractor(f, p)
        arpa = ""
        for s in seq_ids:
            if parser.labels[s] == " ":
                arpa += "_ "
            else:
                arpa += parser.labels[s] + " "
        del test_sample
        # The manifest holds a single entry, so return after the first sample.
        return durs, arpa.strip(), seq_ids
    return None, None, None
def crepe_f0(wav_path, hop_length=256):
    """Extract a frame-level f0 contour from a wav file using CREPE.

    Args:
        wav_path: path to the audio file (read with scipy.io.wavfile).
        hop_length: samples per output frame.

    Returns:
        Tuple of float32 tensors:
        - f0 resampled to one value per hop, with low-confidence or
          near-silent frames zeroed;
        - the raw CREPE frequency track at CREPE's native frame rate.
    """
    # sr, audio = wavfile.read(io.BytesIO(wav_data))
    sr, audio = wavfile.read(wav_path)
    # Time axis in seconds; assumes 22050 Hz input — TODO confirm against
    # the ffmpeg conversion done before this is called.
    audio_x = np.arange(0, len(audio)) / 22050.0
    f0time, frequency, confidence, activation = crepe.predict(audio, sr, viterbi=True)
    x = np.arange(0, len(audio), hop_length) / 22050.0
    freq_interp = np.interp(x, f0time, frequency)
    conf_interp = np.interp(x, f0time, confidence)
    # Amplitude envelope normalized assuming 16-bit samples (/ 32768).
    audio_interp = np.interp(x, audio_x, np.absolute(audio)) / 32768.0
    weights = [0.5, 0.25, 0.25]
    audio_smooth = np.convolve(audio_interp, np.array(weights)[::-1], "same")
    conf_threshold = 0.25
    audio_threshold = 0.0005
    # Zero out frames CREPE is unsure about or that are near-silent.
    for i in range(len(freq_interp)):
        if conf_interp[i] < conf_threshold:
            freq_interp[i] = 0.0
        if audio_smooth[i] < audio_threshold:
            freq_interp[i] = 0.0
    # Hack to make f0 and mel lengths equal
    if len(audio) % hop_length == 0:
        freq_interp = np.pad(freq_interp, pad_width=[0, 1])
    return (
        torch.from_numpy(freq_interp.astype(np.float32)),
        torch.from_numpy(frequency.astype(np.float32)),
    )
def f0_to_audio(f0s):
    """Sonify an f0 contour as a pitch-shifted tone for debugging.

    Synthesizes a 440 Hz sine (one 256-sample hop per f0 frame), pitch-shifts
    it to follow the contour with PSOLA, and mutes unvoiced frames.

    Args:
        f0s: per-frame fundamental frequencies in Hz; 0.0 marks unvoiced.

    Returns:
        A "data:audio/x-wav;base64," URI playable by an HTML audio element.
    """
    volume = 0.2
    sr = 22050
    freq = 440.0
    base_audio = (
        np.sin(2 * np.pi * np.arange(256.0 * len(f0s)) * freq / sr) * volume
    ).astype(np.float32)
    shifted_audio = psola.vocode(base_audio, sr, target_pitch=f0s)
    # Mute the hops where the contour is unvoiced.
    for i in range(len(f0s)):
        if f0s[i] == 0.0:
            shifted_audio[i * 256 : (i + 1) * 256] = 0.0
    # (Removed a leftover debug print of the sample dtype here.)
    buffer = io.BytesIO()
    wavfile.write(buffer, sr, shifted_audio.astype(np.float32))
    b64 = base64.b64encode(buffer.getvalue())
    sound = "data:audio/x-wav;base64," + b64.decode("ascii")
    return sound
@app.callback(
    dash.dependencies.Output("custom-model", "style"),
    dash.dependencies.Input("model-dropdown", "value"),
)
def update_model(model):
    """Show the custom-model input only when "Custom" is selected."""
    if model is None or model.split("|")[0] != "Custom":
        return {"display": "none"}
    return {"margin-bottom": "0.7em", "display": "block"}
@app.callback(
    [
        dash.dependencies.Output("pitch-factor", "disabled"),
        dash.dependencies.Output("reference-dropdown", "disabled"),
        dash.dependencies.Output("pitch-button", "disabled"),
    ],
    [
        dash.dependencies.Input("pitch-options", "value"),
    ],
)
def update_pitch_options(value):
    """Enable or disable pitch controls based on the checked options.

    The semitone input is active only with "Change input pitch"; the
    reference dropdown and pitch debug button are disabled when
    "Disable reference audio" is checked.
    """
    pitch_shift_off = "pf" not in value
    reference_off = "dra" in value
    return [pitch_shift_off, reference_off, reference_off]
# Inline CSS applied to audio players when there is something to play back.
playback_style = {
    "margin-top": "0.3em",
    "margin-bottom": "0.3em",
    "display": "block",
    "width": "600px",
    "max-width": "90vw",
}
# Inline CSS that hides an audio player entirely.
playback_hide = {
    "display": "none",
}
@app.callback(
    dash.dependencies.Output("reference-dropdown", "options"),
    [
        dash.dependencies.Input("update-button", "n_clicks"),
    ],
)
def update_filelist(n_clicks):
    """List supported audio files in the upload directory as dropdown options."""
    # "flac" has no leading dot on purpose: only the last four characters of
    # the filename are compared.
    supported_formats = [".wav", ".ogg", ".mp3", "flac", ".aac"]
    return [
        {"label": name, "value": name}
        for name in sorted(os.listdir(UPLOAD_DIRECTORY))
        if name[-4:].lower() in supported_formats
    ]
@app.callback(
    [
        dash.dependencies.Output("audio-loading-output", "children"),
        dash.dependencies.Output("current-f0s", "data"),
        dash.dependencies.Output("current-f0s-nosilence", "data"),
        dash.dependencies.Output("current-filename", "data"),
    ],
    [
        dash.dependencies.Input("reference-dropdown", "value"),
    ],
)
def select_file(dropdown_value):
    """Convert the chosen reference audio and analyze its pitch with CREPE.

    Returns a 4-item list matching the declared outputs:
    [status message, f0 contour (with silence), f0 contour (silence removed),
    selected filename].
    """
    if dropdown_value is not None:
        out_dir = os.path.join(UPLOAD_DIRECTORY, "output")
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        conv_path = os.path.join(out_dir, dropdown_value + "_conv.wav")
        # Normalize to 22050 Hz / mono / 16-bit PCM for the aligner and CREPE.
        ffmpeg.input(os.path.join(UPLOAD_DIRECTORY, dropdown_value)).output(
            conv_path,
            ar="22050",
            ac="1",
            acodec="pcm_s16le",
            map_metadata="-1",
            fflags="+bitexact",
        ).overwrite_output().run(quiet=True)
        fo_with_silence, f0_wo_silence = crepe_f0(conv_path)
        return [
            "Analyzed " + dropdown_value,
            fo_with_silence,
            f0_wo_silence,
            dropdown_value,
        ]
    else:
        # Bug fix: the callback declares four outputs, but this branch used
        # to return only three values, which Dash rejects.
        return ["No audio analyzed", None, None, None]
@app.callback(
    [
        dash.dependencies.Output("pitch-out", "src"),
        dash.dependencies.Output("pitch-out", "style"),
        dash.dependencies.Output("pitch-clicks", "data"),
    ],
    [
        dash.dependencies.Input("pitch-button", "n_clicks"),
        dash.dependencies.Input("pitch-clicks", "data"),
        dash.dependencies.Input("current-f0s", "data"),
    ],
)
def debug_pitch(n_clicks, pitch_clicks, current_f0s):
    """Render the analyzed f0 contour as audio when the debug button is pressed.

    The pitch-clicks store remembers the last handled click count so that
    callback re-fires without a new button press produce no audio.
    """
    has_new_click = (
        bool(n_clicks) and current_f0s is not None and n_clicks > pitch_clicks
    )
    if not has_new_click:
        # Keep the click counter in sync without rendering anything.
        synced_clicks = n_clicks if n_clicks is not None else 0
        return [
            None,
            playback_hide,
            synced_clicks,
        ]
    return [f0_to_audio(current_f0s), playback_style, n_clicks]
def download_model(model, custom_model):
    """Download a character model zip from Google Drive and unpack it.

    Also lazily downloads the shared super-resolution HiFi-GAN and loads it
    into the module-level globals hifigan_sr / h2 / denoiser_sr.

    Args:
        model: Drive ID from the dropdown, or "Custom" to use custom_model.
        custom_model: user-supplied Drive ID (used when model == "Custom").

    Returns:
        (error message or None, path to TalkNetSpect.nemo or None,
        path to the hifiganmodel checkpoint or None).
    """
    global hifigan_sr, h2, denoiser_sr
    d = "https://drive.google.com/uc?id="
    if model == "Custom":
        drive_id = custom_model
    else:
        drive_id = model
    if not os.path.exists(os.path.join(UPLOAD_DIRECTORY, "models")):
        os.mkdir(os.path.join(UPLOAD_DIRECTORY, "models"))
    if not os.path.exists(os.path.join(UPLOAD_DIRECTORY, "models", drive_id)):
        os.mkdir(os.path.join(UPLOAD_DIRECTORY, "models", drive_id))
    # NOTE(review): the zip is re-downloaded on every call, even when the
    # model directory already exists from a previous download.
    zip_path = os.path.join(UPLOAD_DIRECTORY, "models", drive_id, "model.zip")
    gdown.download(
        d + drive_id,
        zip_path,
        quiet=False,
    )
    if not os.path.exists(zip_path):
        os.rmdir(os.path.join(UPLOAD_DIRECTORY, "models", drive_id))
        return ("Model download failed", None, None)
    # A tiny file means Drive returned an error page instead of the archive.
    if os.stat(zip_path).st_size < 16:
        os.remove(zip_path)
        os.rmdir(os.path.join(UPLOAD_DIRECTORY, "models", drive_id))
        return ("Model zip is empty", None, None)
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(os.path.join(UPLOAD_DIRECTORY, "models", drive_id))
    os.remove(zip_path)
    # Download super-resolution HiFi-GAN
    sr_path = "hifi-gan/hifisr"
    if not os.path.exists(sr_path):
        gdown.download(d + "14fOprFAIlCQkVRxsfInhEPG0n-xN4QOa", sr_path, quiet=False)
    if not os.path.exists(sr_path):
        raise Exception("HiFI-GAN model failed to download!")
    hifigan_sr, h2, denoiser_sr = load_hifigan(sr_path, "config_32k")
    return (
        None,
        os.path.join(UPLOAD_DIRECTORY, "models", drive_id, "TalkNetSpect.nemo"),
        os.path.join(UPLOAD_DIRECTORY, "models", drive_id, "hifiganmodel"),
    )
# Module-level cache: loaded TalkNet model, its checkpoint path, and the
# optional duration/pitch prediction sub-models.
tnmodel, tnpath, tndurs, tnpitch = None, None, None, None
# Module-level cache: loaded HiFi-GAN vocoder, its config, denoiser, and path.
hifigan, h, denoiser, hifipath = None, None, None, None
@app.callback(
    [
        dash.dependencies.Output("audio-out", "src"),
        dash.dependencies.Output("generated-info", "children"),
        dash.dependencies.Output("audio-out", "style"),
        dash.dependencies.Output("audio-out", "title"),
    ],
    [dash.dependencies.Input("gen-button", "n_clicks")],
    [
        dash.dependencies.State("model-dropdown", "value"),
        dash.dependencies.State("drive-id", "value"),
        dash.dependencies.State("transcript-input", "value"),
        dash.dependencies.State("pitch-options", "value"),
        dash.dependencies.State("pitch-factor", "value"),
        dash.dependencies.State("current-filename", "data"),
        dash.dependencies.State("current-f0s", "data"),
        dash.dependencies.State("current-f0s-nosilence", "data"),
    ],
)
def generate_audio(
    n_clicks,
    model,
    custom_model,
    transcript,
    pitch_options,
    pitch_factor,
    wav_name,
    f0s,
    f0s_wo_silence,
):
    """Main synthesis callback: transcript (+ optional reference) -> audio.

    Pipeline: download/load TalkNet + HiFi-GAN for the chosen character,
    generate (or force-align) a spectrogram, vocode, optionally auto-tune,
    then resample to 32 kHz and mix in a HiFi-GAN super-resolution band.

    Returns a 4-item list for the declared outputs:
    [audio data URI, info/error text, player style, download title].

    Bug fix vs. the original: `tnpath` and `hifipath` are now assigned after
    loading, so the model caches actually hit on repeated generations.
    """
    global tnmodel, tnpath, tndurs, tnpitch, hifigan, h, denoiser, hifipath
    if n_clicks is None:
        raise PreventUpdate
    if model is None:
        return [None, "No character selected", playback_hide, None]
    if transcript is None or transcript.strip() == "":
        return [
            None,
            "No transcript entered",
            playback_hide,
            None,
        ]
    if wav_name is None and "dra" not in pitch_options:
        return [
            None,
            "No reference audio selected",
            playback_hide,
            None,
        ]
    load_error, talknet_path, hifigan_path = download_model(
        model.split("|")[0], custom_model
    )
    if load_error is not None:
        return [
            None,
            load_error,
            playback_hide,
            None,
        ]
    try:
        with torch.no_grad():
            if tnpath != talknet_path:
                # Prefer the singing variant when the archive provides one.
                singer_path = os.path.join(
                    os.path.dirname(talknet_path), "TalkNetSinger.nemo"
                )
                if os.path.exists(singer_path):
                    tnmodel = TalkNetSingerModel.restore_from(singer_path)
                else:
                    tnmodel = TalkNetSpectModel.restore_from(talknet_path)
                durs_path = os.path.join(
                    os.path.dirname(talknet_path), "TalkNetDurs.nemo"
                )
                pitch_path = os.path.join(
                    os.path.dirname(talknet_path), "TalkNetPitch.nemo"
                )
                if os.path.exists(durs_path):
                    tndurs = TalkNetDursModel.restore_from(durs_path)
                    tnmodel.add_module("_durs_model", tndurs)
                    tnpitch = TalkNetPitchModel.restore_from(pitch_path)
                    tnmodel.add_module("_pitch_model", tnpitch)
                else:
                    tndurs = None
                    tnpitch = None
                tnmodel.eval()
                # Remember which checkpoint is loaded so the next call with
                # the same character skips the reload (was missing).
                tnpath = talknet_path
            tokens = tnmodel.parse(text=transcript.strip())
            arpa = ""
            if "dra" in pitch_options:
                # No reference audio: the model must predict durs and pitch.
                if tndurs is None or tnpitch is None:
                    return [
                        None,
                        "Model doesn't support pitch prediction",
                        playback_hide,
                        None,
                    ]
                spect = tnmodel.generate_spectrogram(tokens=tokens)
            else:
                durs, arpa, t = get_duration(wav_name, transcript)
                # Change pitch
                if "pf" in pitch_options:
                    # One semitone is a factor of 2^(1/12) = e^0.0577623.
                    f0_factor = np.power(np.e, (0.0577623 * float(pitch_factor)))
                    f0s = [x * f0_factor for x in f0s]
                    f0s_wo_silence = [x * f0_factor for x in f0s_wo_silence]
                spect = tnmodel.force_spectrogram(
                    tokens=tokens,
                    durs=torch.from_numpy(durs).view(1, -1).to("cuda:0"),
                    f0=torch.FloatTensor(f0s).view(1, -1).to("cuda:0"),
                )
            if hifipath != hifigan_path:
                hifigan, h, denoiser = load_hifigan(hifigan_path, "config_v1")
                # Cache key for the vocoder as well (was missing).
                hifipath = hifigan_path
            y_g_hat = hifigan(spect.float())
            audio = y_g_hat.squeeze()
            audio = audio * MAX_WAV_VALUE
            audio_denoised = denoiser(audio.view(1, -1), strength=35)[:, 0]
            audio_np = (
                audio_denoised.detach().cpu().numpy().reshape(-1).astype(np.int16)
            )
            # Auto-tuning
            if "pc" in pitch_options and "dra" not in pitch_options:
                _, output_freq, _, _ = crepe.predict(audio_np, 22050, viterbi=True)
                output_pitch = torch.from_numpy(output_freq.astype(np.float32))
                target_pitch = torch.FloatTensor(f0s_wo_silence)
                # Snap the correction to the nearest whole octave so PSOLA
                # only fixes intonation, not register.
                factor = torch.mean(output_pitch) / torch.mean(target_pitch)
                octaves = [0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0]
                nearest_octave = min(octaves, key=lambda x: abs(x - factor))
                target_pitch *= nearest_octave
                if len(target_pitch) < len(output_pitch):
                    target_pitch = torch.nn.functional.pad(
                        target_pitch,
                        (0, list(output_pitch.shape)[0] - list(target_pitch.shape)[0]),
                        "constant",
                        0,
                    )
                if len(target_pitch) > len(output_pitch):
                    target_pitch = target_pitch[0 : list(output_pitch.shape)[0]]
                audio_np = psola.vocode(
                    audio_np, 22050, target_pitch=target_pitch
                ).astype(np.float32)
                normalize = (1.0 / np.max(np.abs(audio_np))) ** 0.9
                audio_np = audio_np * normalize * MAX_WAV_VALUE
                audio_np = audio_np.astype(np.int16)
            # Resample to 32k
            wave = resampy.resample(
                audio_np,
                h.sampling_rate,
                h2.sampling_rate,
                filter="sinc_window",
                window=scipy.signal.windows.hann,
                num_zeros=8,
            )
            wave_out = wave.astype(np.int16)
            # HiFi-GAN super-resolution
            wave = wave / MAX_WAV_VALUE
            wave = torch.FloatTensor(wave).to(torch.device("cuda"))
            new_mel = mel_spectrogram(
                wave.unsqueeze(0),
                h2.n_fft,
                h2.num_mels,
                h2.sampling_rate,
                h2.hop_size,
                h2.win_size,
                h2.fmin,
                h2.fmax,
            )
            y_g_hat2 = hifigan_sr(new_mel)
            audio2 = y_g_hat2.squeeze()
            audio2 = audio2 * MAX_WAV_VALUE
            # NOTE(review): this reuses the 22 kHz model's denoiser on the
            # 32 kHz output; download_model also prepares denoiser_sr, which
            # looks intended here — confirm before changing.
            audio2_denoised = denoiser(audio2.view(1, -1), strength=35)[:, 0]
            # High-pass filter, mixing and denormalizing
            audio2_denoised = audio2_denoised.detach().cpu().numpy().reshape(-1)
            b = scipy.signal.firwin(
                101, cutoff=10500, fs=h2.sampling_rate, pass_zero=False
            )
            y = scipy.signal.lfilter(b, [1.0], audio2_denoised)
            y *= 4.0  # superres strength
            y_out = y.astype(np.int16)
            y_padded = np.zeros(wave_out.shape)
            y_padded[: y_out.shape[0]] = y_out
            sr_mix = wave_out + y_padded
            buffer = io.BytesIO()
            wavfile.write(buffer, 32000, sr_mix.astype(np.int16))
            b64 = base64.b64encode(buffer.getvalue())
            sound = "data:audio/x-wav;base64," + b64.decode("ascii")
            output_name = "TalkNet_" + str(int(time.time()))
            return [sound, arpa, playback_style, output_name]
    except Exception:
        # Surface the traceback in the UI rather than failing silently.
        return [
            None,
            str(traceback.format_exc()),
            playback_hide,
            None,
        ]
# Launch the Dash dev server. JupyterDash "external" mode serves the app on
# its own port instead of embedding it in a notebook cell.
if __name__ == "__main__":
    app.run_server(
        mode="external",
        debug=True,
        dev_tools_ui=True,
        dev_tools_hot_reload=True,
        threaded=True,
    )
| [
"crepe.predict",
"os.remove",
"dash_core_components.Textarea",
"numpy.sum",
"numpy.absolute",
"numpy.abs",
"numpy.argmax",
"numpy.empty",
"json.dumps",
"scipy.io.wavfile.read",
"scipy.signal.firwin",
"nemo.collections.tts.models.TalkNetDursModel.restore_from",
"nemo.collections.tts.models.Ta... | [((726, 753), 'sys.path.append', 'sys.path.append', (['"""hifi-gan"""'], {}), "('hifi-gan')\n", (741, 753), False, 'import sys\n'), ((899, 920), 'jupyter_dash.JupyterDash', 'JupyterDash', (['__name__'], {}), '(__name__)\n', (910, 920), False, 'from jupyter_dash import JupyterDash\n'), ((951, 980), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (973, 980), False, 'import torch\n'), ((8537, 8562), 'os.listdir', 'os.listdir', (['"""model_lists"""'], {}), "('model_lists')\n", (8547, 8562), False, 'import os\n'), ((8040, 8093), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""model-dropdown"""', '"""options"""'], {}), "('model-dropdown', 'options')\n", (8064, 8093), False, 'import dash\n'), ((8099, 8144), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""header"""', '"""children"""'], {}), "('header', 'children')\n", (8122, 8144), False, 'import dash\n'), ((11893, 11938), 'os.path.join', 'os.path.join', (['"""hifi-gan"""', "(conf_name + '.json')"], {}), "('hifi-gan', conf_name + '.json')\n", (11905, 11938), False, 'import os\n'), ((12016, 12037), 'env.AttrDict', 'AttrDict', (['json_config'], {}), '(json_config)\n', (12024, 12037), False, 'from env import AttrDict\n'), ((12042, 12067), 'torch.manual_seed', 'torch.manual_seed', (['h.seed'], {}), '(h.seed)\n', (12059, 12067), False, 'import torch\n'), ((12319, 12351), 'denoiser.Denoiser', 'Denoiser', (['hifigan'], {'mode': '"""normal"""'}), "(hifigan, mode='normal')\n", (12327, 12351), False, 'from denoiser import Denoiser\n'), ((13141, 13178), 'numpy.empty', 'np.empty', (['(n + 1, m + 1)'], {'dtype': 'float'}), '((n + 1, m + 1), dtype=float)\n', (13149, 13178), True, 'import numpy as np\n'), ((13210, 13245), 'numpy.empty', 'np.empty', (['(n + 1, m + 1)'], {'dtype': 'int'}), '((n + 1, m + 1), dtype=int)\n', (13218, 13245), True, 'import numpy as np\n'), ((13947, 13969), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, 
dtype=int)\n', (13955, 13969), True, 'import numpy as np\n'), ((14203, 14225), 'numpy.all', 'np.all', (['(durs[1::2] > 0)'], {}), '(durs[1::2] > 0)\n', (14209, 14225), True, 'import numpy as np\n'), ((15298, 15469), 'nemo.collections.asr.data.audio_to_text.AudioToCharWithDursF0Dataset.make_vocab', 'nemo.collections.asr.data.audio_to_text.AudioToCharWithDursF0Dataset.make_vocab', ([], {'notation': '"""phonemes"""', 'punct': '(True)', 'spaces': '(True)', 'stresses': '(False)', 'add_blank_at': '"""last"""'}), "(\n notation='phonemes', punct=True, spaces=True, stresses=False,\n add_blank_at='last')\n", (15377, 15469), False, 'import nemo\n'), ((15553, 15727), 'nemo.collections.asr.data.audio_to_text._AudioTextDataset', 'nemo.collections.asr.data.audio_to_text._AudioTextDataset', ([], {'manifest_filepath': "data_config['manifest_filepath']", 'sample_rate': "data_config['sample_rate']", 'parser': 'parser'}), "(manifest_filepath\n =data_config['manifest_filepath'], sample_rate=data_config[\n 'sample_rate'], parser=parser)\n", (15610, 15727), False, 'import nemo\n'), ((15759, 15892), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "data_config['batch_size']", 'collate_fn': 'dataset.collate_fn', 'shuffle': '(False)'}), "(dataset=dataset, batch_size=data_config[\n 'batch_size'], collate_fn=dataset.collate_fn, shuffle=False)\n", (15786, 15892), False, 'import torch\n'), ((17530, 17552), 'scipy.io.wavfile.read', 'wavfile.read', (['wav_path'], {}), '(wav_path)\n', (17542, 17552), False, 'from scipy.io import wavfile\n'), ((17650, 17688), 'crepe.predict', 'crepe.predict', (['audio', 'sr'], {'viterbi': '(True)'}), '(audio, sr, viterbi=True)\n', (17663, 17688), False, 'import crepe\n'), ((17763, 17794), 'numpy.interp', 'np.interp', (['x', 'f0time', 'frequency'], {}), '(x, f0time, frequency)\n', (17772, 17794), True, 'import numpy as np\n'), ((17813, 17845), 'numpy.interp', 'np.interp', (['x', 'f0time', 'confidence'], {}), 
'(x, f0time, confidence)\n', (17822, 17845), True, 'import numpy as np\n'), ((18766, 18812), 'psola.vocode', 'psola.vocode', (['base_audio', 'sr'], {'target_pitch': 'f0s'}), '(base_audio, sr, target_pitch=f0s)\n', (18778, 18812), False, 'import psola\n'), ((18973, 18985), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (18983, 18985), False, 'import io\n'), ((19195, 19244), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""custom-model"""', '"""style"""'], {}), "('custom-model', 'style')\n", (19219, 19244), False, 'import dash\n'), ((19250, 19300), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""model-dropdown"""', '"""value"""'], {}), "('model-dropdown', 'value')\n", (19273, 19300), False, 'import dash\n'), ((20125, 20182), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""reference-dropdown"""', '"""options"""'], {}), "('reference-dropdown', 'options')\n", (20149, 20182), False, 'import dash\n'), ((12098, 12118), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12110, 12118), False, 'import torch\n'), ((12512, 12526), 'os.stat', 'os.stat', (['lpath'], {}), '(lpath)\n', (12519, 12526), False, 'import os\n'), ((12689, 12702), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (12699, 12702), False, 'import json\n'), ((14174, 14186), 'numpy.sum', 'np.sum', (['durs'], {}), '(durs)\n', (14180, 14186), True, 'import numpy as np\n'), ((15129, 15189), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(wav_name + '.json')"], {}), "(UPLOAD_DIRECTORY, 'output', wav_name + '.json')\n", (15141, 15189), False, 'import os\n'), ((18381, 18418), 'numpy.pad', 'np.pad', (['freq_interp'], {'pad_width': '[0, 1]'}), '(freq_interp, pad_width=[0, 1])\n', (18387, 18418), True, 'import numpy as np\n'), ((19548, 19600), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""pitch-factor"""', '"""disabled"""'], {}), "('pitch-factor', 'disabled')\n", (19572, 19600), False, 'import dash\n'), ((19610, 19668), 
'dash.dependencies.Output', 'dash.dependencies.Output', (['"""reference-dropdown"""', '"""disabled"""'], {}), "('reference-dropdown', 'disabled')\n", (19634, 19668), False, 'import dash\n'), ((19678, 19730), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""pitch-button"""', '"""disabled"""'], {}), "('pitch-button', 'disabled')\n", (19702, 19730), False, 'import dash\n'), ((19753, 19802), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""pitch-options"""', '"""value"""'], {}), "('pitch-options', 'value')\n", (19776, 19802), False, 'import dash\n'), ((20395, 20423), 'os.listdir', 'os.listdir', (['UPLOAD_DIRECTORY'], {}), '(UPLOAD_DIRECTORY)\n', (20405, 20423), False, 'import os\n'), ((20198, 20250), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""update-button"""', '"""n_clicks"""'], {}), "('update-button', 'n_clicks')\n", (20221, 20250), False, 'import dash\n'), ((20579, 20639), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""audio-loading-output"""', '"""children"""'], {}), "('audio-loading-output', 'children')\n", (20603, 20639), False, 'import dash\n'), ((20649, 20696), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""current-f0s"""', '"""data"""'], {}), "('current-f0s', 'data')\n", (20673, 20696), False, 'import dash\n'), ((20706, 20763), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""current-f0s-nosilence"""', '"""data"""'], {}), "('current-f0s-nosilence', 'data')\n", (20730, 20763), False, 'import dash\n'), ((20773, 20825), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""current-filename"""', '"""data"""'], {}), "('current-filename', 'data')\n", (20797, 20825), False, 'import dash\n'), ((20848, 20902), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""reference-dropdown"""', '"""value"""'], {}), "('reference-dropdown', 'value')\n", (20871, 20902), False, 'import dash\n'), ((21849, 21893), 'dash.dependencies.Output', 'dash.dependencies.Output', 
(['"""pitch-out"""', '"""src"""'], {}), "('pitch-out', 'src')\n", (21873, 21893), False, 'import dash\n'), ((21903, 21949), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""pitch-out"""', '"""style"""'], {}), "('pitch-out', 'style')\n", (21927, 21949), False, 'import dash\n'), ((21959, 22007), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""pitch-clicks"""', '"""data"""'], {}), "('pitch-clicks', 'data')\n", (21983, 22007), False, 'import dash\n'), ((22030, 22081), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""pitch-button"""', '"""n_clicks"""'], {}), "('pitch-button', 'n_clicks')\n", (22053, 22081), False, 'import dash\n'), ((22091, 22138), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""pitch-clicks"""', '"""data"""'], {}), "('pitch-clicks', 'data')\n", (22114, 22138), False, 'import dash\n'), ((22148, 22194), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""current-f0s"""', '"""data"""'], {}), "('current-f0s', 'data')\n", (22171, 22194), False, 'import dash\n'), ((23149, 23212), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id', '"""model.zip"""'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id, 'model.zip')\n", (23161, 23212), False, 'import os\n'), ((23221, 23272), 'gdown.download', 'gdown.download', (['(d + drive_id)', 'zip_path'], {'quiet': '(False)'}), '(d + drive_id, zip_path, quiet=False)\n', (23235, 23272), False, 'import gdown\n'), ((23840, 23859), 'os.remove', 'os.remove', (['zip_path'], {}), '(zip_path)\n', (23849, 23859), False, 'import os\n'), ((23945, 23968), 'os.path.exists', 'os.path.exists', (['sr_path'], {}), '(sr_path)\n', (23959, 23968), False, 'import os\n'), ((23978, 24055), 'gdown.download', 'gdown.download', (["(d + '14fOprFAIlCQkVRxsfInhEPG0n-xN4QOa')", 'sr_path'], {'quiet': '(False)'}), "(d + '14fOprFAIlCQkVRxsfInhEPG0n-xN4QOa', sr_path, quiet=False)\n", (23992, 24055), False, 'import gdown\n'), ((24067, 24090), 'os.path.exists', 'os.path.exists', 
(['sr_path'], {}), '(sr_path)\n', (24081, 24090), False, 'import os\n'), ((24260, 24331), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id', '"""TalkNetSpect.nemo"""'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id, 'TalkNetSpect.nemo')\n", (24272, 24331), False, 'import os\n'), ((24341, 24407), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id', '"""hifiganmodel"""'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id, 'hifiganmodel')\n", (24353, 24407), False, 'import os\n'), ((24562, 24606), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""audio-out"""', '"""src"""'], {}), "('audio-out', 'src')\n", (24586, 24606), False, 'import dash\n'), ((24616, 24670), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""generated-info"""', '"""children"""'], {}), "('generated-info', 'children')\n", (24640, 24670), False, 'import dash\n'), ((24680, 24726), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""audio-out"""', '"""style"""'], {}), "('audio-out', 'style')\n", (24704, 24726), False, 'import dash\n'), ((24736, 24782), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""audio-out"""', '"""title"""'], {}), "('audio-out', 'title')\n", (24760, 24782), False, 'import dash\n'), ((24796, 24845), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""gen-button"""', '"""n_clicks"""'], {}), "('gen-button', 'n_clicks')\n", (24819, 24845), False, 'import dash\n'), ((24862, 24912), 'dash.dependencies.State', 'dash.dependencies.State', (['"""model-dropdown"""', '"""value"""'], {}), "('model-dropdown', 'value')\n", (24885, 24912), False, 'import dash\n'), ((24922, 24966), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drive-id"""', '"""value"""'], {}), "('drive-id', 'value')\n", (24945, 24966), False, 'import dash\n'), ((24976, 25028), 'dash.dependencies.State', 'dash.dependencies.State', (['"""transcript-input"""', '"""value"""'], {}), "('transcript-input', 'value')\n", 
(24999, 25028), False, 'import dash\n'), ((25038, 25087), 'dash.dependencies.State', 'dash.dependencies.State', (['"""pitch-options"""', '"""value"""'], {}), "('pitch-options', 'value')\n", (25061, 25087), False, 'import dash\n'), ((25097, 25145), 'dash.dependencies.State', 'dash.dependencies.State', (['"""pitch-factor"""', '"""value"""'], {}), "('pitch-factor', 'value')\n", (25120, 25145), False, 'import dash\n'), ((25155, 25206), 'dash.dependencies.State', 'dash.dependencies.State', (['"""current-filename"""', '"""data"""'], {}), "('current-filename', 'data')\n", (25178, 25206), False, 'import dash\n'), ((25216, 25262), 'dash.dependencies.State', 'dash.dependencies.State', (['"""current-f0s"""', '"""data"""'], {}), "('current-f0s', 'data')\n", (25239, 25262), False, 'import dash\n'), ((25272, 25328), 'dash.dependencies.State', 'dash.dependencies.State', (['"""current-f0s-nosilence"""', '"""data"""'], {}), "('current-f0s-nosilence', 'data')\n", (25295, 25328), False, 'import dash\n'), ((1028, 1232), 'dash_html_components.H1', 'html.H1', ([], {'id': '"""header"""', 'children': '"""Controllable TalkNet"""', 'style': "{'font-family': 'Georgia', 'color': '#000000', 'font-size': '4em',\n 'text-align': 'center', 'margin-top': '0em', 'margin-bottom': '0em'}"}), "(id='header', children='Controllable TalkNet', style={'font-family':\n 'Georgia', 'color': '#000000', 'font-size': '4em', 'text-align':\n 'center', 'margin-top': '0em', 'margin-bottom': '0em'})\n", (1035, 1232), True, 'import dash_html_components as html\n'), ((1392, 1451), 'dash_html_components.Label', 'html.Label', (['"""Character selection"""'], {'htmlFor': '"""model-dropdown"""'}), "('Character selection', htmlFor='model-dropdown')\n", (1402, 1451), True, 'import dash_html_components as html\n'), ((1461, 1725), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""model-dropdown"""', 'options': "[{'label': 'Custom model', 'value': 'Custom'}, {'label':\n '--- ERROR LOADING MODEL LISTS ---', 'value': 
'', 'disabled': True}]", 'value': 'None', 'style': "{'max-width': '90vw', 'width': '35em', 'margin-bottom': '0.7em'}"}), "(id='model-dropdown', options=[{'label': 'Custom model',\n 'value': 'Custom'}, {'label': '--- ERROR LOADING MODEL LISTS ---',\n 'value': '', 'disabled': True}], value=None, style={'max-width': '90vw',\n 'width': '35em', 'margin-bottom': '0.7em'})\n", (1473, 1725), True, 'import dash_core_components as dcc\n'), ((2418, 2512), 'dash_html_components.Label', 'html.Label', (["('Upload reference audio to ' + UPLOAD_DIRECTORY)"], {'htmlFor': '"""reference-dropdown"""'}), "('Upload reference audio to ' + UPLOAD_DIRECTORY, htmlFor=\n 'reference-dropdown')\n", (2428, 2512), True, 'import dash_html_components as html\n'), ((2552, 2579), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""current-f0s"""'}), "(id='current-f0s')\n", (2561, 2579), True, 'import dash_core_components as dcc\n'), ((2589, 2626), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""current-f0s-nosilence"""'}), "(id='current-f0s-nosilence')\n", (2598, 2626), True, 'import dash_core_components as dcc\n'), ((2636, 2668), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""current-filename"""'}), "(id='current-filename')\n", (2645, 2668), True, 'import dash_core_components as dcc\n'), ((6325, 6377), 'dash_html_components.Label', 'html.Label', (['"""Transcript"""'], {'htmlFor': '"""transcript-input"""'}), "('Transcript', htmlFor='transcript-input')\n", (6335, 6377), True, 'import dash_html_components as html\n'), ((6387, 6525), 'dash_core_components.Textarea', 'dcc.Textarea', ([], {'id': '"""transcript-input"""', 'value': '""""""', 'style': "{'max-width': '90vw', 'width': '50em', 'height': '8em', 'margin-bottom':\n '0.7em'}"}), "(id='transcript-input', value='', style={'max-width': '90vw',\n 'width': '50em', 'height': '8em', 'margin-bottom': '0.7em'})\n", (6399, 6525), True, 'import dash_core_components as dcc\n'), ((7605, 7770), 'dash_html_components.Footer', 
'html.Footer', ([], {'children': '"""\n Presented by the Minerman Groupie Association.\n """', 'style': "{'margin-top': '2em', 'font-size': '0.7em'}"}), '(children=\n """\n Presented by the Minerman Groupie Association.\n """\n , style={\'margin-top\': \'2em\', \'font-size\': \'0.7em\'})\n', (7616, 7770), True, 'import dash_html_components as html\n'), ((8730, 8742), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8739, 8742), False, 'import json\n'), ((12082, 12094), 'models.Generator', 'Generator', (['h'], {}), '(h)\n', (12091, 12094), False, 'from models import Generator\n'), ((12175, 12195), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12187, 12195), False, 'import torch\n'), ((13752, 13767), 'numpy.max', 'np.max', (['options'], {}), '(options)\n', (13758, 13767), True, 'import numpy as np\n'), ((13790, 13808), 'numpy.argmax', 'np.argmax', (['options'], {}), '(options)\n', (13799, 13808), True, 'import numpy as np\n'), ((14476, 14516), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""'], {}), "(UPLOAD_DIRECTORY, 'output')\n", (14488, 14516), False, 'import os\n'), ((14536, 14576), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""'], {}), "(UPLOAD_DIRECTORY, 'output')\n", (14548, 14576), False, 'import os\n'), ((14772, 14832), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(wav_name + '.json')"], {}), "(UPLOAD_DIRECTORY, 'output', wav_name + '.json')\n", (14784, 14832), False, 'import os\n'), ((15007, 15067), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(wav_name + '.json')"], {}), "(UPLOAD_DIRECTORY, 'output', wav_name + '.json')\n", (15019, 15067), False, 'import os\n'), ((17887, 17905), 'numpy.absolute', 'np.absolute', (['audio'], {}), '(audio)\n', (17898, 17905), True, 'import numpy as np\n'), ((17994, 18011), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (18002, 18011), True, 'import numpy as np\n'), ((21525, 21595), 'os.path.join', 
'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(dropdown_value + '_conv.wav')"], {}), "(UPLOAD_DIRECTORY, 'output', dropdown_value + '_conv.wav')\n", (21537, 21595), False, 'import os\n'), ((22880, 22920), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""'], {}), "(UPLOAD_DIRECTORY, 'models')\n", (22892, 22920), False, 'import os\n'), ((22940, 22980), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""'], {}), "(UPLOAD_DIRECTORY, 'models')\n", (22952, 22980), False, 'import os\n'), ((23008, 23058), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id)\n", (23020, 23058), False, 'import os\n'), ((23078, 23128), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id)\n", (23090, 23128), False, 'import os\n'), ((23335, 23359), 'os.path.exists', 'os.path.exists', (['zip_path'], {}), '(zip_path)\n', (23349, 23359), False, 'import os\n'), ((23546, 23565), 'os.remove', 'os.remove', (['zip_path'], {}), '(zip_path)\n', (23555, 23565), False, 'import os\n'), ((23706, 23736), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (23721, 23736), False, 'import zipfile\n'), ((26369, 26384), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26382, 26384), False, 'import torch\n'), ((30445, 30580), 'resampy.resample', 'resampy.resample', (['audio_np', 'h.sampling_rate', 'h2.sampling_rate'], {'filter': '"""sinc_window"""', 'window': 'scipy.signal.windows.hann', 'num_zeros': '(8)'}), "(audio_np, h.sampling_rate, h2.sampling_rate, filter=\n 'sinc_window', window=scipy.signal.windows.hann, num_zeros=8)\n", (30461, 30580), False, 'import resampy\n'), ((31526, 31602), 'scipy.signal.firwin', 'scipy.signal.firwin', (['(101)'], {'cutoff': '(10500)', 'fs': 'h2.sampling_rate', 'pass_zero': '(False)'}), '(101, cutoff=10500, fs=h2.sampling_rate, pass_zero=False)\n', 
(31545, 31602), False, 'import scipy\n'), ((31649, 31696), 'scipy.signal.lfilter', 'scipy.signal.lfilter', (['b', '[1.0]', 'audio2_denoised'], {}), '(b, [1.0], audio2_denoised)\n', (31669, 31696), False, 'import scipy\n'), ((31801, 31825), 'numpy.zeros', 'np.zeros', (['wave_out.shape'], {}), '(wave_out.shape)\n', (31809, 31825), True, 'import numpy as np\n'), ((31936, 31948), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (31946, 31948), False, 'import io\n'), ((8669, 8706), 'os.path.join', 'os.path.join', (['"""model_lists"""', 'filename'], {}), "('model_lists', filename)\n", (8681, 8706), False, 'import os\n'), ((12805, 12869), 'nemo.collections.asr.models.EncDecCTCModel.from_pretrained', 'EncDecCTCModel.from_pretrained', ([], {'model_name': '"""asr_talknet_aligner"""'}), "(model_name='asr_talknet_aligner')\n", (12835, 12869), False, 'from nemo.collections.asr.models import EncDecCTCModel\n'), ((21011, 21051), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""'], {}), "(UPLOAD_DIRECTORY, 'output')\n", (21023, 21051), False, 'import os\n'), ((21075, 21115), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""'], {}), "(UPLOAD_DIRECTORY, 'output')\n", (21087, 21115), False, 'import os\n'), ((23382, 23432), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id)\n", (23394, 23432), False, 'import os\n'), ((23502, 23519), 'os.stat', 'os.stat', (['zip_path'], {}), '(zip_path)\n', (23509, 23519), False, 'import os\n'), ((23587, 23637), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id)\n", (23599, 23637), False, 'import os\n'), ((23780, 23830), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""models"""', 'drive_id'], {}), "(UPLOAD_DIRECTORY, 'models', drive_id)\n", (23792, 23830), False, 'import os\n'), ((26578, 26605), 'os.path.exists', 'os.path.exists', (['singer_path'], {}), 
'(singer_path)\n', (26592, 26605), False, 'import os\n'), ((27060, 27085), 'os.path.exists', 'os.path.exists', (['durs_path'], {}), '(durs_path)\n', (27074, 27085), False, 'import os\n'), ((29143, 29187), 'crepe.predict', 'crepe.predict', (['audio_np', '(22050)'], {'viterbi': '(True)'}), '(audio_np, 22050, viterbi=True)\n', (29156, 29187), False, 'import crepe\n'), ((29299, 29332), 'torch.FloatTensor', 'torch.FloatTensor', (['f0s_wo_silence'], {}), '(f0s_wo_silence)\n', (29316, 29332), False, 'import torch\n'), ((30859, 30879), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (30871, 30879), False, 'import torch\n'), ((4845, 5054), 'dash_core_components.Checklist', 'dcc.Checklist', ([], {'id': '"""pitch-options"""', 'options': "[{'label': 'Change input pitch', 'value': 'pf'}, {'label':\n 'Auto-tune output', 'value': 'pc'}, {'label': 'Disable reference audio',\n 'value': 'dra'}]", 'value': '[]'}), "(id='pitch-options', options=[{'label': 'Change input pitch',\n 'value': 'pf'}, {'label': 'Auto-tune output', 'value': 'pc'}, {'label':\n 'Disable reference audio', 'value': 'dra'}], value=[])\n", (4858, 5054), True, 'import dash_core_components as dcc\n'), ((14643, 14707), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(wav_name + '_conv.wav')"], {}), "(UPLOAD_DIRECTORY, 'output', wav_name + '_conv.wav')\n", (14655, 14707), False, 'import os\n'), ((14889, 14953), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(wav_name + '_conv.wav')"], {}), "(UPLOAD_DIRECTORY, 'output', wav_name + '_conv.wav')\n", (14901, 14953), False, 'import os\n'), ((26489, 26518), 'os.path.dirname', 'os.path.dirname', (['talknet_path'], {}), '(talknet_path)\n', (26504, 26518), False, 'import os\n'), ((26637, 26681), 'talknet_singer.TalkNetSingerModel.restore_from', 'TalkNetSingerModel.restore_from', (['singer_path'], {}), '(singer_path)\n', (26668, 26681), False, 'from talknet_singer import TalkNetSingerModel\n'), ((26734, 26778), 
'nemo.collections.tts.models.TalkNetSpectModel.restore_from', 'TalkNetSpectModel.restore_from', (['talknet_path'], {}), '(talknet_path)\n', (26764, 26778), False, 'from nemo.collections.tts.models import TalkNetSpectModel\n'), ((26841, 26870), 'os.path.dirname', 'os.path.dirname', (['talknet_path'], {}), '(talknet_path)\n', (26856, 26870), False, 'import os\n'), ((26972, 27001), 'os.path.dirname', 'os.path.dirname', (['talknet_path'], {}), '(talknet_path)\n', (26987, 27001), False, 'import os\n'), ((27116, 27156), 'nemo.collections.tts.models.TalkNetDursModel.restore_from', 'TalkNetDursModel.restore_from', (['durs_path'], {}), '(durs_path)\n', (27145, 27156), False, 'from nemo.collections.tts.models import TalkNetDursModel\n'), ((27249, 27291), 'nemo.collections.tts.models.TalkNetPitchModel.restore_from', 'TalkNetPitchModel.restore_from', (['pitch_path'], {}), '(pitch_path)\n', (27279, 27291), False, 'from nemo.collections.tts.models import TalkNetPitchModel\n'), ((29358, 29382), 'torch.mean', 'torch.mean', (['output_pitch'], {}), '(output_pitch)\n', (29368, 29382), False, 'import torch\n'), ((29385, 29409), 'torch.mean', 'torch.mean', (['target_pitch'], {}), '(target_pitch)\n', (29395, 29409), False, 'import torch\n'), ((30832, 30855), 'torch.FloatTensor', 'torch.FloatTensor', (['wave'], {}), '(wave)\n', (30849, 30855), False, 'import torch\n'), ((32335, 32357), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (32355, 32357), False, 'import traceback\n'), ((2079, 2187), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""drive-id"""', 'type': '"""text"""', 'placeholder': '"""Drive ID for custom model"""', 'style': "{'width': '22em'}"}), "(id='drive-id', type='text', placeholder=\n 'Drive ID for custom model', style={'width': '22em'})\n", (2088, 2187), True, 'import dash_core_components as dcc\n'), ((4305, 4373), 'dash_html_components.Audio', 'html.Audio', ([], {'id': '"""pitch-out"""', 'controls': '(True)', 'style': "{'display': 'none'}"}), 
"(id='pitch-out', controls=True, style={'display': 'none'})\n", (4315, 4373), True, 'import dash_html_components as html\n'), ((4470, 4591), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""audio-loading-output"""', 'style': "{'font-style': 'italic', 'margin-bottom': '0.7em', 'text-align': 'center'}"}), "(id='audio-loading-output', style={'font-style': 'italic',\n 'margin-bottom': '0.7em', 'text-align': 'center'})\n", (4478, 4591), True, 'import dash_html_components as html\n'), ((6730, 6770), 'dash_html_components.Button', 'html.Button', (['"""Generate"""'], {'id': '"""gen-button"""'}), "('Generate', id='gen-button')\n", (6741, 6770), True, 'import dash_html_components as html\n'), ((6863, 6931), 'dash_html_components.Audio', 'html.Audio', ([], {'id': '"""audio-out"""', 'controls': '(True)', 'style': "{'display': 'none'}"}), "(id='audio-out', controls=True, style={'display': 'none'})\n", (6873, 6931), True, 'import dash_html_components as html\n'), ((7103, 7164), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""generated-info"""', 'style': "{'font-style': 'italic'}"}), "(id='generated-info', style={'font-style': 'italic'})\n", (7111, 7164), True, 'import dash_html_components as html\n'), ((11713, 11725), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11723, 11725), False, 'import uuid\n'), ((30096, 30152), 'psola.vocode', 'psola.vocode', (['audio_np', '(22050)'], {'target_pitch': 'target_pitch'}), '(audio_np, 22050, target_pitch=target_pitch)\n', (30108, 30152), False, 'import psola\n'), ((32186, 32197), 'time.time', 'time.time', ([], {}), '()\n', (32195, 32197), False, 'import time\n'), ((5294, 5341), 'dash_html_components.Label', 'html.Label', (['"""Semitones"""'], {'htmlFor': '"""pitch-factor"""'}), "('Semitones', htmlFor='pitch-factor')\n", (5304, 5341), True, 'import dash_html_components as html\n'), ((5367, 5489), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""pitch-factor"""', 'type': '"""number"""', 'value': '"""0"""', 'style': 
"{'width': '7em'}", 'min': '(-11)', 'max': '(11)', 'step': '(1)', 'disabled': '(True)'}), "(id='pitch-factor', type='number', value='0', style={'width':\n '7em'}, min=-11, max=11, step=1, disabled=True)\n", (5376, 5489), True, 'import dash_core_components as dcc\n'), ((10638, 10650), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10648, 10650), False, 'import uuid\n'), ((11205, 11217), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11215, 11217), False, 'import uuid\n'), ((21206, 21276), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', '"""output"""', "(dropdown_value + '_conv.wav')"], {}), "(UPLOAD_DIRECTORY, 'output', dropdown_value + '_conv.wav')\n", (21218, 21276), False, 'import os\n'), ((30252, 30268), 'numpy.abs', 'np.abs', (['audio_np'], {}), '(audio_np)\n', (30258, 30268), True, 'import numpy as np\n'), ((2818, 2905), 'dash_html_components.Button', 'html.Button', (['"""Update file list"""'], {'id': '"""update-button"""', 'style': "{'margin-right': '10px'}"}), "('Update file list', id='update-button', style={'margin-right':\n '10px'})\n", (2829, 2905), True, 'import dash_html_components as html\n'), ((3101, 3229), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""reference-dropdown"""', 'options': '[]', 'value': 'None', 'style': "{'max-width': '80vw', 'width': '30em'}", 'disabled': '(False)'}), "(id='reference-dropdown', options=[], value=None, style={\n 'max-width': '80vw', 'width': '30em'}, disabled=False)\n", (3113, 3229), True, 'import dash_core_components as dcc\n'), ((3512, 3540), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""pitch-clicks"""'}), "(id='pitch-clicks')\n", (3521, 3540), True, 'import dash_core_components as dcc\n'), ((3566, 3662), 'dash_html_components.Button', 'html.Button', (['"""Debug pitch"""'], {'id': '"""pitch-button"""', 'style': "{'margin-left': '10px'}", 'disabled': '(False)'}), "('Debug pitch', id='pitch-button', style={'margin-left': '10px'},\n disabled=False)\n", (3577, 3662), True, 'import 
dash_html_components as html\n'), ((21138, 21184), 'os.path.join', 'os.path.join', (['UPLOAD_DIRECTORY', 'dropdown_value'], {}), '(UPLOAD_DIRECTORY, dropdown_value)\n', (21150, 21184), False, 'import os\n'), ((28424, 28446), 'torch.from_numpy', 'torch.from_numpy', (['durs'], {}), '(durs)\n', (28440, 28446), False, 'import torch\n'), ((28496, 28518), 'torch.FloatTensor', 'torch.FloatTensor', (['f0s'], {}), '(f0s)\n', (28513, 28518), False, 'import torch\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Driver program to train a CNN on MNIST dataset.
"""
from math import log10
import keras
import matplotlib.pyplot as plt
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Input
from keras.models import load_model, Model
from keras.utils import to_categorical
from scipy.ndimage.filters import gaussian_filter
from custom_callbacks import LossHistory
from custom_models import baseline_model, two_conv_layer_model, two_conv_one_dense_layer_model
from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, \
generate_noisy_outputs
# Initializing essential constants
batch_size = 128
num_classes = 10
epochs = 1
img_rows, img_cols = 28, 28
num_iter = 101
# Initializing essential global variables
input_shape = None
X_train, y_train_labels, y_train, X_test, y_test_labels, y_test = None, None, None, None, None, None
def normalize_tensor(x):
    """ Utility function to scale a tensor to unit RMS magnitude
    (divides by the root-mean-square of its elements; a small epsilon
    keeps the division numerically stable near zero)
    Params:
        x: Tensorflow tensor
    Returns:
        tensor: RMS-normalized tensor
    """
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + 1e-5)
def load_data():
    """ Helper function to load and initialize data.

    Loads MNIST via keras, preprocesses the images to the backend's
    expected layout, one-hot encodes the labels, and stores everything
    in the module-level globals that the question_* functions consume.
    Must be called before any of the question_* routines.
    """
    global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test
    # Raw integer-labelled MNIST split (60k train / 10k test images)
    (X_train, y_train_labels), (X_test, y_test_labels) = mnist.load_data()
    # Reshape/scale images and derive the backend-dependent input_shape
    X_train, X_test, input_shape = preprocess_image_data(X_train, X_test, img_rows, img_cols, K)
    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train_labels, num_classes)
    y_test = to_categorical(y_test_labels, num_classes)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
def question_1():
    """Train and compare three CNN architectures on MNIST.

    For each model (baseline, 2-conv, 2-conv + 1-dense) this routine:
      * fits for `epochs` epochs with a LossHistory callback,
      * saves the trained model to disk (model1.h5 .. model3.h5),
      * plots train/test loss and test-accuracy evolution,
      * writes prediction images for a random sample of test digits.

    Requires load_data() to have populated the module-level globals.
    """
    global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test
    print("------------------------------------------------------------------------")
    print("Baseline Model")
    print("------------------------------------------------------------------------")
    model1 = baseline_model(input_shape, num_classes)
    loss_callback_1 = LossHistory((X_test, y_test))
    model1.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, y_test),
               callbacks=[loss_callback_1])
    model1.save('model1.h5')
    plot_learning_curve([loss_callback_1.train_indices, loss_callback_1.test_indices],
                        [loss_callback_1.train_losses, loss_callback_1.test_losses],
                        colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],
                        title="Loss evolution for Baseline Model",
                        path="../outputs/q1/plots/train_test_loss_baseline.png",
                        axlabels=["Iterations", "Loss"])
    plot_learning_curve([loss_callback_1.test_indices],
                        [loss_callback_1.test_acc],
                        colors=['c-'], labels=['Test Accuracy'],
                        title="Accuracy evolution for Baseline Model",
                        path="../outputs/q1/plots/test_acc_baseline.png",
                        axlabels=["Iterations", "Accuracy"])
    print("------------------------------------------------------------------------")
    print("2 conv layer model")
    print("------------------------------------------------------------------------")
    model2 = two_conv_layer_model(input_shape, num_classes)
    loss_callback_2 = LossHistory((X_test, y_test))
    model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, y_test),
               callbacks=[loss_callback_2])
    model2.save('model2.h5')
    plot_learning_curve([loss_callback_2.train_indices, loss_callback_2.test_indices],
                        [loss_callback_2.train_losses, loss_callback_2.test_losses],
                        colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],
                        title="Loss evolution for 2 conv layered Model",
                        path="../outputs/q1/plots/train_test_loss_2_conv.png",
                        axlabels=["Iterations", "Loss"])
    # BUG FIX: this plot previously read loss_callback_1 (the baseline
    # model's history), so the 2-conv accuracy curve showed the wrong data.
    plot_learning_curve([loss_callback_2.test_indices],
                        [loss_callback_2.test_acc],
                        colors=['c-'], labels=['Test Accuracy'],
                        title="Accuracy evolution for 2 conv layered Model",
                        path="../outputs/q1/plots/test_acc_2_conv.png",
                        axlabels=["Iterations", "Accuracy"])
    print("------------------------------------------------------------------------")
    print("2 conv layer + 1 hidden dense layer model")
    print("------------------------------------------------------------------------")
    model3 = two_conv_one_dense_layer_model(input_shape, num_classes)
    loss_callback_3 = LossHistory((X_test, y_test))
    model3.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, y_test),
               callbacks=[loss_callback_3])
    model3.save('model3.h5')
    plot_learning_curve([loss_callback_3.train_indices, loss_callback_3.test_indices],
                        [loss_callback_3.train_losses, loss_callback_3.test_losses],
                        colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],
                        title="Loss evolution for 2 Conv + 1 Dense layer config",
                        path="../outputs/q1/plots/train_test_loss_2_conv_1_dense.png",
                        axlabels=["Iterations", "Loss"])
    plot_learning_curve([loss_callback_3.test_indices],
                        [loss_callback_3.test_acc],
                        colors=['c-'], labels=['Test Accuracy'],
                        title="Accuracy evolution for 2 conv + 1 dense config",
                        path="../outputs/q1/plots/test_acc_2_conv_1_dense.png",
                        axlabels=["Iterations", "Accuracy"])
    # BUG FIX: indices were drawn from X_test.shape[0] but applied to
    # X_train; sample from the test set, matching question_2's convention.
    ids = np.random.choice(X_test.shape[0], 20)
    X_samples = X_test[ids]
    pred_samples_1 = model1.predict(X_samples)
    generate_image_outputs(X_samples, np.argmax(pred_samples_1, axis=1), path="../outputs/q1/predictions/baseline")
    pred_samples_2 = model2.predict(X_samples)
    generate_image_outputs(X_samples, np.argmax(pred_samples_2, axis=1), path="../outputs/q1/predictions/2_conv")
    pred_samples_3 = model3.predict(X_samples)
    generate_image_outputs(X_samples, np.argmax(pred_samples_3, axis=1),
                           path="../outputs/q1/predictions/2_conv_1_dense")
def question_2():
    """Learn a per-class universal adversarial noise pattern.

    For every digit class, performs manual gradient descent on a single
    28x28 noise image so that (test image + noise) is classified as that
    class by the frozen model3. Saves noise visualizations, loss/accuracy
    plots, and sample noisy predictions under ../outputs/q2/.
    Requires load_data() and a saved model3.h5 (from question_1()).
    """
    global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test
    model3 = load_model('model3.h5')
    # Freeze the classifier: only the additive noise is being optimized
    model3.trainable = False
    learning_rate = 0.01
    validation_interval = 10
    # Iterating over each of the 10 classes for generating adversarial examples
    for _label in range(0, num_classes):
        print("------------------------------------------------------------------------")
        print("Adversarial examples for label " + str(_label))
        print("------------------------------------------------------------------------")
        # y_eval is a dummy matrix useful for evaluating categorical crossentropy loss
        # (one batch worth of one-hot rows, all set to the target class)
        y_eval = to_categorical(np.full((batch_size, 1), _label, dtype=int), num_classes=num_classes)
        # y_fool is the duplicate label meant to fool the network and generate adversarial examples
        y_fool = to_categorical(np.full((y_train_labels.shape[0], 1), _label, dtype=int), num_classes=num_classes)
        batch = get_iter_batch(X_test, y_fool, batch_size, num_iter)
        # initializing a 28 x 28 matrix for noise
        noise = np.zeros((1, 28, 28, 1))
        # new functional model to add noise and predict output using existing trained model
        input1 = Input(shape=(img_rows, img_cols, 1))
        input2 = Input(shape=(img_rows, img_cols, 1))
        sum_inp = keras.layers.add([input1, input2])
        op = model3(sum_inp)
        noise_model = Model(inputs=[input1, input2], outputs=op)
        # calculating gradient
        # NOTE(review): argument order here is (output, target), which matches
        # the pre-2.x Keras backend signature; newer Keras expects
        # (target, output) — confirm against the pinned Keras version.
        a_loss = K.categorical_crossentropy(noise_model.output, y_eval)
        grad = K.gradients(a_loss, noise_model.input[1])[0]
        # Average the normalized gradient over the batch dimension
        grad = K.mean(normalize_tensor(grad), axis=0)
        # custom keras backend function that takes in two inputs and yields noise output,
        # loss and gradient
        custom_iterate = K.function([input1, input2], [noise_model.output, a_loss, grad])
        train_indices, train_loss, test_indices, test_loss, test_acc = [], [], [], [], []
        ctr = 0
        # Batch wise manual gradient descent for learning adversarial noise
        for _batch in batch:
            X_actual, y_actual = _batch
            output, loss, grads = custom_iterate([X_actual, noise])
            # Validating at specific intervals
            if (ctr % validation_interval == 0):
                # Broadcast the single noise image across the full test set
                noise_test = np.zeros(X_test.shape) + noise[0]
                preds_test = noise_model.predict([X_test, noise_test])
                # "Accuracy" here is the fooling rate: fraction of test
                # images now classified as the target label
                _test_acc = float(np.where(np.argmax(preds_test, axis=1) == _label)[0].shape[0]) / float(
                    preds_test.shape[0])
                _test_loss = np.mean(loss)
                test_indices.append(ctr)
                test_loss.append(_test_loss)
                test_acc.append(_test_acc)
            train_indices.append(ctr)
            train_loss.append(np.mean(loss))
            # Gradient update
            noise = noise - learning_rate * np.array(grads)
            line = ("Iteration " + str(ctr + 1).rjust(int(log10(num_iter) + 1))
                    + "/" + str(num_iter)
                    + " complete. Train Loss: %0.10f " % np.mean(loss))
            print(line)
            ctr = ctr + 1
        # Final fooling-rate evaluation with the learned noise
        noise_test = np.zeros(X_test.shape) + noise[0]
        preds = noise_model.predict([X_test, noise_test])
        print(
            "Accuracy: " + str(float(np.where(np.argmax(preds, axis=1) == _label)[0].shape[0]) / float(preds.shape[0])))
        # Visualizing each of the generated noises
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.imshow(noise.reshape(28, 28), interpolation='nearest', cmap="gray")
        plt.savefig("../outputs/q2/visualizations/sample_" + str(_label) + ".png")
        plt.close()
        # Plotting loss and accuracy evolution
        plot_learning_curve([train_indices, test_indices],
                            [train_loss, test_loss],
                            colors=['c-', 'm-'], labels=['Train loss', 'Test loss'],
                            title="Loss evolution for adversarial noise training",
                            path="../outputs/q2/plots/train_test_loss_adversarial_noise_" + str(_label) + ".png",
                            axlabels=["Iterations", "Loss"])
        plot_learning_curve([test_indices],
                            [test_acc],
                            colors=['r-'], labels=['Test Accuracy'],
                            title="Accuracy evolution for adversarial noise training",
                            path="../outputs/q2/plots/test_acc_adversarial_noise_" + str(_label) + ".png",
                            axlabels=["Iterations", "Accuracy"])
        # Predicting for a random set of 9 adversarial images
        ids = np.random.choice(X_test.shape[0], 9)
        X_samples = X_test[ids]
        noise_sample = np.zeros(X_samples.shape) + noise[0]
        pred_samples = noise_model.predict([X_samples, noise_sample])
        actual_samples = model3.predict(X_samples)
        generate_noisy_outputs(X_samples + noise_sample, np.argmax(actual_samples, axis=1),
                               np.argmax(pred_samples, axis=1), path="../outputs/q2/predictions/" + str(_label))
def question_3():
    """Visualize synthetic inputs that maximally activate network units.

    Part 1: for each class label, gradient-ascends a random image to
    maximize the corresponding pre-softmax logit.
    Part 2: repeats the procedure for the central neuron of each of the
    first 15 filters of the second max-pooling layer.
    Images and cost plots are written under ../outputs/q3/.
    Requires load_data() and a saved model3.h5 (from question_1()).
    """
    global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test
    model = load_model('model3.h5')
    model.trainable = False
    # Custom model that inputs 28 x 28 matrices and outputs logits (without softmax)
    visualize_model = Model(inputs=model.input, outputs=model.get_layer("logits").output)
    for _label in range(0, num_classes):
        print("------------------------------------------------------------------------")
        print("Synthetic image visualization for label " + str(_label))
        print("------------------------------------------------------------------------")
        # Setting cost to be the respective output neuron
        cost = visualize_model.output[:, _label]
        # Gradient calculation for the cost, averaged over the batch axis
        grad = K.mean(K.gradients(cost, visualize_model.input)[0], axis=0)
        # Custom keras backend function that inputs the images and returns the cost and gradient
        custom_iterate = K.function([model.input], [visualize_model.output[:, _label], grad])
        # Initializing a gaussian distribution centred around 128
        X_init = np.random.normal(loc=128., scale=50., size=(1, 28, 28, 1))
        X_init /= 255.
        costs = []
        iter_indices = []
        # Batch wise gradient ascent for learning X_init
        for i in range(num_iter):
            cost, grads = custom_iterate([X_init])
            # Blur radius grows with the iteration count to regularize
            sigma = (i + 1) * 4 / (num_iter + 0.5)
            step_size = 1.0 / np.std(grads)
            costs.append(cost[0])
            iter_indices.append(i)
            # Smoothening using a Gaussian filter
            grads = gaussian_filter(grads, sigma)
            # Gradient update (with a small weight decay on X_init)
            X_init = (1 - 0.0001) * X_init + step_size * np.array(grads)
            line = ("Iteration " + str(i + 1).rjust(int(log10(num_iter) + 1))
                    + "/" + str(num_iter)
                    + " complete. Cost: %0.10f " % cost[0])
            print(line)
        # Visualizing the input image
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.imshow(X_init.reshape(28, 28), interpolation='nearest', cmap="gray")
        plt.savefig("../outputs/q3/visualizations/max_output_" + str(_label) + ".png")
        plt.close()
        plot_learning_curve([iter_indices],
                            [costs],
                            colors=['b-'], labels=['Cost'],
                            title="Cost evolution over optimization iterations",
                            path="../outputs/q3/plots/cost_output_" + str(_label) + ".png",
                            axlabels=["Iterations", "Cost"])
    # Custom model that inputs 28 x 28 image matrices and outputs 2nd maxpooling layer
    visualize_model = Model(inputs=model.input, outputs=model.get_layer("maxpooling2").output)
    for _id in range(15):
        print("------------------------------------------------------------------------")
        print("Synthetic image visualization for central neuron of filter " + str(_id))
        print("------------------------------------------------------------------------")
        # Setting cost as the central neuron of maxpooling layer.
        # Since row size and column size (7, 7) is odd, the centre is
        # row // 2 and column // 2.
        # BUG FIX: use explicit integer (floor) division — "/ 2" yields a
        # float under Python 3, which is invalid as a tensor index.
        row_mid = int(visualize_model.output.get_shape()[1]) // 2
        col_mid = int(visualize_model.output.get_shape()[2]) // 2
        cost = visualize_model.output[:, row_mid, col_mid, _id]
        grad = K.mean(K.gradients(cost, visualize_model.input)[0], axis=0)
        custom_iterate = K.function([model.input], [cost, grad])
        X_init = np.random.normal(loc=128., scale=50., size=(1, 28, 28, 1))
        X_init /= 255.
        # Batch wise gradient ascent for learning X_init
        for i in range(num_iter):
            cost, grads = custom_iterate([X_init])
            sigma = (i + 1) * 4 / (num_iter + 0.5)
            step_size = 1.0 / np.std(grads)
            grads = gaussian_filter(grads, sigma)
            # Gradient update
            X_init = (1 - 0.0001) * X_init + step_size * np.array(grads)
            line = ("Iteration " + str(i + 1).rjust(int(log10(num_iter) + 1))
                    + "/" + str(num_iter)
                    + " complete. Cost: %0.10f " % cost[0])
            print(line)
        # Plotting X_init for each of the filter optimizations
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.imshow(X_init.reshape(28, 28), interpolation='nearest', cmap="gray")
        plt.text(0.5, 0.05, 'Filter: ' + str(_id), fontsize=28,
                 horizontalalignment='center', verticalalignment='center',
                 transform=ax.transAxes, color='white')
        plt.savefig("../outputs/q3/visualizations/max_filter_" + str(_id) + ".png")
        plt.close()
if __name__ == "__main__":
    # Load MNIST once, then run the experiments in order: question_2()
    # and question_3() load the model3.h5 file saved by question_1().
    load_data()
    question_1()
    question_2()
    question_3()
question_3()
| [
"keras.models.load_model",
"numpy.argmax",
"custom_models.two_conv_layer_model",
"utils.plot_learning_curve",
"keras.models.Model",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.normal",
"keras.layers.Input",
"custom_callbacks.LossHistory",
"numpy.full",
"utils.preprocess_image_data"... | [((1452, 1469), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1467, 1469), False, 'from keras.datasets import mnist\n'), ((1505, 1566), 'utils.preprocess_image_data', 'preprocess_image_data', (['X_train', 'X_test', 'img_rows', 'img_cols', 'K'], {}), '(X_train, X_test, img_rows, img_cols, K)\n', (1526, 1566), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((1635, 1678), 'keras.utils.to_categorical', 'to_categorical', (['y_train_labels', 'num_classes'], {}), '(y_train_labels, num_classes)\n', (1649, 1678), False, 'from keras.utils import to_categorical\n'), ((1692, 1734), 'keras.utils.to_categorical', 'to_categorical', (['y_test_labels', 'num_classes'], {}), '(y_test_labels, num_classes)\n', (1706, 1734), False, 'from keras.utils import to_categorical\n'), ((2189, 2229), 'custom_models.baseline_model', 'baseline_model', (['input_shape', 'num_classes'], {}), '(input_shape, num_classes)\n', (2203, 2229), False, 'from custom_models import baseline_model, two_conv_layer_model, two_conv_one_dense_layer_model\n'), ((2252, 2281), 'custom_callbacks.LossHistory', 'LossHistory', (['(X_test, y_test)'], {}), '((X_test, y_test))\n', (2263, 2281), False, 'from custom_callbacks import LossHistory\n'), ((2475, 2832), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_1.train_indices, loss_callback_1.test_indices]', '[loss_callback_1.train_losses, loss_callback_1.test_losses]'], {'colors': "['g-', 'm-']", 'labels': "['Train loss', 'Test loss']", 'title': '"""Loss evolution for Baseline Model"""', 'path': '"""../outputs/q1/plots/train_test_loss_baseline.png"""', 'axlabels': "['Iterations', 'Loss']"}), "([loss_callback_1.train_indices, loss_callback_1.\n test_indices], [loss_callback_1.train_losses, loss_callback_1.\n test_losses], colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],\n title='Loss evolution for 
Baseline Model', path=\n '../outputs/q1/plots/train_test_loss_baseline.png', axlabels=[\n 'Iterations', 'Loss'])\n", (2494, 2832), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((2933, 3206), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_1.test_indices]', '[loss_callback_1.test_acc]'], {'colors': "['c-']", 'labels': "['Test Accuracy']", 'title': '"""Accuracy evolution for Baseline Model"""', 'path': '"""../outputs/q1/plots/test_acc_baseline.png"""', 'axlabels': "['Iterations', 'Accuracy']"}), "([loss_callback_1.test_indices], [loss_callback_1.\n test_acc], colors=['c-'], labels=['Test Accuracy'], title=\n 'Accuracy evolution for Baseline Model', path=\n '../outputs/q1/plots/test_acc_baseline.png', axlabels=['Iterations',\n 'Accuracy'])\n", (2952, 3206), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((3526, 3572), 'custom_models.two_conv_layer_model', 'two_conv_layer_model', (['input_shape', 'num_classes'], {}), '(input_shape, num_classes)\n', (3546, 3572), False, 'from custom_models import baseline_model, two_conv_layer_model, two_conv_one_dense_layer_model\n'), ((3595, 3624), 'custom_callbacks.LossHistory', 'LossHistory', (['(X_test, y_test)'], {}), '((X_test, y_test))\n', (3606, 3624), False, 'from custom_callbacks import LossHistory\n'), ((3818, 4179), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_2.train_indices, loss_callback_2.test_indices]', '[loss_callback_2.train_losses, loss_callback_2.test_losses]'], {'colors': "['g-', 'm-']", 'labels': "['Train loss', 'Test loss']", 'title': '"""Loss evolution for 2 conv layered Model"""', 'path': '"""../outputs/q1/plots/train_test_loss_2_conv.png"""', 'axlabels': "['Iterations', 'Loss']"}), "([loss_callback_2.train_indices, loss_callback_2.\n test_indices], [loss_callback_2.train_losses, 
loss_callback_2.\n test_losses], colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],\n title='Loss evolution for 2 conv layered Model', path=\n '../outputs/q1/plots/train_test_loss_2_conv.png', axlabels=[\n 'Iterations', 'Loss'])\n", (3837, 4179), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((4280, 4557), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_1.test_indices]', '[loss_callback_1.test_acc]'], {'colors': "['c-']", 'labels': "['Test Accuracy']", 'title': '"""Accuracy evolution for 2 conv layered Model"""', 'path': '"""../outputs/q1/plots/test_acc_2_conv.png"""', 'axlabels': "['Iterations', 'Accuracy']"}), "([loss_callback_1.test_indices], [loss_callback_1.\n test_acc], colors=['c-'], labels=['Test Accuracy'], title=\n 'Accuracy evolution for 2 conv layered Model', path=\n '../outputs/q1/plots/test_acc_2_conv.png', axlabels=['Iterations',\n 'Accuracy'])\n", (4299, 4557), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((4900, 4956), 'custom_models.two_conv_one_dense_layer_model', 'two_conv_one_dense_layer_model', (['input_shape', 'num_classes'], {}), '(input_shape, num_classes)\n', (4930, 4956), False, 'from custom_models import baseline_model, two_conv_layer_model, two_conv_one_dense_layer_model\n'), ((4979, 5008), 'custom_callbacks.LossHistory', 'LossHistory', (['(X_test, y_test)'], {}), '((X_test, y_test))\n', (4990, 5008), False, 'from custom_callbacks import LossHistory\n'), ((5202, 5580), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_3.train_indices, loss_callback_3.test_indices]', '[loss_callback_3.train_losses, loss_callback_3.test_losses]'], {'colors': "['g-', 'm-']", 'labels': "['Train loss', 'Test loss']", 'title': '"""Loss evolution for 2 Conv + 1 Dense layer config"""', 'path': 
'"""../outputs/q1/plots/train_test_loss_2_conv_1_dense.png"""', 'axlabels': "['Iterations', 'Loss']"}), "([loss_callback_3.train_indices, loss_callback_3.\n test_indices], [loss_callback_3.train_losses, loss_callback_3.\n test_losses], colors=['g-', 'm-'], labels=['Train loss', 'Test loss'],\n title='Loss evolution for 2 Conv + 1 Dense layer config', path=\n '../outputs/q1/plots/train_test_loss_2_conv_1_dense.png', axlabels=[\n 'Iterations', 'Loss'])\n", (5221, 5580), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((5681, 5970), 'utils.plot_learning_curve', 'plot_learning_curve', (['[loss_callback_3.test_indices]', '[loss_callback_3.test_acc]'], {'colors': "['c-']", 'labels': "['Test Accuracy']", 'title': '"""Accuracy evolution for 2 conv + 1 dense config"""', 'path': '"""../outputs/q1/plots/test_acc_2_conv_1_dense.png"""', 'axlabels': "['Iterations', 'Accuracy']"}), "([loss_callback_3.test_indices], [loss_callback_3.\n test_acc], colors=['c-'], labels=['Test Accuracy'], title=\n 'Accuracy evolution for 2 conv + 1 dense config', path=\n '../outputs/q1/plots/test_acc_2_conv_1_dense.png', axlabels=[\n 'Iterations', 'Accuracy'])\n", (5700, 5970), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((6082, 6119), 'numpy.random.choice', 'np.random.choice', (['X_test.shape[0]', '(20)'], {}), '(X_test.shape[0], 20)\n', (6098, 6119), True, 'import numpy as np\n'), ((6790, 6813), 'keras.models.load_model', 'load_model', (['"""model3.h5"""'], {}), "('model3.h5')\n", (6800, 6813), False, 'from keras.models import load_model, Model\n'), ((12015, 12038), 'keras.models.load_model', 'load_model', (['"""model3.h5"""'], {}), "('model3.h5')\n", (12025, 12038), False, 'from keras.models import load_model, Model\n'), ((6234, 6267), 'numpy.argmax', 'np.argmax', (['pred_samples_1'], {'axis': '(1)'}), 
'(pred_samples_1, axis=1)\n', (6243, 6267), True, 'import numpy as np\n'), ((6397, 6430), 'numpy.argmax', 'np.argmax', (['pred_samples_2'], {'axis': '(1)'}), '(pred_samples_2, axis=1)\n', (6406, 6430), True, 'import numpy as np\n'), ((6558, 6591), 'numpy.argmax', 'np.argmax', (['pred_samples_3'], {'axis': '(1)'}), '(pred_samples_3, axis=1)\n', (6567, 6591), True, 'import numpy as np\n'), ((7684, 7736), 'utils.get_iter_batch', 'get_iter_batch', (['X_test', 'y_fool', 'batch_size', 'num_iter'], {}), '(X_test, y_fool, batch_size, num_iter)\n', (7698, 7736), False, 'from utils import preprocess_image_data, get_iter_batch, plot_learning_curve, generate_image_outputs, generate_noisy_outputs\n'), ((7804, 7828), 'numpy.zeros', 'np.zeros', (['(1, 28, 28, 1)'], {}), '((1, 28, 28, 1))\n', (7812, 7828), True, 'import numpy as np\n'), ((7939, 7975), 'keras.layers.Input', 'Input', ([], {'shape': '(img_rows, img_cols, 1)'}), '(shape=(img_rows, img_cols, 1))\n', (7944, 7975), False, 'from keras.layers import Input\n'), ((7993, 8029), 'keras.layers.Input', 'Input', ([], {'shape': '(img_rows, img_cols, 1)'}), '(shape=(img_rows, img_cols, 1))\n', (7998, 8029), False, 'from keras.layers import Input\n'), ((8048, 8082), 'keras.layers.add', 'keras.layers.add', (['[input1, input2]'], {}), '([input1, input2])\n', (8064, 8082), False, 'import keras\n'), ((8134, 8176), 'keras.models.Model', 'Model', ([], {'inputs': '[input1, input2]', 'outputs': 'op'}), '(inputs=[input1, input2], outputs=op)\n', (8139, 8176), False, 'from keras.models import load_model, Model\n'), ((8226, 8280), 'keras.backend.categorical_crossentropy', 'K.categorical_crossentropy', (['noise_model.output', 'y_eval'], {}), '(noise_model.output, y_eval)\n', (8252, 8280), True, 'from keras import backend as K\n'), ((8539, 8603), 'keras.backend.function', 'K.function', (['[input1, input2]', '[noise_model.output, a_loss, grad]'], {}), '([input1, input2], [noise_model.output, a_loss, grad])\n', (8549, 8603), True, 'from keras 
import backend as K\n'), ((10219, 10231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10229, 10231), True, 'import matplotlib.pyplot as plt\n'), ((10436, 10447), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10445, 10447), True, 'import matplotlib.pyplot as plt\n'), ((11440, 11476), 'numpy.random.choice', 'np.random.choice', (['X_test.shape[0]', '(9)'], {}), '(X_test.shape[0], 9)\n', (11456, 11476), True, 'import numpy as np\n'), ((12580, 12615), 'keras.utils.to_categorical', 'to_categorical', (['y_temp', 'num_classes'], {}), '(y_temp, num_classes)\n', (12594, 12615), False, 'from keras.utils import to_categorical\n'), ((12967, 13035), 'keras.backend.function', 'K.function', (['[model.input]', '[visualize_model.output[:, _label], grad]'], {}), '([model.input], [visualize_model.output[:, _label], grad])\n', (12977, 13035), True, 'from keras import backend as K\n'), ((13120, 13180), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(128.0)', 'scale': '(50.0)', 'size': '(1, 28, 28, 1)'}), '(loc=128.0, scale=50.0, size=(1, 28, 28, 1))\n', (13136, 13180), True, 'import numpy as np\n'), ((14029, 14041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14039, 14041), True, 'import matplotlib.pyplot as plt\n'), ((14251, 14262), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14260, 14262), True, 'import matplotlib.pyplot as plt\n'), ((15510, 15549), 'keras.backend.function', 'K.function', (['[model.input]', '[cost, grad]'], {}), '([model.input], [cost, grad])\n', (15520, 15549), True, 'from keras import backend as K\n'), ((15567, 15627), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(128.0)', 'scale': '(50.0)', 'size': '(1, 28, 28, 1)'}), '(loc=128.0, scale=50.0, size=(1, 28, 28, 1))\n', (15583, 15627), True, 'import numpy as np\n'), ((16336, 16348), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16346, 16348), True, 'import matplotlib.pyplot as plt\n'), ((16750, 16761), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16759, 16761), True, 'import matplotlib.pyplot as plt\n'), ((7382, 7425), 'numpy.full', 'np.full', (['(batch_size, 1)', '_label'], {'dtype': 'int'}), '((batch_size, 1), _label, dtype=int)\n', (7389, 7425), True, 'import numpy as np\n'), ((7584, 7640), 'numpy.full', 'np.full', (['(y_train_labels.shape[0], 1)', '_label'], {'dtype': 'int'}), '((y_train_labels.shape[0], 1), _label, dtype=int)\n', (7591, 7640), True, 'import numpy as np\n'), ((8296, 8337), 'keras.backend.gradients', 'K.gradients', (['a_loss', 'noise_model.input[1]'], {}), '(a_loss, noise_model.input[1])\n', (8307, 8337), True, 'from keras import backend as K\n'), ((9929, 9951), 'numpy.zeros', 'np.zeros', (['X_test.shape'], {}), '(X_test.shape)\n', (9937, 9951), True, 'import numpy as np\n'), ((11532, 11557), 'numpy.zeros', 'np.zeros', (['X_samples.shape'], {}), '(X_samples.shape)\n', (11540, 11557), True, 'import numpy as np\n'), ((11747, 11780), 'numpy.argmax', 'np.argmax', (['actual_samples'], {'axis': '(1)'}), '(actual_samples, axis=1)\n', (11756, 11780), True, 'import numpy as np\n'), ((11813, 11844), 'numpy.argmax', 'np.argmax', (['pred_samples'], {'axis': '(1)'}), '(pred_samples, axis=1)\n', (11822, 11844), True, 'import numpy as np\n'), ((13626, 13655), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['grads', 'sigma'], {}), '(grads, sigma)\n', (13641, 13655), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((15907, 15936), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['grads', 'sigma'], {}), '(grads, sigma)\n', (15922, 15936), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((9332, 9345), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (9339, 9345), True, 'import numpy as np\n'), ((9544, 9557), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (9551, 9557), True, 'import numpy as np\n'), ((12791, 12831), 'keras.backend.gradients', 'K.gradients', (['cost', 
'visualize_model.input'], {}), '(cost, visualize_model.input)\n', (12802, 12831), True, 'from keras import backend as K\n'), ((13472, 13485), 'numpy.std', 'np.std', (['grads'], {}), '(grads)\n', (13478, 13485), True, 'import numpy as np\n'), ((15432, 15472), 'keras.backend.gradients', 'K.gradients', (['cost', 'visualize_model.input'], {}), '(cost, visualize_model.input)\n', (15443, 15472), True, 'from keras import backend as K\n'), ((15873, 15886), 'numpy.std', 'np.std', (['grads'], {}), '(grads)\n', (15879, 15886), True, 'import numpy as np\n'), ((1206, 1217), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (1214, 1217), True, 'from keras import backend as K\n'), ((9051, 9073), 'numpy.zeros', 'np.zeros', (['X_test.shape'], {}), '(X_test.shape)\n', (9059, 9073), True, 'import numpy as np\n'), ((9634, 9649), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (9642, 9649), True, 'import numpy as np\n'), ((9842, 9855), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (9849, 9855), True, 'import numpy as np\n'), ((13743, 13758), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (13751, 13758), True, 'import numpy as np\n'), ((16025, 16040), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (16033, 16040), True, 'import numpy as np\n'), ((9709, 9724), 'math.log10', 'log10', (['num_iter'], {}), '(num_iter)\n', (9714, 9724), False, 'from math import log10\n'), ((13816, 13831), 'math.log10', 'log10', (['num_iter'], {}), '(num_iter)\n', (13821, 13831), False, 'from math import log10\n'), ((16098, 16113), 'math.log10', 'log10', (['num_iter'], {}), '(num_iter)\n', (16103, 16113), False, 'from math import log10\n'), ((9199, 9228), 'numpy.argmax', 'np.argmax', (['preds_test'], {'axis': '(1)'}), '(preds_test, axis=1)\n', (9208, 9228), True, 'import numpy as np\n'), ((10078, 10102), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (10087, 10102), True, 'import numpy as np\n')] |
from base.base_data_loader import BaseDataLoader
from utils.uts_classification.utils import readucr,readmts,transform_labels,readmts_uci_har,readmts_ptb,readmts_ptb_aug
import sklearn
import numpy as np
import os
import pickle as dill
from collections import Counter
class UtsClassificationDataLoader(BaseDataLoader):
    """Loader for univariate ('uts') and multivariate ('mts') time-series
    classification datasets.

    Reads the dataset selected by ``config.dataset.type`` /
    ``config.dataset.name``, one-hot encodes the labels, reshapes univariate
    series to ``(samples, length, 1)`` and records the sizes/shapes the
    trainers need.
    """

    def __init__(self, config):
        super(UtsClassificationDataLoader, self).__init__(config)
        if config.dataset.type == 'uts':
            if config.dataset.name == 'AFClassification':
                from utils.AFClassication.data import loaddata
                # loaddata() already provides one-hot labels and a fixed
                # train/val/test split; the extra R/P/Q/T streams are unused.
                (X_train, y_train), (Xval, yval), (final_testset, final_testtarget), (R_train, Rval, Rtest), (
                    P_train, Pval, Ptest), (Q_train, Qval, Qtest), (T_train, Tval, Ttest) = loaddata()
                X_train = X_train[0]
                X_val = Xval[0]
                y_val = yval
                X_test = final_testset[0]
                y_test = final_testtarget
                self.nb_classes = 3
                self.y_train = y_train
                self.y_test = y_test
                self.X_val = X_val.reshape((X_val.shape[0], X_val.shape[1], 1))
                self.y_val = y_val
                self.y_true = np.argmax(y_test, axis=1)
            elif config.dataset.name == 'ptbdb':
                file_path = './datasets/uts_data/ptbdb/'
                X_train, y_train, X_val, y_val, X_test, y_test = readmts_ptb_aug(file_path)
                self.nb_classes = len(np.unique(np.concatenate((y_train, y_val, y_test), axis=0)))
                y_train, y_val, y_test = transform_labels(y_train, y_test, y_val)
                # BUGFIX: the original code read ``self.X_val`` and
                # ``self.y_val`` before those attributes were ever assigned,
                # raising AttributeError; use the local arrays returned by
                # readmts_ptb_aug instead.
                self.X_val = X_val.reshape((X_val.shape[0], X_val.shape[1], 1))
                enc = sklearn.preprocessing.OneHotEncoder()
                enc.fit(np.concatenate((y_train, y_val, y_test), axis=0).reshape(-1, 1))
                self.y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
                self.y_val = enc.transform(y_val.reshape(-1, 1)).toarray()
                self.y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
            else:
                # Generic UCR-style dataset: plain-text TRAIN/TEST files.
                file_name = 'datasets/uts_data/' + config.dataset.name + '/' + config.dataset.name
                X_train, y_train = readucr(file_name + '_TRAIN.txt')
                X_test, y_test = readucr(file_name + '_TEST.txt')
                self.nb_classes = len(np.unique(np.concatenate((y_train, y_test), axis=0)))
                # make the min of the labels zero
                y_train, y_test = transform_labels(y_train, y_test)
        else:
            if config.dataset.name == 'UCI_HAR_Dataset':
                file_name = 'datasets/mts_data/' + config.dataset.name
                X_train, y_train, X_test, y_test = readmts_uci_har(file_name)
                # Re-pool the published split and re-divide it 90/10.
                data = np.concatenate((X_train, X_test), axis=0)
                label = np.concatenate((y_train, y_test), axis=0)
                N = data.shape[0]
                ind = int(N * 0.9)
                X_train = data[:ind]
                y_train = label[:ind]
                X_test = data[ind:]
                y_test = label[ind:]
                self.nb_classes = 6
                # make the min of the labels zero
                y_train, y_test = transform_labels(y_train, y_test)
            elif config.dataset.name == 'Challeng2018':
                from utils.AFClassication.data_challenge2018 import loaddata
                # Labels are already one-hot; split is fixed by loaddata().
                (X_train, y_train), (Xval, yval), (final_testset, final_testtarget) = loaddata()
                X_val = Xval
                X_test = final_testset
                y_val = yval
                y_test = final_testtarget
                self.nb_classes = 9
                self.X_val = X_val
                self.y_train = y_train
                self.y_test = y_test
                self.y_val = y_val
                self.y_true = np.argmax(y_test, axis=1)
            else:
                file_name = 'datasets/mts_data/' + config.dataset.name + '/' + config.dataset.name
                X_train, y_train, X_test, y_test, self.nb_classes = readmts(file_name)
        if config.dataset.name not in ['ptbdb', 'AFClassification', 'Challeng2018']:
            # Save the original integer labels: evaluation compares against
            # them while training uses the one-hot version below.
            self.y_true = y_test.astype(np.int64)
            # transform the labels from integers to one-hot vectors
            enc = sklearn.preprocessing.OneHotEncoder()
            enc.fit(np.concatenate((y_train, y_test), axis=0).reshape(-1, 1))
            self.y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
            self.y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
        if config.dataset.type == 'uts':
            # Univariate series get an explicit singleton channel axis.
            self.X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
            self.X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
        else:
            self.X_train = X_train
            self.X_test = X_test
        self.train_size = self.X_train.shape[0]
        self.test_size = self.X_test.shape[0]
        self.input_shape = self.X_train.shape[1:]
        if self.config.model.name == "tlenet":
            # t-LeNet applies its own window-slicing pre-processing, which
            # changes the sample count, input shape and label layout.
            from models.classification.tlenet import Classifier_TLENET
            self.X_train, self.y_train, self.X_test, self.y_test, self.tot_increase_num, \
            self.input_shape, self.nb_classes = Classifier_TLENET().pre_processing(self.X_train, self.y_train, self.X_test, self.y_test)
            print(self.input_shape)
        print("********************************")
def get_train_data(self):
if self.config.dataset.name in ['ptbdb','Challeng2018']:
return self.X_train, self.y_train, self.X_val, self.y_val
else:
return self.X_train, self.y_train
def get_test_data(self):
if (self.config.model.name == "tlenet"):
return self.X_test, self.y_test, self.y_true, self.tot_increase_num
return self.X_test, self.y_test, self.y_true
def get_inputshape(self):
return self.input_shape
def get_nbclasses(self):
return self.nb_classes
def get_train_size(self):
return self.train_size
    def get_test_size(self):
        # Number of test samples (X_test.shape[0]).
        return self.test_size | [
"utils.uts_classification.utils.readmts_uci_har",
"utils.uts_classification.utils.transform_labels",
"utils.uts_classification.utils.readucr",
"numpy.argmax",
"utils.AFClassication.data_challenge2018.loaddata",
"utils.uts_classification.utils.readmts_ptb_aug",
"utils.uts_classification.utils.readmts",
... | [((4414, 4451), 'sklearn.preprocessing.OneHotEncoder', 'sklearn.preprocessing.OneHotEncoder', ([], {}), '()\n', (4449, 4451), False, 'import sklearn\n'), ((783, 793), 'utils.AFClassication.data_challenge2018.loaddata', 'loaddata', ([], {}), '()\n', (791, 793), False, 'from utils.AFClassication.data_challenge2018 import loaddata\n'), ((1233, 1258), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (1242, 1258), True, 'import numpy as np\n'), ((2771, 2797), 'utils.uts_classification.utils.readmts_uci_har', 'readmts_uci_har', (['file_name'], {}), '(file_name)\n', (2786, 2797), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((2846, 2887), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_test)'], {'axis': '(0)'}), '((X_train, X_test), axis=0)\n', (2860, 2887), True, 'import numpy as np\n'), ((2911, 2952), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {'axis': '(0)'}), '((y_train, y_test), axis=0)\n', (2925, 2952), True, 'import numpy as np\n'), ((3286, 3319), 'utils.uts_classification.utils.transform_labels', 'transform_labels', (['y_train', 'y_test'], {}), '(y_train, y_test)\n', (3302, 3319), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((1432, 1458), 'utils.uts_classification.utils.readmts_ptb_aug', 'readmts_ptb_aug', (['file_path'], {}), '(file_path)\n', (1447, 1458), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((1600, 1640), 'utils.uts_classification.utils.transform_labels', 'transform_labels', (['y_train', 'y_test', 'y_val'], {}), '(y_train, y_test, y_val)\n', (1616, 1640), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((1753, 1790), 
'sklearn.preprocessing.OneHotEncoder', 'sklearn.preprocessing.OneHotEncoder', ([], {}), '()\n', (1788, 1790), False, 'import sklearn\n'), ((2268, 2301), 'utils.uts_classification.utils.readucr', 'readucr', (["(file_name + '_TRAIN.txt')"], {}), "(file_name + '_TRAIN.txt')\n", (2275, 2301), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((2335, 2367), 'utils.uts_classification.utils.readucr', 'readucr', (["(file_name + '_TEST.txt')"], {}), "(file_name + '_TEST.txt')\n", (2342, 2367), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((2543, 2576), 'utils.uts_classification.utils.transform_labels', 'transform_labels', (['y_train', 'y_test'], {}), '(y_train, y_test)\n', (2559, 2576), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((3539, 3549), 'utils.AFClassication.data_challenge2018.loaddata', 'loaddata', ([], {}), '()\n', (3547, 3549), False, 'from utils.AFClassication.data_challenge2018 import loaddata\n'), ((3901, 3926), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (3910, 3926), True, 'import numpy as np\n'), ((4112, 4130), 'utils.uts_classification.utils.readmts', 'readmts', (['file_name'], {}), '(file_name)\n', (4119, 4130), False, 'from utils.uts_classification.utils import readucr, readmts, transform_labels, readmts_uci_har, readmts_ptb, readmts_ptb_aug\n'), ((5369, 5388), 'models.classification.tlenet.Classifier_TLENET', 'Classifier_TLENET', ([], {}), '()\n', (5386, 5388), False, 'from models.classification.tlenet import Classifier_TLENET\n'), ((4472, 4513), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {'axis': '(0)'}), '((y_train, y_test), axis=0)\n', (4486, 4513), True, 'import numpy as np\n'), ((1507, 1555), 'numpy.concatenate', 
'np.concatenate', (['(y_train, y_val, y_test)'], {'axis': '(0)'}), '((y_train, y_val, y_test), axis=0)\n', (1521, 1555), True, 'import numpy as np\n'), ((2416, 2457), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {'axis': '(0)'}), '((y_train, y_test), axis=0)\n', (2430, 2457), True, 'import numpy as np\n'), ((1815, 1863), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_val, y_test)'], {'axis': '(0)'}), '((y_train, y_val, y_test), axis=0)\n', (1829, 1863), True, 'import numpy as np\n')] |
"""
Tests for basis module of the PySplineFit Module
Released under MIT License. See LICENSE file for details
Copyright (C) 2019 <NAME>
Requires pytest
"""
from .context import pysplinefit
from pysplinefit import basis
import pytest
import numpy as np
def test_basis_functions():
    """Nonzero basis values at u = 5/2 match The NURBS Book, Ex. 2.3."""
    p = 2
    U = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
    # knot span 4 contains u = 5/2 for this knot vector (n = m - p - 1)
    vals = basis.basis_functions(4, 5.0 / 2.0, p, U)
    assert np.allclose(vals, np.array([0.125, 0.75, 0.125]))
def test_basis_functions2():
    """Basis functions form a partition of unity: they must sum to one."""
    p = 2
    U = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
    # same configuration as The NURBS Book, Ex. 2.3 (span 4, u = 5/2)
    vals = basis.basis_functions(4, 5.0 / 2.0, p, U)
    assert np.isclose(np.sum(vals), 1.0)
def test_basis_function_ders():
    """Derivatives up to order 2 match The NURBS Book, Ex. 2.4."""
    p = 2
    U = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
    # span 4 contains u = 5/2 (n = m - p - 1)
    ders = basis.basis_function_ders(4, 5.0 / 2.0, p, U, 2)
    expected = np.array([[0.125, -0.5, 1.0],
                         [0.75, 0.0, -2.0],
                         [0.125, 0.5, 1.0]])
    assert np.allclose(ders, expected)
def test_one_basis_function():
    """Single basis-function evaluation matches The NURBS Book, Ex. 2.5."""
    p = 2
    U = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
    u = 5.0 / 2.0
    # evaluate N_{3,2} and N_{4,2} at u individually
    vals = np.array([basis.one_basis_function(p, U, i, u) for i in (3, 4)])
    assert np.allclose(vals, np.array([0.75, 0.125]))
def test_one_basis_function_ders():
    """Derivatives of a single basis function match The NURBS Book, Ex. 2.4."""
    p = 2
    U = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
    # derivatives of order 0..2 of N_{4,2} at u = 5/2
    ders = basis.one_basis_function_ders(p, U, 4, 5.0 / 2.0, 2)
    assert np.allclose(ders, np.array([0.125, 0.5, 1.0]))
| [
"pysplinefit.basis.one_basis_function",
"numpy.sum",
"numpy.allclose",
"pysplinefit.basis.basis_function_ders",
"pysplinefit.basis.basis_functions",
"numpy.isclose",
"numpy.array",
"pysplinefit.basis.one_basis_function_ders"
] | [((499, 558), 'pysplinefit.basis.basis_functions', 'basis.basis_functions', (['knot_span', 'knot', 'degree', 'knot_vector'], {}), '(knot_span, knot, degree, knot_vector)\n', (520, 558), False, 'from pysplinefit import basis\n'), ((575, 605), 'numpy.array', 'np.array', (['[0.125, 0.75, 0.125]'], {}), '([0.125, 0.75, 0.125])\n', (583, 605), True, 'import numpy as np\n'), ((623, 656), 'numpy.allclose', 'np.allclose', (['basis_vals', 'expected'], {}), '(basis_vals, expected)\n', (634, 656), True, 'import numpy as np\n'), ((907, 966), 'pysplinefit.basis.basis_functions', 'basis.basis_functions', (['knot_span', 'knot', 'degree', 'knot_vector'], {}), '(knot_span, knot, degree, knot_vector)\n', (928, 966), False, 'from pysplinefit import basis\n'), ((984, 1002), 'numpy.sum', 'np.sum', (['basis_vals'], {}), '(basis_vals)\n', (990, 1002), True, 'import numpy as np\n'), ((1015, 1041), 'numpy.isclose', 'np.isclose', (['basis_sum', '(1.0)'], {}), '(basis_sum, 1.0)\n', (1025, 1041), True, 'import numpy as np\n'), ((1292, 1368), 'pysplinefit.basis.basis_function_ders', 'basis.basis_function_ders', (['knot_span', 'knot', 'degree', 'knot_vector', 'deriv_order'], {}), '(knot_span, knot, degree, knot_vector, deriv_order)\n', (1317, 1368), False, 'from pysplinefit import basis\n'), ((1385, 1451), 'numpy.array', 'np.array', (['[[0.125, -0.5, 1.0], [0.75, 0, -2.0], [0.125, 0.5, 1.0]]'], {}), '([[0.125, -0.5, 1.0], [0.75, 0, -2.0], [0.125, 0.5, 1.0]])\n', (1393, 1451), True, 'import numpy as np\n'), ((1519, 1551), 'numpy.allclose', 'np.allclose', (['ders_vals', 'expected'], {}), '(ders_vals, expected)\n', (1530, 1551), True, 'import numpy as np\n'), ((1786, 1840), 'pysplinefit.basis.one_basis_function', 'basis.one_basis_function', (['degree', 'knot_vector', '(3)', 'knot'], {}), '(degree, knot_vector, 3, knot)\n', (1810, 1840), False, 'from pysplinefit import basis\n'), ((1858, 1912), 'pysplinefit.basis.one_basis_function', 'basis.one_basis_function', (['degree', 'knot_vector', '(4)', 
'knot'], {}), '(degree, knot_vector, 4, knot)\n', (1882, 1912), False, 'from pysplinefit import basis\n'), ((1931, 1965), 'numpy.array', 'np.array', (['[basis_val1, basis_val2]'], {}), '([basis_val1, basis_val2])\n', (1939, 1965), True, 'import numpy as np\n'), ((1982, 2005), 'numpy.array', 'np.array', (['[0.75, 0.125]'], {}), '([0.75, 0.125])\n', (1990, 2005), True, 'import numpy as np\n'), ((2023, 2056), 'numpy.allclose', 'np.allclose', (['basis_vals', 'expected'], {}), '(basis_vals, expected)\n', (2034, 2056), True, 'import numpy as np\n'), ((2340, 2425), 'pysplinefit.basis.one_basis_function_ders', 'basis.one_basis_function_ders', (['degree', 'knot_vector', 'knot_span', 'knot', 'deriv_order'], {}), '(degree, knot_vector, knot_span, knot, deriv_order\n )\n', (2369, 2425), False, 'from pysplinefit import basis\n'), ((2437, 2464), 'numpy.array', 'np.array', (['[0.125, 0.5, 1.0]'], {}), '([0.125, 0.5, 1.0])\n', (2445, 2464), True, 'import numpy as np\n'), ((2482, 2521), 'numpy.allclose', 'np.allclose', (['basis_deriv_vals', 'expected'], {}), '(basis_deriv_vals, expected)\n', (2493, 2521), True, 'import numpy as np\n')] |
#-*- coding: utf-8 -*-
import random
import string
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from image import ImageCaptcha
chars = string.digits + string.ascii_lowercase + string.ascii_uppercase
# generate a random captcha text string
def random_captcha_text(char_set=chars, captcha_size=5):
    """Return a random captcha string of ``captcha_size`` characters drawn
    (with replacement) from ``char_set``."""
    picked = [random.choice(char_set) for _ in range(captcha_size)]
    return ''.join(picked)
# render a captcha and convert it to a numpy image array
def gen_captcha_text_and_image():
    """Generate one captcha sample.

    Returns a tuple ``(label_text, image)`` where ``image`` is the rendered
    captcha as a numpy array.
    """
    text = random_captcha_text()
    raw = ImageCaptcha().generate(text)
    # ImageCaptcha().write(text, text + '.png')  # uncomment to save to disk
    img = np.array(Image.open(raw))
    return text, img
if __name__ == '__main__':
    # Demo loop: generate captchas forever and display each one;
    # close the figure window to advance to the next sample.
    while True:
        text, image = gen_captcha_text_and_image()
        print(text)
        print(image.shape)
        # show the ground-truth text near the upper-left corner of the figure
        f = plt.figure()
        ax = f.add_subplot(111)
        ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)
        plt.imshow(image)
        plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"random.choice",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"numpy.array",
"image.ImageCaptcha"
] | [((641, 660), 'PIL.Image.open', 'Image.open', (['captcha'], {}), '(captcha)\n', (651, 660), False, 'from PIL import Image\n'), ((678, 701), 'numpy.array', 'np.array', (['captcha_image'], {}), '(captcha_image)\n', (686, 701), True, 'import numpy as np\n'), ((354, 377), 'random.choice', 'random.choice', (['char_set'], {}), '(char_set)\n', (367, 377), False, 'import random\n'), ((880, 892), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (890, 892), True, 'import matplotlib.pyplot as plt\n'), ((998, 1015), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1008, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1026, 1028), True, 'import matplotlib.pyplot as plt\n'), ((525, 539), 'image.ImageCaptcha', 'ImageCaptcha', ([], {}), '()\n', (537, 539), False, 'from image import ImageCaptcha\n')] |
"""
TODO:
-add ground truth steady-state distribution in phi
-determine correct boudnary condition
Trying to apply upwind/downwind to our problem.
The equation I derived is ...see below
"""
import time
#import matplotlib
#import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import solve_ivp
#from scipy.interpolate import interp1d
#from cumsumb import cumsum
import lib.libMotorPDE as lib
#matplotlib.use('TkAgg')
class MotorPDE(object):
def __init__(self,**kwargs):
defaults = {'N':100,
'N2':100,
'dt':.0005,
'U':None,
'alpha':14,
'beta':126,
'zeta':1,
'A0':0,
'A':5,
'B':5.1,
'T':10,
'fn_test_option':'root_cos',
'fn_vel':50,
'store_position':False,
'ivp_method':'euler',
'source':True,
'regularized':False,
'testing_ss':False,
'init_pars':None,
'domain':None,
'irregular':False}
# define defaults if not defined in kwargs
for (prop, default) in defaults.items():
setattr(self, prop, kwargs.get(prop, default))
#print('source type',self.source)
assert(self.A <= self.B)
assert(self.A0 <= self.A)
#assert(self.U is not None)
#self.dx = (self.B-self.A0)/self.N
# center an index at z=A and build from there.
if self.irregular:
x_left = np.linspace(self.A0,self.A,self.N)
x_right = np.linspace(self.A,self.B,self.N2)
#print('xleft,xright',x_left,x_right)
self.x = np.append(x_left,x_right[1:])
self.dx = np.diff(self.x)
#self.dx = np.append(self.dx[-1],self.dx[0])
#self.dx = np.append(self.dx,self.dx[-1])
# note that A_idx is chosen just right of z=A
# this is because the mesh is finer on the right and
# easier to manage.
self.A_idx = len(x_left)
self.B_idx = len(self.x)-1
#print(self.x[self.A_idx])
else:
self.x,self.dx = np.linspace(self.A0,self.B,self.N,
endpoint=False,retstep=True)
# index of A
self.A_idx = np.argmin(np.abs(self.x-(self.A)))
# index of position B
self.B_idx = np.argmin(np.abs(self.x-self.B))
self.idx_full = np.arange(len(self.x))
# indices of all points except appropriate boundary
# [0,1,2,3,4,5,6,7] to [0,1,2,3,4,5,6]
self.idx_except_last = self.idx_full[:-1]
# [0,1,2,3,4,5,6,7] to [1,2,3,4,5,6,7]
self.idx_except_first = self.idx_full[1:]
#self.idx_A2B = self.idx_full[self.A_idx:self.B_idx]
# [0,1,2,3,4,5,6,7] to [1,2,3,4,5,6,7]
self.roll_next = np.roll(self.idx_full,-1)[:-1]
# [0,1,2,3,4,5,6,7] to [0,1,2,3,4,5,6]
self.roll_prev = np.roll(self.idx_full,1)[1:]
self.TN = int(self.T/self.dt) # time discretization
# preallocate output array for upwinding scheme here for efficiency.
self.out = np.zeros_like(self.x)
if not self.store_position and self.ivp_method == 'euler':
TN = 1
else:
TN = self.TN
self.t = np.linspace(0,self.T,TN)
self.sol = np.zeros((TN,len(self.x)))
self.U_arr = np.zeros(TN)
if self.regularized:
# regularized delta function
s = (self.A-self.A0)/self.dx
i = np.floor(s) # floor index of A
r = s-i # distance from floor in index
self.delta_idxs = np.mod(np.arange(i-2,i+1+1,1),self.N)+1
self.delta_idxs = self.delta_idxs.astype(int)
q = np.sqrt(1+4*r*(1-r))
self.ws = np.array([(3-2*r-q)/8,
(3-2*r+q)/8,
(1+2*r+q)/8,
(1+2*r-q)/8])
def boundary_left(self,U,sol):
"""
left boundary condition changes depending on sign of U
U < 0: Dirichlet or do nothing
U > 0: Dirichlet +self.alpha*(1-self.theta)/U
"""
if U < 0:
return 0
elif U > 0:
return self.alpha*(1-self.theta_n)/U
def boundary_right(self,U,sol):
"""
Right boundary condition changes depending on sign of U
U < 0: -self.alpha*(1-self.theta_n)/U
U > 0: phi_x = phi_t
"""
if U < 0:
return -self.alpha*(1-self.theta_n)/U
elif U > 0:
return sol
def run(self):
"""
decides on which integration scheme to use
based on user option (self.ivp_method)
"""
# initial condition
if self.init_pars is None:
self.init = np.zeros_like(self.x)
elif self.init_pars['type'] == 'gaussian':
self.init = lib.gauss(self.x-(self.A0+self.B)/2,
sig=self.init_pars['pars'])
self.sol[0,:] = self.init
if self.ivp_method == 'euler':
self.sol = self.run_euler()
else:
obj_integrated = solve_ivp(self.upwind,[0,self.T],
self.init,args=(self.U,),
t_eval=self.t,
method=self.ivp_method)
self.sol = obj_integrated.y.T
def run_euler(self):
#assert (self.CFL < 1), "CFL condition not met for Euler method"
self.i = 0
while self.i < (self.TN-1):
if self.U == 'dynamic':
# generate population distribution from sol
# draw from population distribution
if np.add.reduce(self.sol[self.i,:]) != 0:
xs = lib.inverse_transform_sampling(self,
self.sol[self.i,:],100)
else:
xs = np.zeros(100)
# get total force
f_up = np.add.reduce(lib.force_position(xs))*self.dx
#print(f_up)
f_down = 0
Uval = (-f_up + f_down)/(self.zeta)
# update velocity
#Uval = self.update_velocity(f_up,f_down,Uval)
#print(Uval)
else:
Uval = self.U
k_next, k_now = lib.get_time_index(self.store_position,self.i)
sol_now = self.sol[k_now,:]
self.sol[k_next,:] = sol_now + self.dt*(self.upwind(self.t[k_now],
sol_now,
Uval))
self.i += 1
return self.sol
def upwind(self,t,sol,U):
"""
Implementation of upwinding scheme to be used in Euler loop
method of lines
"""
if False:
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['text.usetex'] = False
mpl.rcParams['text.latex.preamble'] = r'\usepackage{amsmath}'
fig = plt.figure(figsize=(4, 5))
ax1 = fig.add_subplot(111)
ax1.set_title('input sol to upwind')
ax1.plot(sol)
plt.show()
plt.close()
time.sleep(2)
if callable(U):
Uval = U(t,vel=self.fn_vel,option=self.fn_test_option)
elif isinstance(U,float) or isinstance(U,int):
Uval = U
if self.ivp_method == 'euler' and self.store_position:
self.U_arr[self.i] = Uval
if Uval > 0:
# boundaries
idx_update = self.idx_except_first
if self.irregular:
dx = self.dx[0]
else:
dx = self.dx
self.out[0] = -self.beta*sol[0]-sol[0]*Uval/dx
else:
# boundaries
idx_update = self.idx_except_last
if self.irregular:
dx = self.dx[-1]
else:
dx = self.dx
self.out[-1] = -self.beta*sol[-1]+sol[-1]*Uval/dx
if Uval <= 0:
U_minus = Uval
U_plus = 0
else:
U_minus = 0
U_plus = Uval
if self.irregular:
dx = self.dx
else:
dx = self.dx
p_plus = (sol[self.roll_next]-sol[self.idx_except_last])/dx
p_minus = (sol[self.idx_except_first]-sol[self.roll_prev])/dx
#if Uval > 0:
# print('p_plus',p_plus[-5:])
#print('p_plus',p_plus[-5:])
# update derivatives
wind = p_plus*U_minus + p_minus*U_plus
#print(self.i,'plus,minus',wind,U_minus,U_plus)
self.out[idx_update] = -self.beta*(sol[idx_update]) - wind
#print(dx,self.dx,self.irregular,self.alpha*(1-self.theta_n)/dx)
#print()
#print(self.out[self.A_idx])
if self.irregular:
self.theta_n = np.add.reduce(sol[:-1]*self.dx)
else:
self.theta_n = np.add.reduce(sol)*self.dx
#assert((self.theta_n <= 1) and (self.theta_n >= 0))
#print('thetan',self.theta_n)
# update input
if (self.source == True or self.source == 'motor')\
and self.regularized == False:
if self.irregular:
dx = self.dx[self.A_idx]
else:
dx = self.dx
#print(dx,self.dx,self.irregular,self.alpha*(1-self.theta_n)/dx)
#print()
#print(self.out[self.A_idx])
self.out[self.A_idx] += self.alpha*(1-self.theta_n)/dx
#print('out@A_idx',self.out[self.A_idx])
#print(self.out[self.A_idx],dx,self.alpha,(1-self.theta_n))
elif (self.source == True or self.source == 'motor')\
and self.regularized == True:
if self.irregular:
dx = self.dx[self.delta_idxs]
else:
dx = self.dx
self.out[self.delta_idxs] += self.ws*self.alpha*(1-self.theta_n)/dx
# Gaussian source
#sig = .3
#k = self.alpha*(1-self.theta_n)/(sig*np.sqrt(2*np.pi))
#out[self.idx_full] += k*np.exp(-0.5*(self.x-self.A)**2/sig**2)
elif callable(self.source) and self.regularized == True:
if self.irregular:
dx = self.dx[self.A_idx]
else:
dx = self.dx
self.out[self.delta_idxs] += self.ws*self.source(t)/dx
elif callable(self.source) and self.regularized == False:
if self.irregular:
dx = self.dx[self.A_idx]
else:
dx = self.dx
self.out[self.A_idx] += self.source(t)/dx
elif self.source == False:
pass
else:
raise ValueError('Unrecognized source option',self.source,
self.regularized)
#elif self.source == 'regularized_custom':
# #print(self.source(t))
# out[self.delta_idxs] += self.ws*self.source(t)/self.dx
#if Uval > 0:
# print('out',self.out[-5:])
return self.out
def main():
pass
if __name__ == "__main__":
main()
| [
"numpy.abs",
"numpy.floor",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.zeros_like",
"numpy.add.reduce",
"matplotlib.pyplot.close",
"scipy.integrate.solve_ivp",
"numpy.append",
"numpy.linspace",
"lib.libMotorPDE.gauss",
"lib.libMotorPDE.get_time_index",
"matplotlib.pyplot.show",
"nu... | [((3417, 3438), 'numpy.zeros_like', 'np.zeros_like', (['self.x'], {}), '(self.x)\n', (3430, 3438), True, 'import numpy as np\n'), ((3591, 3617), 'numpy.linspace', 'np.linspace', (['(0)', 'self.T', 'TN'], {}), '(0, self.T, TN)\n', (3602, 3617), True, 'import numpy as np\n'), ((3683, 3695), 'numpy.zeros', 'np.zeros', (['TN'], {}), '(TN)\n', (3691, 3695), True, 'import numpy as np\n'), ((1710, 1746), 'numpy.linspace', 'np.linspace', (['self.A0', 'self.A', 'self.N'], {}), '(self.A0, self.A, self.N)\n', (1721, 1746), True, 'import numpy as np\n'), ((1767, 1803), 'numpy.linspace', 'np.linspace', (['self.A', 'self.B', 'self.N2'], {}), '(self.A, self.B, self.N2)\n', (1778, 1803), True, 'import numpy as np\n'), ((1875, 1905), 'numpy.append', 'np.append', (['x_left', 'x_right[1:]'], {}), '(x_left, x_right[1:])\n', (1884, 1905), True, 'import numpy as np\n'), ((1927, 1942), 'numpy.diff', 'np.diff', (['self.x'], {}), '(self.x)\n', (1934, 1942), True, 'import numpy as np\n'), ((2371, 2437), 'numpy.linspace', 'np.linspace', (['self.A0', 'self.B', 'self.N'], {'endpoint': '(False)', 'retstep': '(True)'}), '(self.A0, self.B, self.N, endpoint=False, retstep=True)\n', (2382, 2437), True, 'import numpy as np\n'), ((3118, 3144), 'numpy.roll', 'np.roll', (['self.idx_full', '(-1)'], {}), '(self.idx_full, -1)\n', (3125, 3144), True, 'import numpy as np\n'), ((3222, 3247), 'numpy.roll', 'np.roll', (['self.idx_full', '(1)'], {}), '(self.idx_full, 1)\n', (3229, 3247), True, 'import numpy as np\n'), ((3824, 3835), 'numpy.floor', 'np.floor', (['s'], {}), '(s)\n', (3832, 3835), True, 'import numpy as np\n'), ((4076, 4104), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * r * (1 - r))'], {}), '(1 + 4 * r * (1 - r))\n', (4083, 4104), True, 'import numpy as np\n'), ((4119, 4217), 'numpy.array', 'np.array', (['[(3 - 2 * r - q) / 8, (3 - 2 * r + q) / 8, (1 + 2 * r + q) / 8, (1 + 2 * r -\n q) / 8]'], {}), '([(3 - 2 * r - q) / 8, (3 - 2 * r + q) / 8, (1 + 2 * r + q) / 8, (1 +\n 2 * r - q) / 8])\n', 
(4127, 4217), True, 'import numpy as np\n'), ((5211, 5232), 'numpy.zeros_like', 'np.zeros_like', (['self.x'], {}), '(self.x)\n', (5224, 5232), True, 'import numpy as np\n'), ((5583, 5689), 'scipy.integrate.solve_ivp', 'solve_ivp', (['self.upwind', '[0, self.T]', 'self.init'], {'args': '(self.U,)', 't_eval': 'self.t', 'method': 'self.ivp_method'}), '(self.upwind, [0, self.T], self.init, args=(self.U,), t_eval=self.\n t, method=self.ivp_method)\n', (5592, 5689), False, 'from scipy.integrate import solve_ivp\n'), ((6935, 6982), 'lib.libMotorPDE.get_time_index', 'lib.get_time_index', (['self.store_position', 'self.i'], {}), '(self.store_position, self.i)\n', (6953, 6982), True, 'import lib.libMotorPDE as lib\n'), ((7746, 7772), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 5)'}), '(figsize=(4, 5))\n', (7756, 7772), True, 'import matplotlib.pyplot as plt\n'), ((7925, 7935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7933, 7935), True, 'import matplotlib.pyplot as plt\n'), ((7948, 7959), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7957, 7959), True, 'import matplotlib.pyplot as plt\n'), ((7972, 7985), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7982, 7985), False, 'import time\n'), ((9725, 9758), 'numpy.add.reduce', 'np.add.reduce', (['(sol[:-1] * self.dx)'], {}), '(sol[:-1] * self.dx)\n', (9738, 9758), True, 'import numpy as np\n'), ((2537, 2560), 'numpy.abs', 'np.abs', (['(self.x - self.A)'], {}), '(self.x - self.A)\n', (2543, 2560), True, 'import numpy as np\n'), ((2632, 2655), 'numpy.abs', 'np.abs', (['(self.x - self.B)'], {}), '(self.x - self.B)\n', (2638, 2655), True, 'import numpy as np\n'), ((5309, 5379), 'lib.libMotorPDE.gauss', 'lib.gauss', (['(self.x - (self.A0 + self.B) / 2)'], {'sig': "self.init_pars['pars']"}), "(self.x - (self.A0 + self.B) / 2, sig=self.init_pars['pars'])\n", (5318, 5379), True, 'import lib.libMotorPDE as lib\n'), ((9798, 9816), 'numpy.add.reduce', 'np.add.reduce', (['sol'], {}), 
'(sol)\n', (9811, 9816), True, 'import numpy as np\n'), ((3969, 3999), 'numpy.arange', 'np.arange', (['(i - 2)', '(i + 1 + 1)', '(1)'], {}), '(i - 2, i + 1 + 1, 1)\n', (3978, 3999), True, 'import numpy as np\n'), ((6198, 6232), 'numpy.add.reduce', 'np.add.reduce', (['self.sol[self.i, :]'], {}), '(self.sol[self.i, :])\n', (6211, 6232), True, 'import numpy as np\n'), ((6263, 6325), 'lib.libMotorPDE.inverse_transform_sampling', 'lib.inverse_transform_sampling', (['self', 'self.sol[self.i, :]', '(100)'], {}), '(self, self.sol[self.i, :], 100)\n', (6293, 6325), True, 'import lib.libMotorPDE as lib\n'), ((6427, 6440), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (6435, 6440), True, 'import numpy as np\n'), ((6529, 6551), 'lib.libMotorPDE.force_position', 'lib.force_position', (['xs'], {}), '(xs)\n', (6547, 6551), True, 'import lib.libMotorPDE as lib\n')] |
"""
Integration using Scipy-provided tool
ODEs in the system go as follows: first all coordinate (x, y, z) equations in the order of body_config, then all velocity (vx, vy, vz) ones.
"""
import time
from math import sqrt
import numpy as np
import scipy.integrate
from scipy.constants import G
from ..common import SystemState, GlobalConfig
from .. import common
METHODS = ('RK45', 'RK23', 'DOP853', 'Radau', 'BDF', 'LSODA')
def initial_condition(body_cfg : SystemState):
y0 = []
body_num = len(body_cfg.names)
# First, set coordinates
for body_idx in range(body_num):
y0.extend(list(body_cfg.r[body_idx]))
# Set velocities
for body_idx in range(body_num):
y0.extend(list(body_cfg.v[body_idx]))
return y0
def get_r_from_y(y, body_idx, body_num):
assert body_idx < body_num
start_idx = body_idx*3
stop_idx = (body_idx+1)*3
return [y[i] for i in range(start_idx, stop_idx)]
def get_v_from_y(y, body_idx, body_num):
assert body_idx < body_num
start_idx = body_num*3 + body_idx*3
stop_idx = body_num*3 + (body_idx+1)*3
return [y[i] for i in range(start_idx, stop_idx)]
def norm3(r):
return sqrt(r[0]**2 + r[1]**2 + r[2]**2)**3
def f(t, y, m):
f_val = []
body_num = len(m)
# Coordinate ODEs
for body_idx in range(body_num):
f_val.extend(get_v_from_y(y, body_idx, body_num))
# Velocity ODEs
for body_idx in range(body_num):
my_r = np.array(get_r_from_y(y, body_idx, body_num))
accel = np.zeros(3, dtype=np.float64)
for other_body_idx in range(body_num):
if other_body_idx == body_idx:
continue
other_r = np.array(get_r_from_y(y, other_body_idx, body_num))
accel += m[other_body_idx] * (other_r - my_r) / norm3(other_r - my_r)
f_val.extend(G*accel)
return f_val
def get_state(integration_result, initial_body_cfg, iter_idx : int):
body_num = len(initial_body_cfg.names)
y_t = integration_result.y[:, iter_idx - 1]
r, v = [], []
for body_idx in range(body_num):
r.append(get_r_from_y(y_t, body_idx, body_num))
v.append(get_v_from_y(y_t, body_idx, body_num))
r, v = np.array(r), np.array(v)
return SystemState(names=initial_body_cfg.names, m=initial_body_cfg.m, r=r, v=v)
def simulate(run_name : str, global_config : GlobalConfig, body_config : SystemState) -> None:
t_span = (0, global_config.dt*global_config.iter_num)
t_eval = np.linspace(global_config.dt, global_config.dt*global_config.iter_num, num=global_config.iter_num)
y0 = initial_condition(body_config)
assert global_config.method in METHODS
t0 = time.time()
result = scipy.integrate.solve_ivp(f, t_span, y0, method=global_config.method, t_eval=t_eval, args=(body_config.m,), first_step=global_config.dt)
# result = scipy.integrate.solve_ivp(f, t_span, y0, method=global_config.method, t_eval=t_eval, args=(body_config.m,))
print('Integration time: {:.3} s'.format(time.time() - t0))
print(result)
assert result.success
# Write down output
for do_write, iter_idx in common.get_iter_indices(global_config.iter_num, global_config.output_point_num):
if do_write:
state = get_state(result, body_config, iter_idx)
common.write_body_config(run_name, state, iter_idx)
| [
"math.sqrt",
"numpy.zeros",
"time.time",
"numpy.array",
"numpy.linspace"
] | [((2494, 2598), 'numpy.linspace', 'np.linspace', (['global_config.dt', '(global_config.dt * global_config.iter_num)'], {'num': 'global_config.iter_num'}), '(global_config.dt, global_config.dt * global_config.iter_num,\n num=global_config.iter_num)\n', (2505, 2598), True, 'import numpy as np\n'), ((2687, 2698), 'time.time', 'time.time', ([], {}), '()\n', (2696, 2698), False, 'import time\n'), ((1180, 1219), 'math.sqrt', 'sqrt', (['(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)'], {}), '(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)\n', (1184, 1219), False, 'from math import sqrt\n'), ((1525, 1554), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (1533, 1554), True, 'import numpy as np\n'), ((2215, 2226), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2223, 2226), True, 'import numpy as np\n'), ((2228, 2239), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2236, 2239), True, 'import numpy as np\n'), ((3017, 3028), 'time.time', 'time.time', ([], {}), '()\n', (3026, 3028), False, 'import time\n')] |
import os
import logging
from os.path import join as opj
import numpy as np
from tempfile import TemporaryDirectory
import rasterio
from ost.helpers import vector as vec, utils as h
logger = logging.getLogger(__name__)
def mosaic(
filelist,
outfile,
cut_to_aoi=False
):
check_file = opj(
os.path.dirname(outfile), '.{}.processed'.format(os.path.basename(outfile)[:-4])
)
logfile = opj(
os.path.dirname(outfile), '{}.errLog'.format(os.path.basename(outfile)[:-4])
)
with rasterio.open(filelist.split(' ')[0]) as src:
dtype = src.meta['dtype']
dtype = 'float' if dtype == 'float32' else dtype
with TemporaryDirectory() as temp_dir:
if cut_to_aoi:
tempfile = opj(temp_dir, os.path.basename(outfile))
else:
tempfile = outfile
cmd = ('otbcli_Mosaic -ram 4096'
' -progress 1'
' -comp.feather large'
' -harmo.method band'
' -harmo.cost rmse'
' -temp_dir {}'
' -il {}'
' -out {} {}'.format(temp_dir, filelist,
tempfile, dtype)
)
return_code = h.run_command(cmd, logfile)
if return_code != 0:
if os.path.isfile(tempfile):
os.remove(tempfile)
return
if cut_to_aoi:
# get aoi ina way rasterio wants it
features = vec.gdf_to_json_geometry(vec.wkt_to_gdf(cut_to_aoi))
# import raster and mask
with rasterio.open(tempfile) as src:
out_image, out_transform = rasterio.mask.mask(src, features, crop=True)
out_meta = src.meta.copy()
ndv = src.nodata
out_image = np.ma.masked_where(out_image == ndv, out_image)
out_meta.update({'driver': 'GTiff', 'height': out_image.shape[1],
'width': out_image.shape[2], 'transform': out_transform,
'tiled': True, 'blockxsize': 128, 'blockysize': 128})
with rasterio.open(outfile, 'w', **out_meta) as dest:
dest.write(out_image.data)
# remove intermediate file
os.remove(tempfile)
# check
return_code = h.check_out_tiff(outfile)
if return_code != 0:
if os.path.isfile(outfile):
os.remove(outfile)
# write file, so we know this ts has been succesfully processed
if return_code == 0:
with open(str(check_file), 'w') as file:
file.write('passed all tests \n')
| [
"ost.helpers.vector.wkt_to_gdf",
"os.remove",
"rasterio.open",
"tempfile.TemporaryDirectory",
"numpy.ma.masked_where",
"os.path.basename",
"os.path.dirname",
"ost.helpers.utils.run_command",
"os.path.isfile",
"ost.helpers.utils.check_out_tiff",
"rasterio.mask.mask",
"logging.getLogger"
] | [((193, 220), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (210, 220), False, 'import logging\n'), ((2262, 2287), 'ost.helpers.utils.check_out_tiff', 'h.check_out_tiff', (['outfile'], {}), '(outfile)\n', (2278, 2287), True, 'from ost.helpers import vector as vec, utils as h\n'), ((329, 353), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (344, 353), False, 'import os\n'), ((444, 468), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (459, 468), False, 'import os\n'), ((683, 703), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (701, 703), False, 'from tempfile import TemporaryDirectory\n'), ((1236, 1263), 'ost.helpers.utils.run_command', 'h.run_command', (['cmd', 'logfile'], {}), '(cmd, logfile)\n', (1249, 1263), True, 'from ost.helpers import vector as vec, utils as h\n'), ((2211, 2230), 'os.remove', 'os.remove', (['tempfile'], {}), '(tempfile)\n', (2220, 2230), False, 'import os\n'), ((2324, 2347), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (2338, 2347), False, 'import os\n'), ((1308, 1332), 'os.path.isfile', 'os.path.isfile', (['tempfile'], {}), '(tempfile)\n', (1322, 1332), False, 'import os\n'), ((1498, 1524), 'ost.helpers.vector.wkt_to_gdf', 'vec.wkt_to_gdf', (['cut_to_aoi'], {}), '(cut_to_aoi)\n', (1512, 1524), True, 'from ost.helpers import vector as vec, utils as h\n'), ((1573, 1596), 'rasterio.open', 'rasterio.open', (['tempfile'], {}), '(tempfile)\n', (1586, 1596), False, 'import rasterio\n'), ((1644, 1688), 'rasterio.mask.mask', 'rasterio.mask.mask', (['src', 'features'], {'crop': '(True)'}), '(src, features, crop=True)\n', (1662, 1688), False, 'import rasterio\n'), ((1781, 1828), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(out_image == ndv)', 'out_image'], {}), '(out_image == ndv, out_image)\n', (1799, 1828), True, 'import numpy as np\n'), ((2079, 2118), 'rasterio.open', 'rasterio.open', (['outfile', '"""w"""'], 
{}), "(outfile, 'w', **out_meta)\n", (2092, 2118), False, 'import rasterio\n'), ((2361, 2379), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (2370, 2379), False, 'import os\n'), ((378, 403), 'os.path.basename', 'os.path.basename', (['outfile'], {}), '(outfile)\n', (394, 403), False, 'import os\n'), ((489, 514), 'os.path.basename', 'os.path.basename', (['outfile'], {}), '(outfile)\n', (505, 514), False, 'import os\n'), ((777, 802), 'os.path.basename', 'os.path.basename', (['outfile'], {}), '(outfile)\n', (793, 802), False, 'import os\n'), ((1350, 1369), 'os.remove', 'os.remove', (['tempfile'], {}), '(tempfile)\n', (1359, 1369), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from wisdem.ccblade.ccblade import CCAirfoil, CCBlade
plot_flag = False
# geometry
Rhub = 1.5
Rtip = 63.0
r = np.array(
[
2.8667,
5.6000,
8.3333,
11.7500,
15.8500,
19.9500,
24.0500,
28.1500,
32.2500,
36.3500,
40.4500,
44.5500,
48.6500,
52.7500,
56.1667,
58.9000,
61.6333,
]
)
chord = np.array(
[
3.542,
3.854,
4.167,
4.557,
4.652,
4.458,
4.249,
4.007,
3.748,
3.502,
3.256,
3.010,
2.764,
2.518,
2.313,
2.086,
1.419,
]
)
theta = np.array(
[
13.308,
13.308,
13.308,
13.308,
11.480,
10.162,
9.011,
7.795,
6.544,
5.361,
4.188,
3.125,
2.319,
1.526,
0.863,
0.370,
0.106,
]
)
B = 3 # number of blades
tilt = 5.0
precone = 2.5
yaw = 0.0
nSector = 8 # azimuthal discretization
# atmosphere
rho = 1.225
mu = 1.81206e-5
# power-law wind shear profile
shearExp = 0.2
hubHt = 90.0
afinit = CCAirfoil.initFromAerodynFile # just for shorthand
# load all airfoils
airfoil_types = [0] * 8
airfoil_types[0] = afinit("../_airfoil_files/Cylinder1.dat")
airfoil_types[1] = afinit("../_airfoil_files/Cylinder2.dat")
airfoil_types[2] = afinit("../_airfoil_files/DU40_A17.dat")
airfoil_types[3] = afinit("../_airfoil_files/DU35_A17.dat")
airfoil_types[4] = afinit("../_airfoil_files/DU30_A17.dat")
airfoil_types[5] = afinit("../_airfoil_files/DU25_A17.dat")
airfoil_types[6] = afinit("../_airfoil_files/DU21_A17.dat")
airfoil_types[7] = afinit("../_airfoil_files/NACA64_A17.dat")
# place at appropriate radial stations
af_idx = [0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7]
af = [0] * len(r)
for i in range(len(r)):
af[i] = airfoil_types[af_idx[i]]
# 1 ----------
precone = 0.0
precurve = np.linspace(0, 4.9, len(r)) ** 2
precurveTip = 25.0
# create CCBlade object
rotor = CCBlade(
r,
chord,
theta,
af,
Rhub,
Rtip,
B,
rho,
mu,
precone,
tilt,
yaw,
shearExp,
hubHt,
nSector,
precurve=precurve,
precurveTip=precurveTip,
)
# 1 ----------
if plot_flag:
plt.plot(precurve, r, "k")
plt.plot(precurve, -r, "k")
plt.axis("equal")
plt.grid()
plt.savefig("rotorshape.pdf")
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"wisdem.ccblade.ccblade.CCBlade",
"numpy.array",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] | [((166, 303), 'numpy.array', 'np.array', (['[2.8667, 5.6, 8.3333, 11.75, 15.85, 19.95, 24.05, 28.15, 32.25, 36.35, \n 40.45, 44.55, 48.65, 52.75, 56.1667, 58.9, 61.6333]'], {}), '([2.8667, 5.6, 8.3333, 11.75, 15.85, 19.95, 24.05, 28.15, 32.25, \n 36.35, 40.45, 44.55, 48.65, 52.75, 56.1667, 58.9, 61.6333])\n', (174, 303), True, 'import numpy as np\n'), ((484, 617), 'numpy.array', 'np.array', (['[3.542, 3.854, 4.167, 4.557, 4.652, 4.458, 4.249, 4.007, 3.748, 3.502, \n 3.256, 3.01, 2.764, 2.518, 2.313, 2.086, 1.419]'], {}), '([3.542, 3.854, 4.167, 4.557, 4.652, 4.458, 4.249, 4.007, 3.748, \n 3.502, 3.256, 3.01, 2.764, 2.518, 2.313, 2.086, 1.419])\n', (492, 617), True, 'import numpy as np\n'), ((771, 909), 'numpy.array', 'np.array', (['[13.308, 13.308, 13.308, 13.308, 11.48, 10.162, 9.011, 7.795, 6.544, 5.361,\n 4.188, 3.125, 2.319, 1.526, 0.863, 0.37, 0.106]'], {}), '([13.308, 13.308, 13.308, 13.308, 11.48, 10.162, 9.011, 7.795, \n 6.544, 5.361, 4.188, 3.125, 2.319, 1.526, 0.863, 0.37, 0.106])\n', (779, 909), True, 'import numpy as np\n'), ((2163, 2309), 'wisdem.ccblade.ccblade.CCBlade', 'CCBlade', (['r', 'chord', 'theta', 'af', 'Rhub', 'Rtip', 'B', 'rho', 'mu', 'precone', 'tilt', 'yaw', 'shearExp', 'hubHt', 'nSector'], {'precurve': 'precurve', 'precurveTip': 'precurveTip'}), '(r, chord, theta, af, Rhub, Rtip, B, rho, mu, precone, tilt, yaw,\n shearExp, hubHt, nSector, precurve=precurve, precurveTip=precurveTip)\n', (2170, 2309), False, 'from wisdem.ccblade.ccblade import CCAirfoil, CCBlade\n'), ((2412, 2438), 'matplotlib.pyplot.plot', 'plt.plot', (['precurve', 'r', '"""k"""'], {}), "(precurve, r, 'k')\n", (2420, 2438), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2470), 'matplotlib.pyplot.plot', 'plt.plot', (['precurve', '(-r)', '"""k"""'], {}), "(precurve, -r, 'k')\n", (2451, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2492), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2483, 2492), True, 'import 
matplotlib.pyplot as plt\n'), ((2497, 2507), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2505, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2541), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rotorshape.pdf"""'], {}), "('rotorshape.pdf')\n", (2523, 2541), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2554, 2556), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from rdkit import Chem
# bond mapping
bond_dict = {'SINGLE': 0, 'DOUBLE': 1, 'TRIPLE': 2, "AROMATIC": 3}
number_to_bond = {0: Chem.rdchem.BondType.SINGLE,
1: Chem.rdchem.BondType.DOUBLE,
2: Chem.rdchem.BondType.TRIPLE,
3: Chem.rdchem.BondType.AROMATIC}
bond_dir_dict = {'NONE': 0,
'BEGINWEDGE': 1,
'BEGINDASH': 2,
'ENDDOWNRIGHT': 3,
'ENDUPRIGHT': 4,
'EITHERDOUBLE': 5,
'UNKNOWN': 6}
number_to_bond_dir = {0: Chem.rdchem.BondDir.NONE,
1: Chem.rdchem.BondDir.BEGINWEDGE,
2: Chem.rdchem.BondDir.BEGINDASH,
3: Chem.rdchem.BondDir.ENDDOWNRIGHT,
4: Chem.rdchem.BondDir.ENDUPRIGHT,
5: Chem.rdchem.BondDir.EITHERDOUBLE,
6: Chem.rdchem.BondDir.UNKNOWN}
chi_dict = {'CHI_UNSPECIFIED': 0,
'CHI_TETRAHEDRAL_CW': 1,
'CHI_TETRAHEDRAL_CCW': 2,
'CHI_OTHER': 3}
number_to_chi = {0: Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
1: Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
2: Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
3: Chem.rdchem.ChiralType.CHI_OTHER}
def dataset_info(dataset):
if dataset == 'qm9':
values = {'atom_types': ["H", "C", "N", "O", "F"],
'maximum_valence': {0: 1, 1: 4, 2: 3, 3: 2, 4: 1},
'hist_dim': 4,
'max_valence_value': 9,
'max_n_atoms': 30,
'number_to_atom': {0: "H", 1: "C", 2: "N", 3: "O", 4: "F"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
# 'n_edges': [6788282, 1883788, 222444, 63914],
# 'n_nodes': [0, 729895, 120522, 161809, 2828],
'n_edges': [3, 1, 1, 1],
'n_nodes': [1, 1, 1, 1, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'qm9_ev':
values = {'atom_types': ["C1", "N1", "N4", "F1", "N3", "C3", "C4", "O2", "O4",
"N2", "C2", "O1", "O3"],
'maximum_valence': {0: 1, 1: 1, 2: 4, 3: 1, 4: 3, 5: 3, 6: 4, 7: 2, 8: 4,
9: 2, 10: 2, 11: 1, 12: 3},
'hist_dim': 4,
'max_valence_value': 9,
'max_n_atoms': 30,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "F", 4: "N", 5: "C", 6: "C", 7: "O", 8: "O",
9: "N", 10: "C", 11: "O", 12: "O"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
'n_edges': [6788282, 1883788, 222444, 63914],
'n_nodes': [99645, 11763, 19003, 2828, 55743, 265029, 166614, 116325, 0,
34013, 198607, 45483, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'qm9_ev2':
values = {'atom_types': ["C1(0)", "N1(0)", "N4(1)", "F1(0)", "N3(0)", "C3(0)", "C4(0)", "O2(0)",
"N2(0)", "C2(0)", "O1(0)", "N2(-1)", "C4(1)", "C3(1)", "C3(-1)", "O1(-1)",
"N3(1)", "O3(1)"],
'maximum_valence': {0: 1, 1: 1, 2: 4, 3: 1, 4: 3, 5: 3, 6: 4, 7: 2,
8: 2, 9: 2, 10: 1, 11: 2, 12: 4, 13: 3, 14: 3, 15: 1,
16: 1, 17: 3},
'hist_dim': 4,
'max_valence_value': 9,
'max_n_atoms': 30,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "F", 4: "N", 5: "C", 6: "C", 7: "O",
8: "N", 9: "C", 10: "O", 11: "N", 12: "C", 13: "C", 14: "C", 15: "O",
16: "N", 17: "O"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
'n_edges': [6788282, 1883788, 222444, 63914],
'n_nodes': [99645, 11763, 19003, 2828, 55738, 260947, 166171, 116325,
26110, 198607, 45188, 7903, 443, 337, 705, 295,
5, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'qm9_long':
values = {'atom_types': ["C4(0)", "N3(0)", "N2(-1)", "O2(0)", "F1(0)", "C3(-1)", "N4(1)", "C4(1)", "C3(1)",
"O1(-1)", "N3(1)", "C2(0)", "O3(1)"],
'maximum_valence': {0: 4, 1: 3, 2: 2, 3: 2, 4: 1, 5: 3, 6: 4, 7: 4, 8: 3,
9: 1, 10: 3, 11: 2, 12: 3},
'hist_dim': 4,
'max_valence_value': 9,
'max_n_atoms': 30,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "O", 4: "F", 5: "C", 6: "N", 7: "C", 8: "C",
9: "O", 10: "N", 11: "C", 12: "O"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
'n_edges': [6788282, 1883788, 222444, 63914],
'n_nodes': [725369, 93611, 7903, 161513, 2828, 705, 19003, 443, 3377,
295, 5, 1, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'qm9_long2':
values = {
'atom_types': ["C4(0)0", "N3(0)0", "N2(-1)0", "O2(0)0", "F1(0)0", "C3(-1)0", "N4(1)0", "C4(1)0", "C3(1)0",
"O1(-1)0", "N3(1)0", "C2(0)0", "O3(1)0", "C4(0)1"],
'maximum_valence': {0: 4, 1: 3, 2: 2, 3: 2, 4: 1, 5: 3, 6: 4, 7: 4, 8: 3,
9: 1, 10: 3, 11: 2, 12: 3, 13: 4},
'hist_dim': 4,
'max_valence_value': 9,
'max_n_atoms': 30,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "O", 4: "F", 5: "C", 6: "N", 7: "C", 8: "C",
9: "O", 10: "N", 11: "C", 12: "O", 13: "C"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
# 'n_edges': [6788282, 1883788, 222444, 63914],
# 'n_nodes': [725365, 93611, 7903, 161513, 2828, 705, 19003, 443, 3377,
# 295, 5, 1, 1, 4],
'n_edges': [3, 1, 1, 1],
'n_nodes': [1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'zinc':
values = {'atom_types': ["H", "C", "N", "O", "F", "S", "Cl", "Br", "I"],
'maximum_valence': {0: 1, 1: 4, 2: 3, 3: 2, 4: 1, 5: 6, 6: 7, 7: 5, 8: 7},
'hist_dim': 7,
'max_valence_value': 34,
'max_n_atoms': 85,
'number_to_atom': {0: "H", 1: "C", 2: "N", 3: "O", 4: "F", 5: "S", 6: "Cl", 7: "Br", 8: "I"},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
# 'n_edges': [111623688, 8153922, 2791900, 27394],
# 'n_nodes': [0, 3764414, 624130, 509483, 70097, 90962, 90962, 11220, 800],
'n_edges': [3, 1, 1, 1],
'n_nodes': [1, 1, 1, 1, 1, 1, 1, 1, 1],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'zinc_ev':
values = {'atom_types': ["C1", "N1", "N4", "F1", "N3", "C3", "C4", "O2", "O4", "N2", "C2", "O1",
"O3", "S6", "S2", "Br1", "Cl1", "S4", "I1",
"S1", "S3"],
'maximum_valence': {0: 1, 1: 1, 2: 4, 3: 1, 4: 3, 5: 3, 6: 4, 7: 2, 8: 4, 9: 2, 10: 2, 11: 1,
12: 3, 13: 6, 14: 2, 15: 1, 16: 1, 17: 4, 18: 1,
19: 1, 20: 3},
'hist_dim': 6,
'max_valence_value': 34, # used in hist
'max_n_atoms': 85,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "F", 4: "N", 5: "C", 6: "C", 7: "O", 8: "O",
9: "N", 10: "C", 11: "O", 12: "O", 13: "S", 14: "S", 15: "Br", 16: "Cl", 17: "S",
18: "I",
19: "S", 20: "S"},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
'n_edges': [6788282, 1883788, 222444, 63914],
'n_nodes': [390455, 15355, 68143, 70045, 376805, 1212178, 1327055, 461739, 0, 163746, 834636, 47802,
16, 24669, 63488, 11224, 37885, 1995, 783,
805, 5],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'zinc_ev2':
values = {'atom_types': ["C1(0)", "N1(0)", "N4(1)", "F1(0)", "N3(0)", "C3(0)", "C4(0)", "O2(0)",
"N2(0)", "C2(0)", "O1(0)", "O3(1)", "S6(0)", "S2(0)", "Br1(0)", "Cl1(0)",
"S4(0)", "I1(0)", "S1(0)", "S3(1)", "O1(-1)", "N2(-1)", "S1(-1)", "C3(-1)"],
'maximum_valence': {0: 1, 1: 1, 2: 4, 3: 1, 4: 3, 5: 3, 6: 4, 7: 2,
8: 2, 9: 2, 10: 1, 11: 3, 12: 6, 13: 2, 14: 1, 15: 1,
16: 4, 17: 1, 18: 1, 19: 3, 20: 1, 21: 2, 22: 1, 23: 3},
'hist_dim': 6,
'max_valence_value': 34, # used in hist
'max_n_atoms': 85,
'number_to_atom': {0: "C", 1: "N", 2: "N", 3: "F", 4: "N", 5: "C", 6: "C", 7: "O",
8: "N", 9: "C", 10: "O", 11: "O", 12: "S", 13: "S", 14: "Br", 15: "Cl", 16: "S",
17: "I",
18: "S", 19: "S", 20: "O", 21: "N", 22: "S", 23: "C"},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
'n_edges': [6788282, 1883788, 222444, 63914],
'n_nodes': [389601, 15334, 68041, 70003, 376904, 1212826, 1326480, 461336,
162405, 834993, 26444, 13, 24696, 63532, 11265, 37927,
1983, 803, 420, 6, 21380, 1362, 403, 3],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'zinc_long':
values = {
'atom_types': ['Br1(0)', 'C4(0)', 'Cl1(0)', 'F1(0)', 'H1(0)', 'I1(0)', 'N2(-1)', 'N3(0)', 'N4(1)', 'O1(-1)',
'O2(0)', 'S2(0)', 'S4(0)', 'S6(0)'],
'maximum_valence': {0: 1, 1: 4, 2: 1, 3: 1, 4: 1, 5: 1, 6: 2, 7: 3, 8: 4, 9: 1,
10: 2, 11: 2, 12: 4, 13: 6},
'hist_dim': 6,
'max_valence_value': 34,
'max_n_atoms': 85,
'number_to_atom': {0: 'Br', 1: 'C', 2: 'Cl', 3: 'F', 4: 'H', 5: 'I', 6: 'N', 7: 'N', 8: 'N', 9: 'O',
10: 'O', 11: 'S', 12: 'S', 13: 'S'},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
'n_edges': [111623688, 8153922, 2791900, 27394],
'n_nodes': [11251, 3758488, 37721, 70303, 0, 799, 1337, 553583, 67890, 21442,
487609, 63815, 1980, 24630],
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'zinc_long2':
values = {
'atom_types': ['Br1(0)0', 'C4(0)0', 'Cl1(0)0', 'F1(0)0', 'H1(0)0', 'I1(0)0', 'N2(-1)0', 'N3(0)0', 'N4(1)0',
'O1(-1)0',
'O2(0)0', 'S2(0)0', 'S4(0)0', 'S6(0)0', "C4(0)1", "C4(0)2", 'S4(0)2', 'S1(-1)0', 'S4(0)1',
"O3(1)0", 'S6(0)2', "P5(0)0", "P5(0)1", "P4(1)0", "S3(1)0", "C3(-1)0", "P5(0)2", "P3(0)0",
"S6(0)1", "S3(1)1"],
'maximum_valence': {0: 1, 1: 4, 2: 1, 3: 1, 4: 1, 5: 1, 6: 2, 7: 3, 8: 4, 9: 1,
10: 2, 11: 2, 12: 4, 13: 6, 14: 4, 15: 4, 16: 4, 17: 1, 18: 4,
19: 3, 20: 6, 21: 5, 22: 5, 23: 4, 24: 3, 25: 3, 26: 5, 27: 3,
28: 6, 29: 3},
'hist_dim': 6,
'max_valence_value': 34,
'max_n_atoms': 85,
'number_to_atom': {0: 'Br', 1: 'C', 2: 'Cl', 3: 'F', 4: 'H', 5: 'I', 6: 'N', 7: 'N', 8: 'N', 9: 'O',
10: 'O', 11: 'S', 12: 'S', 13: 'S', 14: "C", 15: "C", 16: "S", 17: "S", 18: "S",
19: "O", 20: "S", 21: "P", 22: "P", 23: "P", 24: "S", 25: "C", 26: "P", 27: "P",
28: "S", 29: "S"},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
# 'bucket_sizes': np.array(list(range(10, 40, 2))),
# 'n_edges': [111623688, 8153922, 2791900, 27394],
# 'n_nodes': [11233, 3570490, 37961, 70252, 0, 785, 1363, 555064, 68066, 21567,
# 488317, 63847, 80, 24651, 94566, 100218, 930, 392, 955,
# 16, 3, 55, 27, 2, 5, 3, 22, 4,
# 5, 1],
'n_edges': [3, 1, 1, 1],
'n_nodes': [1] * 30,
'batch_size': 100,
'n_epochs': 200,
}
elif dataset == 'moses':
values = {
'atom_types': ['Cl1(0)0', 'S2(0)0', 'N3(0)0', 'O2(0)0', 'F1(0)0', 'C4(0)0', 'S6(0)0', 'S4(0)0', 'Br1(0)0'],
'maximum_valence': {0: 1, 1: 2, 2: 3, 3: 2, 4: 1, 5: 4, 6: 6, 7: 4, 8: 1},
'hist_dim': 6,
'max_valence_value': 34,
'max_n_atoms': 85,
'number_to_atom': {0: 'Cl', 1: 'S', 2: 'N', 3: 'O', 4: 'F', 5: 'C', 6: 'S', 7: 'S', 8: 'B'},
'bucket_sizes': np.array(
[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58,
84]),
# 'n_edges': [6.06030803e+08, 4.75575800e+07, 1.80507840e+07, 2.27430000e+05],
# 'n_nodes': [1.6752500e+05, 3.4812500e+05, 4.1872520e+06, 3.1869590e+06, 4.4005500e+05, 2.2158453e+07, 1.5217000e+05, 2.7260000e+03, 4.7220000e+04],
'n_edges': [3, 1, 1, 1],
'n_nodes': [1] * 9,
'batch_size': 512,
}
else:
print("Error: The datasets that you could use are QM9 or ZINC, not " + str(dataset))
exit(1)
# loss existing edges
edges_to_consider = values['n_edges'][1:] # the first position represent the non-present edges
n_no_edges = values['n_edges'][0]
n_yes_edges = sum(edges_to_consider)
n_no_edges = max(n_no_edges, n_yes_edges) / n_no_edges
n_yes_edges = max(n_no_edges, n_yes_edges) / n_yes_edges
values['loss_no_edge'] = n_no_edges / max(n_no_edges, n_yes_edges)
values['loss_yes_edge'] = n_yes_edges / max(n_no_edges, n_yes_edges)
# values['loss_no_edge'] = 1
# values['loss_yes_edge'] = n_no_edges / n_yes_edges
# values not normalized
values['loss_node_weights'] = [max(values['n_nodes']) / i if i > 0 else 1
for i in values['n_nodes']]
# normalized values
values['loss_node_weights'] = [i / max(values['loss_node_weights'])
for i in values['loss_node_weights']]
# values['loss_node_weights'] = [1 if i > 0 else 1
# for i in values['n_nodes']]
# values not normalized
values['loss_edge_weights'] = [max(edges_to_consider) / i if i > 0 else 1
for i in edges_to_consider]
# normalized values
values['loss_edge_weights'] = [i / max(values['loss_edge_weights'])
for i in values['loss_edge_weights']]
# values['loss_edge_weights'] = [1 if i > 0 else 1
# for i in edges_to_consider]
return values
def dataset_atom_rep(dataset, atom):
    """Return the string encoding used for an RDKit atom in `dataset`.

    'qm9'/'zinc' use the bare element symbol; the '*_ev' variants append
    the explicit valence; '*_ev2' append explicit valence and formal
    charge; '*_long' use total valence and formal charge; '*_long2' and
    'moses' additionally append the chiral tag digit.
    """
    if dataset in ('qm9', 'zinc'):
        return atom.GetSymbol()
    if dataset in ('qm9_ev', 'zinc_ev'):
        return "%s%i" % (atom.GetSymbol(), atom.GetExplicitValence())
    if dataset in ('qm9_ev2', 'zinc_ev2'):
        return "%s%i(%i)" % (atom.GetSymbol(), atom.GetExplicitValence(),
                             atom.GetFormalCharge())
    if dataset in ('qm9_long', 'zinc_long'):
        return "%s%i(%i)" % (atom.GetSymbol(), atom.GetTotalValence(),
                             atom.GetFormalCharge())
    if dataset in ('qm9_long2', 'zinc_long2', 'moses'):
        return "%s%i(%i)%i" % (atom.GetSymbol(), atom.GetTotalValence(),
                               atom.GetFormalCharge(), atom.GetChiralTag())
    print('Unrecognized dataset')
    exit(1)
def add_atoms(new_mol, node_symbol, dataset):
    """Append one RDKit atom to `new_mol` per atom-type index in `node_symbol`.

    Depending on the dataset encoding, the atom-type string may carry a
    formal charge in '(...)' and, for the '*2'/'moses' variants, a
    trailing chiral-tag digit; both are decoded and applied to the atom.
    Unknown dataset names abort the program.
    """
    info = dataset_info(dataset)  # deterministic lookup table; hoisted out of the loop
    for number in node_symbol:
        if dataset in ('qm9', 'zinc', 'qm9_ev', 'zinc_ev'):
            # plain element symbol, no extra state to decode
            new_mol.AddAtom(Chem.Atom(info['number_to_atom'][number]))
        elif dataset in ('qm9_ev2', 'zinc_ev2', 'qm9_long', 'zinc_long'):
            # atom type looks like 'C4(-1)': charge between the parentheses
            new_atom = Chem.Atom(info['number_to_atom'][number])
            charge_num = int(info['atom_types'][number].split('(')[1].strip(')'))
            new_atom.SetFormalCharge(charge_num)
            new_mol.AddAtom(new_atom)
        elif dataset in ('qm9_long2', 'zinc_long2', 'moses'):
            # atom type looks like 'C4(-1)2': trailing digit is the chiral tag
            new_atom = Chem.Atom(info['number_to_atom'][number])
            charge_num = int(info['atom_types'][number][:-1].split('(')[1].strip(')'))
            chi_number = int(info['atom_types'][number][-1])
            new_atom.SetFormalCharge(charge_num)
            new_atom.SetChiralTag(number_to_chi[chi_number])
            new_mol.AddAtom(new_atom)
        else:
            print('Unrecognized dataset')
            exit(1)
def add_bonds(new_mol, bond, row, col, dataset):
    """Add a bond of encoded type `bond` between atoms `row` and `col`.

    `dataset` is accepted for interface symmetry with add_atoms but is
    not used here.  The original code also extracted
    ``number_to_bond[bond][-1]`` into a local that was never used; that
    dead assignment is removed.
    """
    # assumes number_to_bond[bond] is a sequence whose trailing element is
    # auxiliary metadata, and that slicing off that element yields a value
    # AddBond accepts -- TODO confirm against number_to_bond's definition
    bond_str = number_to_bond[bond][:-1]
    new_mol.AddBond(int(row), int(col), bond_str)
| [
"numpy.array"
] | [((7060, 7167), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (7068, 7167), True, 'import numpy as np\n'), ((8570, 8677), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (8578, 8677), True, 'import numpy as np\n'), ((10254, 10361), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (10262, 10361), True, 'import numpy as np\n'), ((11503, 11610), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (11511, 11610), True, 'import numpy as np\n'), ((13249, 13356), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (13257, 13356), True, 'import numpy as np\n'), ((14401, 14508), 'numpy.array', 'np.array', (['[28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 53, 55, 58, 84]'], {}), '([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n 49, 50, 51, 53, 55, 58, 84])\n', (14409, 14508), True, 'import numpy as np\n')] |
import numpy as np
import configparser
import json
import heapq
import tensorflow as tf
class KNN_Sequence:
    """k-nearest-neighbour classifier for variable-length feature sequences.

    Sequences are bucketed by their length: train() caches one
    './data/knn/l_<len>.npz' archive per length, and prediction only
    compares a query sequence against stored sequences of the same
    length, ranking them by summed squared feature distance.
    """

    def __init__(self):
        # The tf graph/session are built lazily by test(); test_cpu()
        # works purely in numpy and never needs them.
        self.graph = None
        self.sess = None
        self.dtype = tf.float32
        return

    @staticmethod
    def _group_ids_by_length(ids, samples_length):
        """Group sample ids by sequence length: {length: [id, ...]}."""
        groups = {}
        for i in ids:
            groups.setdefault(samples_length[i, 0], []).append(i)
        return groups

    @staticmethod
    def _collect_samples(data, labels, ids, sample_len):
        """Stack the length-`sample_len` sequences and labels for `ids`.

        A sample with id i occupies rows [i, i + sample_len) of `data`;
        its label is taken from labels[i, 0].
        """
        part_features = np.zeros((len(ids), sample_len, data.shape[1]))
        part_labels = np.zeros((len(ids), 1))
        for i, sample in enumerate(ids):
            part_features[i, :, :] = data[sample:sample + sample_len, :]
            part_labels[i, 0] = labels[sample, 0]
        return part_features, part_labels

    @staticmethod
    def _majority_label(nearest_labels, k):
        """Return the most frequent label among the k nearest neighbours.

        BUG FIX: the original never updated `max_time`, so every label
        with a positive count overwrote the previous candidate and the
        vote degenerated to "last distinct label seen" instead of the
        mode.
        """
        count = {}
        for j in range(k):
            count[nearest_labels[j, 0]] = count.get(nearest_labels[j, 0], 0) + 1
        max_label = None
        max_time = 0
        for each in count:
            if count[each] > max_time:
                max_label = each
                max_time = count[each]
        return max_label

    def train(self, training_data, labels, train_set_sample_ids, samples_length):
        """'Training': cache the training sequences, grouped by length,
        as ./data/knn/l_<len>.npz archives for later lookup."""
        self.training_data = training_data
        self.labels = labels
        self.train_set_sample_ids = train_set_sample_ids
        self.samples_length = samples_length
        data_id_by_length = self._group_ids_by_length(train_set_sample_ids,
                                                      samples_length)
        for each in data_id_by_length:
            ids = data_id_by_length[each]
            sample_len = int(each)
            print(str(each)+':', len(ids))
            part_features, part_labels = self._collect_samples(
                self.training_data, self.labels, ids, sample_len)
            file_name = './data/knn/l_%d.npz' % sample_len
            np.savez(file_name, features=part_features, labels=part_labels)
        return

    def test_cpu(self, data, labels, test_set_ids, samples_length, k):
        """Evaluate the classifier on `test_set_ids` with plain numpy and
        print per-length and overall accuracy."""
        data_id_by_length = self._group_ids_by_length(test_set_ids,
                                                      samples_length)
        final_labels = []
        final_predicts = []
        all_part_accuracy = []
        for each_length in data_id_by_length:
            ids = data_id_by_length[each_length]
            sample_len = int(each_length)
            print(str(each_length) + ':', len(ids))
            part_features, part_labels = self._collect_samples(
                data, labels, ids, sample_len)
            predict_labels = self.predict_cpu(part_features, sample_len, k)
            final_labels.append(part_labels)
            final_predicts.append(predict_labels)
            part_accuracy = np.mean((part_labels == predict_labels)*1.)
            print('L%d accuracy: %f' % (sample_len, part_accuracy))
            all_part_accuracy.append(part_accuracy)
        # overall accuracy = per-length accuracy weighted by group size
        whole_accuracy = 0.
        for i in range(len(all_part_accuracy)):
            whole_accuracy += all_part_accuracy[i] * final_labels[i].shape[0]
        whole_accuracy /= len(test_set_ids)
        print('whole_accuracy: %f' % whole_accuracy)
        # BUG FIX: test_cpu() never opens a tf session, so the original
        # unconditional self.sess.close() raised AttributeError on None
        # (exactly the call path used by __main__).
        if self.sess is not None:
            self.sess.close()
        return

    def predict_cpu(self, data_features, sample_len, k):
        """Predict labels for a batch of length-`sample_len` sequences by
        brute-force numpy nearest-neighbour search against the cached
        ./data/knn/l_<len>.npz archive."""
        sample_len = int(sample_len)
        saved_data = np.load('./data/knn/l_%d.npz' % sample_len)
        saved_features = saved_data['features']
        saved_labels = saved_data['labels']
        test_sample_num = data_features.shape[0]
        print('samples num to compare %d of L%d' % (saved_labels.shape[0], sample_len))
        test_labels = np.zeros((test_sample_num, 1))
        for i in range(test_sample_num):
            print('\x1B[1A\x1B[K%d/%d\r' % (i, test_sample_num))
            for_test = data_features[i, :, :]
            # squared Euclidean distance summed over time and feature axes
            distance = np.sum(np.sum((for_test-saved_features) ** 2, axis=2), axis=1)
            print(distance.shape)
            nearest_index = heapq.nsmallest(k, range(saved_features.shape[0]), distance.take)
            nearest_labels = saved_labels[nearest_index, :]
            test_labels[i, 0] = self._majority_label(nearest_labels, k)
        print('test L%d over.' % sample_len)
        return test_labels

    def test(self, data, labels, test_set_ids, samples_length, k):
        """Evaluate on `test_set_ids` with the tf-accelerated search.

        Builds one distance/top-k subgraph per sequence length, then
        mirrors test_cpu().
        """
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        data_id_by_length = self._group_ids_by_length(test_set_ids,
                                                      samples_length)
        self.tf_cal_node = {}
        with self.graph.as_default():
            for each_length in data_id_by_length:
                sample_len = int(each_length)
                self.tf_cal_node[sample_len] = self._build_distance_calculate_top_k(
                    sample_len, data.shape[1], k)
        print(self.tf_cal_node.keys())
        final_labels = []
        final_predicts = []
        all_part_accuracy = []
        for each_length in data_id_by_length:
            ids = data_id_by_length[each_length]
            sample_len = int(each_length)
            print(str(each_length) + ':', len(ids))
            part_features, part_labels = self._collect_samples(
                data, labels, ids, sample_len)
            predict_labels = self.predict(part_features, sample_len, k)
            final_labels.append(part_labels)
            final_predicts.append(predict_labels)
            part_accuracy = np.mean((part_labels == predict_labels)*1.)
            print('L%d accuracy: %f' % (sample_len, part_accuracy))
            all_part_accuracy.append(part_accuracy)
        whole_accuracy = 0.
        for i in range(len(all_part_accuracy)):
            whole_accuracy += all_part_accuracy[i] * final_labels[i].shape[0]
        whole_accuracy /= len(test_set_ids)
        print('whole_accuracy: %f' % whole_accuracy)
        self.sess.close()
        return

    def predict(self, data_features, sample_len, k):
        """Like predict_cpu(), but the distance + top-k search runs in the
        tf subgraph prepared by test() for this sequence length."""
        sample_len = int(sample_len)
        saved_data = np.load('./data/knn/l_%d.npz' % sample_len)
        saved_features = saved_data['features']
        saved_labels = saved_data['labels']
        test_sample_num = data_features.shape[0]
        print('samples num to compare %d of L%d' % (saved_labels.shape[0], sample_len))
        cal_node = self.tf_cal_node[sample_len]
        test_labels = np.zeros((test_sample_num, 1))
        for i in range(test_sample_num):
            print('%d/%d' % (i, test_sample_num))
            for_test = data_features[i, :, :]
            nearest_index = self.sess.run(
                cal_node['topk_indices'],
                feed_dict={cal_node['for_test']: for_test,
                           cal_node['saved_features']: saved_features})
            nearest_labels = saved_labels[nearest_index, :]
            test_labels[i, 0] = self._majority_label(nearest_labels, k)
        print('test L%d over.' % sample_len)
        return test_labels

    def _build_distance_calculate_top_k_v2(self, sample_length, features_size, k, saved_features):
        """Variant that bakes the saved features into a tf.Variable.

        NOTE(review): unused by test(), and the variable is never
        explicitly initialized here -- confirm initialization before
        switching to this path.
        """
        for_test = tf.placeholder(self.dtype, shape=[sample_length, features_size],
                                  name='L%d_place_holder' % sample_length)
        saved_features = tf.Variable(initial_value=saved_features, dtype=self.dtype,
                                     name='L%d_saved_features' % sample_length)
        distance = tf.reduce_sum(tf.pow(saved_features-for_test, 2), axis=[2, 1])
        top_nearest = tf.nn.top_k(-distance, k)
        return {'for_test': for_test, 'topk_indices': top_nearest.indices}

    def _build_distance_calculate_top_k(self, sample_length, features_size, k):
        """Build placeholders and a top-k smallest-distance subgraph for
        one sequence length."""
        for_test = tf.placeholder(self.dtype, shape=[sample_length, features_size])
        saved_features = tf.placeholder(self.dtype, shape=[None, sample_length, features_size])
        # negate the distances so tf.nn.top_k returns the k *smallest*
        distance = tf.reduce_sum(tf.pow(saved_features-for_test, 2), axis=[2, 1])
        top_nearest = tf.nn.top_k(-distance, k)
        return {'for_test': for_test, 'saved_features': saved_features,
                'topk_indices': top_nearest.indices}
if __name__ == '__main__':
    # Experiment driver: read shared hyper-parameters from common_para.ini,
    # load the feature archive and the train/test id split, then run a
    # CPU-only KNN evaluation.
    common_para = configparser.ConfigParser()
    common_para.read('common_para.ini')
    # read for parity with sibling scripts; not referenced below
    sequence_fix_length = common_para['common_parameters'].getint('sequence_fix_length')
    foresight_steps = common_para['common_parameters'].getint('foresight_steps')
    class_num = common_para['common_parameters'].getint('class_num')
    # npz archive with 'features', 'labels' and 'samples_length' arrays
    data = np.load(common_para['path']['data_file'])
    # print(data['features'])
    # print(data['labels'])
    # print(data['samples_length'])
    ### generate training samples' id
    with open(common_para['path']['data_set_ids_file'], 'r') as f:
        data_set_ids = json.load(f)
    training_sample_ids = data_set_ids['training_set']
    test_sample_ids = data_set_ids['test_set']
    knn = KNN_Sequence()
    # knn.train(data['features'], data['labels'], training_sample_ids, data['samples_length'])
    # evaluate on (at most) the first 100k test ids with k=100 neighbours
    knn.test_cpu(data['features'], data['labels'], test_sample_ids[:100000], data['samples_length'], 100)
| [
"numpy.load",
"json.load",
"numpy.sum",
"tensorflow.nn.top_k",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.pow",
"tensorflow.placeholder",
"tensorflow.Variable",
"numpy.mean",
"tensorflow.Graph",
"numpy.savez",
"configparser.ConfigParser"
] | [((9620, 9647), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (9645, 9647), False, 'import configparser\n'), ((9939, 9980), 'numpy.load', 'np.load', (["common_para['path']['data_file']"], {}), "(common_para['path']['data_file'])\n", (9946, 9980), True, 'import numpy as np\n'), ((3155, 3198), 'numpy.load', 'np.load', (["('./data/knn/l_%d.npz' % sample_len)"], {}), "('./data/knn/l_%d.npz' % sample_len)\n", (3162, 3198), True, 'import numpy as np\n'), ((3450, 3480), 'numpy.zeros', 'np.zeros', (['(test_sample_num, 1)'], {}), '((test_sample_num, 1))\n', (3458, 3480), True, 'import numpy as np\n'), ((4448, 4458), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4456, 4458), True, 'import tensorflow as tf\n'), ((4479, 4507), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (4489, 4507), True, 'import tensorflow as tf\n'), ((6841, 6884), 'numpy.load', 'np.load', (["('./data/knn/l_%d.npz' % sample_len)"], {}), "('./data/knn/l_%d.npz' % sample_len)\n", (6848, 6884), True, 'import numpy as np\n'), ((7184, 7214), 'numpy.zeros', 'np.zeros', (['(test_sample_num, 1)'], {}), '((test_sample_num, 1))\n', (7192, 7214), True, 'import numpy as np\n'), ((8589, 8699), 'tensorflow.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': '[sample_length, features_size]', 'name': "('L%d_place_holder' % sample_length)"}), "(self.dtype, shape=[sample_length, features_size], name=\n 'L%d_place_holder' % sample_length)\n", (8603, 8699), True, 'import tensorflow as tf\n'), ((8720, 8827), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'saved_features', 'dtype': 'self.dtype', 'name': "('L%d_saved_features' % sample_length)"}), "(initial_value=saved_features, dtype=self.dtype, name=\n 'L%d_saved_features' % sample_length)\n", (8731, 8827), True, 'import tensorflow as tf\n'), ((8927, 8952), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(-distance)', 'k'], {}), '(-distance, k)\n', (8938, 8952), True, 'import 
tensorflow as tf\n'), ((9150, 9214), 'tensorflow.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': '[sample_length, features_size]'}), '(self.dtype, shape=[sample_length, features_size])\n', (9164, 9214), True, 'import tensorflow as tf\n'), ((9240, 9310), 'tensorflow.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': '[None, sample_length, features_size]'}), '(self.dtype, shape=[None, sample_length, features_size])\n', (9254, 9310), True, 'import tensorflow as tf\n'), ((9415, 9440), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(-distance)', 'k'], {}), '(-distance, k)\n', (9426, 9440), True, 'import tensorflow as tf\n'), ((10205, 10217), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10214, 10217), False, 'import json\n'), ((962, 1025), 'numpy.zeros', 'np.zeros', (['(sample_num, sample_len, self.training_data.shape[1])'], {}), '((sample_num, sample_len, self.training_data.shape[1]))\n', (970, 1025), True, 'import numpy as np\n'), ((1052, 1077), 'numpy.zeros', 'np.zeros', (['(sample_num, 1)'], {}), '((sample_num, 1))\n', (1060, 1077), True, 'import numpy as np\n'), ((1363, 1426), 'numpy.savez', 'np.savez', (['file_name'], {'features': 'part_features', 'labels': 'part_labels'}), '(file_name, features=part_features, labels=part_labels)\n', (1371, 1426), True, 'import numpy as np\n'), ((2104, 2153), 'numpy.zeros', 'np.zeros', (['(sample_num, sample_len, data.shape[1])'], {}), '((sample_num, sample_len, data.shape[1]))\n', (2112, 2153), True, 'import numpy as np\n'), ((2180, 2205), 'numpy.zeros', 'np.zeros', (['(sample_num, 1)'], {}), '((sample_num, 1))\n', (2188, 2205), True, 'import numpy as np\n'), ((2582, 2628), 'numpy.mean', 'np.mean', (['((part_labels == predict_labels) * 1.0)'], {}), '((part_labels == predict_labels) * 1.0)\n', (2589, 2628), True, 'import numpy as np\n'), ((5798, 5847), 'numpy.zeros', 'np.zeros', (['(sample_num, sample_len, data.shape[1])'], {}), '((sample_num, sample_len, data.shape[1]))\n', (5806, 5847), True, 'import numpy as 
np\n'), ((5874, 5899), 'numpy.zeros', 'np.zeros', (['(sample_num, 1)'], {}), '((sample_num, 1))\n', (5882, 5899), True, 'import numpy as np\n'), ((6272, 6318), 'numpy.mean', 'np.mean', (['((part_labels == predict_labels) * 1.0)'], {}), '((part_labels == predict_labels) * 1.0)\n', (6279, 6318), True, 'import numpy as np\n'), ((8856, 8892), 'tensorflow.pow', 'tf.pow', (['(saved_features - for_test)', '(2)'], {}), '(saved_features - for_test, 2)\n', (8862, 8892), True, 'import tensorflow as tf\n'), ((9344, 9380), 'tensorflow.pow', 'tf.pow', (['(saved_features - for_test)', '(2)'], {}), '(saved_features - for_test, 2)\n', (9350, 9380), True, 'import tensorflow as tf\n'), ((3663, 3711), 'numpy.sum', 'np.sum', (['((for_test - saved_features) ** 2)'], {'axis': '(2)'}), '((for_test - saved_features) ** 2, axis=2)\n', (3669, 3711), True, 'import numpy as np\n')] |
import json
import numpy as np
import numba
import sys
# Py2/Py3 compatibility: on Python 2, 'long' also counts as an integer type
# for the isinstance checks below.
if sys.version_info < (3,):
    integer_types = (int, long,)
else:
    integer_types = (int,)
# machine epsilon for float64
eps = np.finfo(np.float64).eps
def timeparams(ntimesamples=None, fs=None, duration=None):
    """Complete the (ntimesamples, fs, duration) triple.

    At least two of the three must be supplied; a missing third value is
    derived from the other two via duration = ntimesamples / fs.  Raises
    ValueError when fewer than two are given, or when fs * duration does
    not yield a whole number of samples.  When all three are given they
    are returned unchanged.
    """
    n_given = sum([ntimesamples is not None, fs is not None, duration is not None])
    if n_given < 2:
        raise ValueError(
            "at least 2 values are required for duration, ntimesamples, and fs")
    if n_given == 2:
        # derive whichever value is missing from the other two
        if ntimesamples is None:
            ntimesamples = fs * duration
            if divmod(ntimesamples, 1.0)[1] != 0.0:
                raise ValueError(
                    "duration and fs do not correspond to integer ntimesamples")
            ntimesamples = int(ntimesamples)
        elif fs is None:
            fs = ntimesamples / duration
        else:
            duration = ntimesamples / fs
    return (ntimesamples, fs, duration)
from contextlib import contextmanager
import os.path
import math
import sys
import numpy as np
from numbers import Number
from collections import Set, Mapping, deque
def check_startendargs(soundstartframe, soundnframes, startframe, endframe):
    """Translate sound-relative frame offsets into absolute indices.

    `startframe`/`endframe` of None default to the sound's own start and
    end.  Raises IndexError when either resulting index falls outside
    [soundstartframe, soundstartframe + soundnframes].
    """
    soundendframe = soundstartframe + soundnframes
    startindex = soundstartframe if startframe is None else soundstartframe + startframe
    endindex = soundendframe if endframe is None else soundstartframe + endframe
    if not soundstartframe <= startindex <= soundendframe:
        raise IndexError('startframe out of bounds')
    if not soundstartframe <= endindex <= soundendframe:
        raise IndexError('endframe out of bounds')
    return startindex, endindex
def check_episode(startframe, endframe, starttime, endtime, fs, nframes):
    """Resolve a (startframe, endframe) pair from frame or time arguments.

    At most one of each pair (startframe/starttime, endframe/endtime)
    may be supplied; times are converted to frames via `fs`.  Missing
    values default to 0 and `nframes`.  Raises TypeError for non-integer
    frames and ValueError for conflicting or out-of-range values.
    """
    if (startframe is not None) and (starttime is not None):
        raise ValueError("Both 'startframe' and 'starttime' provided")
    if (endframe is not None) and (endtime is not None):
        raise ValueError("Both 'endframe' and 'endtime' provided")
    # convert times to frames, then fill in defaults
    if starttime is not None:
        startframe = int(round(starttime * fs))
    startframe = 0 if startframe is None else startframe
    if endtime is not None:
        endframe = int(round(endtime * fs))
    endframe = nframes if endframe is None else endframe
    if not isinstance(startframe, integer_types):
        raise TypeError("'startframe' ({}) should be an "
                        "int".format(type(startframe)))
    if not isinstance(endframe, integer_types):
        raise TypeError(
            "'endframe' ({}) should be an int".format(type(endframe)))
    if endframe < startframe:
        raise ValueError("'endframe' ({}) lower than 'startframe' "
                         "({})".format(endframe, startframe))
    if endframe > nframes:
        raise ValueError("'endframe' ({}) higher than 'nframes' "
                         "({})".format(endframe, nframes))
    if startframe < 0:
        raise ValueError("'startframe' ({}) should be >= 0".format(startframe))
    return startframe, endframe
@contextmanager
def tempdir(dir='.', keep=False, report=False):
    """Context manager yielding a fresh temporary directory.

    The directory is created under `dir` and removed on exit unless
    `keep` is True.  `report` prints creation/removal messages.
    """
    import tempfile
    import shutil
    # BUG FIX: create the directory *before* entering the try block --
    # if mkdtemp itself failed, the original finally clause referenced
    # the unbound name `tempdirname`, masking the real error with a
    # NameError.  (The original bare `except: raise` was also a no-op.)
    tempdirname = tempfile.mkdtemp(dir=dir)
    if report:
        print('created cache file {}'.format(tempdirname))
    try:
        yield tempdirname
    finally:
        if not keep:
            shutil.rmtree(tempdirname)
            if report:
                print('removed cache file {}'.format(tempdirname))
def _check_dir(path):
if os.path.isdir(path):
return path
else:
raise ValueError('%s is not a directory' % path)
def _check_fileexists(filename):
if not os.path.exists(filename):
raise IOError("file '%s' does not exist" % filename)
return filename
def _check_notfileexists(filename, overwrite=False):
if os.path.exists(filename) and (not overwrite):
raise IOError("file '%s' already exist" % filename)
return filename
def _check_h5file(path):
if not (os.path.exists(path) and (os.path.splitext(path)[-1] == '.h5')):
raise IOError("'%s' is not a path to a h5 file" % path)
return path
def _check_mode(mode):
if mode == 'w':
raise IOError("'w' mode is not allowed; delete SoundStore first")
if mode not in ('r', 'a', 'r+'):
raise IOError("mode can only be 'r', 'a', 'r+'")
return mode
def packing_code(samplewidth):
    """Map a sample width in bytes to a (struct code, scale factor) pair.

    8-bit WAV samples are unsigned; 16- and 32-bit samples are signed.
    Raises ValueError for unsupported widths.
    """
    codes = {
        1: ('B', 128.0),                 # unsigned 8 bits
        2: ('h', 32768.0),               # signed 16 bits
        4: ('i', 32768.0 * 65536.0),     # signed 32 bits
    }
    if samplewidth in codes:
        return codes[samplewidth]
    raise ValueError('WaveIO Packing Error: not able to parse {} bytes'.format(samplewidth))
def duration_string(seconds):
    """Render a duration in seconds as a human-readable string.

    Picks the largest unit (weeks down to milliseconds) the duration
    reaches at least once; durations below 1 ms are still printed in
    milliseconds, with three decimals.
    """
    units = ((604800.0, 'weeks'),      # 60*60*24*7
             (86400.0, 'days'),
             (3600.0, 'hours'),
             (60.0, 'minutes'),
             (1.0, 'seconds'),
             (0.001, 'milliseconds'))
    for size, name in units:
        if seconds >= size:
            return '{:.2f} {}'.format(seconds / size, name)
    return '{:.3f} {}'.format(seconds / 0.001, 'milliseconds')
def stringcode(number, labels="abcdefghijklmnopqrstuvwxyz", maxnumber=None):
    """Encode `number` as a fixed-width base-len(labels) string.

    The width is chosen so every value up to `maxnumber` (default:
    `number` itself) fits, which keeps codes lexicographically sortable.
    Raises ValueError when `maxnumber` is smaller than `number`.
    """
    base = len(labels)
    if maxnumber is None:
        maxnumber = number
    if maxnumber < number:
        raise ValueError("'maxnumber' should be at least {}".format(number))
    codelen = int(math.ceil(math.log(maxnumber + 1, base)))
    # emit digits least-significant first, then reverse; always at least one
    digits = []
    remaining = number
    for _ in range(max(codelen, 1)):
        remaining, digit = divmod(remaining, base)
        digits.append(labels[digit])
    return ''.join(reversed(digits))
# Py2/Py3 shim for getsize(): `zero_depth_bases` are treated as leaf
# objects (never recursed into) and `iteritems` names the dict
# item-iteration method, looked up later via getattr().
try: # Python 2
    zero_depth_bases = (basestring, Number, xrange, bytearray)
    iteritems = 'iteritems'
except NameError: # Python 3
    zero_depth_bases = (str, bytes, Number, range, bytearray)
    iteritems = 'items'
def getsize(obj):
    """Recursively iterate to sum size of object & members.

    Unlike a bare sys.getsizeof (which is shallow), this walks tuples,
    lists, sets, deques, mappings, __slots__ attributes and __dict__
    contents, counting every reachable object at most once (shared
    references and cycles are handled via an id() set).
    """
    def inner(obj, _seen_ids = set()):
        # the mutable default is safe here: `inner` is redefined (and the
        # default set recreated) on every getsize() call
        obj_id = id(obj)
        if obj_id in _seen_ids:
            return 0
        _seen_ids.add(obj_id)
        size = sys.getsizeof(obj)
        if isinstance(obj, zero_depth_bases):
            pass # bypass remaining control flow and return
        elif isinstance(obj, (tuple, list, Set, deque)):
            size += sum(inner(i) for i in obj)
        elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
            # duck-typed mapping: iterate items via the version-specific method name
            size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())
        # Now assume custom object instances
        elif hasattr(obj, '__slots__'):
            size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))
        else:
            attr = getattr(obj, '__dict__', None)
            if attr is not None:
                size += inner(attr)
        return size
    return inner(obj)
def fit_frames(totalsize, framesize, stepsize=None):
    """
    Compute how many frames of `framesize` fit into `totalsize` when
    advancing by `stepsize` samples per frame.

    Parameters
    ----------
    totalsize: int
        Total number of samples available.
    framesize: int
        Number of samples per frame.
    stepsize: int, optional
        Hop between consecutive frame starts; defaults to `framesize`
        (no overlap).

    Returns
    -------
    (nframes, newsize, remainder): the number of full frames, the number
    of samples they cover, and the count of trailing leftover samples.
    """
    if ((totalsize % 1) != 0) or (totalsize < 1):
        raise ValueError("invalid totalsize (%d)" % totalsize)
    if ((framesize % 1) != 0) or (framesize < 1):
        raise ValueError("invalid framesize (%d)" % framesize)
    # a frame larger than the whole signal leaves everything as remainder
    if framesize > totalsize:
        return 0, 0, totalsize
    if stepsize is None:
        stepsize = framesize
    elif ((stepsize % 1) != 0) or (stepsize < 1):
        raise ValueError("invalid stepsize")
    totalsize, framesize, stepsize = int(totalsize), int(framesize), int(stepsize)
    nframes = 1 + (totalsize - framesize) // stepsize
    covered = nframes * stepsize + (framesize - stepsize)
    return nframes, covered, totalsize - covered
def iter_timewindowindices(ntimeframes, framesize, stepsize=None,
                           include_remainder=True, startindex=None,
                           endindex=None):
    """
    Yield (start, end) index pairs for sliding time windows.

    Parameters
    ----------
    ntimeframes: int
        Total number of time samples available.
    framesize: int
        Window length in samples; the final remainder window may be shorter.
    stepsize: int, optional
        Hop per iteration; defaults to framesize (no overlap).
    include_remainder: bool, optional
        Whether to yield a final, shorter window for leftover samples.
    startindex, endindex: int, optional
        Slice of the time axis to cover; default to 0 and ntimeframes.

    Yields
    ------
    (start, end) tuples delimiting each window; ends with the remainder
    window when include_remainder is True and leftovers exist.

    Raises
    ------
    ValueError: when startindex/endindex are out of range or inverted.
    """
    if stepsize is None:
        stepsize = framesize
    if startindex is None:
        startindex = 0
    if endindex is None:
        endindex = ntimeframes
    if startindex > (ntimeframes - 1):
        raise ValueError("startindex too high")
    if endindex > ntimeframes:
        raise ValueError("endindex is too high")
    if startindex >= endindex:
        raise ValueError("startindex ({}) should be lower than endindex ({})".format(startindex, endindex))
    nframes, _, remainder = fit_frames(
        totalsize=(endindex - startindex),
        framesize=framesize,
        stepsize=stepsize)
    cursor = startindex
    for _ in range(nframes):
        yield cursor, cursor + framesize
        cursor += stepsize
    if include_remainder and remainder > 0 and cursor < ntimeframes:
        yield cursor, cursor + remainder
def write_json(datadict, path, sort_keys=True, indent=4, overwrite=False):
    """Serialize *datadict* as JSON to *path*.

    Refuses to clobber an existing file unless overwrite=True, raising
    IOError in that case.
    """
    if os.path.exists(path) and not overwrite:
        raise IOError("'{}' exists, use 'overwrite' parameter if "
                      "appropriate".format(path))
    with open(path, 'w') as fobj:
        fobj.write(json.dumps(datadict, sort_keys=sort_keys, indent=indent))
def read_json(path):
    """Read and parse the JSON file at *path*.

    Fix: the original opened the file with mode 'r+', which requires
    write permission and fails on read-only files even though the
    function never writes; read-only mode 'r' is sufficient.
    """
    with open(path, 'r') as f:
        return json.loads(f.read())
@numba.jit
def minmax(x):
    """Return (min, max) of the elements of *x* in a single pass.

    Fix: the original used ``elif`` for the min branch, so an element
    that set a new max could never also update the min.  For
    monotonically increasing input (e.g. [1, 2, 3]) the returned min
    stayed at +inf.  The two comparisons are now independent so the
    first element initializes both bounds.
    """
    max = -np.inf
    min = np.inf
    for i in x:
        if i > max:
            max = i
        if i < min:
            min = i
    return (min, max)
| [
"json.dumps",
"numpy.finfo",
"tempfile.mkdtemp",
"numpy.array",
"sys.getsizeof",
"shutil.rmtree",
"math.log"
] | [((157, 177), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (165, 177), True, 'import numpy as np\n'), ((428, 464), 'numpy.array', 'np.array', (['[havents, havefs, havedur]'], {}), '([havents, havefs, havedur])\n', (436, 464), True, 'import numpy as np\n'), ((3698, 3723), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'dir'}), '(dir=dir)\n', (3714, 3723), False, 'import tempfile\n'), ((6771, 6789), 'sys.getsizeof', 'sys.getsizeof', (['obj'], {}), '(obj)\n', (6784, 6789), False, 'import sys\n'), ((3904, 3930), 'shutil.rmtree', 'shutil.rmtree', (['tempdirname'], {}), '(tempdirname)\n', (3917, 3930), False, 'import shutil\n'), ((6090, 6122), 'math.log', 'math.log', (['(maxnumber + 1)', 'nlabels'], {}), '(maxnumber + 1, nlabels)\n', (6098, 6122), False, 'import math\n'), ((11201, 11257), 'json.dumps', 'json.dumps', (['datadict'], {'sort_keys': 'sort_keys', 'indent': 'indent'}), '(datadict, sort_keys=sort_keys, indent=indent)\n', (11211, 11257), False, 'import json\n')] |
"""
Our implementation of obstacle detection pipeline steps
@authors: <NAME>, <NAME>, <NAME>, <NAME>
"""
import numpy as np
import pandas as pd
from datetime import datetime
from scipy.spatial import ConvexHull
from scipy.ndimage.interpolation import rotate
def roi_filter_rounded(pcloud, verbose=True, **params):
    """Elliptical Region-of-Interest filter.

    Keeps points inside an ellipse on the XY plane (half-axes derived
    from the min/max params), within (min_z, max_z) and with x beyond
    min_x.  Adds helper columns 'equation' and 'camera' to the input
    DataFrame (in place) and returns the filtered rows.
    """
    half_x = (-params["max_x"] - params["max_x"]) / 2
    half_y = (params["min_y"] - params["max_y"]) / 2
    if verbose:
        print("Input pcloud size: {}".format(len(pcloud)))
    # Normalized ellipse equation: values <= 1 lie inside the ellipse.
    pcloud["equation"] = (pcloud["x"] ** 2) / (half_x ** 2) + (pcloud["y"] ** 2) / (half_y ** 2)
    inside = (
        (pcloud["z"] > params["min_z"])
        & (pcloud["z"] < params["max_z"])
        & (pcloud["x"] > params["min_x"])
        & (pcloud["equation"] <= 1.0)
    )
    pcloud["camera"] = inside
    pcloud = pcloud[pcloud["camera"]]
    if verbose:
        print("Output ROI pcloud size: {}".format(len(pcloud)))
    return pcloud
def roi_filter(pcloud, verbose=True, **params):
    """
    Box-shaped Region Of Interest filter relative to the LIDAR scanner
    (the origin (0, 0, 0) is the sensor).  Keeps points strictly inside
    the (min_*, max_*) box on all three axes; adds a boolean 'camera'
    column to the input DataFrame in place and returns the kept rows.
    """
    if verbose:
        print("Input pcloud size: {}".format(len(pcloud)))
    mask = pd.Series(True, index=pcloud.index)
    for axis in ("x", "y", "z"):
        # Strict inequalities: boundary points are excluded.
        mask &= pcloud[axis] > params["min_" + axis]
        mask &= pcloud[axis] < params["max_" + axis]
    pcloud["camera"] = mask
    pcloud = pcloud[pcloud["camera"]]
    if verbose:
        print("Output ROI pcloud size: {}".format(len(pcloud)))
    return pcloud
def obstacle_filter(pcloud, obstacle_lst, proc_labels=True, verbose=True):
    """
    Keep only points belonging to the requested obstacle segments.

    pcloud: pandas.DataFrame with columns ['x', 'y', 'z', 'seg_id'].
    obstacle_lst: dict of segment-id -> anything when proc_labels=True
        (keys are matched), or a plain list of segment ids otherwise.
    proc_labels: when True, seg_id is first cast to uint32 and masked
        with the low 16 bits before matching.
    """
    # sanity check
    assert isinstance(pcloud, pd.DataFrame)
    origin_point_size = len(pcloud)
    if proc_labels:
        pcloud.seg_id = pcloud.seg_id.astype("uint32")
        pcloud.seg_id = pcloud.seg_id.apply(lambda v: v & 0xFFFF)
        wanted = list(obstacle_lst.keys())
    else:
        wanted = obstacle_lst
    pcloud = pcloud[pcloud["seg_id"].isin(wanted)]
    if verbose:
        print("Filter required segments")
        print(
            "Point size before: {} and after filtering: {}".format(
                origin_point_size, len(pcloud)
            )
        )
    return pcloud
def outlier_filter(tcluster, verbose=True):
    """Drop rows whose 'norm' lies outside mean +/- 3*std (3-sigma rule).

    If the 'norm' column is missing it is computed as the Euclidean norm
    of the (x, y, z) columns and stored on the DataFrame.

    Fix: the original guarded the missing-column case with a bare
    ``except BaseException`` (which would also swallow
    KeyboardInterrupt/SystemExit) and duplicated the mean/std
    computation in both branches; an explicit column check is safer and
    removes the duplication.
    """
    start_time = datetime.now()
    if "norm" not in tcluster.columns:
        tcluster["norm"] = np.sqrt(np.square(tcluster[["x", "y", "z"]]).sum(axis=1))
    _mean, _std = tcluster["norm"].mean(), tcluster["norm"].std()
    lower, higher = _mean - 3 * _std, _mean + 3 * _std
    end_time = (datetime.now() - start_time).total_seconds()
    if verbose:
        print("Computing lower-higher bounds {}".format(end_time))
    start_time = datetime.now()
    # Strict inequalities: points exactly on a bound are dropped.
    tcluster = tcluster[(tcluster["norm"] > lower) & (tcluster["norm"] < higher)]
    end_time = (datetime.now() - start_time).total_seconds()
    if verbose:
        print("Applying bounds {}".format(end_time))
    return tcluster
def get_bounding_boxes(clusters):
    """Build 8-corner axis-aligned boxes, one per cluster row.

    Each row of *clusters* must hold, in column order:
    (x_min, x_max, y_min, y_max, z_min, z_max).
    Returns a list of 3x8 float arrays (rows = x/y/z, columns = corners:
    the four bottom corners counter-clockwise, then the four top ones).
    """
    box_coord_list = []
    for _, row in clusters.iterrows():
        x_min, x_max, y_min, y_max, z_min, z_max = list(row)
        corners = [
            (x_min, y_min, z_min),
            (x_max, y_min, z_min),
            (x_max, y_max, z_min),
            (x_min, y_max, z_min),
            (x_min, y_min, z_max),
            (x_max, y_min, z_max),
            (x_max, y_max, z_max),
            (x_min, y_max, z_max),
        ]
        box_coord_list.append(np.array(corners, dtype=float).T)
    return box_coord_list
def get_OBB(cluster):
    """Oriented bounding box for a cluster: the minimal-area rectangle of
    the XY projection extruded between the cluster's z extremes.

    Returns an 8x3 array: the 4 rectangle corners at z_min followed by
    the same corners at z_max.
    """
    # sanity check
    assert isinstance(cluster, pd.DataFrame)
    z_lo = cluster["z"].min()
    z_hi = cluster["z"].max()
    # Minimal rotated rectangle of the points projected onto XY.
    rect = minimum_bounding_box(cluster[["x", "y"]].values)
    corners_xy = np.concatenate((rect, rect), axis=0)
    heights = np.array([z_lo] * 4 + [z_hi] * 4).reshape(8, 1)
    return np.hstack((corners_xy, heights))
def minimum_bounding_box(points):
"""compute minimum bounding box in XoY"""
pi2 = np.pi / 2.0
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points) - 1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
rotations = np.vstack(
[np.cos(angles), np.cos(angles - pi2), np.cos(angles + pi2), np.cos(angles)]
).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
# closest leftmost
rval[0] = np.dot([x2, y2], r)
# closest rightmost
rval[1] = np.dot([x2, y1], r)
# farthest leftmost
rval[2] = np.dot([x1, y2], r)
# farthest rightmost
rval[3] = np.dot([x1, y1], r)
return rval
| [
"numpy.arctan2",
"numpy.concatenate",
"numpy.nanmax",
"numpy.square",
"numpy.zeros",
"numpy.transpose",
"numpy.nanmin",
"numpy.argmin",
"numpy.mod",
"numpy.array",
"numpy.cos",
"numpy.dot",
"scipy.spatial.ConvexHull",
"datetime.datetime.now",
"numpy.unique"
] | [((2736, 2750), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2748, 2750), False, 'from datetime import datetime\n'), ((3291, 3305), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3303, 3305), False, 'from datetime import datetime\n'), ((4617, 4652), 'numpy.array', 'np.array', (['([z_min] * 4 + [z_max] * 4)'], {}), '([z_min] * 4 + [z_max] * 4)\n', (4625, 4652), True, 'import numpy as np\n'), ((4703, 4757), 'numpy.concatenate', 'np.concatenate', (['(xy_minimum_bb, xy_minimum_bb)'], {'axis': '(0)'}), '((xy_minimum_bb, xy_minimum_bb), axis=0)\n', (4717, 4757), True, 'import numpy as np\n'), ((5272, 5308), 'numpy.arctan2', 'np.arctan2', (['edges[:, 1]', 'edges[:, 0]'], {}), '(edges[:, 1], edges[:, 0])\n', (5282, 5308), True, 'import numpy as np\n'), ((5364, 5381), 'numpy.unique', 'np.unique', (['angles'], {}), '(angles)\n', (5373, 5381), True, 'import numpy as np\n'), ((5630, 5662), 'numpy.dot', 'np.dot', (['rotations', 'hull_points.T'], {}), '(rotations, hull_points.T)\n', (5636, 5662), True, 'import numpy as np\n'), ((5707, 5742), 'numpy.nanmin', 'np.nanmin', (['rot_points[:, 0]'], {'axis': '(1)'}), '(rot_points[:, 0], axis=1)\n', (5716, 5742), True, 'import numpy as np\n'), ((5755, 5790), 'numpy.nanmax', 'np.nanmax', (['rot_points[:, 0]'], {'axis': '(1)'}), '(rot_points[:, 0], axis=1)\n', (5764, 5790), True, 'import numpy as np\n'), ((5803, 5838), 'numpy.nanmin', 'np.nanmin', (['rot_points[:, 1]'], {'axis': '(1)'}), '(rot_points[:, 1], axis=1)\n', (5812, 5838), True, 'import numpy as np\n'), ((5851, 5886), 'numpy.nanmax', 'np.nanmax', (['rot_points[:, 1]'], {'axis': '(1)'}), '(rot_points[:, 1], axis=1)\n', (5860, 5886), True, 'import numpy as np\n'), ((5987, 6003), 'numpy.argmin', 'np.argmin', (['areas'], {}), '(areas)\n', (5996, 6003), True, 'import numpy as np\n'), ((6171, 6187), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (6179, 6187), True, 'import numpy as np\n'), ((6237, 6256), 'numpy.dot', 'np.dot', (['[x2, y2]', 
'r'], {}), '([x2, y2], r)\n', (6243, 6256), True, 'import numpy as np\n'), ((6307, 6326), 'numpy.dot', 'np.dot', (['[x2, y1]', 'r'], {}), '([x2, y1], r)\n', (6313, 6326), True, 'import numpy as np\n'), ((6377, 6396), 'numpy.dot', 'np.dot', (['[x1, y2]', 'r'], {}), '([x1, y2], r)\n', (6383, 6396), True, 'import numpy as np\n'), ((6448, 6467), 'numpy.dot', 'np.dot', (['[x1, y1]', 'r'], {}), '([x1, y1], r)\n', (6454, 6467), True, 'import numpy as np\n'), ((3722, 3738), 'numpy.zeros', 'np.zeros', (['[8, 3]'], {}), '([8, 3])\n', (3730, 3738), True, 'import numpy as np\n'), ((4089, 4106), 'numpy.transpose', 'np.transpose', (['box'], {}), '(box)\n', (4101, 4106), True, 'import numpy as np\n'), ((5330, 5349), 'numpy.mod', 'np.mod', (['angles', 'pi2'], {}), '(angles, pi2)\n', (5336, 5349), True, 'import numpy as np\n'), ((5069, 5087), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (5079, 5087), False, 'from scipy.spatial import ConvexHull\n'), ((3145, 3159), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3157, 3159), False, 'from datetime import datetime\n'), ((3404, 3418), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3416, 3418), False, 'from datetime import datetime\n'), ((5448, 5462), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5454, 5462), True, 'import numpy as np\n'), ((5464, 5484), 'numpy.cos', 'np.cos', (['(angles - pi2)'], {}), '(angles - pi2)\n', (5470, 5484), True, 'import numpy as np\n'), ((5486, 5506), 'numpy.cos', 'np.cos', (['(angles + pi2)'], {}), '(angles + pi2)\n', (5492, 5506), True, 'import numpy as np\n'), ((5508, 5522), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5514, 5522), True, 'import numpy as np\n'), ((2950, 2986), 'numpy.square', 'np.square', (["tcluster[['x', 'y', 'z']]"], {}), "(tcluster[['x', 'y', 'z']])\n", (2959, 2986), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
from os.path import join
from absl import flags
import os, collections, json, codecs, pickle, re, xlnet
import numpy as np
import tensorflow as tf
import sentencepiece as spm
from xlnet_config import FLAGS
from data_utils import SEP_ID, VOCAB_SIZE, CLS_ID
import model_utils
import function_builder
from classifier_utils import PaddingInputExample
#from classifier_utils import convert_single_example
from prepro_utils import preprocess_text, encode_ids
from lstm_crf_layer import BLSTM_CRF
from tensorflow.contrib.layers.python.layers import initializers
import logging as logger
logger.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', level=logger.INFO)
# XLNet segment-id conventions used when building features below.
SEG_ID_A = 0    # first-sequence tokens (and their trailing SEP)
SEG_ID_B = 1    # second-sequence tokens in sentence-pair tasks
SEG_ID_CLS = 2  # the CLS token (appended last in XLNet)
SEG_ID_SEP = 3  # separator id; not referenced in this file's visible code
SEG_ID_PAD = 4  # left-padding positions
'''
# Model
flags.DEFINE_string("model_config_path", default="pretrain_model/config.json", help="Model config path.")
flags.DEFINE_float("dropout", default=0.1, help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.1, help="Attention dropout rate.")
flags.DEFINE_integer("clamp_len", default=-1, help="Clamp length")
flags.DEFINE_string("summary_type", default="last", help="Method used to summarize a sequence into a compact vector.")
flags.DEFINE_bool("use_summ_proj", default=True, help="Whether to use projection for summarizing sequences.")
flags.DEFINE_bool("use_bfloat16", False, help="Whether to use bfloat16.")
# Parameter initialization
flags.DEFINE_enum("init", default="normal", enum_values=["normal", "uniform"], help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02, help="Initialization std when init is normal.")
flags.DEFINE_float("init_range", default=0.1, help="Initialization std when init is uniform.")
# I/O paths
flags.DEFINE_bool("overwrite_data", default=False, help="If False, will use cached data if available.")
flags.DEFINE_string("init_checkpoint", default="pretrain_model/model.ckpt-35",
help="checkpoint path for initializing the model. "
"Could be a pretrained model or a finetuned model.")
flags.DEFINE_string("output_dir", default="proc_data/imdb", help="Output dir for TF records.")
flags.DEFINE_string("spiece_model_file", default="token_model/chinese/spiece.model", help="Sentence Piece model path.")
flags.DEFINE_string("model_dir", default="finetuning_model/imdb", help="Directory for saving the finetuned model.")
flags.DEFINE_string("data_dir", default="data/aclImdb", help="Directory for input data.")
# TPUs and machines
flags.DEFINE_bool("use_tpu", default=False, help="whether to use TPU.")
flags.DEFINE_integer("num_hosts", default=1, help="How many TPU hosts.")
flags.DEFINE_integer("num_core_per_host", default=8,
help="8 for TPU v2 and v3-8, 16 for larger TPU v3 pod. In the context "
"of GPU training, it refers to the number of GPUs used.")
flags.DEFINE_string("tpu_job_name", default=None, help="TPU worker job name.")
flags.DEFINE_string("tpu", default=None, help="TPU name.")
flags.DEFINE_string("tpu_zone", default=None, help="TPU zone.")
flags.DEFINE_string("gcp_project", default=None, help="gcp project.")
flags.DEFINE_string("master", default=None, help="master")
flags.DEFINE_integer("iterations", default=1000, help="number of iterations per TPU training loop.")
# training
flags.DEFINE_bool("do_train", default=True, help="whether to do training")
flags.DEFINE_integer("train_steps", default=1000, help="Number of training steps")
flags.DEFINE_integer("warmup_steps", default=500, help="number of warmup steps")
flags.DEFINE_float("learning_rate", default=2e-5, help="initial learning rate")
flags.DEFINE_float("lr_layer_decay_rate", 1.0,
"Top layer: lr[L] = FLAGS.learning_rate."
"Low layer: lr[l-1] = lr[l] * lr_layer_decay_rate.")
flags.DEFINE_float("min_lr_ratio", default=0.0, help="min lr ratio for cos decay.")
flags.DEFINE_float("clip", default=1.0, help="Gradient clipping")
flags.DEFINE_integer("max_save", default=0, help="Max number of checkpoints to save. Use 0 to save all.")
flags.DEFINE_integer("save_steps", default=10, help="Save the model for every save_steps. If None, not to save any model.")
flags.DEFINE_integer("train_batch_size", default=32, help="Batch size for training")
flags.DEFINE_float("weight_decay", default=0.00, help="Weight decay rate")
flags.DEFINE_float("adam_epsilon", default=1e-8, help="Adam epsilon")
flags.DEFINE_string("decay_method", default="poly", help="poly or cos")
# evaluation
flags.DEFINE_bool("do_eval", default=True, help="whether to do eval")
flags.DEFINE_bool("do_predict", default=False, help="whether to do prediction")
flags.DEFINE_float("predict_threshold", default=0, help="Threshold for binary prediction.")
flags.DEFINE_string("eval_split", default="dev", help="could be dev or test")
flags.DEFINE_integer("eval_batch_size", default=8, help="batch size for evaluation")
flags.DEFINE_integer("predict_batch_size", default=128, help="batch size for prediction.")
flags.DEFINE_string("predict_dir", default=None, help="Dir for saving prediction files.")
flags.DEFINE_bool("eval_all_ckpt", default=True, help="Eval all ckpts. If False, only evaluate the last one.")
flags.DEFINE_string("predict_ckpt", default=None, help="Ckpt path for do_predict. If None, use the last one.")
# task specific
flags.DEFINE_string("task_name", default="imdb", help="Task name")
flags.DEFINE_integer("max_seq_length", default=128, help="Max sequence length")
flags.DEFINE_integer("shuffle_buffer", default=2048, help="Buffer size used for shuffle.")
flags.DEFINE_integer("num_passes", default=1,
help="Num passes for processing training data. "
"This is use to batch data without loss for TPUs.")
flags.DEFINE_bool("uncased", default=False, help="Use uncased.")
flags.DEFINE_string("cls_scope", default=None, help="Classifier layer scope.")
flags.DEFINE_bool("is_regression", default=False, help="Whether it's a regression task.")
FLAGS = flags.FLAGS
'''
class InputExample(object):
  """A single training/test example for simple sequence classification."""

  def __init__(self, guid, text_a, text_b=None, label=None):
    """Store the raw fields of one example.

    Args:
      guid: unique identifier for the example.
      text_a: untokenized text of the first sequence; always required.
      text_b: optional untokenized second sequence (pair tasks only).
      label: optional gold label; set for train/dev, absent for test.
    """
    self.guid, self.text_a = guid, text_a
    self.text_b, self.label = text_b, label
class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_ids,
               is_real_example=True):
    """Hold the fully numeric, padded representation of one example.

    Args:
      input_ids: token ids, left-padded to the max sequence length.
      input_mask: 0 for real tokens, 1 for padding (XLNet convention).
      segment_ids: SEG_ID_* value per position.
      label_ids: per-position label indices.
      is_real_example: False for TPU padding examples.
    """
    self.input_ids, self.input_mask = input_ids, input_mask
    self.segment_ids, self.label_ids = segment_ids, label_ids
    self.is_real_example = is_real_example
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_data(cls, input_file):
    """Reads a BIO data file.

    Each line is "token label"; blank lines delimit sentences.  Returns
    a list of [label_string, word_string] pairs, one per sentence
    (sentences not followed by a blank line are dropped, matching the
    original behavior).
    """
    sentences = []
    tokens_buf = []
    tags_buf = []
    with codecs.open(input_file, 'r', encoding='utf-8') as handle:
      for raw in handle:
        stripped = raw.strip()
        parts = stripped.split(' ')
        if len(parts) == 2:
          tokens_buf.append(parts[0])
          tags_buf.append(parts[1])
        elif len(stripped) == 0:
          # Sentence boundary: flush the buffers (empty entries filtered).
          tag_str = ' '.join(t for t in tags_buf if len(t) > 0)
          tok_str = ' '.join(w for w in tokens_buf if len(w) > 0)
          sentences.append([tag_str, tok_str])
          tokens_buf = []
          tags_buf = []
        elif stripped.startswith("-DOCSTART-"):
          tokens_buf.append('')
    return sentences
#**********************************************************************************************************************#
class NerProcessor(DataProcessor):
    """Data converter for the NER task: reads BIO files and manages the
    label set (optionally cached as a pickle in output_dir)."""
    def __init__(self, output_dir):
        # Accumulated label set; get_labels() may replace it with a list
        # or a pickled set depending on the branch taken.
        self.labels = set()
        self.output_dir = output_dir
    def get_train_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/train.txt."""
        return self._create_example(
            self._read_data(os.path.join(data_dir, "train.txt")), "train"
        )
    def get_dev_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/dev.txt."""
        return self._create_example(
            self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
        )
    def get_test_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/test.txt."""
        return self._create_example(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")
    def get_labels(self, labels=None):
        """Resolve the label set, preferring (in order): the cached
        output_dir/label_list.pkl, the `labels` argument (a file path
        with one label per line, or a comma-separated string), or a
        hard-coded default list."""
        if labels is not None:
            try:
                # labels may be a file path: read one label per line
                if os.path.exists(labels) and os.path.isfile(labels):
                    with codecs.open(labels, 'r', encoding='utf-8') as fd:
                        for line in fd:
                            # NOTE(review): self.labels is a set here, so
                            # .append will raise AttributeError (caught and
                            # printed below) — likely meant .add; confirm.
                            self.labels.append(line.strip())
                else:
                    # otherwise treat the argument as a comma-separated string
                    self.labels = labels.split(',')
                self.labels = set(self.labels) # to set
            except Exception as e:
                print(e)
        # Deriving labels from the training file carries some risk, so a
        # pickled label list from a previous run takes precedence.
        if os.path.exists(os.path.join(self.output_dir, 'label_list.pkl')):
            with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'rb') as rf:
                self.labels = pickle.load(rf)
        else:
            if len(self.labels) > 0:
                # Add the special markers and cache the result for reuse.
                self.labels = self.labels.union(set(["X", "[CLS]", "[SEP]"]))
                with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'wb') as rf:
                    pickle.dump(self.labels, rf)
            else:
                # self.labels = ["O", 'B-TIM', 'I-TIM', "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X", "[CLS]", "[SEP]"]
                self.labels = ["O", 'B-SP', 'I-SP', "B-SS", "I-SS", "[CLS]", "[SEP]", "[PAD]"]
        return self.labels
    def _create_example(self, lines, set_type):
        """Wrap [label_string, word_string] pairs into `InputExample`s,
        with guids of the form "<set_type>-<index>"."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text = line[1]
            label = line[0]
            # if i == 0:
            #     print('label: ', label)
            examples.append(InputExample(guid=guid, text_a=text, label=label))
        return examples
    def _read_data(self, input_file):
        """Reads a BIO data file.

        Unlike the base-class version this one only flushes on a blank
        line when the buffer is non-empty, and normalizes the noisy
        label "0" to "O"."""
        with codecs.open(input_file, 'r', encoding='utf-8') as f:
            lines = []
            words = []
            labels = []
            for line in f:
                contends = line.strip()
                tokens = contends.split(' ')
                if len(tokens) == 2:
                    words.append(tokens[0])
                    labels.append(tokens[-1])
                else:
                    if len(contends) == 0 and len(words) > 0:
                        label = []
                        word = []
                        for l, w in zip(labels, words):
                            if len(l) > 0 and len(w) > 0:
                                if l == "0": l = "O" # noise cleanup
                                label.append(l)
                                #self.labels.add(l)
                                word.append(w)
                        lines.append([' '.join(label), ' '.join(word)])
                        words = []
                        labels = []
                        continue
                    if contends.startswith("-DOCSTART-"):
                        continue
            return lines
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenize_fn):
  """Converts a single `InputExample` into a single `InputFeatures`.

  XLNet layout: [A tokens] SEP ([B tokens] SEP) CLS, then LEFT-padded to
  max_seq_length.  input_mask is 0 for real tokens and 1 for padding.
  label_ids carries one label index per A-segment token (aligned by
  position with example.label.split()), plus entries for SEP/CLS/PAD.
  """
  if isinstance(example, PaddingInputExample):
    # TPU batch-padding placeholder: all-zero features, flagged not real.
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[1] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_ids=[0] * max_seq_length,
        is_real_example=False)
  if label_list is not None:
    label_map = {}
    for (i, label) in enumerate(label_list):
      label_map[label] = i
  tokens_a = tokenize_fn(example.text_a)
  # NOTE(review): assumes tokenize_fn yields exactly one token per
  # whitespace-separated label in example.label — confirm for the
  # sentencepiece tokenizer, else label_map[labels_a[i]] misaligns.
  labels_a = example.label.split()
  tokens_b = None
  if example.text_b:
    tokens_b = tokenize_fn(example.text_b)
  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for two [SEP] & one [CLS] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for one [SEP] & one [CLS] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[:max_seq_length - 2]
  tokens = []
  segment_ids = []
  label_ids = []
  for i, token in enumerate(tokens_a):
    tokens.append(token)
    segment_ids.append(SEG_ID_A)
    label_ids.append(label_map[labels_a[i]])
  # SEP_ID is appended as a raw id: `tokens` holds token IDS, not strings.
  tokens.append(SEP_ID)
  segment_ids.append(SEG_ID_A)
  label_ids.append(label_map["[SEP]"])
  if tokens_b:
    # NOTE(review): no label_ids are appended for the B segment, so for
    # pair inputs label_ids ends up shorter than input_ids and the
    # length assert below fails — presumably pair inputs are unused for
    # this NER task; confirm.
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(SEG_ID_B)
    tokens.append(SEP_ID)
    segment_ids.append(SEG_ID_B)
  # XLNet puts CLS at the END of the sequence (unlike BERT).
  tokens.append(CLS_ID)
  segment_ids.append(SEG_ID_CLS)
  label_ids.append(label_map["[CLS]"])
  input_ids = tokens
  # The mask has 0 for real tokens and 1 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [0] * len(input_ids)
  # Zero-pad up to the sequence length.
  if len(input_ids) < max_seq_length:
    delta_len = max_seq_length - len(input_ids)
    # Padding is PREPENDED (left padding), per XLNet convention.
    input_ids = [0] * delta_len + input_ids
    input_mask = [1] * delta_len + input_mask
    segment_ids = [SEG_ID_PAD] * delta_len + segment_ids
    label_ids = [label_map["[PAD]"]] * delta_len + label_ids
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  assert len(label_ids) == max_seq_length
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_ids=label_ids)
  return feature
#**********************************************************************************************************************#
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenize_fn, output_file,
    num_passes=1):
  """Convert a set of `InputExample`s to a TFRecord file.

  Skips writing when the file exists and FLAGS.overwrite_data is False.
  With num_passes > 1 the example list is duplicated so TPU batching can
  consume whole passes without dropping data.  Note that input_mask is
  serialized as a FLOAT feature; the reader must parse it as float32.
  """
  # do not create duplicated records
  if tf.gfile.Exists(output_file) and not FLAGS.overwrite_data:
    tf.logging.info("Do not overwrite tfrecord {} exists.".format(output_file))
    return
  tf.logging.info("Create new tfrecord {}.".format(output_file))
  writer = tf.python_io.TFRecordWriter(output_file)
  if num_passes > 1:
    examples *= num_passes
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example {} of {}".format(ex_index,
                                                        len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenize_fn)
    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f
    def create_float_feature(values):
      f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
      return f
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    # input_mask intentionally stored as float (matches float32 parsing).
    features["input_mask"] = create_float_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature(feature.label_ids)
    features["is_real_example"] = create_int_feature([int(feature.is_real_example)])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  The returned input_fn reads TFRecords written by
  file_based_convert_examples_to_features, optionally shards them across
  input pipelines, shuffles/repeats for training, and batches with
  map_and_batch.  Batch size is resolved from FLAGS depending on the
  use_tpu / training / eval / predict mode.
  """
  # Schema must mirror the writer: input_mask was serialized as float.
  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.float32),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }
  tf.logging.info("Input tfrecord file {}".format(input_file))
  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.cast(t, tf.int32)
      example[name] = t
    return example
  def input_fn(params, input_context=None):
    """The actual input function."""
    if FLAGS.use_tpu:
      batch_size = params["batch_size"]
    elif is_training:
      batch_size = FLAGS.train_batch_size
    elif FLAGS.do_eval:
      batch_size = FLAGS.eval_batch_size
    else:
      batch_size = FLAGS.predict_batch_size
    d = tf.data.TFRecordDataset(input_file)
    # Shard the dataset to difference devices
    if input_context is not None:
      tf.logging.info("Input pipeline id %d out of %d",
          input_context.input_pipeline_id, input_context.num_replicas_in_sync)
      d = d.shard(input_context.num_input_pipelines,
                  input_context.input_pipeline_id)
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    if is_training:
      d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)
      d = d.repeat()
    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
def get_model_fn(n_class):
  """Build an Estimator model_fn: XLNet encoder + (B)LSTM-CRF tagging head.

  Args:
    n_class: number of NER label classes (size of the CRF output layer).

  Returns:
    A model_fn(features, labels, mode, params) usable with tf.estimator.
  """
  def model_fn(features, labels, mode, params):
    #### Training or Evaluation
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    #### Get loss from inputs
    #********************************************************************************************#
    bsz_per_core = tf.shape(features["input_ids"])[0]
    # XLNet consumes time-major tensors, so transpose to [seq_len, batch].
    inp = tf.transpose(features["input_ids"], [1, 0])
    seg_id = tf.transpose(features["segment_ids"], [1, 0])
    inp_mask = tf.transpose(features["input_mask"], [1, 0])
    label_ids = features["label_ids"]
    xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
    run_config = xlnet.create_run_config(is_training, True, FLAGS)
    xlnet_model = xlnet.XLNetModel(
        xlnet_config=xlnet_config,
        run_config=run_config,
        input_ids=inp,
        seg_ids=seg_id,
        input_mask=inp_mask)
    #summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)
    # Per-token embeddings; transpose back to batch-major
    # [batch_size, seq_length, embedding_size] for the tagging head.
    xlnet_model_out = xlnet_model.get_sequence_output()
    embedding = tf.transpose(xlnet_model_out, [1, 0, 2])
    max_seq_length = embedding.shape[1].value
    # Compute the true (unpadded) length of each sequence: non-zero ids count.
    # NOTE(review): assumes id 0 is the padding token — confirm with the vocab.
    used = tf.sign(tf.abs(features["input_ids"]))
    lengths = tf.reduce_sum(used, reduction_indices=1)  # [batch_size] vector with each sequence's length in this batch
    # Add the CRF output layer on top of the encoder.
    blstm_crf = BLSTM_CRF(embedded_chars=embedding, hidden_unit=10, cell_type="lstm", num_layers=1,
                          dropout_rate=0.5, initializers=initializers, num_labels=n_class,
                          seq_length=max_seq_length, labels=label_ids, lengths=lengths, is_training=is_training)
    # crf_only=True skips the BLSTM and applies the CRF directly to the
    # XLNet embeddings.
    total_loss, logits, trans, pred_ids = blstm_crf.add_blstm_crf_layer(crf_only=True)
    #********************************************************************************************#
    #### Check model parameters
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
    tf.logging.info('#params: {}'.format(num_params))
    #### load pretrained models
    scaffold_fn = model_utils.init_from_checkpoint(FLAGS)
    #### Evaluation mode
    if mode == tf.estimator.ModeKeys.EVAL:
      def metric_fn(label_ids, pred_ids):
        # MSE over label ids is a coarse proxy metric for sequence tagging.
        return {
            "eval_loss": tf.metrics.mean_squared_error(labels=label_ids, predictions=pred_ids),
        }
      eval_metrics = metric_fn(features["label_ids"], pred_ids)
      eval_spec = tf.estimator.EstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metric_ops=eval_metrics
      )
      return eval_spec
    elif mode == tf.estimator.ModeKeys.PREDICT:
      # Expose everything the prediction loop in main() needs.
      predictions = {
          "logits": logits,
          "labels": label_ids,
          "pred_ids": pred_ids,
          "input_mask": features["input_mask"]
      }
      output_spec = tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=predictions
      )
      return output_spec
    #### Configuring the optimizer
    train_op, learning_rate, _ = model_utils.get_train_op(FLAGS, total_loss)
    monitor_dict = {}
    monitor_dict["lr"] = learning_rate
    #### Constucting training TPUEstimatorSpec with new cache.
    train_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op)
    return train_spec
  return model_fn
def main(_):
  """Driver: validate flags, build the estimator, then run the requested
  combination of training, evaluation and prediction."""
  tf.logging.set_verbosity(tf.logging.INFO)
  #### Validate flags
  if FLAGS.save_steps is not None:
    FLAGS.iterations = min(FLAGS.iterations, FLAGS.save_steps)
  if FLAGS.do_predict:
    predict_dir = FLAGS.predict_dir
    if not tf.gfile.Exists(predict_dir):
      tf.gfile.MakeDirs(predict_dir)
  # Task name -> data processor class.
  processors = {
      "ner": NerProcessor
  }
  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval, `do_predict` or "
        "`do_submit` must be True.")
  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)
  task_name = FLAGS.task_name.lower()
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
  processor = processors[task_name](FLAGS.output_dir)
  label_list = processor.get_labels()
  # SentencePiece tokenizer shared by all feature-conversion calls below.
  sp = spm.SentencePieceProcessor()
  sp.Load(FLAGS.spiece_model_file)
  def tokenize_fn(text):
    # Normalize, then map each whitespace-separated piece to its vocab id.
    text = preprocess_text(text, lower=FLAGS.uncased, remove_space=True, keep_accents=False)
    pieces = text.split()
    ids = [sp.PieceToId(piece) for piece in pieces]
    return ids
  run_config = model_utils.configure_tpu(FLAGS)
  model_fn = get_model_fn(len(label_list))
  spm_basename = os.path.basename(FLAGS.spiece_model_file)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  if FLAGS.use_tpu:
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        predict_batch_size=FLAGS.predict_batch_size,
        eval_batch_size=FLAGS.eval_batch_size)
  else:
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        config=run_config)
  if FLAGS.do_train:
    train_file_base = "{}.len-{}.train.tf_record".format(
        spm_basename, FLAGS.max_seq_length)
    train_file = os.path.join(FLAGS.output_dir, train_file_base)
    tf.logging.info("Use tfrecord file {}".format(train_file))
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    np.random.shuffle(train_examples)
    tf.logging.info("Num of train samples: {}".format(len(train_examples)))
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenize_fn,
        train_file, FLAGS.num_passes)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)
  if FLAGS.do_eval or FLAGS.do_predict:
    if FLAGS.eval_split == "dev":
      eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    else:
      eval_examples = processor.get_test_examples(FLAGS.data_dir)
    tf.logging.info("Num of eval samples: {}".format(len(eval_examples)))
  if FLAGS.do_eval:
    # TPU requires a fixed batch size for all batches, therefore the number
    # of examples must be a multiple of the batch size, or else examples
    # will get dropped. So we pad with fake examples which are ignored
    # later on. These do NOT count towards the metric (all tf.metrics
    # support a per-instance weight, and these get a weight of 0.0).
    #
    # Modified in XL: We also adopt the same mechanism for GPUs.
    while len(eval_examples) % FLAGS.eval_batch_size != 0:
      eval_examples.append(PaddingInputExample())
    eval_file_base = "{}.len-{}.{}.eval.tf_record".format(
        spm_basename, FLAGS.max_seq_length, FLAGS.eval_split)
    eval_file = os.path.join(FLAGS.output_dir, eval_file_base)
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenize_fn,
        eval_file)
    assert len(eval_examples) % FLAGS.eval_batch_size == 0
    eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=True)
    # Filter out all checkpoints in the directory
    steps_and_files = []
    filenames = tf.gfile.ListDirectory(FLAGS.model_dir)
    for filename in filenames:
      if filename.endswith(".index"):
        # Strip ".index" to recover the checkpoint prefix;
        # the trailing "-<n>" is the global step.
        ckpt_name = filename[:-6]
        cur_filename = join(FLAGS.model_dir, ckpt_name)
        global_step = int(cur_filename.split("-")[-1])
        tf.logging.info("Add {} to eval list.".format(cur_filename))
        steps_and_files.append([global_step, cur_filename])
    steps_and_files = sorted(steps_and_files, key=lambda x: x[0])
    # Decide whether to evaluate all ckpts
    if not FLAGS.eval_all_ckpt:
      steps_and_files = steps_and_files[-1:]
    eval_results = []
    for global_step, filename in sorted(steps_and_files, key=lambda x: x[0]):
      ret = estimator.evaluate(
          input_fn=eval_input_fn,
          steps=eval_steps,
          checkpoint_path=filename)
      ret["step"] = global_step
      ret["path"] = filename
      eval_results.append(ret)
      tf.logging.info("=" * 80)
      log_str = "Eval result | "
      for key, val in sorted(ret.items(), key=lambda x: x[0]):
        log_str += "{} {} | ".format(key, val)
      tf.logging.info(log_str)
    # NOTE(review): sorts by "loss" descending, so eval_results[0] is the
    # highest-loss checkpoint — confirm this ordering is intentional.
    eval_results.sort(key=lambda x: x["loss"], reverse=True)
    tf.logging.info("=" * 80)
    log_str = "Best result | "
    for key, val in sorted(eval_results[0].items(), key=lambda x: x[0]):
      log_str += "{} {} | ".format(key, val)
    tf.logging.info(log_str)
  if FLAGS.do_predict:
    eval_file_base = "{}.len-{}.{}.predict.tf_record".format(
        spm_basename, FLAGS.max_seq_length, FLAGS.eval_split)
    eval_file = os.path.join(FLAGS.output_dir, eval_file_base)
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenize_fn,
        eval_file)
    pred_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=False)
    predict_results = []
    with tf.gfile.Open(os.path.join(predict_dir, "{}.tsv".format(
        task_name)), "w") as fout:
      fout.write("index\tprediction\n")
      for pred_cnt, result in enumerate(estimator.predict(
          input_fn=pred_input_fn,
          yield_single_examples=True,
          checkpoint_path=FLAGS.predict_ckpt)):
        if pred_cnt % 1000 == 0:
          tf.logging.info("Predicting submission for example: {}".format(
              pred_cnt))
        pred_ids = [int(x) for x in result["pred_ids"].flat]
        input_mask = [int(x) for x in result["input_mask"].flat]
        # Keep labels only where mask != 1; presumably mask value 1 marks
        # padded positions (XLNet convention) — TODO confirm against the
        # feature-conversion code.
        label_out = [label_list[pred_ids[i]] for i in range(len(input_mask)) if input_mask[i] != 1]
        predict_results.append(label_out)
        fout.write("{}\t{}\n".format(pred_cnt, label_out))
    predict_json_path = os.path.join(predict_dir, "{}.logits.json".format(
        task_name))
    with tf.gfile.Open(predict_json_path, "w") as fp:
      json.dump(predict_results, fp, indent=4)
if __name__ == "__main__":
  # tf.app.run() parses command-line flags and then invokes main(argv).
  tf.app.run()
| [
"tensorflow.gfile.Exists",
"tensorflow.reduce_sum",
"pickle.dump",
"sentencepiece.SentencePieceProcessor",
"tensorflow.logging.info",
"tensorflow.trainable_variables",
"tensorflow.logging.set_verbosity",
"os.path.isfile",
"tensorflow.estimator.Estimator",
"pickle.load",
"xlnet.create_run_config"... | [((665, 794), 'logging.basicConfig', 'logger.basicConfig', ([], {'format': '"""%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s"""', 'level': 'logger.INFO'}), "(format=\n '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n level=logger.INFO)\n", (683, 794), True, 'import logging as logger\n'), ((17022, 17062), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), '(output_file)\n', (17049, 17062), True, 'import tensorflow as tf\n'), ((23976, 24017), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (24000, 24017), True, 'import tensorflow as tf\n'), ((24645, 24668), 'xlnet_config.FLAGS.task_name.lower', 'FLAGS.task_name.lower', ([], {}), '()\n', (24666, 24668), False, 'from xlnet_config import FLAGS\n'), ((24870, 24898), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (24896, 24898), True, 'import sentencepiece as spm\n'), ((25169, 25201), 'model_utils.configure_tpu', 'model_utils.configure_tpu', (['FLAGS'], {}), '(FLAGS)\n', (25194, 25201), False, 'import model_utils\n'), ((25268, 25309), 'os.path.basename', 'os.path.basename', (['FLAGS.spiece_model_file'], {}), '(FLAGS.spiece_model_file)\n', (25284, 25309), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((31301, 31313), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (31311, 31313), True, 'import tensorflow as tf\n'), ((15852, 15886), 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), "('*** Example ***')\n", (15867, 15886), True, 'import tensorflow as tf\n'), ((15892, 15934), 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), "('guid: %s' % example.guid)\n", (15907, 15934), True, 'import tensorflow as tf\n'), ((16788, 16816), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['output_file'], {}), 
'(output_file)\n', (16803, 16816), True, 'import tensorflow as tf\n'), ((17765, 17790), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (17788, 17790), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((18541, 18583), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (18559, 18583), True, 'import tensorflow as tf\n'), ((18606, 18650), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.float32'], {}), '([seq_length], tf.float32)\n', (18624, 18650), True, 'import tensorflow as tf\n'), ((18674, 18716), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (18692, 18716), True, 'import tensorflow as tf\n'), ((18738, 18780), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (18756, 18780), True, 'import tensorflow as tf\n'), ((18808, 18840), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (18826, 18840), True, 'import tensorflow as tf\n'), ((19032, 19081), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', (19055, 19081), True, 'import tensorflow as tf\n'), ((19722, 19757), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), '(input_file)\n', (19745, 19757), True, 'import tensorflow as tf\n'), ((20942, 20985), 'tensorflow.transpose', 'tf.transpose', (["features['input_ids']", '[1, 0]'], {}), "(features['input_ids'], [1, 0])\n", (20954, 20985), True, 'import tensorflow as tf\n'), ((21000, 21045), 'tensorflow.transpose', 'tf.transpose', (["features['segment_ids']", '[1, 0]'], {}), "(features['segment_ids'], [1, 0])\n", (21012, 21045), True, 'import tensorflow as tf\n'), ((21062, 21106), 'tensorflow.transpose', 'tf.transpose', 
(["features['input_mask']", '[1, 0]'], {}), "(features['input_mask'], [1, 0])\n", (21074, 21106), True, 'import tensorflow as tf\n'), ((21168, 21220), 'xlnet.XLNetConfig', 'xlnet.XLNetConfig', ([], {'json_path': 'FLAGS.model_config_path'}), '(json_path=FLAGS.model_config_path)\n', (21185, 21220), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((21239, 21288), 'xlnet.create_run_config', 'xlnet.create_run_config', (['is_training', '(True)', 'FLAGS'], {}), '(is_training, True, FLAGS)\n', (21262, 21288), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((21308, 21430), 'xlnet.XLNetModel', 'xlnet.XLNetModel', ([], {'xlnet_config': 'xlnet_config', 'run_config': 'run_config', 'input_ids': 'inp', 'seg_ids': 'seg_id', 'input_mask': 'inp_mask'}), '(xlnet_config=xlnet_config, run_config=run_config,\n input_ids=inp, seg_ids=seg_id, input_mask=inp_mask)\n', (21324, 21430), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((21698, 21738), 'tensorflow.transpose', 'tf.transpose', (['xlnet_model_out', '[1, 0, 2]'], {}), '(xlnet_model_out, [1, 0, 2])\n', (21710, 21738), True, 'import tensorflow as tf\n'), ((21867, 21907), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['used'], {'reduction_indices': '(1)'}), '(used, reduction_indices=1)\n', (21880, 21907), True, 'import tensorflow as tf\n'), ((21990, 22238), 'lstm_crf_layer.BLSTM_CRF', 'BLSTM_CRF', ([], {'embedded_chars': 'embedding', 'hidden_unit': '(10)', 'cell_type': '"""lstm"""', 'num_layers': '(1)', 'dropout_rate': '(0.5)', 'initializers': 'initializers', 'num_labels': 'n_class', 'seq_length': 'max_seq_length', 'labels': 'label_ids', 'lengths': 'lengths', 'is_training': 'is_training'}), "(embedded_chars=embedding, hidden_unit=10, cell_type='lstm',\n num_layers=1, dropout_rate=0.5, initializers=initializers, num_labels=\n n_class, seq_length=max_seq_length, labels=label_ids, lengths=lengths,\n is_training=is_training)\n", (21999, 22238), False, 'from lstm_crf_layer import 
BLSTM_CRF\n'), ((22684, 22723), 'model_utils.init_from_checkpoint', 'model_utils.init_from_checkpoint', (['FLAGS'], {}), '(FLAGS)\n', (22716, 22723), False, 'import model_utils\n'), ((23646, 23689), 'model_utils.get_train_op', 'model_utils.get_train_op', (['FLAGS', 'total_loss'], {}), '(FLAGS, total_loss)\n', (23670, 23689), False, 'import model_utils\n'), ((23839, 23912), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op'}), '(mode=mode, loss=total_loss, train_op=train_op)\n', (23865, 23912), True, 'import tensorflow as tf\n'), ((24552, 24585), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (24567, 24585), True, 'import tensorflow as tf\n'), ((24592, 24627), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (24609, 24627), True, 'import tensorflow as tf\n'), ((24973, 25059), 'prepro_utils.preprocess_text', 'preprocess_text', (['text'], {'lower': 'FLAGS.uncased', 'remove_space': '(True)', 'keep_accents': '(False)'}), '(text, lower=FLAGS.uncased, remove_space=True, keep_accents=\n False)\n', (24988, 25059), False, 'from prepro_utils import preprocess_text, encode_ids\n'), ((25440, 25666), 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size'}), '(use_tpu=FLAGS.use_tpu, model_fn=model_fn,\n config=run_config, train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size, eval_batch_size=FLAGS.\n eval_batch_size)\n', (25467, 25666), True, 'import tensorflow as tf\n'), ((25735, 25795), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'model_fn', 'config': 'run_config'}), '(model_fn=model_fn, 
config=run_config)\n', (25757, 25795), True, 'import tensorflow as tf\n'), ((25961, 26008), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'train_file_base'], {}), '(FLAGS.output_dir, train_file_base)\n', (25973, 26008), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((26147, 26180), 'numpy.random.shuffle', 'np.random.shuffle', (['train_examples'], {}), '(train_examples)\n', (26164, 26180), True, 'import numpy as np\n'), ((27690, 27736), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'eval_file_base'], {}), '(FLAGS.output_dir, eval_file_base)\n', (27702, 27736), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((28284, 28323), 'tensorflow.gfile.ListDirectory', 'tf.gfile.ListDirectory', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (28306, 28323), True, 'import tensorflow as tf\n'), ((29489, 29514), 'tensorflow.logging.info', 'tf.logging.info', (["('=' * 80)"], {}), "('=' * 80)\n", (29504, 29514), True, 'import tensorflow as tf\n'), ((29672, 29696), 'tensorflow.logging.info', 'tf.logging.info', (['log_str'], {}), '(log_str)\n', (29687, 29696), True, 'import tensorflow as tf\n'), ((29866, 29912), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'eval_file_base'], {}), '(FLAGS.output_dir, eval_file_base)\n', (29878, 29912), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((8077, 8123), 'codecs.open', 'codecs.open', (['input_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(input_file, 'r', encoding='utf-8')\n", (8088, 8123), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((10373, 10420), 'os.path.join', 'os.path.join', (['self.output_dir', '"""label_list.pkl"""'], {}), "(self.output_dir, 'label_list.pkl')\n", (10385, 10420), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((11598, 11644), 'codecs.open', 'codecs.open', (['input_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(input_file, 'r', encoding='utf-8')\n", (11609, 11644), False, 
'import os, collections, json, codecs, pickle, re, xlnet\n'), ((19847, 19970), 'tensorflow.logging.info', 'tf.logging.info', (['"""Input pipeline id %d out of %d"""', 'input_context.input_pipeline_id', 'input_context.num_replicas_in_sync'], {}), "('Input pipeline id %d out of %d', input_context.\n input_pipeline_id, input_context.num_replicas_in_sync)\n", (19862, 19970), True, 'import tensorflow as tf\n'), ((20896, 20927), 'tensorflow.shape', 'tf.shape', (["features['input_ids']"], {}), "(features['input_ids'])\n", (20904, 20927), True, 'import tensorflow as tf\n'), ((21821, 21850), 'tensorflow.abs', 'tf.abs', (["features['input_ids']"], {}), "(features['input_ids'])\n", (21827, 21850), True, 'import tensorflow as tf\n'), ((23055, 23144), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metrics'}), '(mode=mode, loss=total_loss, eval_metric_ops=\n eval_metrics)\n', (23081, 23144), True, 'import tensorflow as tf\n'), ((24218, 24246), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['predict_dir'], {}), '(predict_dir)\n', (24233, 24246), True, 'import tensorflow as tf\n'), ((24255, 24285), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['predict_dir'], {}), '(predict_dir)\n', (24272, 24285), True, 'import tensorflow as tf\n'), ((29214, 29239), 'tensorflow.logging.info', 'tf.logging.info', (["('=' * 80)"], {}), "('=' * 80)\n", (29229, 29239), True, 'import tensorflow as tf\n'), ((29393, 29417), 'tensorflow.logging.info', 'tf.logging.info', (['log_str'], {}), '(log_str)\n', (29408, 29417), True, 'import tensorflow as tf\n'), ((31173, 31210), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['predict_json_path', '"""w"""'], {}), "(predict_json_path, 'w')\n", (31186, 31210), True, 'import tensorflow as tf\n'), ((31225, 31265), 'json.dump', 'json.dump', (['predict_results', 'fp'], {'indent': '(4)'}), '(predict_results, fp, indent=4)\n', (31234, 31265), False, 'import os, collections, json, 
codecs, pickle, re, xlnet\n'), ((9318, 9353), 'os.path.join', 'os.path.join', (['data_dir', '"""train.txt"""'], {}), "(data_dir, 'train.txt')\n", (9330, 9353), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((9487, 9520), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.txt"""'], {}), "(data_dir, 'dev.txt')\n", (9499, 9520), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((9653, 9687), 'os.path.join', 'os.path.join', (['data_dir', '"""test.txt"""'], {}), "(data_dir, 'test.txt')\n", (9665, 9687), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((10546, 10561), 'pickle.load', 'pickle.load', (['rf'], {}), '(rf)\n', (10557, 10561), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((18199, 18234), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (18216, 18234), True, 'import tensorflow as tf\n'), ((19305, 19325), 'tensorflow.cast', 'tf.cast', (['t', 'tf.int32'], {}), '(t, tf.int32)\n', (19312, 19325), True, 'import tensorflow as tf\n'), ((22522, 22538), 'numpy.prod', 'np.prod', (['v.shape'], {}), '(v.shape)\n', (22529, 22538), True, 'import numpy as np\n'), ((23454, 23516), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (23480, 23516), True, 'import tensorflow as tf\n'), ((27525, 27546), 'classifier_utils.PaddingInputExample', 'PaddingInputExample', ([], {}), '()\n', (27544, 27546), False, 'from classifier_utils import PaddingInputExample\n'), ((28456, 28488), 'os.path.join', 'join', (['FLAGS.model_dir', 'ckpt_name'], {}), '(FLAGS.model_dir, ckpt_name)\n', (28460, 28488), False, 'from os.path import join\n'), ((9842, 9864), 'os.path.exists', 'os.path.exists', (['labels'], {}), '(labels)\n', (9856, 9864), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((9869, 9891), 'os.path.isfile', 
'os.path.isfile', (['labels'], {}), '(labels)\n', (9883, 9891), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((10453, 10500), 'os.path.join', 'os.path.join', (['self.output_dir', '"""label_list.pkl"""'], {}), "(self.output_dir, 'label_list.pkl')\n", (10465, 10500), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((10811, 10839), 'pickle.dump', 'pickle.dump', (['self.labels', 'rf'], {}), '(self.labels, rf)\n', (10822, 10839), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((22548, 22572), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (22570, 22572), True, 'import tensorflow as tf\n'), ((22887, 22956), 'tensorflow.metrics.mean_squared_error', 'tf.metrics.mean_squared_error', ([], {'labels': 'label_ids', 'predictions': 'pred_ids'}), '(labels=label_ids, predictions=pred_ids)\n', (22916, 22956), True, 'import tensorflow as tf\n'), ((9919, 9961), 'codecs.open', 'codecs.open', (['labels', '"""r"""'], {'encoding': '"""utf-8"""'}), "(labels, 'r', encoding='utf-8')\n", (9930, 9961), False, 'import os, collections, json, codecs, pickle, re, xlnet\n'), ((10728, 10775), 'os.path.join', 'os.path.join', (['self.output_dir', '"""label_list.pkl"""'], {}), "(self.output_dir, 'label_list.pkl')\n", (10740, 10775), False, 'import os, collections, json, codecs, pickle, re, xlnet\n')] |
import os
import json
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.nn import functional as F
from .datasets import register_dataset
from .data_utils import truncate_feats
@register_dataset("anet")
class ActivityNetDataset(Dataset):
    """ActivityNet 1.3 dataset.

    Loads pre-extracted clip features (npy files or one hdf5) together with
    temporal action annotations from a json database, and yields per-video
    dicts for temporal action localization training / evaluation.
    """
    def __init__(
        self,
        is_training,     # if in training mode
        split,           # split, a tuple/list allowing concat of subsets
        feat_folder,     # folder for features
        json_file,       # json file for annotations
        feat_stride,     # temporal stride of the feats
        num_frames,      # number of frames for each feat
        default_fps,     # default fps
        downsample_rate, # downsample rate for feats
        max_seq_len,     # maximum sequence length during training
        trunc_thresh,    # threshold for truncate an action segment
        crop_ratio,      # a tuple (e.g., (0.9, 1.0)) for random cropping
        input_dim,       # input feat dim
        num_classes,     # number of action categories
        file_prefix,     # feature file prefix if any
        file_ext,        # feature file extension if any
        force_upsampling # force to upsample to max_seq_len
    ):
        # file path
        assert os.path.exists(feat_folder) and os.path.exists(json_file)
        assert isinstance(split, tuple) or isinstance(split, list)
        assert crop_ratio == None or len(crop_ratio) == 2
        self.feat_folder = feat_folder
        # A single .hdf5 archive is treated differently from a folder of files.
        self.use_hdf5 = '.hdf5' in feat_folder
        if file_prefix is not None:
            self.file_prefix = file_prefix
        else:
            self.file_prefix = ''
        self.file_ext = file_ext
        self.json_file = json_file
        # anet uses fixed length features, make sure there is no downsampling
        self.force_upsampling = force_upsampling
        # split / training mode
        self.split = split
        self.is_training = is_training
        # features meta info
        self.feat_stride = feat_stride
        self.num_frames = num_frames
        self.input_dim = input_dim
        self.default_fps = default_fps
        self.downsample_rate = downsample_rate
        self.max_seq_len = max_seq_len
        self.trunc_thresh = trunc_thresh
        self.num_classes = num_classes
        self.label_dict = None
        self.crop_ratio = crop_ratio
        # load database and select the subset
        dict_db, label_dict = self._load_json_db(self.json_file)
        # proposal vs action categories
        assert (num_classes == 1) or (len(label_dict) == num_classes)
        self.data_list = dict_db
        self.label_dict = label_dict
        # dataset specific attributes
        self.db_attributes = {
            'dataset_name': 'ActivityNet 1.3',
            'tiou_thresholds': np.linspace(0.5, 0.95, 10),
            'empty_label_ids': []
        }
    def get_attributes(self):
        """Return dataset-level attributes used by the evaluator."""
        return self.db_attributes
    def _load_json_db(self, json_file):
        """Parse the annotation json into an immutable tuple of video records.

        Returns:
            (dict_db, label_dict): per-video records for the selected split,
            and the mapping from action label name to integer id.
        """
        # load database and select the subset
        with open(json_file, 'r') as fid:
            json_data = json.load(fid)
        json_db = json_data['database']
        # if label_dict is not available
        # NOTE(review): label_dict is only bound inside this branch; the call
        # from __init__ always has self.label_dict == None, so this is safe
        # there — confirm no other caller exists.
        if self.label_dict is None:
            label_dict = {}
            for key, value in json_db.items():
                for act in value['annotations']:
                    label_dict[act['label']] = act['label_id']
        # fill in the db (immutable afterwards)
        dict_db = tuple()
        for key, value in json_db.items():
            # skip the video if not in the split
            if value['subset'].lower() not in self.split:
                continue
            # get fps if available
            if self.default_fps is not None:
                fps = self.default_fps
            elif 'fps' in value:
                fps = value['fps']
            else:
                assert False, "Unknown video FPS."
            duration = value['duration']
            # get annotations if available
            if ('annotations' in value) and (len(value['annotations']) > 0):
                num_acts = len(value['annotations'])
                # segments: [num_acts, 2] (start, end) in seconds;
                # labels: [num_acts] integer class ids.
                segments = np.zeros([num_acts, 2], dtype=np.float32)
                labels = np.zeros([num_acts, ], dtype=np.int64)
                for idx, act in enumerate(value['annotations']):
                    segments[idx][0] = act['segment'][0]
                    segments[idx][1] = act['segment'][1]
                    if self.num_classes == 1:
                        # Proposal mode: collapse all actions to one class.
                        labels[idx] = 0
                    else:
                        labels[idx] = label_dict[act['label']]
            else:
                segments = None
                labels = None
            dict_db += ({'id': key,
                         'fps' : fps,
                         'duration' : duration,
                         'segments' : segments,
                         'labels' : labels
            }, )
        return dict_db, label_dict
    def __len__(self):
        return len(self.data_list)
    def __getitem__(self, idx):
        """Load one video's features and annotations as a data dict."""
        # directly return a (truncated) data point (so it is very fast!)
        # auto batching will be disabled in the subsequent dataloader
        # instead the model will need to decide how to batch / preporcess the data
        video_item = self.data_list[idx]
        # load features
        if self.use_hdf5:
            with h5py.File(self.feat_folder, 'r') as h5_fid:
                feats = np.asarray(
                    h5_fid[self.file_prefix + video_item['id']][()],
                    dtype=np.float32
                )
        else:
            filename = os.path.join(self.feat_folder,
                                    self.file_prefix + video_item['id'] + self.file_ext)
            feats = np.load(filename).astype(np.float32)
        # we support both fixed length features / variable length features
        if self.feat_stride > 0:
            # var length features
            feat_stride, num_frames = self.feat_stride, self.num_frames
            # only apply down sampling here
            if self.downsample_rate > 1:
                feats = feats[::self.downsample_rate, :]
                feat_stride = self.feat_stride * self.downsample_rate
        else:
            # deal with fixed length feature, recompute feat_stride, num_frames
            seq_len = feats.shape[0]
            assert seq_len <= self.max_seq_len
            if self.force_upsampling:
                # reset to max_seq_len
                seq_len = self.max_seq_len
            feat_stride = video_item['duration'] * video_item['fps'] / seq_len
            # center the features
            num_frames = feat_stride * self.num_frames
        # T x C -> C x T
        feats = torch.from_numpy(np.ascontiguousarray(feats.transpose()))
        # resize the features if needed
        if (self.feat_stride <= 0) and (feats.shape[-1] != self.max_seq_len) and self.force_upsampling:
            # Linearly interpolate fixed-length features up to max_seq_len.
            resize_feats = F.interpolate(
                feats.unsqueeze(0),
                size=self.max_seq_len,
                mode='linear',
                align_corners=False
            )
            feats = resize_feats.squeeze(0)
        # convert time stamp (in second) into temporal feature grids
        # ok to have small negative values here
        if video_item['segments'] is not None:
            segments = torch.from_numpy(
                (video_item['segments'] * video_item['fps']- 0.5 * num_frames) / feat_stride
            )
            labels = torch.from_numpy(video_item['labels'])
            # for activity net, we have a few videos with a bunch of missing frames
            # here is a quick fix for training
            if self.is_training:
                feat_len = feats.shape[1]
                valid_seg_list = []
                for seg in segments:
                    if seg[0] >= feat_len:
                        # skip an action outside of the feature map
                        continue
                    # truncate an action boundary
                    valid_seg_list.append(seg.clamp(max=feat_len))
                segments = torch.stack(valid_seg_list, dim=0)
        else:
            segments, labels = None, None
        # return a data dict
        data_dict = {'video_id'        : video_item['id'],
                     'feats'           : feats,      # C x T
                     'segments'        : segments,   # N x 2
                     'labels'          : labels,     # N
                     'fps'             : video_item['fps'],
                     'duration'        : video_item['duration'],
                     'feat_stride'     : feat_stride,
                     'feat_num_frames' : num_frames}
        # no truncation is needed
        # truncate the features during training
        if self.is_training and (segments is not None) and (self.feat_stride > 0):
            data_dict = truncate_feats(
                data_dict, self.max_seq_len, self.trunc_thresh, self.crop_ratio
            )
        return data_dict
| [
"h5py.File",
"json.load",
"numpy.load",
"torch.stack",
"numpy.asarray",
"os.path.exists",
"numpy.zeros",
"numpy.linspace",
"os.path.join",
"torch.from_numpy"
] | [((1276, 1303), 'os.path.exists', 'os.path.exists', (['feat_folder'], {}), '(feat_folder)\n', (1290, 1303), False, 'import os\n'), ((1308, 1333), 'os.path.exists', 'os.path.exists', (['json_file'], {}), '(json_file)\n', (1322, 1333), False, 'import os\n'), ((2821, 2847), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)', '(10)'], {}), '(0.5, 0.95, 10)\n', (2832, 2847), True, 'import numpy as np\n'), ((3111, 3125), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (3120, 3125), False, 'import json\n'), ((5654, 5742), 'os.path.join', 'os.path.join', (['self.feat_folder', "(self.file_prefix + video_item['id'] + self.file_ext)"], {}), "(self.feat_folder, self.file_prefix + video_item['id'] + self.\n file_ext)\n", (5666, 5742), False, 'import os\n'), ((7399, 7498), 'torch.from_numpy', 'torch.from_numpy', (["((video_item['segments'] * video_item['fps'] - 0.5 * num_frames) / feat_stride)"], {}), "((video_item['segments'] * video_item['fps'] - 0.5 *\n num_frames) / feat_stride)\n", (7415, 7498), False, 'import torch\n'), ((7545, 7583), 'torch.from_numpy', 'torch.from_numpy', (["video_item['labels']"], {}), "(video_item['labels'])\n", (7561, 7583), False, 'import torch\n'), ((4180, 4221), 'numpy.zeros', 'np.zeros', (['[num_acts, 2]'], {'dtype': 'np.float32'}), '([num_acts, 2], dtype=np.float32)\n', (4188, 4221), True, 'import numpy as np\n'), ((4247, 4283), 'numpy.zeros', 'np.zeros', (['[num_acts]'], {'dtype': 'np.int64'}), '([num_acts], dtype=np.int64)\n', (4255, 4283), True, 'import numpy as np\n'), ((5413, 5445), 'h5py.File', 'h5py.File', (['self.feat_folder', '"""r"""'], {}), "(self.feat_folder, 'r')\n", (5422, 5445), False, 'import h5py\n'), ((5481, 5558), 'numpy.asarray', 'np.asarray', (["h5_fid[self.file_prefix + video_item['id']][()]"], {'dtype': 'np.float32'}), "(h5_fid[self.file_prefix + video_item['id']][()], dtype=np.float32)\n", (5491, 5558), True, 'import numpy as np\n'), ((8151, 8185), 'torch.stack', 'torch.stack', (['valid_seg_list'], {'dim': 
'(0)'}), '(valid_seg_list, dim=0)\n', (8162, 8185), False, 'import torch\n'), ((5794, 5811), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (5801, 5811), True, 'import numpy as np\n')] |
# coding: utf-8
import sys
from python_environment_check import check_packages
import networkx as nx
import numpy as np
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
# Make the parent folder importable so python_environment_check can be loaded.
sys.path.insert(0, '..')
# Verify that the recommended package versions are installed:
required_versions = {
    'torch': '1.8.0',
    'networkx': '2.6.2',
    'numpy': '1.21.2',
}
check_packages(required_versions)
# # Chapter 18 - Graph Neural Networks for Capturing Dependencies in Graph Structured Data (Part 1/2)
# - [Introduction to graph data](#Introduction-to-graph-data)
# - [Undirected graphs](#Undirected-graphs)
# - [Directed graphs](#Directed-graphs)
# - [Labeled graphs](#Labeled-graphs)
# - [Representing molecules as graphs](#Representing-molecules-as-graphs)
# - [Understanding graph convolutions](#Understanding-graph-convolutions)
# - [The motivation behind using graph convolutions](#The-motivation-behind-using-graph-convolutions)
# - [Implementing a basic graph convolution](#Implementing-a-basic-graph-convolution)
# - [Implementing a GNN in PyTorch from scratch](#Implementing-a-GNN-in-PyTorch-from-scratch)
# - [Defining the NodeNetwork model](#Defining-the-NodeNetwork-model)
# - [Coding the NodeNetwork’s graph convolution layer](#Coding-the-NodeNetworks-graph-convolution-layer)
# - [Adding a global pooling layer to deal with varying graph sizes](#Adding-a-global-pooling-layer-to-deal-with-varying-graph-sizes)
# - [Preparing the DataLoader](#Preparing-the-DataLoader)
# - [Using the NodeNetwork to make predictions](#Using-the-NodeNetwork-to-make-predictions)
# ## Introduction to graph data
# ### Undirected graphs
# ### Directed graphs
# ### Labeled graphs
# ### Representing molecules as graphs
# ### Understanding graph convolutions
# ### The motivation behind using graph convolutions
# ### Implementing a basic graph convolution
# Build a small 4-node undirected example graph with colored nodes.
G = nx.Graph()
#Hex codes for colors if we draw graph
blue, orange, green = "#1f77b4", "#ff7f0e","#2ca02c"
# Each node carries a "color" attribute used both for plotting and as label.
G.add_nodes_from([(1, {"color": blue}),
                  (2, {"color": orange}),
                  (3, {"color": blue}),
                  (4, {"color": green})])
G.add_edges_from([(1, 2),(2, 3),(1, 3),(3, 4)])
# Dense adjacency matrix of the example graph (symmetric, 0/1 entries).
A = np.asarray(nx.adjacency_matrix(G).todense())
print(A)
def build_graph_color_label_representation(G, mapping_dict):
    """Return a one-hot node-feature matrix of shape (num_nodes, num_colors).

    Each node's 'color' attribute is mapped to a column index via
    mapping_dict; the corresponding row of the result is the one-hot vector
    for that color.
    """
    color_indices = np.array(
        [mapping_dict[color]
         for color in nx.get_node_attributes(G, 'color').values()],
        dtype=int)
    # Row i of the identity matrix is the one-hot vector for index i.
    return np.eye(len(mapping_dict))[color_indices]
# One-hot node features for the example graph (mapping: green->0, blue->1, orange->2).
X = build_graph_color_label_representation(G, {green: 0, blue: 1, orange: 2})
print(X)
color_map = nx.get_node_attributes(G, 'color').values()
nx.draw(G, with_labels=True, node_color=color_map)
# Random weights for one hand-rolled graph convolution:
# W_1 transforms each node's own features, W_2 the neighbor-aggregated features.
f_in, f_out = X.shape[1], 6
W_1 = np.random.rand(f_in, f_out)
W_2 = np.random.rand(f_in, f_out)
# Basic graph convolution: h = X @ W_1 + (A @ X) @ W_2
h = np.dot(X,W_1) + np.dot(np.dot(A, X), W_2)
# ## Implementing a GNN in PyTorch from scratch
# ### Defining the NodeNetwork model
class NodeNetwork(torch.nn.Module):
    """Graph classifier: two graph convolutions, global sum pooling,
    and a two-layer MLP head producing class probabilities."""

    def __init__(self, input_features):
        super().__init__()
        self.conv_1 = BasicGraphConvolutionLayer(input_features, 32)
        self.conv_2 = BasicGraphConvolutionLayer(32, 32)
        self.fc_1 = torch.nn.Linear(32, 16)
        self.out_layer = torch.nn.Linear(16, 2)

    def forward(self, X, A, batch_mat):
        # relu(x) is identical to x.clamp(0): both zero out negatives.
        h = F.relu(self.conv_1(X, A))
        h = F.relu(self.conv_2(h, A))
        # Pool node embeddings into one vector per graph in the batch.
        pooled = global_sum_pool(h, batch_mat)
        logits = self.out_layer(self.fc_1(pooled))
        return F.softmax(logits, dim=1)
# ### Coding the NodeNetwork’s graph convolution layer
class BasicGraphConvolutionLayer(torch.nn.Module):
    """Single graph convolution computing  A (X W2) + X W1 + bias.

    W2 weighs the messages aggregated from neighbors (via the adjacency
    matrix A), W1 the node's own ("root") features.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Randomly initialized weights; Parameter registers them with autograd.
        self.W2 = Parameter(torch.rand(
            (in_channels, out_channels), dtype=torch.float32))
        self.W1 = Parameter(torch.rand(
            (in_channels, out_channels), dtype=torch.float32))
        self.bias = Parameter(torch.zeros(
            out_channels, dtype=torch.float32))

    def forward(self, X, A):
        # Messages each node would send, then aggregated over neighbors by A.
        neighbor_msgs = A @ (X @ self.W2)
        # Contribution of the node's own features.
        root_update = X @ self.W1
        return neighbor_msgs + root_update + self.bias
# ### Adding a global pooling layer to deal with varying graph sizes
def global_sum_pool(X, batch_mat):
    """Sum-pool node embeddings into per-graph representations.

    With no batch matrix (or a 1-D one) the whole node matrix is summed into
    a single row; otherwise batch_mat @ X sums each graph's nodes separately.
    """
    if batch_mat is None or batch_mat.dim() == 1:
        # keepdim=True is equivalent to sum(dim=0).unsqueeze(0).
        return X.sum(dim=0, keepdim=True)
    return batch_mat @ X
def get_batch_tensor(graph_sizes):
    """Build a (num_graphs x total_nodes) batch-assignment matrix.

    Row g contains ones exactly over the columns belonging to graph g's
    nodes, so that ``batch_mat @ X`` sums node features per graph.

    Parameters
    ----------
    graph_sizes : sequence of int
        Number of nodes in each graph of the batch.

    Returns
    -------
    torch.Tensor
        Float tensor of shape (len(graph_sizes), sum(graph_sizes)).
    """
    tot_len = sum(graph_sizes)
    batch_mat = torch.zeros([len(graph_sizes), tot_len]).float()
    # A running offset replaces the original quadratic prefix-sum lists
    # (sum(graph_sizes[:idx]) recomputed for every index).
    start = 0
    for idx, size in enumerate(graph_sizes):
        batch_mat[idx, start:start + size] = 1
        start += size
    return batch_mat
def collate_graphs(batch):
    """Collate a list of graph dicts into one batched graph dict.

    Feature matrices and labels are concatenated; adjacency matrices are
    assembled block-diagonally; a batch-assignment matrix records which
    rows belong to which graph.
    """
    adj_mats = [graph['A'] for graph in batch]
    sizes = [a.size(0) for a in adj_mats]
    total_nodes = sum(sizes)
    # Assignment matrix mapping batched node rows back to their graph.
    batch_mat = get_batch_tensor(sizes)
    # Stack all node features / labels along the node (resp. graph) axis.
    feat_mats = torch.cat([graph['X'] for graph in batch], dim=0)
    labels = torch.cat([graph['y'] for graph in batch], dim=0)
    # Block-diagonal adjacency: no edges between different graphs.
    batch_adj = torch.zeros([total_nodes, total_nodes], dtype=torch.float32)
    offset = 0
    for adj, size in zip(adj_mats, sizes):
        batch_adj[offset:offset + size, offset:offset + size] = adj
        offset += size
    return {
        'A': batch_adj,
        'X': feat_mats,
        'y': labels,
        'batch': batch_mat}
# ### Preparing the DataLoader
def get_graph_dict(G, mapping_dict):
    """Convert a networkx graph into the dict representation used here:
    float adjacency matrix 'A', one-hot features 'X', a placeholder label
    'y', and an (empty) 'batch' slot filled later by the collate function."""
    adj = torch.from_numpy(np.asarray(nx.adjacency_matrix(G).todense())).float()
    # One-hot color features (introduced with the first example graph).
    feats = torch.from_numpy(
        build_graph_color_label_representation(G, mapping_dict)).float()
    # Kludge: there is no real task in this example, so use a fixed label.
    labels = torch.tensor([[1, 0]]).float()
    return {'A': adj, 'X': feats, 'y': labels, 'batch': None}
# building 4 graphs to treat as a dataset
blue, orange, green = "#1f77b4", "#ff7f0e","#2ca02c"
# Color -> one-hot index mapping shared by all four graphs.
mapping_dict = {green: 0, blue: 1, orange: 2}
G1 = nx.Graph()
G1.add_nodes_from([(1, {"color": blue}),
                   (2, {"color": orange}),
                   (3, {"color": blue}),
                   (4, {"color": green})])
G1.add_edges_from([(1, 2), (2, 3),(1, 3), (3, 4)])
G2 = nx.Graph()
G2.add_nodes_from([(1, {"color": green}),
                   (2, {"color": green}),
                   (3, {"color": orange}),
                   (4, {"color": orange}),
                   (5,{"color": blue})])
G2.add_edges_from([(2, 3),(3, 4),(3, 1),(5, 1)])
G3 = nx.Graph()
G3.add_nodes_from([(1, {"color": orange}),
                   (2, {"color": orange}),
                   (3, {"color": green}),
                   (4, {"color": green}),
                   (5, {"color": blue}),
                   (6, {"color":orange})])
G3.add_edges_from([(2, 3), (3, 4), (3, 1), (5, 1), (2, 5), (6, 1)])
G4 = nx.Graph()
G4.add_nodes_from([(1, {"color": blue}), (2, {"color": blue}), (3, {"color": green})])
G4.add_edges_from([(1, 2), (2, 3)])
# Convert every graph to the dict representation consumed by the model.
graph_list = [get_graph_dict(graph,mapping_dict) for graph in [G1, G2, G3, G4]]
class ExampleDataset(Dataset):
    """Minimal map-style dataset wrapping a pre-built list of graph dicts."""

    def __init__(self, graph_list):
        self.graphs = graph_list

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        return self.graphs[idx]
dset = ExampleDataset(graph_list)
# Note how we use our custom collate function
loader = DataLoader(dset, batch_size=2, shuffle=False, collate_fn=collate_graphs)
# ### Using the NodeNetwork to make predictions
# Fix the seed so the randomly initialized network is reproducible.
torch.manual_seed(123)
node_features = 3
net = NodeNetwork(node_features)
# Run the network over every (batched) graph in the loader.
batch_results = []
for b in loader:
    batch_results.append(net(b['X'], b['A'], b['batch']).detach())
# Sanity check: the second graph (index 1) processed alone must match its
# prediction from the first batch (second row of batch_results[0]).
G1_rep = dset[1]
G1_single = net(G1_rep['X'], G1_rep['A'], G1_rep['batch']).detach()
G1_batch = batch_results[0][1]
torch.all(torch.isclose(G1_single, G1_batch))
# ---
#
# Readers may ignore the next cell.
| [
"torch.cat",
"torch.mm",
"numpy.arange",
"networkx.adjacency_matrix",
"python_environment_check.check_packages",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.zeros",
"torch.manual_seed",
"networkx.draw",
"torch.rand",
"networkx.get_node_attributes",
"numpy.dot",
"torch.sum",
... | [((466, 490), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (481, 490), False, 'import sys\n'), ((615, 632), 'python_environment_check.check_packages', 'check_packages', (['d'], {}), '(d)\n', (629, 632), False, 'from python_environment_check import check_packages\n'), ((2155, 2165), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2163, 2165), True, 'import networkx as nx\n'), ((3040, 3090), 'networkx.draw', 'nx.draw', (['G'], {'with_labels': '(True)', 'node_color': 'color_map'}), '(G, with_labels=True, node_color=color_map)\n', (3047, 3090), True, 'import networkx as nx\n'), ((3133, 3160), 'numpy.random.rand', 'np.random.rand', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (3147, 3160), True, 'import numpy as np\n'), ((3168, 3195), 'numpy.random.rand', 'np.random.rand', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (3182, 3195), True, 'import numpy as np\n'), ((7092, 7102), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7100, 7102), True, 'import networkx as nx\n'), ((7327, 7337), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7335, 7337), True, 'import networkx as nx\n'), ((7603, 7613), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7611, 7613), True, 'import networkx as nx\n'), ((7941, 7951), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7949, 7951), True, 'import networkx as nx\n'), ((8583, 8655), 'torch.utils.data.DataLoader', 'DataLoader', (['dset'], {'batch_size': '(2)', 'shuffle': '(False)', 'collate_fn': 'collate_graphs'}), '(dset, batch_size=2, shuffle=False, collate_fn=collate_graphs)\n', (8593, 8655), False, 'from torch.utils.data import DataLoader\n'), ((8709, 8731), 'torch.manual_seed', 'torch.manual_seed', (['(123)'], {}), '(123)\n', (8726, 8731), False, 'import torch\n'), ((3201, 3215), 'numpy.dot', 'np.dot', (['X', 'W_1'], {}), '(X, W_1)\n', (3207, 3215), True, 'import numpy as np\n'), ((5839, 5888), 'torch.cat', 'torch.cat', (["[graph['X'] for graph in batch]"], {'dim': '(0)'}), "([graph['X'] for graph 
in batch], dim=0)\n", (5848, 5888), False, 'import torch\n'), ((5922, 5971), 'torch.cat', 'torch.cat', (["[graph['y'] for graph in batch]"], {'dim': '(0)'}), "([graph['y'] for graph in batch], dim=0)\n", (5931, 5971), False, 'import torch\n'), ((6021, 6075), 'torch.zeros', 'torch.zeros', (['[tot_size, tot_size]'], {'dtype': 'torch.float32'}), '([tot_size, tot_size], dtype=torch.float32)\n', (6032, 6075), False, 'import torch\n'), ((9019, 9053), 'torch.isclose', 'torch.isclose', (['G1_single', 'G1_batch'], {}), '(G1_single, G1_batch)\n', (9032, 9053), False, 'import torch\n'), ((2996, 3030), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""color"""'], {}), "(G, 'color')\n", (3018, 3030), True, 'import networkx as nx\n'), ((3224, 3236), 'numpy.dot', 'np.dot', (['A', 'X'], {}), '(A, X)\n', (3230, 3236), True, 'import numpy as np\n'), ((3601, 3624), 'torch.nn.Linear', 'torch.nn.Linear', (['(32)', '(16)'], {}), '(32, 16)\n', (3616, 3624), False, 'import torch\n'), ((3650, 3672), 'torch.nn.Linear', 'torch.nn.Linear', (['(16)', '(2)'], {}), '(16, 2)\n', (3665, 3672), False, 'import torch\n'), ((3932, 3956), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (3941, 3956), True, 'import torch.nn.functional as F\n'), ((4613, 4633), 'torch.mm', 'torch.mm', (['X', 'self.W2'], {}), '(X, self.W2)\n', (4621, 4633), False, 'import torch\n'), ((4660, 4687), 'torch.mm', 'torch.mm', (['A', 'potential_msgs'], {}), '(A, potential_msgs)\n', (4668, 4687), False, 'import torch\n'), ((4710, 4730), 'torch.mm', 'torch.mm', (['X', 'self.W1'], {}), '(X, self.W1)\n', (4718, 4730), False, 'import torch\n'), ((5044, 5066), 'torch.mm', 'torch.mm', (['batch_mat', 'X'], {}), '(batch_mat, X)\n', (5052, 5066), False, 'import torch\n'), ((2488, 2510), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G'], {}), '(G)\n', (2507, 2510), True, 'import networkx as nx\n'), ((2817, 2845), 'numpy.arange', 'np.arange', (['one_hot_idxs.size'], 
{}), '(one_hot_idxs.size)\n', (2826, 2845), True, 'import numpy as np\n'), ((4268, 4328), 'torch.rand', 'torch.rand', (['(in_channels, out_channels)'], {'dtype': 'torch.float32'}), '((in_channels, out_channels), dtype=torch.float32)\n', (4278, 4328), False, 'import torch\n'), ((4372, 4432), 'torch.rand', 'torch.rand', (['(in_channels, out_channels)'], {'dtype': 'torch.float32'}), '((in_channels, out_channels), dtype=torch.float32)\n', (4382, 4432), False, 'import torch\n'), ((4488, 4534), 'torch.zeros', 'torch.zeros', (['out_channels'], {'dtype': 'torch.float32'}), '(out_channels, dtype=torch.float32)\n', (4499, 4534), False, 'import torch\n'), ((5342, 5376), 'torch.zeros', 'torch.zeros', (['[batch_size, tot_len]'], {}), '([batch_size, tot_len])\n', (5353, 5376), False, 'import torch\n'), ((6857, 6879), 'torch.tensor', 'torch.tensor', (['[[1, 0]]'], {}), '([[1, 0]])\n', (6869, 6879), False, 'import torch\n'), ((4986, 5005), 'torch.sum', 'torch.sum', (['X'], {'dim': '(0)'}), '(X, dim=0)\n', (4995, 5005), False, 'import torch\n'), ((2679, 2713), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""color"""'], {}), "(G, 'color')\n", (2701, 2713), True, 'import networkx as nx\n'), ((6563, 6585), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G'], {}), '(G)\n', (6582, 6585), True, 'import networkx as nx\n')] |
# coding=utf-8
# 导入自己的函数包d2lzh_pytorch,注意要先将目标包的父路径添加到系统路径中
import sys
sys.path.append(r".")
from d2lzh_pytorch import train, plot
import numpy as np
import torch
import math
"""
这一节重新开始详细介绍和实验梯度下降相关的算法
"""
# 写一个一维的梯度下降函数进行测试,这里假定目标函数是x**2,因此导数是2*x
# 这里的eta是一个比较小的值,也就是学习率,代表了往梯度方向移动的步伐大小
def gd(eta, x_init=10, num_epochs=10):
    """Run 1-D gradient descent on f(x) = x**2 (derivative 2*x).

    Parameters
    ----------
    eta : float
        Learning rate (step size along the negative gradient).
    x_init : float, optional
        Starting point (default 10, as in the original example).
    num_epochs : int, optional
        Number of descent steps (default 10).

    Returns
    -------
    list of float
        The trajectory [x_init, x_1, ..., x_num_epochs].
    """
    x = x_init
    results = [x]
    for _ in range(num_epochs):
        # Gradient step: x <- x - eta * f'(x) with f'(x) = 2*x.
        x -= eta * 2 * x
        # Record the trajectory of x for later plotting.
        results.append(x)
    # The epoch count is no longer hard-coded to 10 in the message.
    print('epoch %d, x: %s' % (num_epochs, x))
    return results
res = gd(0.2)
# Plot the descent trajectory of x.
def show_trace(res):
    # Size of the plotting window.
    n = max(abs(min(res)), abs(max(res)), 10)
    # x-axis sampling used to draw the objective curve.
    f_line = np.arange(-n, n, 0.1)
    plot.set_figsize()
    # Draw the curve of x**2.
    plot.plt.plot(f_line, [x * x for x in f_line])
    # Draw the recorded descent trajectory.
    plot.plt.plot(res, [x * x for x in res], '-o')
    plot.plt.xlabel('x')
    plot.plt.ylabel('f(x)')
    plot.plt.show()
show_trace(res)
print('————————————————————————————')
# eta, which controls the gradient-descent step size, is the learning rate.
# It usually has to be set manually at the start; different learning rates
# lead to different behavior.
# A learning rate that is too small makes the descent very slow.
show_trace(gd(0.05))
# A learning rate that is too large causes oscillation.
show_trace(gd(1.1))
print('————————————————————————————')
# For the multi-dimensional form of gradient descent, partial derivatives
# are needed. The direct partials give the rate of change along each axis;
# the directional derivative gives the rate of change along an arbitrary
# unit direction u. Since (gradient . u) = |gradient| * cos(angle between
# gradient and u), cos = -1 (the negative gradient direction) descends
# fastest. The update rule is again x <- x - eta * grad f(x). The 2-D
# gradient descent helper train_2d(trainer) and the trajectory plotter
# show_trace_2d(f, results) are provided by d2lzh_pytorch.
eta = 0.1
def f_2d(x1, x2):
    """Objective function: f(x1, x2) = x1**2 + 2 * x2**2."""
    isotropic_part = x1 ** 2
    anisotropic_part = 2 * x2 ** 2
    return isotropic_part + anisotropic_part
def gd_2d(x1, x2, s1, s2):
    """One gradient-descent step on f_2d; s1/s2 are unused state slots
    kept for compatibility with the train_2d trainer interface."""
    step_x1 = x1 - eta * 2 * x1
    step_x2 = x2 - eta * 4 * x2
    return (step_x1, step_x2, 0, 0)
# Run plain 2-D gradient descent and plot its trajectory.
plot.show_trace_2d(f_2d, train.train_2d(gd_2d))
print('————————————————————————————')
# Test of stochastic gradient descent: since SGD samples a subset to
# estimate the overall gradient, the gradient estimate is inexact.
# Normally distributed random noise is used here to emulate the erroneous
# gradients of stochastic gradient descent.
# NOTE(review): np.random.normal(0.1) draws with loc=0.1, i.e. mean 0.1,
# not mean 0 as the original comment claimed — confirm intent.
# Although the path jitters, it still reaches the target, and the
# iterations are much faster.
def sgd_2d(x1, x2, s1, s2):
    """One stochastic-gradient step on f_2d: the true partial derivatives
    are perturbed with normally distributed noise to emulate SGD's
    inexact gradient estimates; s1/s2 are unused trainer-state slots."""
    noisy_g1 = 2 * x1 + np.random.normal(0.1)
    noisy_g2 = 4 * x2 + np.random.normal(0.1)
    return (x1 - eta * noisy_g1, x2 - eta * noisy_g2, 0, 0)
# Run stochastic gradient descent on the same 2-D objective and plot it.
plot.show_trace_2d(f_2d, train.train_2d(sgd_2d))
print('————————————————————————————')
| [
"sys.path.append",
"d2lzh_pytorch.plot.set_figsize",
"d2lzh_pytorch.plot.plt.show",
"d2lzh_pytorch.train.train_2d",
"d2lzh_pytorch.plot.plt.plot",
"d2lzh_pytorch.plot.plt.ylabel",
"numpy.arange",
"numpy.random.normal",
"d2lzh_pytorch.plot.plt.xlabel"
] | [((72, 92), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (87, 92), False, 'import sys\n'), ((645, 666), 'numpy.arange', 'np.arange', (['(-n)', 'n', '(0.1)'], {}), '(-n, n, 0.1)\n', (654, 666), True, 'import numpy as np\n'), ((671, 689), 'd2lzh_pytorch.plot.set_figsize', 'plot.set_figsize', ([], {}), '()\n', (687, 689), False, 'from d2lzh_pytorch import train, plot\n'), ((709, 757), 'd2lzh_pytorch.plot.plt.plot', 'plot.plt.plot', (['f_line', '[(x * x) for x in f_line]'], {}), '(f_line, [(x * x) for x in f_line])\n', (722, 757), False, 'from d2lzh_pytorch import train, plot\n'), ((772, 820), 'd2lzh_pytorch.plot.plt.plot', 'plot.plt.plot', (['res', '[(x * x) for x in res]', '"""-o"""'], {}), "(res, [(x * x) for x in res], '-o')\n", (785, 820), False, 'from d2lzh_pytorch import train, plot\n'), ((823, 843), 'd2lzh_pytorch.plot.plt.xlabel', 'plot.plt.xlabel', (['"""x"""'], {}), "('x')\n", (838, 843), False, 'from d2lzh_pytorch import train, plot\n'), ((848, 871), 'd2lzh_pytorch.plot.plt.ylabel', 'plot.plt.ylabel', (['"""f(x)"""'], {}), "('f(x)')\n", (863, 871), False, 'from d2lzh_pytorch import train, plot\n'), ((876, 891), 'd2lzh_pytorch.plot.plt.show', 'plot.plt.show', ([], {}), '()\n', (889, 891), False, 'from d2lzh_pytorch import train, plot\n'), ((1507, 1528), 'd2lzh_pytorch.train.train_2d', 'train.train_2d', (['gd_2d'], {}), '(gd_2d)\n', (1521, 1528), False, 'from d2lzh_pytorch import train, plot\n'), ((1857, 1879), 'd2lzh_pytorch.train.train_2d', 'train.train_2d', (['sgd_2d'], {}), '(sgd_2d)\n', (1871, 1879), False, 'from d2lzh_pytorch import train, plot\n'), ((1743, 1764), 'numpy.random.normal', 'np.random.normal', (['(0.1)'], {}), '(0.1)\n', (1759, 1764), True, 'import numpy as np\n'), ((1800, 1821), 'numpy.random.normal', 'np.random.normal', (['(0.1)'], {}), '(0.1)\n', (1816, 1821), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" For each measurement configuration, the sensitivity distribution and the
center of mass of its values is computed.
Then for all measurements sensitivities and centers of mass are plotted in the
grid. This might give a better overview on the sensitivities of our measurement
configuration.
Different weights for the sensitivities can be used (--weight):
- 0: unweighted,
- 1: abs,
- 2: log10,
- 3: square,
by invoking the command line options.
Use sens_center_plot.py -h for help or take a look at the tests provided in
TESTS/sens_center_plot.
Examples
--------
Plot center plot, and single measurement sensitivities: ::
sens_center_plot.py --elem elem.dat --elec elec.dat --config config.dat -c
Disable plots: ::
sens_center_plot.py --no_plot
Use alternative weighting functions:
sens_center_plot.py --weight 0
sens_center_plot.py --weight 1
sens_center_plot.py --weight 2
sens_center_plot.py --weight 3
"""
import crtomo.mpl
plt, mpl = crtomo.mpl.setup()
from optparse import OptionParser
import numpy as np
import shutil
import crtomo.grid as CRGrid
import crtomo.cfg as CRcfg
# import crlab_py.elem as elem
# import crlab_py.CRMod as CRMod
def handle_cmd_options():
    """Parse the command line options of this tool.

    Returns
    -------
    options : optparse.Values
        The parsed option values; positional arguments are discarded.
    """
    parser = OptionParser()
    parser.add_option(
        "-e", "--elem",
        dest="elem_file",
        type="string",
        help="elem.dat file (default: elem.dat)",
        default="elem.dat"
    )
    parser.add_option(
        "-t", "--elec",
        dest="elec_file",
        type="string",
        help="elec.dat file (default: elec.dat)",
        default="elec.dat"
    )
    parser.add_option(
        "--config", dest="config_file",
        type="string",
        help="config.dat file (default: config.dat)",
        default="config.dat"
    )
    parser.add_option(
        "-i", "--use_first_line",
        action="store_true",
        dest="use_first_line",
        default=False,
        help="Normally the first line of the config file is " +
        "ignored, but if set to True, it will be used. " +
        "Default: False"
    )
    parser.add_option(
        '-s', "--sink",
        dest="sink",
        type="int",
        help="Fictitious sink node nr, implies 2D mode",
        default=None
    )
    parser.add_option(
        "--data", dest="data_file",
        type="string",
        help="Data file (default: volt.dat)",
        default='volt.dat'
    )
    parser.add_option(
        "-f", "--frequency",
        dest="frequency",
        type="int",
        help="Frequency/Column in volt.dat, starting from 0 " +
        "(default: 2)",
        default=2
    )
    parser.add_option(
        "-o", "--output",
        dest="output_file",
        type="string",
        help="Output file (plot) (default: sens_center.png)",
        default='sens_center.png'
    )
    parser.add_option(
        "--cblabel", dest="cblabel",
        type="string",
        help=r"ColorbarLabel (default: $Data$)",
        default=r'$Data$'
    )
    parser.add_option(
        "--label",
        dest="label",
        type="string",
        # fixed help text: the default is '$ $', not "none"
        help=r"Label (default: '$ $')",
        default=r'$ $'
    )
    parser.add_option(
        "-w", "--weight",
        dest="weight_int",
        type="int",
        # fixed help text typo: "2 -log10" -> "2 - log10"
        help="Choose the weights used : 0 - unweighted, 1 - " +
        "abs, 2 - log10, 3 - sqrt",
        default=0
    )
    parser.add_option(
        "-c", "--plot_configurations",
        action="store_true",
        dest="plot_configurations",
        default=False,
        help="Plots every configuration sensitivity center in " +
        "a single file. Default: False"
    )
    parser.add_option(
        "--no_plot",
        action="store_true",
        dest="no_plot",
        default=False,
        help="Do not create center plot (only text output)"
    )
    (options, args) = parser.parse_args()
    return options
class sens_center:
    """Compute and plot sensitivity distributions and their centers of mass
    for a set of measurement configurations.

    CRMod is invoked to produce one sensitivity file per configuration; the
    (optionally weighted) center of mass of each distribution is then
    computed and can be plotted on the grid.

    NOTE(review): the plotting and modeling methods rely on ``elem``,
    ``CRMod`` and ``mpl_get_cb_bound_next_to_plot``, whose imports are
    commented out at the top of this file (``crlab_py``) — confirm they are
    available at runtime before using those methods.
    """

    def __init__(self, elem_file, elec_file, options, weight):
        # weight selects the center-of-mass weighting scheme:
        # 0 - unweighted, 1 - abs, 2 - log10(abs), 3 - sqrt(abs)
        self.options = options
        self.elem_file = elem_file
        self.elec_file = elec_file
        self.weight = weight
        # Filled in by the caller before plotting (see main()).
        self.cblabel = None
        self.output_file = None
        self.grid = CRGrid.crt_grid(elem_file, elec_file)

    def plot_single_configuration(self, config_nr, sens_file):
        """
        plot sensitivity distribution with center of mass for
        a single configuration. The electrodes used are colored.

        Parameters
        ----------
        config_nr: int
            number of configuration
        sens_file: string, file path
            filename to sensitvity file
        """
        indices = elem.load_column_file_to_elements_advanced(
            sens_file, [2, 3],
            False,
            False
        )

        elem.plt_opt.title = ''
        elem.plt_opt.reverse = True
        elem.plt_opt.cbmin = -1
        elem.plt_opt.cbmax = 1
        elem.plt_opt.cblabel = r'fill'
        elem.plt_opt.xlabel = 'x (m)'
        elem.plt_opt.ylabel = 'z (m)'

        fig = plt.figure(figsize=(5, 7))
        ax = fig.add_subplot(111)
        ax, pm, cb = elem.plot_element_data_to_ax(
            indices[0],
            ax,
            scale='asinh',
            no_cb=False,
        )
        # Mark the sensitivity center of this configuration with a star.
        ax.scatter(
            self.sens_centers[config_nr, 0],
            self.sens_centers[config_nr, 1],
            marker='*',
            s=50,
            color='w',
            edgecolors='w',
        )
        self.color_electrodes(config_nr, ax)

        # Output: derive the image name from the sensitivity file number.
        sensf = sens_file.split('sens')[-1]
        sensf = sensf.split('.')[0]
        out = 'sens_center_' + sensf + '.png'
        fig.savefig(out, bbox_inches='tight', dpi=300)
        fig.clf()
        plt.close(fig)

    def plot_sens_center(self, frequency=2):
        """
        plot sensitivity center distribution for all configurations in
        config.dat. The centers of mass are colored by the data given in
        volt_file.
        """
        try:
            colors = np.loadtxt(self.volt_file, skiprows=1)
        except IOError:
            # Fixed: previously referenced an undefined local name
            # 'volt_file', which raised a NameError instead of the message.
            print('IOError opening {0}'.format(self.volt_file))
            exit()

        # check for 1-dimensionality
        if(len(colors.shape) > 1):
            print('Artificial or Multi frequency data')
            colors = colors[:, frequency].flatten()
            colors = colors[~np.isnan(colors)]

        elem.load_elem_file(self.elem_file)
        elem.load_elec_file(self.elec_file)

        nr_elements = len(elem.element_type_list[0])
        elem.element_data = np.zeros((nr_elements, 1)) * np.nan

        elem.plt_opt.title = ' '
        elem.plt_opt.reverse = True
        elem.plt_opt.cbmin = -1
        elem.plt_opt.cbmax = 1
        elem.plt_opt.cblabel = self.cblabel
        elem.plt_opt.xlabel = 'x (m)'
        elem.plt_opt.ylabel = 'z (m)'

        fig = plt.figure(figsize=(5, 7))
        ax = fig.add_subplot(111)
        ax, pm, cb = elem.plot_element_data_to_ax(0, ax, scale='linear',
                                                 no_cb=True)
        ax.scatter(self.sens_centers[:, 0], self.sens_centers[:, 1], c=colors,
                   s=100, edgecolors='none')

        cb_pos = mpl_get_cb_bound_next_to_plot(ax)
        ax1 = fig.add_axes(cb_pos, frame_on=True)
        cmap = mpl.cm.jet_r
        norm = mpl.colors.Normalize(vmin=np.nanmin(colors),
                                    vmax=np.nanmax(colors))
        mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
                                  orientation='vertical')

        fig.savefig(self.output_file, bbox_inches='tight', dpi=300)

    def color_electrodes(self, config_nr, ax):
        """
        Color the electrodes used in a specific configuration.
        Voltage electrodes are yellow, current electrodes are red.
        """
        # Use the already loaded (and NaN-filtered) configurations; the
        # original re-read the config file from disk into a variable that
        # was immediately overwritten.
        electrodes = self.configs[~np.isnan(self.configs).any(1)]
        electrodes = electrodes.astype(int)
        conf = []
        for dim in range(0, electrodes.shape[1]):
            c = electrodes[config_nr, dim]
            # Decode the AABB electrode numbering c = A * 10000 + B.
            # Integer floor division fixes the original np.round() decoding,
            # which mis-assigned electrode A whenever B >= 5000.
            a = int(c) // 10000 - 1
            b = int(c) % 10000 - 1
            conf.append(a)
            conf.append(b)

        Ex, Ez = elem.get_electrodes()
        color = ['#ffed00', '#ffed00', '#ff0000', '#ff0000']
        ax.scatter(Ex[conf], Ez[conf], c=color, marker='s', s=60,
                   clip_on=False, edgecolors='k')

    def compute_sens(self, elem_file, elec_file, configs):
        """
        Compute the sensitivities for the given input data.
        A CRMod instance is called to create the sensitivity files.

        Returns
        -------
        (sens_files, volt_file, temp_dir)
            Paths to the sensitivity files, the modeled voltages, and the
            temporary working directory created by CRMod.
        """
        CRMod_config = CRMod.config()

        # activate 2D mode and set sink nr
        if self.options.sink is not None:
            print('2D mode with sink {0}'.format(self.options.sink))
            CRMod_config['2D'] = 0
            CRMod_config['fictitious_sink'] = 'T'
            CRMod_config['sink_node'] = self.options.sink

        CRMod_config['write_sens'] = 'T'
        CRMod_instance = CRMod.CRMod(CRMod_config)
        CRMod_instance.elemfile = elem_file
        CRMod_instance.elecfile = elec_file
        CRMod_instance.configdata = configs

        # Homogeneous forward model: constant resistivity everywhere.
        resistivity = 100

        # get number of elements from the elem file header
        fid = open(elem_file, 'r')
        fid.readline()
        elements = int(fid.readline().strip().split()[1])
        fid.close()

        # create rho.dat file
        rhodata = '{0}\n'.format(elements)
        for i in range(0, elements):
            rhodata += '{0} 0\n'.format(resistivity)
        CRMod_instance.rhodata = rhodata

        CRMod_instance.run_in_tempdir()
        volt_file = CRMod_instance.volt_file
        sens_files = CRMod_instance.sens_files
        return sens_files, volt_file, CRMod_instance.temp_dir

    def compute_center_of_mass(self, filename):
        """
        Compute the center of mass of a sensitivity file written by CRMod.

        The file contains one header line followed by rows of
        ``x z sensitivity``. The weighting scheme is chosen by
        ``self.weight``: 0 - raw values, 1 - abs, 2 - log10(abs),
        3 - sqrt(abs).

        Returns
        -------
        (x_center, z_center) : tuple of float

        Raises
        ------
        ValueError
            For an unknown weighting scheme (the original code hit a
            NameError instead).
        """
        sens = np.loadtxt(filename, skiprows=1)
        X = sens[:, 0]
        Z = sens[:, 1]
        C = sens[:, 2]

        if self.weight == 0:
            weights = C
        elif self.weight == 1:
            weights = np.abs(C)
        elif self.weight == 2:
            weights = np.log10(np.abs(C))
        elif self.weight == 3:
            weights = np.sqrt(np.abs(C))
        else:
            raise ValueError(
                'unknown weighting scheme: {0}'.format(self.weight))

        # Vectorized weighted mean replaces the original per-row loop.
        sens_sum = np.sum(weights)
        x_center = np.sum(X * weights) / sens_sum
        z_center = np.sum(Z * weights) / sens_sum

        return (x_center, z_center)

    def get_configs(self, filename, use_first_line):
        """Load the measurement configurations from `filename`.

        The first line is skipped unless `use_first_line` is True. NaN rows
        are removed. (Fixed: the original ignored both parameters and read
        the global `options` object instead.)
        """
        skiprows = 0 if use_first_line else 1
        configs = np.loadtxt(filename, skiprows=skiprows)
        configs = configs[~np.isnan(configs).any(1)]  # remove nans
        self.configs = configs

    def get_sens_centers(self, sens_files):
        """Compute the center of mass for each sensitivity file and store
        the result as an (N, 2) array in self.sens_centers."""
        center = [self.compute_center_of_mass(sens_f)
                  for sens_f in sens_files]
        self.sens_centers = np.array(center)

    def compute_sensitivity_centers(self):
        """Run CRMod for all configurations and derive the sensitivity
        centers; results are kept in self.sens_centers."""
        sens_files, volt_file, temp_dir = self.compute_sens(self.elem_file,
                                                            self.elec_file,
                                                            self.configs)
        self.sens_files = sens_files
        self.temp_dir = temp_dir
        self.volt_file = volt_file
        self.get_sens_centers(sens_files)

    def plot_sensitivities_to_file(self):
        """Create one sensitivity plot per configuration."""
        for k, sens_f in enumerate(self.sens_files):
            # Fixed: a space was missing in the progress message.
            print('Plotting ' + sens_f)
            self.plot_single_configuration(k, sens_f)

    def remove_tmp_dir(self, directory):
        """
        Remove the directory if it is located in /tmp (safety guard
        against deleting arbitrary paths).
        """
        if not directory.startswith('/tmp/'):
            print('Directory not in /tmp')
            exit()
        print('Deleting directory: ' + directory)
        shutil.rmtree(directory)

    def clean(self):
        """Delete the temporary CRMod working directory."""
        self.remove_tmp_dir(self.temp_dir)
def main():
    """Entry point: compute sensitivity centers and create the requested
    plots and text output."""
    opts = handle_cmd_options()
    # Build the worker object and hand over the plot settings.
    worker = sens_center(
        opts.elem_file,
        opts.elec_file,
        opts,
        weight=opts.weight_int,
    )
    worker.cblabel = opts.cblabel
    worker.output_file = opts.output_file
    # Load the configurations, run CRMod and determine the centers of mass.
    worker.get_configs(opts.config_file, opts.use_first_line)
    worker.compute_sensitivity_centers()
    np.savetxt('center.dat', worker.sens_centers)
    if not opts.no_plot:
        print('Creating center plot')
        worker.plot_sens_center(opts.frequency)
    if opts.plot_configurations:
        print('Plotting single configuration sensitivities')
        worker.plot_sensitivities_to_file()
    # Remove the temporary CRMod working directory.
    worker.clean()


if __name__ == '__main__':
    main()
| [
"numpy.abs",
"optparse.OptionParser",
"numpy.savetxt",
"numpy.zeros",
"numpy.isnan",
"numpy.nanmin",
"numpy.mod",
"crtomo.grid.crt_grid",
"numpy.array",
"numpy.loadtxt",
"shutil.rmtree",
"numpy.round",
"numpy.nanmax"
] | [((1286, 1300), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1298, 1300), False, 'from optparse import OptionParser\n'), ((13135, 13184), 'numpy.savetxt', 'np.savetxt', (['"""center.dat"""', 'center_obj.sens_centers'], {}), "('center.dat', center_obj.sens_centers)\n", (13145, 13184), True, 'import numpy as np\n'), ((4218, 4255), 'crtomo.grid.crt_grid', 'CRGrid.crt_grid', (['elem_file', 'elec_file'], {}), '(elem_file, elec_file)\n', (4233, 4255), True, 'import crtomo.grid as CRGrid\n'), ((7851, 7894), 'numpy.loadtxt', 'np.loadtxt', (['options.config_file'], {'skiprows': '(1)'}), '(options.config_file, skiprows=1)\n', (7861, 7894), True, 'import numpy as np\n'), ((10100, 10132), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(1)'}), '(filename, skiprows=1)\n', (10110, 10132), True, 'import numpy as np\n'), ((11221, 11271), 'numpy.loadtxt', 'np.loadtxt', (['options.config_file'], {'skiprows': 'skiprows'}), '(options.config_file, skiprows=skiprows)\n', (11231, 11271), True, 'import numpy as np\n'), ((11569, 11585), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (11577, 11585), True, 'import numpy as np\n'), ((12641, 12665), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (12654, 12665), False, 'import shutil\n'), ((6030, 6068), 'numpy.loadtxt', 'np.loadtxt', (['self.volt_file'], {'skiprows': '(1)'}), '(self.volt_file, skiprows=1)\n', (6040, 6068), True, 'import numpy as np\n'), ((6566, 6592), 'numpy.zeros', 'np.zeros', (['(nr_elements, 1)'], {}), '((nr_elements, 1))\n', (6574, 6592), True, 'import numpy as np\n'), ((6378, 6394), 'numpy.isnan', 'np.isnan', (['colors'], {}), '(colors)\n', (6386, 6394), True, 'import numpy as np\n'), ((7361, 7378), 'numpy.nanmin', 'np.nanmin', (['colors'], {}), '(colors)\n', (7370, 7378), True, 'import numpy as np\n'), ((7421, 7438), 'numpy.nanmax', 'np.nanmax', (['colors'], {}), '(colors)\n', (7430, 7438), True, 'import numpy as np\n'), ((8168, 8187), 'numpy.round', 
'np.round', (['(c / 10000)'], {}), '(c / 10000)\n', (8176, 8187), True, 'import numpy as np\n'), ((8208, 8224), 'numpy.mod', 'np.mod', (['c', '(10000)'], {}), '(c, 10000)\n', (8214, 8224), True, 'import numpy as np\n'), ((10539, 10551), 'numpy.abs', 'np.abs', (['C[i]'], {}), '(C[i])\n', (10545, 10551), True, 'import numpy as np\n'), ((10640, 10652), 'numpy.abs', 'np.abs', (['C[i]'], {}), '(C[i])\n', (10646, 10652), True, 'import numpy as np\n'), ((10740, 10752), 'numpy.abs', 'np.abs', (['C[i]'], {}), '(C[i])\n', (10746, 10752), True, 'import numpy as np\n'), ((7930, 7952), 'numpy.isnan', 'np.isnan', (['self.configs'], {}), '(self.configs)\n', (7938, 7952), True, 'import numpy as np\n'), ((11299, 11316), 'numpy.isnan', 'np.isnan', (['configs'], {}), '(configs)\n', (11307, 11316), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
from gazebo_msgs.srv import GetModelState, ApplyBodyWrenchRequest, ApplyBodyWrench, ApplyBodyWrenchResponse
from sub8_gazebo.srv import SetTurbulence
from mil_ros_tools import msg_helpers
import numpy as np
class Turbulizor():
    # ROS node helper that periodically applies random wrenches to the sub's
    # base link in Gazebo to emulate underwater turbulence. Magnitude and
    # frequency can be changed at runtime via the 'gazebo/set_turbulence'
    # service. NOTE: the constructor blocks inside turbuloop() until ROS
    # shutdown.

    def __init__(self, mag, freq):
        # mag: wrench magnitude, freq: how often (Hz) a new random wrench
        # direction is drawn.
        rospy.wait_for_service('/gazebo/apply_body_wrench')
        self.set_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
        # NOTE(review): get_model is created but never used in this class.
        self.get_model = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        self.turbulence_mag = mag
        self.turbulence_freq = freq
        self.reset_srv = rospy.Service('gazebo/set_turbulence', SetTurbulence, self.set_turbulence)

        # Wait for all the models and such to spawn.
        rospy.sleep(3)
        rospy.loginfo("Starting Turbulence.")
        self.turbuloop()

    def set_turbulence(self, srv):
        # Service callback: update magnitude/frequency from the request.
        # NOTE(review): the service type is SetTurbulence but an
        # ApplyBodyWrenchResponse is returned — presumably this should be
        # the SetTurbulence response type; confirm against the .srv file.
        self.turbulence_mag = srv.magnitude
        self.turbulence_freq = srv.frequency
        return ApplyBodyWrenchResponse()

    def turbuloop(self):
        '''
        The idea is to create a smooth application of force to emulate underwater motion.

        The Turbuloop applies a wrench with a magnitude that varies like a squared function with zeros on both sides
        so that there are no sudden changes in the force.
        '''
        model_name = 'sub8::base_link'

        # Used to gently apply a force on the sub, time_step times per 1 / freq
        time_step = 5.0

        while not rospy.is_shutdown():
            turbulence_mag_step = self.turbulence_mag / time_step
            sleep_step = 1 / (self.turbulence_freq * time_step)

            # Random unit vector scaled by the desired magnitude
            f = np.random.uniform(-1, 1, size=3) * self.turbulence_mag
            r = np.random.uniform(-1, 1, size=3)
            r[:2] = 0  # C3 doesn't like variation in x or y rotation :(

            for i in range(int(time_step)):
                # Square function: -(x - a/2)^2 + (a/2)^2
                # ramps the multiplier up and back down over the sub-steps.
                mag_multiplier = -((i - time_step / 2) ** 2 - (time_step / 2) ** 2 - 1) * turbulence_mag_step

                # Create service call
                body_wrench = ApplyBodyWrenchRequest()
                body_wrench.body_name = model_name
                body_wrench.reference_frame = model_name
                body_wrench.wrench = msg_helpers.make_wrench_stamped(f * mag_multiplier, r * mag_multiplier).wrench
                body_wrench.start_time = rospy.Time()
                body_wrench.duration = rospy.Duration(sleep_step)

                # rospy.loginfo("{}: Wrench Applied: {}".format(i, body_wrench.wrench))
                self.set_wrench(body_wrench)
                rospy.sleep(sleep_step)
if __name__ == '__main__':
    rospy.init_node('turbulator')
    # Default turbulence: magnitude 5, frequency 0.5 Hz.
    # NOTE(review): Turbulizor.__init__ calls turbuloop(), which loops until
    # ROS shutdown — rospy.spin() below is only reached once the node is
    # already shutting down.
    t = Turbulizor(5, .5)
    rospy.spin()
| [
"numpy.random.uniform",
"rospy.ServiceProxy",
"rospy.Time",
"rospy.sleep",
"rospy.wait_for_service",
"rospy.loginfo",
"mil_ros_tools.msg_helpers.make_wrench_stamped",
"rospy.is_shutdown",
"rospy.init_node",
"gazebo_msgs.srv.ApplyBodyWrenchRequest",
"gazebo_msgs.srv.ApplyBodyWrenchResponse",
"r... | [((2778, 2807), 'rospy.init_node', 'rospy.init_node', (['"""turbulator"""'], {}), "('turbulator')\n", (2793, 2807), False, 'import rospy\n'), ((2838, 2850), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2848, 2850), False, 'import rospy\n'), ((309, 360), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/gazebo/apply_body_wrench"""'], {}), "('/gazebo/apply_body_wrench')\n", (331, 360), False, 'import rospy\n'), ((387, 451), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/apply_body_wrench"""', 'ApplyBodyWrench'], {}), "('/gazebo/apply_body_wrench', ApplyBodyWrench)\n", (405, 451), False, 'import rospy\n'), ((477, 537), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/get_model_state"""', 'GetModelState'], {}), "('/gazebo/get_model_state', GetModelState)\n", (495, 537), False, 'import rospy\n'), ((635, 709), 'rospy.Service', 'rospy.Service', (['"""gazebo/set_turbulence"""', 'SetTurbulence', 'self.set_turbulence'], {}), "('gazebo/set_turbulence', SetTurbulence, self.set_turbulence)\n", (648, 709), False, 'import rospy\n'), ((772, 786), 'rospy.sleep', 'rospy.sleep', (['(3)'], {}), '(3)\n', (783, 786), False, 'import rospy\n'), ((796, 833), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting Turbulence."""'], {}), "('Starting Turbulence.')\n", (809, 833), False, 'import rospy\n'), ((1000, 1025), 'gazebo_msgs.srv.ApplyBodyWrenchResponse', 'ApplyBodyWrenchResponse', ([], {}), '()\n', (1023, 1025), False, 'from gazebo_msgs.srv import GetModelState, ApplyBodyWrenchRequest, ApplyBodyWrench, ApplyBodyWrenchResponse\n'), ((1509, 1528), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1526, 1528), False, 'import rospy\n'), ((1813, 1845), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), '(-1, 1, size=3)\n', (1830, 1845), True, 'import numpy as np\n'), ((1742, 1774), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), '(-1, 1, size=3)\n', (1759, 1774), True, 'import numpy as 
np\n'), ((2201, 2225), 'gazebo_msgs.srv.ApplyBodyWrenchRequest', 'ApplyBodyWrenchRequest', ([], {}), '()\n', (2223, 2225), False, 'from gazebo_msgs.srv import GetModelState, ApplyBodyWrenchRequest, ApplyBodyWrench, ApplyBodyWrenchResponse\n'), ((2491, 2503), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (2501, 2503), False, 'import rospy\n'), ((2543, 2569), 'rospy.Duration', 'rospy.Duration', (['sleep_step'], {}), '(sleep_step)\n', (2557, 2569), False, 'import rospy\n'), ((2722, 2745), 'rospy.sleep', 'rospy.sleep', (['sleep_step'], {}), '(sleep_step)\n', (2733, 2745), False, 'import rospy\n'), ((2371, 2442), 'mil_ros_tools.msg_helpers.make_wrench_stamped', 'msg_helpers.make_wrench_stamped', (['(f * mag_multiplier)', '(r * mag_multiplier)'], {}), '(f * mag_multiplier, r * mag_multiplier)\n', (2402, 2442), False, 'from mil_ros_tools import msg_helpers\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Module test_measured_model - Contains the unit tests for the classes
in the datamodels.miri_measured_model module.
:History:
15 Jan 2013: Created.
21 Jan 2013: Warning messages controlled with Python warnings module.
05 Feb 2013: File closing problem solved by using "with" context manager.
08 Feb 2013: Replaced 'to_fits' with more generic 'save' method.
23 Apr 2013: Modified to keep up with behaviour of jwst_lib model.
Uninitialised arrays now have the same size and shape as the
data array but are full of default values.
26 Apr 2013: File closing problem has returned!
13 May 2013: Added MiriSlopeModel to describe MIRI slope data
(which is different from "ImageModel" data because it
preserves integrations). N.B. FINAL MODEL IS TBD.
04 Jun 2013: Shortened the names of the ramp, slope and image models.
10 Jun 2013: Added more metadata tests.
02 Jul 2013: MiriCubeModel added.
29 Jul 2013: stats() method added.
14 Aug 2013: Updated ramp model test to include groupdq and pixeldq
02 Sep 2013: Compare numpy record arrays in a way that it independent
of the byte ordering.
12 Sep 2013: Swapped the MRS CHANNEL and BAND keywords.
12 Sep 2013: Test that the data product can be copied successfully.
04 Oct 2013: Changed default field_def table to use MIRI reserved flags.
07 Oct 2013: GROUP_DEF table added to MIRI ramp data. Test MiriRampModel
for masking and arithmetic operations.
24 Feb 2014: Instrument name (INSTRUME) changed from meta.instrument.type to
meta.instrument.name.
27 Feb 2014: Added extra data arrays to MiriSlopeModel test.
04 Mar 2014: Added set_housekeeping_metadata.
25 Jun 2014: field_def and group_def changed to dq_def and groupdq_def.
field_def for ramp data changed to pixeldq_def.
21 Jul 2014: IM, and LW detectors changed to MIRIMAGE and MIRIFULONG.
25 Sep 2014: Updated the reference flags. insert_value_column function
used to convert between 3 column and 4 column flag tables.
TYPE and REFTYPE are no longer identical.
07 Nov 2014: The data model now raises an IOError when an invalid file
path is provided.
11 Mar 2015: group_integration_time changed to group_time.
11 Jun 2015: Added a history record test.
09 Jul 2015: Reference output array (refout) added to MiriRampModel schema.
19 Aug 2015: Removed MiriImageModel and MiriCubeModel.
07 Oct 2015: Made exception catching Python 3 compatible.
08 Apr 2016: Removed obsolete FIXME statements.
04 May 2016: ERR array removed from ramp data model.
31 Aug 2016: Change exception detected when creating a data model with an
invalid initialiser.
15 Jun 2017: Observation and target metadata is appropriate for ramp and
slope data only.
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
13 Sep 2017: Updated "not a file name" test to match the new behaviour of
JWST pipeline version 0.7.8rc2
27 Apr 2018: Corrected bug in get_history() length test.
27 Jun 2018: Removed unused arrays.
15 Feb 2018: Check that the DQ_DEF table has the correct fieldnames.
04 Oct 2019: Removed pixeldq_def and groupdq_def tables completely.
Added test for group table in ramp data.
07 Oct 2019: FIXME: dq_def removed from unit tests until data corruption
bug fixed (Bug 589).
12 Feb 2020: Reinstated the array broadcasting test.
15 Sep 2021: added dq_def back to unit tests after data corruption bug was
fixed (MIRI-1156).
@author: <NAME> (UKATC)
"""
import os
import unittest
import warnings
import numpy as np
# Import the JWST master data quality flag definitions
from miri.datamodels.dqflags import master_flags, pixeldq_flags, \
groupdq_flags
from miri.datamodels.miri_measured_model import MiriMeasuredModel, \
MiriRampModel, MiriSlopeModel
from miri.datamodels.tests.util import assert_recarray_equal, \
assert_products_equal
class TestMiriMeasuredModel(unittest.TestCase):
    """Unit tests for the MiriMeasuredModel class (SCI + ERR + DQ arrays)."""
    def setUp(self):
        """Build a simple 64x64 product and a small fully-populated product."""
        # Create a 64x64 simple MiriMeasuredModel object, with no error
        # or quality arrays.
        self.data = np.linspace(0.0, 100000.0, 64*64)
        self.data.shape = [64,64]
        self.simpleproduct = MiriMeasuredModel(data=self.data)
        # Add some example metadata.
        self.simpleproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                     'V1.0')
        self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',
                                                   filt='F560W',
                                                   ccc_pos='OPEN',
                                                   deck_temperature=10.0,
                                                   detector_temperature=7.0)
        self.simpleproduct.set_exposure_metadata(readpatt='SLOW',
                                                 nints=1, ngroups=10,
                                                 frame_time=30.0,
                                                 integration_time=30.0,
                                                 group_time=300.0,
                                                 reset_time=0, frame_resets=3)
        # Create a more complex MiriMeasuredModel object from primary,
        # error and quality arrays.
        self.primary = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        self.error = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        self.quality = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.dataproduct = MiriMeasuredModel(data=self.primary,
                                             err=self.error,
                                             dq=self.quality,
                                             dq_def=master_flags)
        # Add some example metadata.
        self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',
                                                 channel='1',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='FAST',
                                               nints=1, ngroups=1,
                                               frame_time=1.0,
                                               integration_time=10.0,
                                               group_time=10.0,
                                               reset_time=0, frame_resets=3)
        self.testfile1 = "MiriMeasuredModel_test1.fits"
        self.testfile2 = "MiriMeasuredModel_test2.fits"
        self.tempfiles = [self.testfile1, self.testfile2]
    def tearDown(self):
        """Release the test products and remove any temporary FITS files."""
        # Tidy up
        del self.dataproduct
        del self.primary, self.error, self.quality
        del self.simpleproduct
        del self.data
        # Remove temporary files, if they exist and if able to.
        for tempfile in self.tempfiles:
            if os.path.isfile(tempfile):
                try:
                    os.remove(tempfile)
                except Exception as e:
                    strg = "Could not remove temporary file, " + tempfile + \
                        "\n " + str(e)
                    warnings.warn(strg)
        del self.tempfiles
    def test_creation(self):
        """Test the various ways of constructing a MiriMeasuredModel."""
        # Check that the DQ_DEF field names in the class variable are the same
        # as the ones declared in the schema.
        dq_def_names = list(MiriMeasuredModel.dq_def_names)
        schema_names = list(self.dataproduct.get_field_names('dq_def'))
        self.assertEqual(dq_def_names, schema_names,
                         "'dq_def_names' class variable does not match schema")
        # Test that the error and quality arrays are optional.
        a2 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        b2 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        c2 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        # 1) Data array only. Data array must exist and be non-empty.
        #    Other arrays should exist and be the same size and shape as the
        #    data array. They should be full of default values.
        newdp1 = MiriMeasuredModel(data=a2)
        self.assertIsNotNone(newdp1.data)
        self.assertGreater(len(newdp1.data), 0)
        self.assertIsNotNone(newdp1.err)
        self.assertEqual(newdp1.err.shape, newdp1.data.shape)
        # Assumes default is 0.0 - see schema
        self.assertAlmostEqual(np.mean(newdp1.err), 0.0)
        self.assertIsNotNone(newdp1.dq)
        # BUG FIX: previously compared newdp1.dq.shape against itself,
        # which always passed. The dq array must match the data shape.
        self.assertEqual(newdp1.dq.shape, newdp1.data.shape)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp1.dq), 0)
        descr1 = str(newdp1)
        self.assertIsNotNone(descr1)
        del newdp1, descr1
        # 2) Data and error arrays only. Data and error arrays must exist
        #    and be non-empty. Quality array should exist but be the same
        #    size and shape as the data array. It should be full of default
        #    values.
        newdp2 = MiriMeasuredModel(data=a2, err=b2)
        self.assertIsNotNone(newdp2.data)
        self.assertGreater(len(newdp2.data), 0)
        self.assertIsNotNone(newdp2.err)
        self.assertEqual(newdp2.err.shape, newdp2.data.shape)
        # The error array must not be full of default values.
        self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)
        self.assertIsNotNone(newdp2.dq)
        # BUG FIX: compare dq shape against the data shape, not itself.
        self.assertEqual(newdp2.dq.shape, newdp2.data.shape)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp2.dq), 0)
        descr2 = str(newdp2)
        self.assertIsNotNone(descr2)
        del newdp2, descr2
        # 3) Data, error and quality arrays. All arrays must exist,
        #    be non-empty and be the same size and shape.
        newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        self.assertIsNotNone(newdp3.data)
        self.assertGreater(len(newdp3.data), 0)
        self.assertIsNotNone(newdp3.err)
        self.assertEqual(newdp3.err.shape, newdp3.data.shape)
        # The error array must not be full of default values.
        self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)
        self.assertIsNotNone(newdp3.dq)
        # BUG FIX: compare dq shape against the data shape, not itself.
        self.assertEqual(newdp3.dq.shape, newdp3.data.shape)
        # The quality array must not be full of default values.
        self.assertNotEqual(np.mean(newdp3.dq), 0)
        descr3 = str(newdp3)
        self.assertIsNotNone(descr3)
        del newdp3, descr3
        # It should be possible to set up an empty data product with
        # a specified shape. All three arrays should be initialised to
        # the same shape.
        emptydp = MiriMeasuredModel( (4,4) )
        self.assertIsNotNone(emptydp.data)
        self.assertEqual(emptydp.data.shape, (4,4))
        self.assertIsNotNone(emptydp.err)
        self.assertEqual(emptydp.err.shape, (4,4))
        self.assertIsNotNone(emptydp.dq)
        self.assertEqual(emptydp.dq.shape, (4,4))
        descr = str(emptydp)
        self.assertIsNotNone(descr)
        del emptydp, descr
        # A null data product can also be created and populated
        # with data later.
        nulldp = MiriMeasuredModel( )
        descr1 = str(nulldp)
        self.assertIsNotNone(descr1)
        nulldp.data = np.asarray(a2)
        self.assertIsNotNone(nulldp.err)
        self.assertIsNotNone(nulldp.dq)
        descr2 = str(nulldp)
        self.assertIsNotNone(descr2)
        del nulldp, descr1, descr2
        # A scalar data product is possible, even if of little use.
        scalardp = MiriMeasuredModel( data=42 )
        self.assertEqual(scalardp.data, 42)
        self.assertIsNotNone(scalardp.err)
        self.assertIsNotNone(scalardp.dq)
        descr = str(scalardp)
        self.assertIsNotNone(descr)
        del scalardp, descr
        # Attempts to create a data product from invalid data types
        # and stupid values must be detected.
        # NOTE: A bug in the JWST data model might cause an AttributeError
        # to be raised instead of a ValueError. If this happens, try a newer
        # version of the JWST data model library.
        self.assertRaises(ValueError, MiriMeasuredModel, init=[])
        self.assertRaises(ValueError, MiriMeasuredModel, init=42)
        self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name')
        self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')
        #self.assertRaises(ValueError, MiriMeasuredModel, init='')
        self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')
    def test_metadata(self):
        """Test FITS keyword lookup, metadata access and history records."""
        # Check the dataproducts contain metadata
        # First test the basic STScI FITS keyword lookup method.
        kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',
                                                      return_result=True)
        self.assertIsNotNone(kwstrg)
        # kwstrg is a list - assume the first entry is what we want.
        telname = self.simpleproduct[kwstrg[0]]
        self.assertEqual(telname, 'JWST')
        # Accessing the tree structure directly should also work.
        telname = self.simpleproduct.meta.telescope
        self.assertEqual(telname, 'JWST')
        # An alternative lookup provided by the MIRI data model.
        telname = self.simpleproduct.get_fits_keyword('TELESCOP')
        self.assertEqual(telname, 'JWST')
        kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',
                                                      return_result=True)
        self.assertIsNotNone(kwstrg)
        insname = self.simpleproduct[kwstrg[0]]
        self.assertEqual(insname, 'MIRI')
        insname = self.simpleproduct.meta.instrument.name
        self.assertEqual(insname, 'MIRI')
        insname = self.simpleproduct.get_fits_keyword('INSTRUME')
        self.assertEqual(insname, 'MIRI')
        # Add some history records and check they exist.
        self.simpleproduct.add_history('History 1')
        self.simpleproduct.add_history('History 2')
        self.simpleproduct.add_history('History 3')
        self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)
        strg = self.simpleproduct.get_history_str()
        self.assertIsNotNone(strg)
        self.assertGreater(len(strg), 0)
    def test_content(self):
        """Test the data/err/dq attribute aliases for the main arrays."""
        # The data, err and dq attributes are aliases for the primary,
        # error and quality arrays
        self.assertTrue( np.allclose(self.primary, self.dataproduct.data) )
        self.assertTrue( np.allclose(self.error, self.dataproduct.err) )
        self.assertTrue( np.allclose(self.quality, self.dataproduct.dq) )
    def test_copy(self):
        """Test that a deep copy of the data product preserves its content."""
        # Test that a copy can be made of the data product.
        datacopy = self.dataproduct.copy()
        self.assertIsNotNone(datacopy)
        assert_products_equal( self, self.dataproduct, datacopy,
                               arrays=['data', 'err', 'dq'],
                               tables='dq_def' )
        del datacopy
    def test_fitsio(self):
        """Test FITS write/read round trips for both data products."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data products can be written to a FITS
            # file and read back again without changing the data.
            self.simpleproduct.save(self.testfile1, overwrite=True)
            with MiriMeasuredModel(self.testfile1) as readback:
                self.assertTrue( np.allclose(self.simpleproduct.data,
                                             readback.data) )
                del readback
            self.dataproduct.save(self.testfile2, overwrite=True)
            with MiriMeasuredModel(self.testfile2) as readback:
                assert_products_equal(self, self.dataproduct, readback, arrays=['data', 'err', 'dq'], tables='dq_def')
                del readback
    def test_asciiio(self):
        """Placeholder: ASCII I/O round trip (jwst_lib only supports FITS)."""
        # Check that the data products can be written to an ASCII
        # file and read back again without changing the data.
        # TODO: At the moment jwst_lib only supports FITS I/O
        pass
#         # Suppress metadata warnings
#         with warnings.catch_warnings():
#             warnings.simplefilter("ignore")
#             self.simpleproduct.save(self.testfile_ascii, overwrite=True)
#             with MiriMeasuredModel(self.testfile_ascii) as readback:
#                 self.assertTrue( np.allclose(self.simpleproduct.data,
#                                              readback.data) )
#                 del readback
    def test_masking(self):
        """Test that the DQ array masks bad values in the SCI/ERR arrays."""
        # The DQ array must mask off bad values in the SCI and ERR arrays.
        a2 = [[10,999,10,999], [999,10,10,999], [10,10,999,10]]
        b2 = [[1,99,1,99], [99,1,1,99], [1,1,99,1]]
        c2 = [[0,1,0,1], [1,0,0,1], [0,0,1,0]]
        # Without a DQ array (assuming the default quality value is 0)
        # the SCI and ERR arrays are not masked, so their averages
        # include the 999s and are greater than they ought to be.
        newdp = MiriMeasuredModel(data=a2, err=b2)
        meandata = np.mean(newdp.data_masked)
        self.assertGreater(meandata, 10)
        meanerr = np.mean(newdp.err_masked)
        self.assertGreater(meanerr, 1)
        # The addition of the quality data should cause the SCI and ERR
        # arrays to be masked off and give the correct average.
        newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        meandata2 = np.mean(newdp2.data_masked)
        self.assertAlmostEqual(meandata2, 10)
        meanerr2 = np.mean(newdp2.err_masked)
        self.assertAlmostEqual(meanerr2, 1)
        del newdp, newdp2
    def test_arithmetic(self):
        """Test arithmetic operators and error propagation on data products.

        NOTE(review): several checks below use assertEqual(a.all(), b.all()),
        which only compares the two boolean reductions rather than the arrays
        element-by-element — consider np.allclose in a future revision.
        """
        a2 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        c2 = [[0,1,1,0],[0,2,0,2],[1,0,1,0]]
        newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        # Self-subtraction of the simple product. The result
        # should be zero.
        newsimple = self.simpleproduct - self.simpleproduct
        self.assertAlmostEqual(newsimple.data.all(), 0.0)
        del newsimple
        # Scalar addition
        result = self.dataproduct + 42
        test1 = self.dataproduct.data + 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product addition
        result = self.dataproduct + newdp
        test1 = self.dataproduct.data + newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Scalar subtraction
        result = self.dataproduct - 42
        test1 = self.dataproduct.data - 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product subtraction
        result = self.dataproduct - newdp
        test1 = self.dataproduct.data - newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Addition and subtraction should cancel each other out
        result = self.dataproduct + newdp - newdp
        test1 = self.dataproduct.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Scalar multiplication
        result = self.dataproduct * 3
        test1 = self.dataproduct.data * 3
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product multiplication
        result = self.dataproduct * newdp
        test1 = self.dataproduct.data * newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        err1 = self.dataproduct.err
        da1 = self.dataproduct.data
        err2 = newdp.err
        da2 = newdp.data
        # Standard error propagation for a product: sqrt((e1*d2)^2 + (e2*d1)^2)
        expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)
        self.assertTrue(np.array_equal(expectedErr, result.err))
        del result, da1, da2, err1, err2, expectedErr
        # Scalar division
        result = self.dataproduct / 3.0
        test1 = self.dataproduct.data / 3.0
        test2 = result.data
        self.assertAlmostEqual(test1.all(), test2.all())
        del test1, test2, result
        # Division by zero
        self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)
        # Data product division
        #print("NOTE: The following test is expected to generate run time warnings.")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = self.dataproduct / newdp
        test1 = self.dataproduct.data / newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test Juergen Schreiber error propagation
        dat = self.dataproduct.data[1][1]
        newdat = newdp.data[1][1]
        resultErr = result.err[1][1]
        dpErr = self.dataproduct.err[1][1]
        newdpErr = newdp.err[1][1]
        # Standard error propagation for a quotient.
        expectErr = np.sqrt( dpErr * dpErr/(newdat * newdat) + \
                             newdpErr * newdpErr * dat * dat / \
                             (newdat * newdat * newdat * newdat))
        self.assertEqual(expectErr, resultErr)
        del test1, test2, result
        # More complex arithmetic should be possible.
        newdp2 = newdp * 2
        newdp3 = newdp * 3
        newdp4 = newdp2 + newdp3
        result = ((self.dataproduct - newdp) * newdp2 / newdp3) + newdp4
        del newdp, newdp2, newdp3, newdp4
        del result
    def test_broadcasting(self):
        """Test array broadcasting in construction and arithmetic."""
        # Test that operations where the broadcasting of one array
        # onto a similar shaped array work.
        a4x3 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b4x3 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        #c4x3 = [[0,1,0,0],[0,0,1,0],[1,0,0,1]]
        a4x1 = [4,3,2,1]
        b4x1 = [1,2,1,2]
        c4x1 = [0,1,0,0]
        a5x1 = [5,4,3,2,1]
        b5x1 = [1,2,3,2,1]
        c5x1 = [0,1,0,0,1]
        # Create an object with 4x3 primary and error arrays but a 4x1
        # quality array. This should succeed because the quality array
        # is broadcastable.
        newdp1 = MiriMeasuredModel(data=a4x3, err=b4x3, dq=c4x1)
        self.assertTrue( np.allclose(a4x3, newdp1.data) )
        self.assertTrue( np.allclose(b4x3, newdp1.err) )
        self.assertTrue( np.allclose(c4x1, newdp1.dq) )
        # 5x1 is not broadcastable onto 4x3 and this statement should fail.
        self.assertRaises(TypeError, MiriMeasuredModel, data=a4x3,
                          err=b5x1, dq=c5x1)
        # Combine two broadcastable object mathematically.
        # The + and - operations should be commutative and the result
        # should be saveable to a FITS file.
        newdp2 = MiriMeasuredModel(data=a4x1, err=b4x1, dq=c4x1)
        result1 = newdp1 + newdp2
        result2 = newdp2 + newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 * newdp2
        result2 = newdp2 * newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        # The - and / operations are not commutative, but the data shape
        # should be consistent and the quality arrays should be combined
        # in the same way.
        result1 = newdp1 - newdp2
        result2 = newdp2 - newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 / newdp2
        result2 = newdp2 / newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        # The errors resulting from division depend on the order
        # of the operation.
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
    def test_description(self):
        """Test that string/repr/stats descriptions are non-null."""
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.simpleproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.simpleproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.simpleproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI, ERROR and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.err)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
class TestMiriRampModel(unittest.TestCase):
# Most of the necessary tests are already carried out by
# the TestMiriMeasuredModel class.
def setUp(self):
# Create a ramp data product.
# NOTE: A ramp product does not contain an ERR array.
self.a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
self.c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
self.c2 = [[0,1,1,0], [1,0,0,1], [1,0,1,0]]
self.acube = [self.a1,self.a1,self.a1]
self.ccube = [self.c1,self.c2,self.c1]
self.ahyper = [self.acube,self.acube]
self.chyper = [self.ccube,self.ccube]
self.refout = np.ones_like(self.chyper)
self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.refout,
pixeldq=self.c1,
dq_def=pixeldq_flags,
groupdq=self.chyper,
groupdq_def=groupdq_flags)
# Add some example metadata.
self.dataproduct.set_housekeeping_metadata('MIRI EC', '<NAME>',
'V1.0')
self.dataproduct.set_observation_metadata()
self.dataproduct.set_target_metadata(0.0, 0.0)
self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',
channel='1',
ccc_pos='OPEN',
deck_temperature=11.0,
detector_temperature=6.0)
self.dataproduct.set_exposure_metadata(readpatt='FAST',
nints=1, ngroups=1,
frame_time=1.0,
integration_time=10.0,
group_time=10.0,
reset_time=0, frame_resets=3)
self.testfile = "MiriRampModel_test.fits"
def tearDown(self):
# Tidy up
del self.a1, self.c1, self.c2
del self.acube, self.ccube
del self.ahyper, self.chyper
del self.dataproduct
# Remove temporary file, if able to.
if os.path.isfile(self.testfile):
try:
os.remove(self.testfile)
except Exception as e:
strg = "Could not remove temporary file, " + self.testfile + \
"\n " + str(e)
warnings.warn(strg)
def test_creation(self):
# Test that any of the quality arrays are optional.
b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
bcube = [b1,b1,b1]
bhyper = [bcube,bcube]
# 1) Data array only. Data array must exist and be non-empty.
# The quality arrays must be 2-D and 4-D.
# Unspecified arrays must be filled with default values.
newdp1 = MiriRampModel(data=self.ahyper)
self.assertIsNotNone(newdp1.data)
self.assertGreater(len(newdp1.data), 0)
# Assumes default is 0.0 - see schema
self.assertIsNotNone(newdp1.pixeldq)
self.assertTrue(newdp1.pixeldq.ndim == 2)
# Assumes default is 0 - see schema
# FIXME: The pixeldq array ends up containing null values.
#self.assertEqual(np.mean(newdp1.pixeldq), 0)
self.assertIsNotNone(newdp1.groupdq)
self.assertTrue(newdp1.groupdq.ndim == 4)
# Assumes default is 0 - see schema
self.assertEqual(np.mean(newdp1.groupdq), 0)
descr1 = str(newdp1)
del newdp1, descr1
# 2) Data and both quality arrays. All arrays must exist,
# be non-empty and be the shape specified.
newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
groupdq=self.chyper)
self.assertIsNotNone(newdp3.data)
self.assertGreater(len(newdp3.data), 0)
# The pixeldq array must not be full of default values.
self.assertIsNotNone(newdp3.pixeldq)
self.assertTrue(newdp3.pixeldq.ndim == 2)
self.assertNotEqual(np.mean(newdp3.pixeldq), 0)
self.assertIsNotNone(newdp3.groupdq)
self.assertTrue(newdp3.groupdq.ndim == 4)
# The groupdq array must not be full of default values.
self.assertNotEqual(np.mean(newdp3.groupdq), 0)
descr3 = str(newdp3)
del newdp3, descr3
# 3) Data and pixeldq array only. All arrays must exist,
# be non-empty and be the shape specified.
newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)
self.assertIsNotNone(newdp4.data)
self.assertGreater(len(newdp4.data), 0)
# The pixeldq array must not be full of default values.
self.assertIsNotNone(newdp4.pixeldq)
self.assertTrue(newdp4.pixeldq.ndim == 2)
self.assertNotEqual(np.mean(newdp4.pixeldq), 0)
self.assertIsNotNone(newdp4.groupdq)
self.assertTrue(newdp4.groupdq.ndim == 4)
descr4 = str(newdp4)
del newdp4, descr4
# 4) Data and groupdq array only. All arrays must exist,
# be non-empty and be the shape specified.
newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)
self.assertIsNotNone(newdp5.data)
self.assertGreater(len(newdp5.data), 0)
self.assertIsNotNone(newdp5.pixeldq)
self.assertTrue(newdp5.pixeldq.ndim == 2)
# The groupdq array must not be full of default values.
self.assertIsNotNone(newdp5.groupdq)
self.assertTrue(newdp5.groupdq.ndim == 4)
# The groupdq array must not be full of default values.
self.assertNotEqual(np.mean(newdp5.groupdq), 0)
descr5 = str(newdp5)
del newdp5, descr5
# It should be possible to set up an empty data product with
# a specified 4-D shape. Data array should be
# initialised to the same shape.
emptydp = MiriRampModel( (2,2,2,2) )
self.assertIsNotNone(emptydp.data)
self.assertEqual(emptydp.data.shape, (2,2,2,2))
self.assertIsNotNone(emptydp.pixeldq)
#self.assertEqual(emptydp.pixeldq.shape, (2,2))
self.assertIsNotNone(emptydp.groupdq)
self.assertEqual(emptydp.groupdq.shape, (2,2,2,2))
descr = str(emptydp)
self.assertIsNotNone(descr)
del emptydp, descr
# A null data product can also be created and populated
# with data later.
nulldp = MiriRampModel( )
descr1 = str(nulldp)
self.assertIsNotNone(descr1)
nulldp.data = np.asarray(self.ahyper)
self.assertIsNotNone(nulldp.pixeldq)
self.assertIsNotNone(nulldp.groupdq)
descr2 = str(nulldp)
self.assertIsNotNone(descr2)
del nulldp, descr1, descr2
# Creating an object with other than 4 dimensions must fail.
a1d = [10,20,30,40]
c1d = [1,0,0,0]
self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)
a2d = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
c2d = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)
a3d = [a2d, a2d, a2d]
c3d = [c2d, c2d, c2d]
self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)
self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)
# The pixeldq array must be 2-D.
self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
pixeldq=self.ccube)
# The groupdq array must be 4-D.
self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
groupdq=self.c1)
def test_masking(self):
    """Verify that the dq attribute mirrors the selected mask source."""
    # A ramp product must always expose a dq view of its masks.
    self.assertIsNotNone(self.dataproduct.dq)

    # Masking with pixeldq only: dq must be identical to pixeldq.
    pixel_masked = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                                 groupdq=self.chyper, maskwith='pixeldq')
    self.assertIsNotNone(pixel_masked.pixeldq)
    self.assertGreater(len(pixel_masked.pixeldq), 0)
    self.assertIsNotNone(pixel_masked.dq)
    self.assertGreater(len(pixel_masked.dq), 0)
    self.assertEqual(pixel_masked.dq.shape, pixel_masked.pixeldq.shape)
    self.assertTrue(np.all(pixel_masked.dq == pixel_masked.pixeldq))
    del pixel_masked

    # Masking with groupdq only: dq must be identical to groupdq.
    group_masked = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                                 groupdq=self.chyper, maskwith='groupdq')
    self.assertIsNotNone(group_masked.groupdq)
    self.assertGreater(len(group_masked.groupdq), 0)
    self.assertIsNotNone(group_masked.dq)
    self.assertGreater(len(group_masked.dq), 0)
    self.assertEqual(group_masked.dq.shape, group_masked.groupdq.shape)
    self.assertTrue(np.all(group_masked.dq == group_masked.groupdq))
    del group_masked

    # Masking with both: dq takes the groupdq shape and is the
    # bitwise OR of the two masks.
    both_masked = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                                groupdq=self.chyper, maskwith='both')
    self.assertIsNotNone(both_masked.pixeldq)
    self.assertGreater(len(both_masked.pixeldq), 0)
    self.assertIsNotNone(both_masked.groupdq)
    self.assertGreater(len(both_masked.groupdq), 0)
    self.assertIsNotNone(both_masked.dq)
    self.assertGreater(len(both_masked.dq), 0)
    self.assertEqual(both_masked.dq.shape, both_masked.groupdq.shape)
    combined = both_masked.groupdq | both_masked.pixeldq
    self.assertTrue(np.all(both_masked.dq == combined))
    del both_masked
def test_arithmetic(self):
    """Test arithmetic operations specific to the ramp data model.

    The ramp data model supports all the arithmetic operations
    supported by MiriMeasuredModel; only ramp-specific behaviour
    is exercised here.
    """
    # Create a data model in which the DATA and DQ arrays have
    # different shapes.
    testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                          groupdq=self.chyper, maskwith='both')
    descr = str(testdp)
    self.assertIsNotNone(descr)
    del descr
    # Suppress warning about the DQ array being propagated only from GROUPDQ
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Check the product can be scaled by a constant.
        double = testdp * 2.0
        self.assertIsNotNone(double.data)
        self.assertGreater(len(double.data), 0)
        # BUG FIX: the expected array was previously derived from the
        # result itself (double.data * 2.0), which made the assertion
        # vacuous. Compare against the original data scaled by the
        # same factor, using an absolute tolerance.
        expected = testdp.data * 2.0
        self.assertTrue(np.all(np.abs(double.data - expected) < 0.001))
        descr = str(double)
        self.assertIsNotNone(descr)
        del descr
        # When this is combined with another data product, the DATA
        # array is masked with both the pixeldq and groupdq arrays.
        warnings.simplefilter("ignore")
        result = self.dataproduct + testdp
        self.assertIsNotNone(result.data)
        self.assertGreater(len(result.data), 0)
        self.assertIsNotNone(result.dq)
        self.assertGreater(len(result.dq), 0)
        descr = str(result)
        self.assertIsNotNone(descr)
        del descr
def test_fitsio(self):
    """Round-trip the ramp data product through a FITS file."""
    # Metadata warnings are irrelevant to this check.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Writing the product to a FITS file and reading it back
        # must not change any of its arrays or tables.
        self.dataproduct.save(self.testfile, overwrite=True)
        with MiriRampModel(self.testfile) as readback:
            assert_products_equal(self, self.dataproduct, readback,
                                  arrays=['data', 'refout',
                                          'pixeldq', 'groupdq'],
                                  tables=['group'])
            del readback
def test_description(self):
    """Check that query and description functions return non-null text."""
    # Each description source must run without error and produce
    # a non-null result.
    for text in (str(self.dataproduct),
                 repr(self.dataproduct),
                 self.dataproduct.stats()):
        self.assertIsNotNone(text)
        del text
    # The SCI, REFOUT and DQ arrays must be reachable as attributes
    # and must render to non-null strings.
    for attribute in (self.dataproduct.data,
                      self.dataproduct.refout,
                      self.dataproduct.dq):
        text = str(attribute)
        self.assertIsNotNone(text)
        del text
class TestMiriSlopeModel(unittest.TestCase):
    """Unit tests for the MiriSlopeModel data product.

    Most of the necessary tests are already carried out by
    the TestMiriMeasuredModel class; only slope-specific
    behaviour is exercised here.
    """
    def setUp(self):
        # Create a slope data product from small 3-D cubes of
        # science (acube), error (bcube), quality (ccube) and
        # auxiliary (dcube) values.
        a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        acube = [a1,a1,a1]
        bcube = [b1,b1,b1]
        ccube = [c1,c1,c1]
        dcube = [a1,b1,a1]
        self.dataproduct = MiriSlopeModel(data=acube, err=bcube,
                                          dq=ccube, dq_def=master_flags,
                                          zeropt=dcube, fiterr=dcube)
        # Add some example metadata so the product resembles a
        # realistic observation.
        self.dataproduct.set_housekeeping_metadata('MIRI EC', '<NAME>',
                                                   'V1.0')
        self.dataproduct.set_observation_metadata()
        self.dataproduct.set_target_metadata(0.0, 0.0)
        self.dataproduct.set_instrument_metadata(detector='MIRIMAGE',
                                                 filt='F2550W',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='SLOW',
                                               nints=3, ngroups=10,
                                               frame_time=1.0,
                                               integration_time=100.0,
                                               group_time=1000.0,
                                               reset_time=0, frame_resets=3)
        # File name used by the FITS round-trip test.
        self.testfile = "MiriSlopeModel_test.fits"

    def tearDown(self):
        # Tidy up the data product created in setUp.
        del self.dataproduct
        # Remove temporary file, if able to. Failure to delete is
        # not fatal for the test run; a warning is issued instead.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_creation(self):
        # Creating an object with other than 3 dimensions must fail.
        # 1-D input is rejected.
        a1d = [10,20,30,40]
        b1d = [1,2,3,4]
        c1d = [1,0,0,0]
        self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d,
                          dq=c1d)
        # 2-D input is rejected.
        a2d = [a1d, a1d, a1d]
        b2d = [b1d, b1d, b1d]
        c2d = [c1d, c1d, c1d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d,
                          dq=c2d)
        # 4-D input (built from valid 3-D cubes) is also rejected.
        a3d = [a2d, a2d]
        b3d = [b2d, b2d]
        c3d = [c2d, c2d]
        a4d = [a3d, a3d]
        b4d = [b3d, b3d]
        c4d = [c3d, c3d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d,
                          dq=c4d)

    def test_copy(self):
        # Test that a copy can be made of the data product and that
        # all of its arrays and tables are preserved by the copy.
        datacopy = self.dataproduct.copy()
        self.assertIsNotNone(datacopy)
        assert_products_equal(self, self.dataproduct, datacopy,
                              arrays=['data', 'err', 'dq',
                                      'nreads', 'readsat', 'ngoodseg',
                                      'zeropt', 'fiterr'],
                              tables='dq_def')
        del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings, which are irrelevant here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriSlopeModel(self.testfile) as readback:
                assert_products_equal(self, self.dataproduct, readback,
                                      arrays=['data', 'err', 'dq',
                                              'nreads', 'readsat', 'ngoodseg',
                                              'zeropt', 'fiterr'],
                                      tables='dq_def')
                del readback

    def test_description(self):
        # Test that the querying and description functions work.
        # For this test to pass these only need to run without error.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
# If being run as a main program, run the tests.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"miri.datamodels.miri_measured_model.MiriRampModel",
"os.remove",
"numpy.ones_like",
"miri.datamodels.miri_measured_model.MiriMeasuredModel",
"warnings.simplefilter",
"numpy.asarray",
"numpy.allclose",
"numpy.all",
"os.path.isfile",
"numpy.mean",
"warnings.catch_warnings",
"... | [((45407, 45422), 'unittest.main', 'unittest.main', ([], {}), '()\n', (45420, 45422), False, 'import unittest\n'), ((4238, 4273), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100000.0)', '(64 * 64)'], {}), '(0.0, 100000.0, 64 * 64)\n', (4249, 4273), True, 'import numpy as np\n'), ((4335, 4368), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'self.data'}), '(data=self.data)\n', (4352, 4368), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((5668, 5762), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'self.primary', 'err': 'self.error', 'dq': 'self.quality', 'dq_def': 'master_flags'}), '(data=self.primary, err=self.error, dq=self.quality,\n dq_def=master_flags)\n', (5685, 5762), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((8374, 8400), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2'}), '(data=a2)\n', (8391, 8400), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((9243, 9277), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2', 'err': 'b2'}), '(data=a2, err=b2)\n', (9260, 9277), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((10018, 10059), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2', 'err': 'b2', 'dq': 'c2'}), '(data=a2, err=b2, dq=c2)\n', (10035, 10059), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((10875, 10900), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', (['(4, 4)'], {}), '((4, 4))\n', (10892, 10900), False, 'from miri.datamodels.miri_measured_model import 
MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((11390, 11409), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {}), '()\n', (11407, 11409), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((11499, 11513), 'numpy.asarray', 'np.asarray', (['a2'], {}), '(a2)\n', (11509, 11513), True, 'import numpy as np\n'), ((11792, 11818), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': '(42)'}), '(data=42)\n', (11809, 11818), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((15062, 15168), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.dataproduct', 'datacopy'], {'arrays': "['data', 'err', 'dq']", 'tables': '"""dq_def"""'}), "(self, self.dataproduct, datacopy, arrays=['data',\n 'err', 'dq'], tables='dq_def')\n", (15083, 15168), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((17294, 17328), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2', 'err': 'b2'}), '(data=a2, err=b2)\n', (17311, 17328), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((17348, 17374), 'numpy.mean', 'np.mean', (['newdp.data_masked'], {}), '(newdp.data_masked)\n', (17355, 17374), True, 'import numpy as np\n'), ((17434, 17459), 'numpy.mean', 'np.mean', (['newdp.err_masked'], {}), '(newdp.err_masked)\n', (17441, 17459), True, 'import numpy as np\n'), ((17661, 17702), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2', 'err': 'b2', 'dq': 'c2'}), '(data=a2, err=b2, dq=c2)\n', (17678, 17702), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((17723, 17750), 'numpy.mean', 'np.mean', 
(['newdp2.data_masked'], {}), '(newdp2.data_masked)\n', (17730, 17750), True, 'import numpy as np\n'), ((17816, 17842), 'numpy.mean', 'np.mean', (['newdp2.err_masked'], {}), '(newdp2.err_masked)\n', (17823, 17842), True, 'import numpy as np\n'), ((18121, 18162), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a2', 'err': 'b2', 'dq': 'c2'}), '(data=a2, err=b2, dq=c2)\n', (18138, 18162), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((21050, 21108), 'numpy.sqrt', 'np.sqrt', (['(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)'], {}), '(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)\n', (21057, 21108), True, 'import numpy as np\n'), ((23527, 23574), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a4x3', 'err': 'b4x3', 'dq': 'c4x1'}), '(data=a4x3, err=b4x3, dq=c4x1)\n', (23544, 23574), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((24135, 24182), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', ([], {'data': 'a4x1', 'err': 'b4x1', 'dq': 'c4x1'}), '(data=a4x1, err=b4x1, dq=c4x1)\n', (24152, 24182), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((27924, 27949), 'numpy.ones_like', 'np.ones_like', (['self.chyper'], {}), '(self.chyper)\n', (27936, 27949), True, 'import numpy as np\n'), ((27977, 28120), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'refout': 'self.refout', 'pixeldq': 'self.c1', 'dq_def': 'pixeldq_flags', 'groupdq': 'self.chyper', 'groupdq_def': 'groupdq_flags'}), '(data=self.ahyper, refout=self.refout, pixeldq=self.c1, dq_def\n =pixeldq_flags, groupdq=self.chyper, groupdq_def=groupdq_flags)\n', (27990, 28120), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, 
MiriRampModel, MiriSlopeModel\n'), ((29602, 29631), 'os.path.isfile', 'os.path.isfile', (['self.testfile'], {}), '(self.testfile)\n', (29616, 29631), False, 'import os\n'), ((30304, 30335), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper'}), '(data=self.ahyper)\n', (30317, 30335), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((31115, 31184), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1', 'groupdq': 'self.chyper'}), '(data=self.ahyper, pixeldq=self.c1, groupdq=self.chyper)\n', (31128, 31184), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((31926, 31974), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1'}), '(data=self.ahyper, pixeldq=self.c1)\n', (31939, 31974), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((32565, 32617), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'groupdq': 'self.chyper'}), '(data=self.ahyper, groupdq=self.chyper)\n', (32578, 32617), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((33321, 33348), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', (['(2, 2, 2, 2)'], {}), '((2, 2, 2, 2))\n', (33334, 33348), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((33863, 33878), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {}), '()\n', (33876, 33878), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((33968, 33991), 'numpy.asarray', 'np.asarray', (['self.ahyper'], {}), '(self.ahyper)\n', 
(33978, 33991), True, 'import numpy as np\n'), ((35440, 35533), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1', 'groupdq': 'self.chyper', 'maskwith': '"""pixeldq"""'}), "(data=self.ahyper, pixeldq=self.c1, groupdq=self.chyper,\n maskwith='pixeldq')\n", (35453, 35533), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((36010, 36103), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1', 'groupdq': 'self.chyper', 'maskwith': '"""groupdq"""'}), "(data=self.ahyper, pixeldq=self.c1, groupdq=self.chyper,\n maskwith='groupdq')\n", (36023, 36103), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((36654, 36744), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1', 'groupdq': 'self.chyper', 'maskwith': '"""both"""'}), "(data=self.ahyper, pixeldq=self.c1, groupdq=self.chyper,\n maskwith='both')\n", (36667, 36744), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((37563, 37653), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', ([], {'data': 'self.ahyper', 'pixeldq': 'self.c1', 'groupdq': 'self.chyper', 'maskwith': '"""both"""'}), "(data=self.ahyper, pixeldq=self.c1, groupdq=self.chyper,\n maskwith='both')\n", (37576, 37653), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((40920, 41021), 'miri.datamodels.miri_measured_model.MiriSlopeModel', 'MiriSlopeModel', ([], {'data': 'acube', 'err': 'bcube', 'dq': 'ccube', 'dq_def': 'master_flags', 'zeropt': 'dcube', 'fiterr': 'dcube'}), '(data=acube, err=bcube, dq=ccube, dq_def=master_flags, zeropt\n =dcube, fiterr=dcube)\n', (40934, 41021), False, 'from 
miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((42318, 42347), 'os.path.isfile', 'os.path.isfile', (['self.testfile'], {}), '(self.testfile)\n', (42332, 42347), False, 'import os\n'), ((43508, 43671), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.dataproduct', 'datacopy'], {'arrays': "['data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt', 'fiterr']", 'tables': '"""dq_def"""'}), "(self, self.dataproduct, datacopy, arrays=['data',\n 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt', 'fiterr'],\n tables='dq_def')\n", (43529, 43671), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((7156, 7180), 'os.path.isfile', 'os.path.isfile', (['tempfile'], {}), '(tempfile)\n', (7170, 7180), False, 'import os\n'), ((8671, 8690), 'numpy.mean', 'np.mean', (['newdp1.err'], {}), '(newdp1.err)\n', (8678, 8690), True, 'import numpy as np\n'), ((8865, 8883), 'numpy.mean', 'np.mean', (['newdp1.dq'], {}), '(newdp1.dq)\n', (8872, 8883), True, 'import numpy as np\n'), ((9567, 9586), 'numpy.mean', 'np.mean', (['newdp2.err'], {}), '(newdp2.err)\n', (9574, 9586), True, 'import numpy as np\n'), ((9761, 9779), 'numpy.mean', 'np.mean', (['newdp2.dq'], {}), '(newdp2.dq)\n', (9768, 9779), True, 'import numpy as np\n'), ((10349, 10368), 'numpy.mean', 'np.mean', (['newdp3.err'], {}), '(newdp3.err)\n', (10356, 10368), True, 'import numpy as np\n'), ((10566, 10584), 'numpy.mean', 'np.mean', (['newdp3.dq'], {}), '(newdp3.dq)\n', (10573, 10584), True, 'import numpy as np\n'), ((14688, 14736), 'numpy.allclose', 'np.allclose', (['self.primary', 'self.dataproduct.data'], {}), '(self.primary, self.dataproduct.data)\n', (14699, 14736), True, 'import numpy as np\n'), ((14764, 14809), 'numpy.allclose', 'np.allclose', (['self.error', 'self.dataproduct.err'], {}), '(self.error, self.dataproduct.err)\n', (14775, 14809), True, 'import numpy as np\n'), 
((14837, 14883), 'numpy.allclose', 'np.allclose', (['self.quality', 'self.dataproduct.dq'], {}), '(self.quality, self.dataproduct.dq)\n', (14848, 14883), True, 'import numpy as np\n'), ((15328, 15353), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15351, 15353), False, 'import warnings\n'), ((15367, 15398), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (15388, 15398), False, 'import warnings\n'), ((21133, 21172), 'numpy.array_equal', 'np.array_equal', (['expectedErr', 'result.err'], {}), '(expectedErr, result.err)\n', (21147, 21172), True, 'import numpy as np\n'), ((21707, 21732), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21730, 21732), False, 'import warnings\n'), ((21746, 21777), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (21767, 21777), False, 'import warnings\n'), ((22256, 22374), 'numpy.sqrt', 'np.sqrt', (['(dpErr * dpErr / (newdat * newdat) + newdpErr * newdpErr * dat * dat / (\n newdat * newdat * newdat * newdat))'], {}), '(dpErr * dpErr / (newdat * newdat) + newdpErr * newdpErr * dat * dat /\n (newdat * newdat * newdat * newdat))\n', (22263, 22374), True, 'import numpy as np\n'), ((23600, 23630), 'numpy.allclose', 'np.allclose', (['a4x3', 'newdp1.data'], {}), '(a4x3, newdp1.data)\n', (23611, 23630), True, 'import numpy as np\n'), ((23658, 23687), 'numpy.allclose', 'np.allclose', (['b4x3', 'newdp1.err'], {}), '(b4x3, newdp1.err)\n', (23669, 23687), True, 'import numpy as np\n'), ((23715, 23743), 'numpy.allclose', 'np.allclose', (['c4x1', 'newdp1.dq'], {}), '(c4x1, newdp1.dq)\n', (23726, 23743), True, 'import numpy as np\n'), ((24350, 24389), 'numpy.allclose', 'np.allclose', (['result1.data', 'result2.data'], {}), '(result1.data, result2.data)\n', (24361, 24389), True, 'import numpy as np\n'), ((24417, 24454), 'numpy.allclose', 'np.allclose', (['result1.err', 'result2.err'], {}), '(result1.err, result2.err)\n', 
(24428, 24454), True, 'import numpy as np\n'), ((24482, 24517), 'numpy.allclose', 'np.allclose', (['result1.dq', 'result2.dq'], {}), '(result1.dq, result2.dq)\n', (24493, 24517), True, 'import numpy as np\n'), ((24814, 24853), 'numpy.allclose', 'np.allclose', (['result1.data', 'result2.data'], {}), '(result1.data, result2.data)\n', (24825, 24853), True, 'import numpy as np\n'), ((24881, 24918), 'numpy.allclose', 'np.allclose', (['result1.err', 'result2.err'], {}), '(result1.err, result2.err)\n', (24892, 24918), True, 'import numpy as np\n'), ((24946, 24981), 'numpy.allclose', 'np.allclose', (['result1.dq', 'result2.dq'], {}), '(result1.dq, result2.dq)\n', (24957, 24981), True, 'import numpy as np\n'), ((25451, 25488), 'numpy.allclose', 'np.allclose', (['result1.err', 'result2.err'], {}), '(result1.err, result2.err)\n', (25462, 25488), True, 'import numpy as np\n'), ((25516, 25551), 'numpy.allclose', 'np.allclose', (['result1.dq', 'result2.dq'], {}), '(result1.dq, result2.dq)\n', (25527, 25551), True, 'import numpy as np\n'), ((25941, 25976), 'numpy.allclose', 'np.allclose', (['result1.dq', 'result2.dq'], {}), '(result1.dq, result2.dq)\n', (25952, 25976), True, 'import numpy as np\n'), ((30896, 30919), 'numpy.mean', 'np.mean', (['newdp1.groupdq'], {}), '(newdp1.groupdq)\n', (30903, 30919), True, 'import numpy as np\n'), ((31493, 31516), 'numpy.mean', 'np.mean', (['newdp3.pixeldq'], {}), '(newdp3.pixeldq)\n', (31500, 31516), True, 'import numpy as np\n'), ((31708, 31731), 'numpy.mean', 'np.mean', (['newdp3.groupdq'], {}), '(newdp3.groupdq)\n', (31715, 31731), True, 'import numpy as np\n'), ((32252, 32275), 'numpy.mean', 'np.mean', (['newdp4.pixeldq'], {}), '(newdp4.pixeldq)\n', (32259, 32275), True, 'import numpy as np\n'), ((33054, 33077), 'numpy.mean', 'np.mean', (['newdp5.groupdq'], {}), '(newdp5.groupdq)\n', (33061, 33077), True, 'import numpy as np\n'), ((35824, 35857), 'numpy.all', 'np.all', (['(mask1.dq == mask1.pixeldq)'], {}), '(mask1.dq == 
mask1.pixeldq)\n', (35830, 35857), True, 'import numpy as np\n'), ((36394, 36427), 'numpy.all', 'np.all', (['(mask2.dq == mask2.groupdq)'], {}), '(mask2.dq == mask2.groupdq)\n', (36400, 36427), True, 'import numpy as np\n'), ((37178, 37206), 'numpy.all', 'np.all', (['(mask3.dq == expected)'], {}), '(mask3.dq == expected)\n', (37184, 37206), True, 'import numpy as np\n'), ((37866, 37891), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (37889, 37891), False, 'import warnings\n'), ((37905, 37936), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (37926, 37936), False, 'import warnings\n'), ((38502, 38533), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (38523, 38533), False, 'import warnings\n'), ((38953, 38978), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (38976, 38978), False, 'import warnings\n'), ((38992, 39023), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (39013, 39023), False, 'import warnings\n'), ((43899, 43924), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (43922, 43924), False, 'import warnings\n'), ((43938, 43969), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (43959, 43969), False, 'import warnings\n'), ((15627, 15660), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', (['self.testfile1'], {}), '(self.testfile1)\n', (15644, 15660), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((15919, 15952), 'miri.datamodels.miri_measured_model.MiriMeasuredModel', 'MiriMeasuredModel', (['self.testfile2'], {}), '(self.testfile2)\n', (15936, 15952), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((15982, 16088), 'miri.datamodels.tests.util.assert_products_equal', 
'assert_products_equal', (['self', 'self.dataproduct', 'readback'], {'arrays': "['data', 'err', 'dq']", 'tables': '"""dq_def"""'}), "(self, self.dataproduct, readback, arrays=['data',\n 'err', 'dq'], tables='dq_def')\n", (16003, 16088), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((29666, 29690), 'os.remove', 'os.remove', (['self.testfile'], {}), '(self.testfile)\n', (29675, 29690), False, 'import os\n'), ((38199, 38237), 'numpy.all', 'np.all', (['(double.data - expected < 0.001)'], {}), '(double.data - expected < 0.001)\n', (38205, 38237), True, 'import numpy as np\n'), ((39240, 39268), 'miri.datamodels.miri_measured_model.MiriRampModel', 'MiriRampModel', (['self.testfile'], {}), '(self.testfile)\n', (39253, 39268), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((39298, 39424), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.dataproduct', 'readback'], {'arrays': "['data', 'refout', 'pixeldq', 'groupdq']", 'tables': "['group']"}), "(self, self.dataproduct, readback, arrays=['data',\n 'refout', 'pixeldq', 'groupdq'], tables=['group'])\n", (39319, 39424), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((42382, 42406), 'os.remove', 'os.remove', (['self.testfile'], {}), '(self.testfile)\n', (42391, 42406), False, 'import os\n'), ((44186, 44215), 'miri.datamodels.miri_measured_model.MiriSlopeModel', 'MiriSlopeModel', (['self.testfile'], {}), '(self.testfile)\n', (44200, 44215), False, 'from miri.datamodels.miri_measured_model import MiriMeasuredModel, MiriRampModel, MiriSlopeModel\n'), ((44245, 44408), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.dataproduct', 'readback'], {'arrays': "['data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt', 'fiterr']", 'tables': '"""dq_def"""'}), "(self, self.dataproduct, 
readback, arrays=['data',\n 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt', 'fiterr'],\n tables='dq_def')\n", (44266, 44408), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((7223, 7242), 'os.remove', 'os.remove', (['tempfile'], {}), '(tempfile)\n', (7232, 7242), False, 'import os\n'), ((15707, 15758), 'numpy.allclose', 'np.allclose', (['self.simpleproduct.data', 'readback.data'], {}), '(self.simpleproduct.data, readback.data)\n', (15718, 15758), True, 'import numpy as np\n'), ((29858, 29877), 'warnings.warn', 'warnings.warn', (['strg'], {}), '(strg)\n', (29871, 29877), False, 'import warnings\n'), ((42574, 42593), 'warnings.warn', 'warnings.warn', (['strg'], {}), '(strg)\n', (42587, 42593), False, 'import warnings\n'), ((7421, 7440), 'warnings.warn', 'warnings.warn', (['strg'], {}), '(strg)\n', (7434, 7440), False, 'import warnings\n')] |
# This file is used to run the CIFAR and KITTI experiments easily with different hyper paramters
# Note that the MNIST experiment has its own runner since no hyper parameter exploration was used
from training_classification import train as train_c
from training_classification import getModel as model_c
from training_segmentation import train as train_s
from training_segmentation import getModel as model_s
from quaternion_layers.utils import Params
import click
import numpy as np
import keras
import os
# Fix the NumPy seed so experiment runs are reproducible.
np.random.seed(314)

@click.command()
@click.argument('task')
@click.option('--mode', default='quaternion', help='value type of model (real, complex, quaternion)')
@click.option('--num-blocks', '-nb', default=2, help='number of residual blocks per stage')
@click.option('--start-filters', '-sf', default=8, help='number of filters in first layer')
@click.option('--dropout', '-d', default=0, help='dropout percent')
@click.option('--batch-size', '-bs', default=8, help='batch size')
@click.option('--num-epochs', '-e', default=200, help='total number of epochs')
@click.option('--dataset', '-ds', default='cifar10', help='dataset to train and test on')
@click.option('--activation', '-act', default='relu', help='activation function to use')
@click.option('--initialization', '-init', default='quaternion', help='initialization scheme to use')
@click.option('--learning-rate', '-lr', default=1e-3, help='learning rate for optimizer')
@click.option('--momentum', '-mn', default=0.9, help='momentum for batch norm')
@click.option('--decay', '-dc', default=0, help='decay rate of optimizer')
@click.option('--clipnorm', '-cn', default=1.0, help='maximum gradient size')
def runner(task, mode, num_blocks, start_filters, dropout, batch_size, num_epochs, dataset,
           activation, initialization, learning_rate, momentum, decay, clipnorm):
    """Build and train a model for the requested TASK.

    TASK is 'classification' or 'segmentation'. All remaining options
    are collected into a Params object and forwarded to the matching
    model builder and training loop. Any other TASK value prints an
    error message and returns.
    """
    param_dict = {"mode": mode,
                  "num_blocks": num_blocks,
                  "start_filter": start_filters,
                  "dropout": dropout,
                  "batch_size": batch_size,
                  "num_epochs": num_epochs,
                  "dataset": dataset,
                  "act": activation,
                  "init": initialization,
                  "lr": learning_rate,
                  "momentum": momentum,
                  "decay": decay,
                  "clipnorm": clipnorm
                  }
    params = Params(param_dict)
    if task == 'classification':
        model = model_c(params)
        print()
        print(model.count_params())
        model.summary()
        # FIX: os.path.join() with a single argument is a no-op, so the
        # diagram was always written to "<mode>model.png" in the current
        # directory. Build that file name directly.
        keras.utils.plot_model(model, to_file=mode + "model.png",
                               show_shapes=True)
        train_c(params, model)
    elif task == 'segmentation':
        model = model_s(params)
        print()
        print(model.count_params())
        train_s(params, model)
    else:
        print("Invalid task chosen...")
# When executed as a script, hand control to the click command.
if __name__ == '__main__':
    runner()
"quaternion_layers.utils.Params",
"numpy.random.seed",
"click.argument",
"training_segmentation.getModel",
"training_classification.getModel",
"click.option",
"click.command",
"training_classification.train",
"training_segmentation.train",
"os.path.join"
] | [((521, 540), 'numpy.random.seed', 'np.random.seed', (['(314)'], {}), '(314)\n', (535, 540), True, 'import numpy as np\n'), ((545, 560), 'click.command', 'click.command', ([], {}), '()\n', (558, 560), False, 'import click\n'), ((563, 585), 'click.argument', 'click.argument', (['"""task"""'], {}), "('task')\n", (577, 585), False, 'import click\n'), ((588, 693), 'click.option', 'click.option', (['"""--mode"""'], {'default': '"""quaternion"""', 'help': '"""value type of model (real, complex, quaternion)"""'}), "('--mode', default='quaternion', help=\n 'value type of model (real, complex, quaternion)')\n", (600, 693), False, 'import click\n'), ((691, 786), 'click.option', 'click.option', (['"""--num-blocks"""', '"""-nb"""'], {'default': '(2)', 'help': '"""number of residual blocks per stage"""'}), "('--num-blocks', '-nb', default=2, help=\n 'number of residual blocks per stage')\n", (703, 786), False, 'import click\n'), ((784, 879), 'click.option', 'click.option', (['"""--start-filters"""', '"""-sf"""'], {'default': '(8)', 'help': '"""number of filters in first layer"""'}), "('--start-filters', '-sf', default=8, help=\n 'number of filters in first layer')\n", (796, 879), False, 'import click\n'), ((877, 943), 'click.option', 'click.option', (['"""--dropout"""', '"""-d"""'], {'default': '(0)', 'help': '"""dropout percent"""'}), "('--dropout', '-d', default=0, help='dropout percent')\n", (889, 943), False, 'import click\n'), ((946, 1011), 'click.option', 'click.option', (['"""--batch-size"""', '"""-bs"""'], {'default': '(8)', 'help': '"""batch size"""'}), "('--batch-size', '-bs', default=8, help='batch size')\n", (958, 1011), False, 'import click\n'), ((1014, 1092), 'click.option', 'click.option', (['"""--num-epochs"""', '"""-e"""'], {'default': '(200)', 'help': '"""total number of epochs"""'}), "('--num-epochs', '-e', default=200, help='total number of epochs')\n", (1026, 1092), False, 'import click\n'), ((1095, 1188), 'click.option', 'click.option', 
(['"""--dataset"""', '"""-ds"""'], {'default': '"""cifar10"""', 'help': '"""dataset to train and test on"""'}), "('--dataset', '-ds', default='cifar10', help=\n 'dataset to train and test on')\n", (1107, 1188), False, 'import click\n'), ((1186, 1278), 'click.option', 'click.option', (['"""--activation"""', '"""-act"""'], {'default': '"""relu"""', 'help': '"""activation function to use"""'}), "('--activation', '-act', default='relu', help=\n 'activation function to use')\n", (1198, 1278), False, 'import click\n'), ((1276, 1381), 'click.option', 'click.option', (['"""--initialization"""', '"""-init"""'], {'default': '"""quaternion"""', 'help': '"""initialization scheme to use"""'}), "('--initialization', '-init', default='quaternion', help=\n 'initialization scheme to use')\n", (1288, 1381), False, 'import click\n'), ((1379, 1473), 'click.option', 'click.option', (['"""--learning-rate"""', '"""-lr"""'], {'default': '(0.001)', 'help': '"""learning rate for optimizer"""'}), "('--learning-rate', '-lr', default=0.001, help=\n 'learning rate for optimizer')\n", (1391, 1473), False, 'import click\n'), ((1470, 1548), 'click.option', 'click.option', (['"""--momentum"""', '"""-mn"""'], {'default': '(0.9)', 'help': '"""momentum for batch norm"""'}), "('--momentum', '-mn', default=0.9, help='momentum for batch norm')\n", (1482, 1548), False, 'import click\n'), ((1551, 1624), 'click.option', 'click.option', (['"""--decay"""', '"""-dc"""'], {'default': '(0)', 'help': '"""decay rate of optimizer"""'}), "('--decay', '-dc', default=0, help='decay rate of optimizer')\n", (1563, 1624), False, 'import click\n'), ((1627, 1703), 'click.option', 'click.option', (['"""--clipnorm"""', '"""-cn"""'], {'default': '(1.0)', 'help': '"""maximum gradient size"""'}), "('--clipnorm', '-cn', default=1.0, help='maximum gradient size')\n", (1639, 1703), False, 'import click\n'), ((2447, 2465), 'quaternion_layers.utils.Params', 'Params', (['param_dict'], {}), '(param_dict)\n', (2453, 2465), False, 'from 
quaternion_layers.utils import Params\n'), ((2519, 2534), 'training_classification.getModel', 'model_c', (['params'], {}), '(params)\n', (2526, 2534), True, 'from training_classification import getModel as model_c\n'), ((2733, 2755), 'training_classification.train', 'train_c', (['params', 'model'], {}), '(params, model)\n', (2740, 2755), True, 'from training_classification import train as train_c\n'), ((2807, 2822), 'training_segmentation.getModel', 'model_s', (['params'], {}), '(params)\n', (2814, 2822), True, 'from training_segmentation import getModel as model_s\n'), ((2886, 2908), 'training_segmentation.train', 'train_s', (['params', 'model'], {}), '(params, model)\n', (2893, 2908), True, 'from training_segmentation import train as train_s\n'), ((2661, 2707), 'os.path.join', 'os.path.join', (["(param_dict['mode'] + 'model.png')"], {}), "(param_dict['mode'] + 'model.png')\n", (2673, 2707), False, 'import os\n')] |
import time
import os
import math
import matplotlib.pyplot as plt
from scipy.io import loadmat
from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection
import matplotlib.animation as animation
import numpy as np
from plot import plot_component
from components import fgnetfdm
def render_in_flightgear(trajs, nameSuffixes=None, speed=1.0):
    """
    Render the trajectory in FlightGear.

    Parameters
    ----------
    trajs : dict
        Maps component names ('plant' + suffix, 'controller' + suffix) to
        trajectory objects exposing `times` and `states` lists.
    nameSuffixes : list of str, optional
        Suffixes appended to the generic names 'plant' and 'controller'
        for multi-agent visualisation. Defaults to [''] (single agent).
        The first entry is the main aircraft; a second entry, if present,
        is rendered as an intruder on a separate FlightGear channel.
    speed : float, optional
        Playback speed relative to real time.
    """
    # Work on a copy: the previous version used a mutable default argument
    # and popped items off the caller's list, which corrupted the shared
    # default on a second call (IndexError) and mutated caller state.
    nameSuffixes = [''] if nameSuffixes is None else list(nameSuffixes)

    def get_trajectories(nameSuffix):
        """Concatenate plant states and controller outputs at `timestamp`."""
        plant = 'plant' + nameSuffix
        controller = 'controller' + nameSuffix
        # Plant states
        idx = trajs[plant].times.index(timestamp)
        states = trajs[plant].states[idx]
        # Controller output
        idx = trajs[controller].times.index(timestamp)
        ctrls = trajs[controller].states[idx]
        return np.concatenate((np.asarray(states), ctrls))

    fdm = fgnetfdm.FGNetFDM()
    initial_time = time.monotonic()
    main_suffix = nameSuffixes.pop(0)
    if len(nameSuffixes) > 0:
        # Second agent gets its own FlightGear network channel.
        intruder_suffix = nameSuffixes[0]
        fdm_intruder = fgnetfdm.FGNetFDM()
        fdm_intruder.init_from_params({'FG_GENERIC_PORT': 5507, 'FG_PORT': 5605})
    else:
        intruder_suffix = None
    while True:
        real_time = time.monotonic()
        sim_time = (real_time - initial_time)*speed
        # First trajectory timestamp that is still ahead of simulated time.
        timestamp = next(filter(lambda x: x > sim_time, trajs['plant'+main_suffix].times), None)
        if timestamp:
            # Update main plant
            input_f16 = get_trajectories(main_suffix)
            fdm.update_and_send(input_f16)
            # Check if we crashed
            if fdm.agl <= 0:
                print(fdm.agl)
                print("CRASH!")
                break
            # Update other agents
            if intruder_suffix:
                intruder_input_f16 = get_trajectories(intruder_suffix)
                fdm_intruder.update_and_send(intruder_input_f16)
            # Delay
            time.sleep(fgnetfdm.FGNetFDM.DEFAULT_DELTA_T)
        else:
            # Ran out of trajectory data.
            break
    print("Done!")
def plot_shield(trajs):
    """Plot simulation results for the GCSA shield autopilot setup."""
    fig, axes = plt.subplots(figsize=(25, 15), nrows=4, ncols=3, sharex=True)

    # Column 0: plant states (row, state index, axis label).
    axes[0][0].set_title("F16 Plant")
    plant_rows = [
        (0, 11, "height (ft)"),
        (1, 0, "airspeed (ft/s)"),
        (2, 3, "roll (degrees)"),
        (2, 4, "pitch (degrees)"),
        (2, 5, "yaw (degrees)"),
        (3, 12, "power (%)"),
    ]
    for row, state_idx, label in plant_rows:
        plot_component(axes[row][0], trajs, "plant", "states", state_idx, label)

    # Column 1: low level controller outputs.
    axes[0][1].set_title("Low Level Controller")
    for row, label in enumerate(["e ()", "a ()", "r ()", "throttle ()"]):
        plot_component(axes[row][1], trajs, "controller", "outputs", row, label)

    # Column 2: autopilot selection and GCAS discrete state.
    axes[0][2].set_title("Autopilots")
    plot_component(axes[0][2], trajs, "monitor_ap", "outputs", 0,
                   "autopilot selected ()", do_schedule=True)
    plot_component(axes[1][2], trajs, "autopilot", "fdas", 0,
                   "GCAS State ()", do_schedule=True)
    axes[1][2].set_title("GCAS Finite Discrete State")
    axes[2][2].axis('off')
    axes[3][2].axis('off')
    axes[1][2].set_xlabel('Time (s)')
    for col in range(2):
        axes[3][col].set_xlabel('Time (s)')
    return fig
def plot_simple(trajs):
    """Show results for a simulated F16 plant, controller and an autopilot."""
    fig, axes = plt.subplots(figsize=(25, 15), nrows=4, ncols=3, sharex=True)

    # Column 0: plant states (row, state index, axis label).
    axes[0][0].set_title("F16 Plant")
    plant_rows = [
        (0, 11, "height (ft)"),
        (1, 0, "airspeed (ft/s)"),
        (2, 3, "roll (degrees)"),
        (2, 4, "pitch (degrees)"),
        (2, 5, "yaw (degrees)"),
        (3, 12, "power (%)"),
    ]
    for row, state_idx, label in plant_rows:
        plot_component(axes[row][0], trajs, "plant", "states", state_idx, label)

    # Column 1: controller outputs, one per row.
    axes[0][1].set_title("Low Level Controller")
    for row, label in enumerate(["s0 ()", "s1 ()", "s2 ()", "s3 ()"]):
        plot_component(axes[row][1], trajs, "controller", "outputs", row, label)

    # Column 2: autopilot outputs, one per row.
    axes[0][2].set_title("Autopilot")
    for row, label in enumerate(["a0 ()", "a1 ()", "a2 ()", "a3 ()"]):
        plot_component(axes[row][2], trajs, "autopilot", "outputs", row, label)

    for col in range(3):
        axes[3][col].set_xlabel('Time (s)')
    return fig
def plot_llc(trajs):
    """Plot reference tracking of the F16 low level controller (LLC)."""
    fig, axes = plt.subplots(figsize=(10, 6), nrows=3, ncols=1, sharex=True)
    axes[0].set_title("F16 LLC controller")
    # One (reference, measurement) pair per subplot row:
    # (component, category, index, label).
    tracking_rows = [
        (("autopilot", "outputs", 0, "Nz_ref ()"),
         ("plant", "outputs", 0, "Nz ()")),
        (("autopilot", "outputs", 2, "Ny_r_ref ()"),
         ("plant", "outputs", 1, "Ny+r ()")),
        (("autopilot", "outputs", 1, "ps_ref (rad/s)"),
         ("plant", "states", 6, "ps (rad/s)")),
    ]
    for axis, specs in zip(axes, tracking_rows):
        for component, category, index, label in specs:
            plot_component(axis, trajs, component, category, index, label)
    return fig
def plot_llc_shield(trajs):
"""
Plot reference tracking of LLC
"""
fig, ax = plt.subplots(figsize=(25, 15), nrows=4, ncols=3, sharex=True)
ax[0][0].set_title("F16 Plant")
plot_component(ax[0][0], trajs, "plant", "states", 11, "height (ft)")
plot_component(ax[1][0], trajs, "plant", "states", 1, "alpha (ft/s)")
plot_component(ax[2][0], trajs, "plant", "states", 3, "roll (degrees)")
plot_component(ax[2][0], trajs, "plant", "states", 4, "pitch (degrees)")
plot_component(ax[2][0], trajs, "plant", "states", 5, "yaw (degrees)")
plot_component(ax[3][0], trajs, "plant", "states", 7, "pitch rate (degrees/s)")
ax[0][1].set_title("Low Level Controller")
plot_component(ax[0][1], trajs, "shield_llc", "outputs", 0, "s0 ()")
plot_component(ax[1][1], trajs, "shield_llc", "outputs", 1, "s1 ()")
plot_component(ax[2][1], trajs, "shield_llc", "outputs", 2, "s2 ()")
plot_component(ax[3][1], trajs, "shield_llc", "outputs", 3, "s3 ()")
ax[0][2].set_title("Autopilot")
plot_component(ax[0][2], trajs, "autopilot", "outputs", 0, "a0 ()")
plot_component(ax[1][2], trajs, "autopilot", "outputs", 1, "a1 ()")
plot_component(ax[2][2], trajs, "autopilot", "outputs", 2, "a2 ()")
plot_component(ax[3][2], trajs, "autopilot", "outputs", 3, "a3 ()")
[ax[3][idx].set_xlabel('Time (s)') for idx in range(3)]
return fig
def scale3d(pts, scale_list):
    """
    Scale an (n, 3) ndarray of 3D points componentwise.

    Parameters
    ----------
    pts : ndarray, shape (n, 3)
        Points to scale; left unmodified.
    scale_list : sequence of 3 numbers
        Per-axis scale factors (x, y, z).

    Returns
    -------
    ndarray, shape (n, 3), dtype float
        A new array with each column multiplied by its scale factor.
    """
    assert len(scale_list) == 3
    # Vectorized broadcast replaces the original per-element Python loop;
    # the float cast matches the dtype of the np.zeros buffer used before.
    return np.asarray(pts, dtype=float) * np.asarray(scale_list, dtype=float)
def rotate3d(pts, theta, psi, phi):
    """
    Rotate an (n, 3) ndarray of 3D points, returning a new array.

    Parameters
    ----------
    pts : ndarray, shape (n, 3)
        Points to rotate; left unmodified.
    theta, psi, phi : float
        Euler angles in radians (pitch-, yaw- and roll-style axes,
        combined exactly as in the matrix below).

    Returns
    -------
    ndarray, shape (n, 3), dtype float
        Each input row multiplied (row-vector convention) by the
        rotation matrix.
    """
    sinTheta = math.sin(theta)
    cosTheta = math.cos(theta)
    sinPsi = math.sin(psi)
    cosPsi = math.cos(psi)
    sinPhi = math.sin(phi)
    cosPhi = math.cos(phi)
    transform_matrix = np.array([ \
        [cosPsi * cosTheta, -sinPsi * cosTheta, sinTheta], \
        [cosPsi * sinTheta * sinPhi + sinPsi * cosPhi, \
        -sinPsi * sinTheta * sinPhi + cosPsi * cosPhi, \
        -cosTheta * sinPhi], \
        [-cosPsi * sinTheta * cosPhi + sinPsi * sinPhi, \
        sinPsi * sinTheta * cosPhi + cosPsi * sinPhi, \
        cosTheta * cosPhi]])
    # Single matrix product replaces the original row-by-row np.dot loop;
    # rv[i] = pts[i] @ M for every i is exactly pts @ M.
    return np.asarray(pts, dtype=float).dot(transform_matrix)
def plot3d_anim(trace, filename=None):
    """
    Make a 3d animation of the GCAS maneuver.

    Parameters
    ----------
    trace:
        Simulation trace with `times`, `states`, `outputs` and (optionally)
        `modes` attributes; each state is the 13-element F16 state vector.
    filename:
        None to show on screen (heavy subsampling), a '*.gif' name to save
        with imagemagick, any other name to save a video with ffmpeg.

    Returns
    -------
    The `FuncAnimation` object when `filename` is None, otherwise None
    (the animation is saved to disk instead).
    """
    skip = 1
    full_plot = True
    times = trace.times
    states = trace.states
    assert len(times) == len(states)
    try:
        modes = trace.modes
    except AttributeError:
        # Traces without discrete-mode information get placeholders.
        modes = [None]*len(times)
    #TODO: Improve this interface?
    op_array = np.vstack(trace.outputs)
    Nz_list, ps_list = op_array[:,0], op_array[:,1]
    if filename is None: # plot to the screen
        skip = 20
        full_plot = False
    elif filename.endswith('.gif'):
        skip = 5
    else:
        skip = 1 # plot every frame
    start = time.time()
    # Subsample every per-frame series consistently.
    times = times[0::skip]
    states = states[0::skip]
    modes = modes[0::skip]
    ps_list = ps_list[0::skip]
    Nz_list = Nz_list[0::skip]
    fig = plt.figure(figsize=(8, 7))
    ax = fig.add_subplot(111, projection='3d')
    ax.view_init(30, 45)
    # State indices 9, 10, 11 hold the x, y and altitude positions.
    pos_xs = [pt[9] for pt in states]
    pos_ys = [pt[10] for pt in states]
    pos_zs = [pt[11] for pt in states]
    trail_line, = ax.plot([], [], [], color='r', lw=1)
    # Aircraft mesh (vertices 'V', faces 'F') shipped next to this module.
    data = loadmat(os.path.dirname(os.path.realpath(__file__))+ '/f-16.mat')
    f16_pts = data['V']
    f16_faces = data['F']
    plane_polys = Poly3DCollection([], color=None if full_plot else 'k')
    ax.add_collection3d(plane_polys)
    ax.set_xlim([min(pos_xs), max(pos_xs)])
    # Bug fix: the y-axis upper limit previously used max(pos_xs).
    ax.set_ylim([min(pos_ys), max(pos_ys)])
    ax.set_zlim([min(pos_zs), max(pos_zs)])
    ax.set_xlabel('X [ft]')
    ax.set_ylabel('Y [ft]')
    ax.set_zlabel('Altitude [ft] ')
    frames = len(times)
    # Static text labels (figure coordinates), updated each frame.
    fontsize = 14
    time_text = ax.text2D(0.05, 1.07, "", transform=ax.transAxes, fontsize=fontsize)
    mode_text = ax.text2D(0.95, 1.07, "", transform=ax.transAxes, fontsize=fontsize, horizontalalignment='right')
    alt_text = ax.text2D(0.05, 1.00, "", transform=ax.transAxes, fontsize=fontsize)
    v_text = ax.text2D(0.95, 1.00, "", transform=ax.transAxes, fontsize=fontsize, horizontalalignment='right')
    alpha_text = ax.text2D(0.05, 0.93, "", transform=ax.transAxes, fontsize=fontsize)
    beta_text = ax.text2D(0.95, 0.93, "", transform=ax.transAxes, fontsize=fontsize, horizontalalignment='right')
    nz_text = ax.text2D(0.05, 0.86, "", transform=ax.transAxes, fontsize=fontsize)
    ps_text = ax.text2D(0.95, 0.86, "", transform=ax.transAxes, fontsize=fontsize, horizontalalignment='right')
    ang_text = ax.text2D(0.5, 0.79, "", transform=ax.transAxes, fontsize=fontsize, horizontalalignment='center')
    def anim_func(frame):
        """Update trail, text labels and aircraft mesh for one frame."""
        speed = states[frame][0]
        alpha = states[frame][1]
        beta = states[frame][2]
        alt = states[frame][11]
        phi = states[frame][3]
        theta = states[frame][4]
        psi = states[frame][5]
        dx = states[frame][9]
        dy = states[frame][10]
        dz = states[frame][11]
        time_text.set_text('t = {:.2f} sec'.format(times[frame]))
        mode_text.set_text('Mode: {}'.format(modes[frame]))
        alt_text.set_text('h = {:.2f} ft'.format(alt))
        v_text.set_text('V = {:.2f} ft/sec'.format(speed))
        alpha_text.set_text('$\\alpha$ = {:.2f} deg'.format(np.rad2deg(alpha)))
        beta_text.set_text('$\\beta$ = {:.2f} deg'.format(np.rad2deg(beta)))
        nz_text.set_text('$N_z$ = {:.2f} g'.format(Nz_list[frame]))
        ps_text.set_text('$p_s$ = {:.2f} deg/sec'.format(np.rad2deg(ps_list[frame])))
        ang_text.set_text('[$\\phi$, $\\theta$, $\\psi$] = [{:.2f}, {:.2f}, {:.2f}] deg'.format(\
            np.rad2deg(phi), np.rad2deg(theta), np.rad2deg(psi)))
        # Flight-path trail behind the aircraft (fixed length in sim steps).
        trail_len = 200 // skip
        start_index = max(0, frame-trail_len)
        trail_line.set_data(pos_xs[start_index:frame], pos_ys[start_index:frame])
        trail_line.set_3d_properties(pos_zs[start_index:frame])
        # Scale and orient the aircraft mesh to the current attitude.
        scale = 25
        pts = scale3d(f16_pts, [-scale, scale, scale])
        pts = rotate3d(pts, theta, -psi, phi)
        # Keep a fixed-size viewing box centred on the aircraft.
        size = 1000
        minx = dx - size
        maxx = dx + size
        miny = dy - size
        maxy = dy + size
        minz = dz - size
        maxz = dz + size
        ax.set_xlim([minx, maxx])
        ax.set_ylim([miny, maxy])
        ax.set_zlim([minz, maxz])
        verts = []
        fc = []
        count = 0
        for face in f16_faces:
            face_pts = []
            count = count + 1
            # Interactive mode only draws every 10th face for speed.
            if not full_plot and count % 10 != 0:
                continue
            for index in face:
                # .mat face indices are 1-based, hence index-1.
                face_pts.append((pts[index-1][0] + dx, \
                    pts[index-1][1] + dy, \
                    pts[index-1][2] + dz))
            verts.append(face_pts)
            fc.append('k')
        # draw ground
        if minz <= 0 and maxz >= 0:
            z = 0
            verts.append([(minx, miny, z), (maxx, miny, z), (maxx, maxy, z), (minx, maxy, z)])
            fc.append('0.8')
        plane_polys.set_verts(verts)
        plane_polys.set_facecolors(fc)
        return None
    anim_obj = animation.FuncAnimation(fig, anim_func, frames, interval=30, \
        blit=False, repeat=True)
    if filename is not None:
        if filename.endswith('.gif'):
            print("\nSaving animation to '{}' using 'imagemagick'...".format(filename))
            anim_obj.save(filename, dpi=80, writer='imagemagick')
            print("Finished saving to {} in {:.1f} sec".format(filename, time.time() - start))
        else:
            fps = 50
            codec = 'libx264'
            print("\nSaving '{}' at {:.2f} fps using ffmpeg with codec '{}'.".format(
                filename, fps, codec))
            # if this fails do: 'sudo apt-get install ffmpeg'
            try:
                extra_args = []
                if codec is not None:
                    extra_args += ['-vcodec', str(codec)]
                anim_obj.save(filename, fps=fps, extra_args=extra_args)
                print("Finished saving to {} in {:.1f} sec".format(filename, time.time() - start))
            except AttributeError:
                print("\nSaving video file failed! Is ffmpeg installed? Can you run 'ffmpeg' in the terminal?")
    else:
        return anim_obj
| [
"components.fgnetfdm.FGNetFDM",
"plot.plot_component",
"numpy.asarray",
"os.path.realpath",
"numpy.zeros",
"math.sin",
"time.time",
"matplotlib.animation.FuncAnimation",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.figure",
"time.monotonic",
"numpy.array",
"math.cos",
... | [((1000, 1019), 'components.fgnetfdm.FGNetFDM', 'fgnetfdm.FGNetFDM', ([], {}), '()\n', (1017, 1019), False, 'from components import fgnetfdm\n'), ((1040, 1056), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1054, 1056), False, 'import time\n'), ((2283, 2344), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(25, 15)', 'nrows': '(4)', 'ncols': '(3)', 'sharex': '(True)'}), '(figsize=(25, 15), nrows=4, ncols=3, sharex=True)\n', (2295, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2454), 'plot.plot_component', 'plot_component', (['ax[0][0]', 'trajs', '"""plant"""', '"""states"""', '(11)', '"""height (ft)"""'], {}), "(ax[0][0], trajs, 'plant', 'states', 11, 'height (ft)')\n", (2399, 2454), False, 'from plot import plot_component\n'), ((2459, 2531), 'plot.plot_component', 'plot_component', (['ax[1][0]', 'trajs', '"""plant"""', '"""states"""', '(0)', '"""airspeed (ft/s)"""'], {}), "(ax[1][0], trajs, 'plant', 'states', 0, 'airspeed (ft/s)')\n", (2473, 2531), False, 'from plot import plot_component\n'), ((2536, 2607), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(3)', '"""roll (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 3, 'roll (degrees)')\n", (2550, 2607), False, 'from plot import plot_component\n'), ((2612, 2684), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(4)', '"""pitch (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 4, 'pitch (degrees)')\n", (2626, 2684), False, 'from plot import plot_component\n'), ((2689, 2759), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(5)', '"""yaw (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 5, 'yaw (degrees)')\n", (2703, 2759), False, 'from plot import plot_component\n'), ((2764, 2831), 'plot.plot_component', 'plot_component', (['ax[3][0]', 'trajs', '"""plant"""', '"""states"""', '(12)', '"""power (%)"""'], 
{}), "(ax[3][0], trajs, 'plant', 'states', 12, 'power (%)')\n", (2778, 2831), False, 'from plot import plot_component\n'), ((2884, 2951), 'plot.plot_component', 'plot_component', (['ax[0][1]', 'trajs', '"""controller"""', '"""outputs"""', '(0)', '"""e ()"""'], {}), "(ax[0][1], trajs, 'controller', 'outputs', 0, 'e ()')\n", (2898, 2951), False, 'from plot import plot_component\n'), ((2956, 3023), 'plot.plot_component', 'plot_component', (['ax[1][1]', 'trajs', '"""controller"""', '"""outputs"""', '(1)', '"""a ()"""'], {}), "(ax[1][1], trajs, 'controller', 'outputs', 1, 'a ()')\n", (2970, 3023), False, 'from plot import plot_component\n'), ((3028, 3095), 'plot.plot_component', 'plot_component', (['ax[2][1]', 'trajs', '"""controller"""', '"""outputs"""', '(2)', '"""r ()"""'], {}), "(ax[2][1], trajs, 'controller', 'outputs', 2, 'r ()')\n", (3042, 3095), False, 'from plot import plot_component\n'), ((3100, 3174), 'plot.plot_component', 'plot_component', (['ax[3][1]', 'trajs', '"""controller"""', '"""outputs"""', '(3)', '"""throttle ()"""'], {}), "(ax[3][1], trajs, 'controller', 'outputs', 3, 'throttle ()')\n", (3114, 3174), False, 'from plot import plot_component\n'), ((3217, 3323), 'plot.plot_component', 'plot_component', (['ax[0][2]', 'trajs', '"""monitor_ap"""', '"""outputs"""', '(0)', '"""autopilot selected ()"""'], {'do_schedule': '(True)'}), "(ax[0][2], trajs, 'monitor_ap', 'outputs', 0,\n 'autopilot selected ()', do_schedule=True)\n", (3231, 3323), False, 'from plot import plot_component\n'), ((3324, 3418), 'plot.plot_component', 'plot_component', (['ax[1][2]', 'trajs', '"""autopilot"""', '"""fdas"""', '(0)', '"""GCAS State ()"""'], {'do_schedule': '(True)'}), "(ax[1][2], trajs, 'autopilot', 'fdas', 0, 'GCAS State ()',\n do_schedule=True)\n", (3338, 3418), False, 'from plot import plot_component\n'), ((3758, 3819), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(25, 15)', 'nrows': '(4)', 'ncols': '(3)', 'sharex': '(True)'}), '(figsize=(25, 15), 
nrows=4, ncols=3, sharex=True)\n', (3770, 3819), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3929), 'plot.plot_component', 'plot_component', (['ax[0][0]', 'trajs', '"""plant"""', '"""states"""', '(11)', '"""height (ft)"""'], {}), "(ax[0][0], trajs, 'plant', 'states', 11, 'height (ft)')\n", (3874, 3929), False, 'from plot import plot_component\n'), ((3934, 4006), 'plot.plot_component', 'plot_component', (['ax[1][0]', 'trajs', '"""plant"""', '"""states"""', '(0)', '"""airspeed (ft/s)"""'], {}), "(ax[1][0], trajs, 'plant', 'states', 0, 'airspeed (ft/s)')\n", (3948, 4006), False, 'from plot import plot_component\n'), ((4011, 4082), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(3)', '"""roll (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 3, 'roll (degrees)')\n", (4025, 4082), False, 'from plot import plot_component\n'), ((4087, 4159), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(4)', '"""pitch (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 4, 'pitch (degrees)')\n", (4101, 4159), False, 'from plot import plot_component\n'), ((4164, 4234), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(5)', '"""yaw (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 5, 'yaw (degrees)')\n", (4178, 4234), False, 'from plot import plot_component\n'), ((4239, 4306), 'plot.plot_component', 'plot_component', (['ax[3][0]', 'trajs', '"""plant"""', '"""states"""', '(12)', '"""power (%)"""'], {}), "(ax[3][0], trajs, 'plant', 'states', 12, 'power (%)')\n", (4253, 4306), False, 'from plot import plot_component\n'), ((4359, 4427), 'plot.plot_component', 'plot_component', (['ax[0][1]', 'trajs', '"""controller"""', '"""outputs"""', '(0)', '"""s0 ()"""'], {}), "(ax[0][1], trajs, 'controller', 'outputs', 0, 's0 ()')\n", (4373, 4427), False, 'from plot import plot_component\n'), ((4432, 4500), 'plot.plot_component', 
'plot_component', (['ax[1][1]', 'trajs', '"""controller"""', '"""outputs"""', '(1)', '"""s1 ()"""'], {}), "(ax[1][1], trajs, 'controller', 'outputs', 1, 's1 ()')\n", (4446, 4500), False, 'from plot import plot_component\n'), ((4505, 4573), 'plot.plot_component', 'plot_component', (['ax[2][1]', 'trajs', '"""controller"""', '"""outputs"""', '(2)', '"""s2 ()"""'], {}), "(ax[2][1], trajs, 'controller', 'outputs', 2, 's2 ()')\n", (4519, 4573), False, 'from plot import plot_component\n'), ((4578, 4646), 'plot.plot_component', 'plot_component', (['ax[3][1]', 'trajs', '"""controller"""', '"""outputs"""', '(3)', '"""s3 ()"""'], {}), "(ax[3][1], trajs, 'controller', 'outputs', 3, 's3 ()')\n", (4592, 4646), False, 'from plot import plot_component\n'), ((4688, 4755), 'plot.plot_component', 'plot_component', (['ax[0][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(0)', '"""a0 ()"""'], {}), "(ax[0][2], trajs, 'autopilot', 'outputs', 0, 'a0 ()')\n", (4702, 4755), False, 'from plot import plot_component\n'), ((4760, 4827), 'plot.plot_component', 'plot_component', (['ax[1][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(1)', '"""a1 ()"""'], {}), "(ax[1][2], trajs, 'autopilot', 'outputs', 1, 'a1 ()')\n", (4774, 4827), False, 'from plot import plot_component\n'), ((4832, 4899), 'plot.plot_component', 'plot_component', (['ax[2][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(2)', '"""a2 ()"""'], {}), "(ax[2][2], trajs, 'autopilot', 'outputs', 2, 'a2 ()')\n", (4846, 4899), False, 'from plot import plot_component\n'), ((4904, 4971), 'plot.plot_component', 'plot_component', (['ax[3][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(3)', '"""a3 ()"""'], {}), "(ax[3][2], trajs, 'autopilot', 'outputs', 3, 'a3 ()')\n", (4918, 4971), False, 'from plot import plot_component\n'), ((5136, 5196), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(3)', 'ncols': '(1)', 'sharex': '(True)'}), '(figsize=(10, 6), nrows=3, ncols=1, sharex=True)\n', (5148, 
5196), True, 'import matplotlib.pyplot as plt\n'), ((5243, 5311), 'plot.plot_component', 'plot_component', (['ax[0]', 'trajs', '"""autopilot"""', '"""outputs"""', '(0)', '"""Nz_ref ()"""'], {}), "(ax[0], trajs, 'autopilot', 'outputs', 0, 'Nz_ref ()')\n", (5257, 5311), False, 'from plot import plot_component\n'), ((5316, 5376), 'plot.plot_component', 'plot_component', (['ax[0]', 'trajs', '"""plant"""', '"""outputs"""', '(0)', '"""Nz ()"""'], {}), "(ax[0], trajs, 'plant', 'outputs', 0, 'Nz ()')\n", (5330, 5376), False, 'from plot import plot_component\n'), ((5381, 5451), 'plot.plot_component', 'plot_component', (['ax[1]', 'trajs', '"""autopilot"""', '"""outputs"""', '(2)', '"""Ny_r_ref ()"""'], {}), "(ax[1], trajs, 'autopilot', 'outputs', 2, 'Ny_r_ref ()')\n", (5395, 5451), False, 'from plot import plot_component\n'), ((5456, 5518), 'plot.plot_component', 'plot_component', (['ax[1]', 'trajs', '"""plant"""', '"""outputs"""', '(1)', '"""Ny+r ()"""'], {}), "(ax[1], trajs, 'plant', 'outputs', 1, 'Ny+r ()')\n", (5470, 5518), False, 'from plot import plot_component\n'), ((5523, 5596), 'plot.plot_component', 'plot_component', (['ax[2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(1)', '"""ps_ref (rad/s)"""'], {}), "(ax[2], trajs, 'autopilot', 'outputs', 1, 'ps_ref (rad/s)')\n", (5537, 5596), False, 'from plot import plot_component\n'), ((5601, 5665), 'plot.plot_component', 'plot_component', (['ax[2]', 'trajs', '"""plant"""', '"""states"""', '(6)', '"""ps (rad/s)"""'], {}), "(ax[2], trajs, 'plant', 'states', 6, 'ps (rad/s)')\n", (5615, 5665), False, 'from plot import plot_component\n'), ((5775, 5836), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(25, 15)', 'nrows': '(4)', 'ncols': '(3)', 'sharex': '(True)'}), '(figsize=(25, 15), nrows=4, ncols=3, sharex=True)\n', (5787, 5836), True, 'import matplotlib.pyplot as plt\n'), ((5877, 5946), 'plot.plot_component', 'plot_component', (['ax[0][0]', 'trajs', '"""plant"""', '"""states"""', '(11)', '"""height 
(ft)"""'], {}), "(ax[0][0], trajs, 'plant', 'states', 11, 'height (ft)')\n", (5891, 5946), False, 'from plot import plot_component\n'), ((5951, 6020), 'plot.plot_component', 'plot_component', (['ax[1][0]', 'trajs', '"""plant"""', '"""states"""', '(1)', '"""alpha (ft/s)"""'], {}), "(ax[1][0], trajs, 'plant', 'states', 1, 'alpha (ft/s)')\n", (5965, 6020), False, 'from plot import plot_component\n'), ((6025, 6096), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(3)', '"""roll (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 3, 'roll (degrees)')\n", (6039, 6096), False, 'from plot import plot_component\n'), ((6101, 6173), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(4)', '"""pitch (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 4, 'pitch (degrees)')\n", (6115, 6173), False, 'from plot import plot_component\n'), ((6178, 6248), 'plot.plot_component', 'plot_component', (['ax[2][0]', 'trajs', '"""plant"""', '"""states"""', '(5)', '"""yaw (degrees)"""'], {}), "(ax[2][0], trajs, 'plant', 'states', 5, 'yaw (degrees)')\n", (6192, 6248), False, 'from plot import plot_component\n'), ((6253, 6332), 'plot.plot_component', 'plot_component', (['ax[3][0]', 'trajs', '"""plant"""', '"""states"""', '(7)', '"""pitch rate (degrees/s)"""'], {}), "(ax[3][0], trajs, 'plant', 'states', 7, 'pitch rate (degrees/s)')\n", (6267, 6332), False, 'from plot import plot_component\n'), ((6385, 6453), 'plot.plot_component', 'plot_component', (['ax[0][1]', 'trajs', '"""shield_llc"""', '"""outputs"""', '(0)', '"""s0 ()"""'], {}), "(ax[0][1], trajs, 'shield_llc', 'outputs', 0, 's0 ()')\n", (6399, 6453), False, 'from plot import plot_component\n'), ((6458, 6526), 'plot.plot_component', 'plot_component', (['ax[1][1]', 'trajs', '"""shield_llc"""', '"""outputs"""', '(1)', '"""s1 ()"""'], {}), "(ax[1][1], trajs, 'shield_llc', 'outputs', 1, 's1 ()')\n", (6472, 6526), False, 'from plot import 
plot_component\n'), ((6531, 6599), 'plot.plot_component', 'plot_component', (['ax[2][1]', 'trajs', '"""shield_llc"""', '"""outputs"""', '(2)', '"""s2 ()"""'], {}), "(ax[2][1], trajs, 'shield_llc', 'outputs', 2, 's2 ()')\n", (6545, 6599), False, 'from plot import plot_component\n'), ((6604, 6672), 'plot.plot_component', 'plot_component', (['ax[3][1]', 'trajs', '"""shield_llc"""', '"""outputs"""', '(3)', '"""s3 ()"""'], {}), "(ax[3][1], trajs, 'shield_llc', 'outputs', 3, 's3 ()')\n", (6618, 6672), False, 'from plot import plot_component\n'), ((6714, 6781), 'plot.plot_component', 'plot_component', (['ax[0][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(0)', '"""a0 ()"""'], {}), "(ax[0][2], trajs, 'autopilot', 'outputs', 0, 'a0 ()')\n", (6728, 6781), False, 'from plot import plot_component\n'), ((6786, 6853), 'plot.plot_component', 'plot_component', (['ax[1][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(1)', '"""a1 ()"""'], {}), "(ax[1][2], trajs, 'autopilot', 'outputs', 1, 'a1 ()')\n", (6800, 6853), False, 'from plot import plot_component\n'), ((6858, 6925), 'plot.plot_component', 'plot_component', (['ax[2][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(2)', '"""a2 ()"""'], {}), "(ax[2][2], trajs, 'autopilot', 'outputs', 2, 'a2 ()')\n", (6872, 6925), False, 'from plot import plot_component\n'), ((6930, 6997), 'plot.plot_component', 'plot_component', (['ax[3][2]', 'trajs', '"""autopilot"""', '"""outputs"""', '(3)', '"""a3 ()"""'], {}), "(ax[3][2], trajs, 'autopilot', 'outputs', 3, 'a3 ()')\n", (6944, 6997), False, 'from plot import plot_component\n'), ((7227, 7246), 'numpy.zeros', 'np.zeros', (['pts.shape'], {}), '(pts.shape)\n', (7235, 7246), True, 'import numpy as np\n'), ((7496, 7511), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (7504, 7511), False, 'import math\n'), ((7527, 7542), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (7535, 7542), False, 'import math\n'), ((7556, 7569), 'math.sin', 'math.sin', (['psi'], {}), '(psi)\n', 
(7564, 7569), False, 'import math\n'), ((7583, 7596), 'math.cos', 'math.cos', (['psi'], {}), '(psi)\n', (7591, 7596), False, 'import math\n'), ((7610, 7623), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (7618, 7623), False, 'import math\n'), ((7637, 7650), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (7645, 7650), False, 'import math\n'), ((7675, 7983), 'numpy.array', 'np.array', (['[[cosPsi * cosTheta, -sinPsi * cosTheta, sinTheta], [cosPsi * sinTheta *\n sinPhi + sinPsi * cosPhi, -sinPsi * sinTheta * sinPhi + cosPsi * cosPhi,\n -cosTheta * sinPhi], [-cosPsi * sinTheta * cosPhi + sinPsi * sinPhi, \n sinPsi * sinTheta * cosPhi + cosPsi * sinPhi, cosTheta * cosPhi]]'], {}), '([[cosPsi * cosTheta, -sinPsi * cosTheta, sinTheta], [cosPsi *\n sinTheta * sinPhi + sinPsi * cosPhi, -sinPsi * sinTheta * sinPhi + \n cosPsi * cosPhi, -cosTheta * sinPhi], [-cosPsi * sinTheta * cosPhi + \n sinPsi * sinPhi, sinPsi * sinTheta * cosPhi + cosPsi * sinPhi, cosTheta *\n cosPhi]])\n', (7683, 7983), True, 'import numpy as np\n'), ((8047, 8066), 'numpy.zeros', 'np.zeros', (['pts.shape'], {}), '(pts.shape)\n', (8055, 8066), True, 'import numpy as np\n'), ((8536, 8560), 'numpy.vstack', 'np.vstack', (['trace.outputs'], {}), '(trace.outputs)\n', (8545, 8560), True, 'import numpy as np\n'), ((8817, 8828), 'time.time', 'time.time', ([], {}), '()\n', (8826, 8828), False, 'import time\n'), ((8986, 9012), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (8996, 9012), True, 'import matplotlib.pyplot as plt\n'), ((9405, 9459), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['[]'], {'color': "(None if full_plot else 'k')"}), "([], color=None if full_plot else 'k')\n", (9421, 9459), False, 'from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection\n'), ((13203, 13292), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'anim_func', 'frames'], {'interval': '(30)', 'blit': '(False)', 'repeat': 
'(True)'}), '(fig, anim_func, frames, interval=30, blit=False,\n repeat=True)\n', (13226, 13292), True, 'import matplotlib.animation as animation\n'), ((1190, 1209), 'components.fgnetfdm.FGNetFDM', 'fgnetfdm.FGNetFDM', ([], {}), '()\n', (1207, 1209), False, 'from components import fgnetfdm\n'), ((1369, 1385), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1383, 1385), False, 'import time\n'), ((8118, 8150), 'numpy.dot', 'np.dot', (['pts[i]', 'transform_matrix'], {}), '(pts[i], transform_matrix)\n', (8124, 8150), True, 'import numpy as np\n'), ((2087, 2132), 'time.sleep', 'time.sleep', (['fgnetfdm.FGNetFDM.DEFAULT_DELTA_T'], {}), '(fgnetfdm.FGNetFDM.DEFAULT_DELTA_T)\n', (2097, 2132), False, 'import time\n'), ((962, 980), 'numpy.asarray', 'np.asarray', (['states'], {}), '(states)\n', (972, 980), True, 'import numpy as np\n'), ((9294, 9320), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9310, 9320), False, 'import os\n'), ((11373, 11390), 'numpy.rad2deg', 'np.rad2deg', (['alpha'], {}), '(alpha)\n', (11383, 11390), True, 'import numpy as np\n'), ((11451, 11467), 'numpy.rad2deg', 'np.rad2deg', (['beta'], {}), '(beta)\n', (11461, 11467), True, 'import numpy as np\n'), ((11596, 11622), 'numpy.rad2deg', 'np.rad2deg', (['ps_list[frame]'], {}), '(ps_list[frame])\n', (11606, 11622), True, 'import numpy as np\n'), ((11736, 11751), 'numpy.rad2deg', 'np.rad2deg', (['phi'], {}), '(phi)\n', (11746, 11751), True, 'import numpy as np\n'), ((11753, 11770), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (11763, 11770), True, 'import numpy as np\n'), ((11772, 11787), 'numpy.rad2deg', 'np.rad2deg', (['psi'], {}), '(psi)\n', (11782, 11787), True, 'import numpy as np\n'), ((13595, 13606), 'time.time', 'time.time', ([], {}), '()\n', (13604, 13606), False, 'import time\n'), ((14167, 14178), 'time.time', 'time.time', ([], {}), '()\n', (14176, 14178), False, 'import time\n')] |
""" Auxilary functions """
import os
import glob
import shutil
import logging
import hashlib
import itertools
import json
from collections import OrderedDict
from copy import deepcopy
import dill
from tqdm import tqdm_notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
def to_list(value):
    """Wrap *value* into a one-element list unless it already is a list."""
    if isinstance(value, list):
        return value
    return [value]
def count_startswith(seq, name):
    """Count how many items of *seq* begin with the prefix *name*."""
    total = 0
    for item in seq:
        if item.startswith(name):
            total += 1
    return total
def get_metrics(pipeline, metrics_var, metrics_name, *args, agg='mean', **kwargs):
    """Evaluate metrics accumulated in a pipeline variable.

    Parameters
    ----------
    pipeline
        Pipeline that stores a metrics object in variable `metrics_var`.
    metrics_var : str
        Name of the pipeline variable holding the metrics accumulator.
    metrics_name : str or list of str
        Metric name(s) to evaluate.
    agg : str
        Aggregation mode passed to the metrics `evaluate` method.

    Returns
    -------
    A single value when one metric name is given, otherwise a list of values
    in the same order as `metrics_name`.
    """
    names = [metrics_name] if not isinstance(metrics_name, list) else metrics_name
    evaluated = pipeline.get_variable(metrics_var).evaluate(names, *args, agg=agg, **kwargs)
    values = [evaluated[name] for name in names]
    return values[0] if len(values) == 1 else values
def convert_research_results(research_name, new_name=None, bar=True):
    """Convert research results from the old on-disk layout to the new one.

    Only the results are transformed; the converted research itself still
    cannot be loaded by the new Research version.

    Parameters
    ----------
    research_name : str
        Path to the research folder to convert (modified in place unless
        `new_name` is given).
    new_name : str, optional
        If given, the research folder is first copied to this path and the
        copy is converted, leaving the original untouched.
    bar : bool
        Whether to show a progress bar while rewriting result files.
    """
    # Copy research if needed
    if new_name is not None:
        shutil.copytree(research_name, new_name)
        research_name = new_name
    # Move configs from the separate 'configs' folder to experiment folders.
    # `configs` maps experiment id -> config folder name.
    configs = {}
    for config in glob.glob(f'{research_name}/configs/*'):
        for experiment_folder in glob.glob(f'{research_name}/results/{glob.escape(os.path.basename(config))}/*'):
            exp_id = os.path.basename(experiment_folder)
            configs[exp_id] = os.path.basename(config)
    for exp_id, config in configs.items():
        src = f'{research_name}/configs/{config}'
        dst = f'{research_name}/results/{config}/{exp_id}/config.dill'
        with open(src, 'rb') as f:
            content = dill.load(f) # content is a ConfigAlias instance
        content['updates'] = content['update'] # Rename column for the new format
        content.pop_config('update')
        content['device'] = None # Add column
        with open(dst, 'wb') as f:
            dill.dump(content, f)
        # Also store a human-readable JSON copy of the config
        with open(f'{research_name}/results/{config}/{exp_id}/config.json', 'w') as f:
            json.dump(jsonify(content.config().config), f)
    # Remove folder with configs
    shutil.rmtree(f'{research_name}/configs')
    # Remove one nested level: 'results/<config>/<exp_id>' -> 'results/<exp_id>'
    initial_results = glob.glob(f'{research_name}/results/*')
    for exp_path in initial_results:
        for path in os.listdir(exp_path):
            src = os.path.join(exp_path, path)
            dst = os.path.join(os.path.dirname(exp_path), path)
            shutil.move(src, dst)
    for path in initial_results:
        shutil.rmtree(path)
    # Rename 'results' folder to 'experiments'
    shutil.move(f'{research_name}/results', f'{research_name}/experiments')
    # Split each monolithic results file into per-variable, per-iteration files
    # under '<experiment>/results/<unit>_<variable>/<iteration>'.
    for results_file in tqdm_notebook(glob.glob(f'{research_name}/experiments/*/*'), disable=(not bar)):
        filename = os.path.basename(results_file)
        content = get_content(results_file)
        if content is not None:
            content.pop('sample_index')
            iterations = content.pop('iteration')
            # Old file names look like '<unit_name>_<iteration + 1>'
            unit_name, iteration_in_name = filename.split('_')
            iteration_in_name = int(iteration_in_name) - 1
            dirname = os.path.dirname(results_file)
            for var in content:
                new_dict = OrderedDict()
                for i, val in zip(iterations, content[var]):
                    new_dict[i] = val
                folder_for_var = f'{dirname}/results/{unit_name}_{var}'
                if not os.path.exists(folder_for_var):
                    os.makedirs(folder_for_var)
                dst = f'{folder_for_var}/{iteration_in_name}'
                with open(dst, 'wb') as f:
                    dill.dump(new_dict, f)
            os.remove(results_file)
def get_content(path):
    """Load a research results file and return its dict, or None otherwise.

    A valid results file is named '<unit>_<iteration>' and pickles a dict
    with at least 'sample_index' and 'iteration' keys; anything else
    (wrong name, unpicklable payload, wrong structure) yields None.
    """
    parts = os.path.basename(path).split('_')
    if len(parts) != 2 or not parts[1].isdigit():
        return None
    try:
        with open(path, 'rb') as f:
            content = dill.load(f)
    except dill.UnpicklingError:
        return None
    if isinstance(content, dict) and 'sample_index' in content and 'iteration' in content:
        return content
    return None
def jsonify(src):
    """Return a deep copy of dict *src* with np.ndarray values turned into lists.

    The input dict is left untouched; the copy becomes JSON-serializable
    (as far as ndarray values are concerned).
    """
    out = deepcopy(src)
    for key in out:
        if isinstance(out[key], np.ndarray):
            out[key] = out[key].tolist()
    return out
def create_logger(name, path=None, loglevel='info'):
    """Create (or reconfigure) a named logger with a single handler.

    Parameters
    ----------
    name : str
        Logger name (see `logging.getLogger`).
    path : str, optional
        If given, log into this file; otherwise log to the console stream.
    loglevel : str
        Level name, case-insensitive: 'debug', 'info', 'warning', ...

    Returns
    -------
    logging.Logger
        Configured logger instance.
    """
    level = getattr(logging, loglevel.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if path is not None:
        handler = logging.FileHandler(path)
    else:
        handler = logging.StreamHandler() #TODO: filter outputs
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                                  datefmt='%y-%m-%d %H:%M:%S')
    handler.setLevel(level)
    handler.setFormatter(formatter)
    # `logging.getLogger` caches loggers by name, so repeated calls with the
    # same name used to stack handlers and duplicate every message.
    # Reset existing handlers before attaching the fresh one.
    logger.handlers.clear()
    logger.addHandler(handler)
    return logger
def must_execute(iteration, when, n_iters=None, last=False):
    """Decide whether a unit must run at the given iteration.

    `when` may contain positive ints (run every k-th iteration), strings with
    a one-character prefix before an iteration number (run exactly at that
    iteration) and the keyword 'last' (run at the final iteration).
    """
    if last and 'last' in when:
        return True
    exact_hits = [int(item[1:]) for item in when if isinstance(item, str) and item != 'last']
    periods = [item for item in when if isinstance(item, int) and item > 0]
    matched = iteration in exact_hits or any((iteration + 1) % p == 0 for p in periods)
    # When the total number of iterations is known, 'last' can be resolved here
    if n_iters is not None and 'last' in when and iteration + 1 == n_iters:
        return True
    return matched
def parse_name(name):
    """Split 'namespace_name.unit_name' into its two components.

    Returns
    -------
    list of str
        Two-element list [namespace_name, unit_name].

    Raises
    ------
    ValueError
        If `name` contains no dot or more than one dot.
    """
    parts = name.split('.')
    if len(parts) == 1:
        raise ValueError('`func` parameter must be provided or name must be "namespace_name.unit_name"')
    if len(parts) > 2:
        raise ValueError(f'name must be "namespace_name.unit_name" but {name} were given')
    return parts
def generate_id(config, random):
    """Produce a 16-character experiment id.

    The id is the first 8 hex chars of the md5 of the config alias, followed
    by 8 random decimal digits drawn from `random` (a numpy Generator).
    """
    digest = hashlib.md5(config.alias(as_string=True).encode('utf-8')).hexdigest()
    suffix = ''.join(str(d) for d in random.integers(10, size=8))
    return digest[:8] + suffix
def plot_results_by_config(results, variables, figsize=None, layout=None, **kwargs):
    """
    Given results from Research.run(), draw plots of the specified variables for every config.

    Parameters
    ----------
    results : pandas.DataFrame
        Results produced by Research.run(); must contain 'config', 'iteration'
        and 'repetition' (and optionally 'cv_split') columns.
    variables : tuple or list
        Names of result columns to plot.
    figsize : tuple or None
        Figure size passed to matplotlib. If None (default value) figsize is set to (x, y),
        where x = (5 * number of horizontal subplots), y = (5 * number of vertical subplots).
    layout : 'flat', 'square' or None
        Plot arranging strategy used only when a single variable is plotted
        (default: None, plots are arranged vertically, one row per config).
    kwargs : dict
        Extra keyword arguments forwarded to `DataFrame.plot`.
    """
    gbc = results.groupby('config')
    n_configs = len(gbc)
    n_vars = len(variables)
    # Default grid: one row per config, one column per variable
    n_h, n_v = n_vars, n_configs
    if n_vars == 1:
        if layout == 'flat':
            n_h, n_v = n_configs, 1
        if layout == 'square':
            n_h = int(np.sqrt(n_configs))
            n_v = np.ceil(n_configs / n_h).astype(int)
    if figsize is None:
        figsize = (n_h * 5, n_v * 5)
    _, axs = plt.subplots(n_v, n_h, figsize=figsize)
    # plt.subplots returns a bare Axes for a 1x1 grid -- normalize to a flat sequence
    axs = axs.flatten() if isinstance(axs, np.ndarray) else (axs,)
    for x, (config, df) in enumerate(gbc):
        for y, val in enumerate(variables):
            ax = axs[n_vars * x + y]
            # One line per repetition (and per cv split, when present)
            cols = ['repetition', 'cv_split'] if 'cv_split' in df.columns else 'repetition'
            res = (df.pivot_table(index='iteration', columns=cols, values=val)
                   .rename(columns=lambda s: 'rep ' + str(s), level=0))
            if 'cv_split' in df.columns:
                res = res.rename(columns=lambda s: 'split ' + str(s), level=1)
            res.plot(ax=ax, **kwargs)
            ax.set_title(config)
            ax.set_xlabel('Iteration')
            ax.set_ylabel(val.replace('_', ' ').capitalize())
            ax.grid(True)
            ax.legend()
def show_research(df, layouts=None, titles=None, average_repetitions=False, log_scale=False,
                  rolling_window=None, color=None, **kwargs): # pylint: disable=too-many-branches
    """Show plots given by a research dataframe.

    Parameters
    ----------
    df : DataFrame
        Research's results.
    layouts : list, optional
        List of strings where each element consists of two parts split by '/'.
        The first part is the type of calculated value written in the "name"
        column; the second is the name of the column with the values to draw.
        If None, one layout is generated for every (name, column) pair.
    titles : list, optional
        List of titles for the plots defined by `layouts` (defaults to `layouts`).
    average_repetitions : bool, optional
        If False, a separate line is drawn for each repetition;
        otherwise one mean line is drawn across repetitions.
    log_scale : bool or sequence of bools, optional
        If True, values are logarithmised before plotting.
    rolling_window : int or sequence of ints, optional
        Size of the rolling-mean window (None means no smoothing).
    color : str or sequence of matplotlib colors, optional
        If str, the name of a matplotlib colormap to pick plot colors from.
        If a sequence of colors, they are used for the plots and repeated in a
        cycle when shorter than the number of lines.
        If None (default), the `mcolors.TABLEAU_COLORS` sequence is used.
    kwargs :
        Additional named arguments passed directly to `plt.subplots`.
        Defaults: ``figsize = (9 * len(layouts), 7)``, ``nrows = 1``,
        ``ncols = len(layouts)``.
    """
    if layouts is None:
        layouts = []
        for nlabel, ndf in df.groupby("name"):
            ndf = ndf.drop(['config', 'name', 'iteration', 'repetition'], axis=1).dropna(axis=1)
            for attr in ndf.columns.values:
                layouts.append('/'.join([str(nlabel), str(attr)]))
    titles = layouts if titles is None else titles
    # Broadcast scalar options so that every layout has its own setting
    if isinstance(log_scale, bool):
        log_scale = [log_scale] * len(layouts)
    if isinstance(rolling_window, int) or (rolling_window is None):
        rolling_window = [rolling_window] * len(layouts)
    rolling_window = [x if x is not None else 1 for x in rolling_window]
    if color is None:
        color = list(mcolors.TABLEAU_COLORS.keys())
    df_len = len(df['config'].unique())
    # One color per config: either sampled from a colormap or cycled from a list
    if isinstance(color, str):
        cmap = plt.get_cmap(color)
        chosen_colors = [cmap(i/df_len) for i in range(df_len)]
    else:
        chosen_colors = list(itertools.islice(itertools.cycle(color), df_len))
    kwargs = {'figsize': (9 * len(layouts), 7), 'nrows': 1, 'ncols': len(layouts), **kwargs}
    _, ax = plt.subplots(**kwargs)
    # A single subplot is returned as a bare Axes -- normalize to a sequence
    if len(layouts) == 1:
        ax = (ax, )
    for i, (layout, title, log, roll_w) in enumerate(list(zip(*[layouts, titles, log_scale, rolling_window]))):
        name, attr = layout.split('/')
        ndf = df[df['name'] == name]
        for (clabel, cdf), curr_color in zip(ndf.groupby("config"), chosen_colors):
            cdf = cdf.drop(['config', 'name'], axis=1).dropna(axis=1).astype('float')
            if average_repetitions:
                # One smoothed mean line over all repetitions
                idf = cdf.groupby('iteration').mean().drop('repetition', axis=1)
                y_values = idf[attr].rolling(roll_w).mean().values
                if log:
                    y_values = np.log(y_values)
                ax[i].plot(idf.index.values, y_values, label=str(clabel), color=curr_color)
            else:
                # One smoothed line per repetition
                for repet, rdf in cdf.groupby('repetition'):
                    rdf = rdf.drop('repetition', axis=1)
                    y_values = rdf[attr].rolling(roll_w).mean().values
                    if log:
                        y_values = np.log(y_values)
                    ax[i].plot(rdf['iteration'].values, y_values,
                               label='/'.join([str(repet), str(clabel)]), color=curr_color)
        ax[i].set_xlabel('iteration')
        ax[i].set_title(title)
        ax[i].legend()
    plt.show()
def print_results(df, layout, average_repetitions=False, sort_by=None, ascending=True, n_last=100):
    """ Show results given by research dataframe.

    Parameters
    ----------
    df : DataFrame
        Research's results.
    layout : str
        String of two parts split by '/': the type of calculated value from
        the "name" column, then the name of the column with values to report.
    average_repetitions : bool, optional
        If True, only the mean and std over repetitions are reported;
        otherwise a separate value per repetition is included as well.
    sort_by : str or None, optional
        If not None, name of the column to sort by.
    ascending : bool
        Same as in ``pd.sort_values``.
    n_last : int, optional
        Number of final iterations over which the averaging takes place.

    Returns
    -------
    DataFrame
        Research results indexed by config with `layout`-derived columns.
    """
    name, attr = layout.split('/')
    ndf = df[df['name'] == name]
    # Column headers depend on whether per-repetition values are shown
    if average_repetitions:
        columns = [attr + ' (mean)', attr + ' (std)']
    else:
        suffixes = [' (repetition {})'.format(i) for i in ndf['repetition'].unique()] + [' (mean)', ' (std)']
        columns = [attr + s for s in suffixes]
    index, data = [], []
    for config, cdf in ndf.groupby("config"):
        index.append(config)
        cdf = cdf.drop(['config', 'name'], axis=1).dropna(axis=1).astype('float')
        # Average the metric over the last `n_last` iterations of each repetition
        rep_means = []
        for _, rdf in cdf.groupby('repetition'):
            rdf = rdf.drop('repetition', axis=1)
            tail = rdf[rdf['iteration'] > rdf['iteration'].max() - n_last]
            rep_means.append(tail[attr].mean())
        row = [np.mean(rep_means), np.std(rep_means)]
        if not average_repetitions:
            row = rep_means + row
        data.append(row)
    res_df = pd.DataFrame(data=data, index=index, columns=columns)
    if sort_by:
        res_df.sort_values(by=sort_by, ascending=ascending, inplace=True)
    return res_df
def plot_images(images, labels=None, proba=None, ncols=5, classes=None, models_names=None, **kwargs):
    """ Plot images and optionally true labels as well as predicted class proba.
        - In case labels and proba are not passed, just shows images.
        - In case labels are passed and proba is not, shows images with labels.
        - Otherwise shows everything.
    In case the predictions of several models are provided, i.e. proba is an iterable
    containing np.arrays, shows predictions for every model.

    Parameters
    ----------
    images : np.array
        Batch of images.
    labels : array-like, optional
        Images labels.
    proba : np.array with the shape (n_images, n_classes) or list of such arrays, optional
        Predicted probabilities for each class for each model.
    ncols : int
        Number of images to plot in a row.
    classes : list of strings
        Class names. In case not specified the list [`0`, `1`, .., `proba.shape[1] - 1`]
        would be assigned.
    models_names : string or list of strings
        Models names. In case not specified and single-model predictions are provided,
        no name is displayed. Otherwise the list [`Model 1`, `Model 2`, ..] is assigned.
    kwargs : dict
        Additional keyword arguments for plt.subplots().
    """
    # Normalize `models_names` and `proba` to sequences of matching length
    if isinstance(models_names, str):
        models_names = (models_names, )
    if not isinstance(proba, (list, tuple)):
        proba = (proba, )
        if models_names is None:
            models_names = ['']
    else:
        if models_names is None:
            models_names = ['Model ' + str(i+1) for i in range(len(proba))]
    # If class names are not specified, they can be implicitly inferred from the
    # `proba` shape; with labels but no proba, class names are required.
    if classes is None:
        if proba[0] is not None:
            classes = [str(i) for i in range(proba[0].shape[1])]
        elif labels is None:
            pass
        elif proba[0] is None:
            raise ValueError('Specify classes')
    n_items = len(images)
    nrows = (n_items // ncols) + 1
    fontsize = kwargs.pop('fontsize', 28)
    fig, ax = plt.subplots(nrows, ncols, **kwargs)
    ax = ax.flatten()
    for i in range(n_items):
        ax[i].imshow(images[i])
        if labels is not None: # plot images with labels
            true_class_name = classes[labels[i]]
            title = 'Real answer: {}'.format(true_class_name)
            if proba[0] is not None: # plot images with labels and predictions
                for j, model_proba in enumerate(proba): # the case of predictions of several models
                    class_pred = np.argmax(model_proba, axis=1)[i]
                    class_proba = model_proba[i][class_pred]
                    pred_class_name = classes[class_pred]
                    title += '\n {} Prediction: {} with {:.2f}%'.format(models_names[j],
                                                                       pred_class_name, class_proba * 100)
            ax[i].title.set_text(title)
            ax[i].title.set_size(fontsize)
        ax[i].grid(b=None)
    # Hide the unused trailing axes of the grid
    for i in range(n_items, nrows * ncols):
        fig.delaxes(ax[i])
| [
"os.remove",
"numpy.argmax",
"logging.Formatter",
"numpy.mean",
"glob.glob",
"shutil.rmtree",
"itertools.cycle",
"os.path.join",
"pandas.DataFrame",
"logging.FileHandler",
"numpy.std",
"os.path.dirname",
"os.path.exists",
"dill.load",
"matplotlib.colors.TABLEAU_COLORS.keys",
"matplotli... | [((1417, 1456), 'glob.glob', 'glob.glob', (['f"""{research_name}/configs/*"""'], {}), "(f'{research_name}/configs/*')\n", (1426, 1456), False, 'import glob\n'), ((2385, 2426), 'shutil.rmtree', 'shutil.rmtree', (['f"""{research_name}/configs"""'], {}), "(f'{research_name}/configs')\n", (2398, 2426), False, 'import shutil\n'), ((2480, 2519), 'glob.glob', 'glob.glob', (['f"""{research_name}/results/*"""'], {}), "(f'{research_name}/results/*')\n", (2489, 2519), False, 'import glob\n'), ((2857, 2928), 'shutil.move', 'shutil.move', (['f"""{research_name}/results"""', 'f"""{research_name}/experiments"""'], {}), "(f'{research_name}/results', f'{research_name}/experiments')\n", (2868, 2928), False, 'import shutil\n'), ((4139, 4161), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4155, 4161), False, 'import os\n'), ((4714, 4727), 'copy.deepcopy', 'deepcopy', (['src'], {}), '(src)\n', (4722, 4727), False, 'from copy import deepcopy\n'), ((5002, 5025), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (5019, 5025), False, 'import logging\n'), ((5216, 5322), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {'datefmt': '"""%y-%m-%d %H:%M:%S"""'}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n", (5233, 5322), False, 'import logging\n'), ((7876, 7915), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_v', 'n_h'], {'figsize': 'figsize'}), '(n_v, n_h, figsize=figsize)\n', (7888, 7915), True, 'import matplotlib.pyplot as plt\n'), ((11450, 11472), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '(**kwargs)\n', (11462, 11472), True, 'import matplotlib.pyplot as plt\n'), ((12767, 12777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12775, 12777), True, 'import matplotlib.pyplot as plt\n'), ((14767, 14820), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'index', 'columns': 'columns'}), 
'(data=data, index=index, columns=columns)\n', (14779, 14820), True, 'import pandas as pd\n'), ((17036, 17072), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {}), '(nrows, ncols, **kwargs)\n', (17048, 17072), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1285), 'shutil.copytree', 'shutil.copytree', (['research_name', 'new_name'], {}), '(research_name, new_name)\n', (1260, 1285), False, 'import shutil\n'), ((2577, 2597), 'os.listdir', 'os.listdir', (['exp_path'], {}), '(exp_path)\n', (2587, 2597), False, 'import os\n'), ((2785, 2804), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2798, 2804), False, 'import shutil\n'), ((3022, 3067), 'glob.glob', 'glob.glob', (['f"""{research_name}/experiments/*/*"""'], {}), "(f'{research_name}/experiments/*/*')\n", (3031, 3067), False, 'import glob\n'), ((3108, 3138), 'os.path.basename', 'os.path.basename', (['results_file'], {}), '(results_file)\n', (3124, 3138), False, 'import os\n'), ((5100, 5125), 'logging.FileHandler', 'logging.FileHandler', (['path'], {}), '(path)\n', (5119, 5125), False, 'import logging\n'), ((5154, 5177), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5175, 5177), False, 'import logging\n'), ((11170, 11189), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['color'], {}), '(color)\n', (11182, 11189), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1628), 'os.path.basename', 'os.path.basename', (['experiment_folder'], {}), '(experiment_folder)\n', (1609, 1628), False, 'import os\n'), ((1659, 1683), 'os.path.basename', 'os.path.basename', (['config'], {}), '(config)\n', (1675, 1683), False, 'import os\n'), ((1906, 1918), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (1915, 1918), False, 'import dill\n'), ((2179, 2200), 'dill.dump', 'dill.dump', (['content', 'f'], {}), '(content, f)\n', (2188, 2200), False, 'import dill\n'), ((2617, 2645), 'os.path.join', 'os.path.join', (['exp_path', 'path'], {}), '(exp_path, path)\n', (2629, 2645), False, 
'import os\n'), ((2722, 2743), 'shutil.move', 'shutil.move', (['src', 'dst'], {}), '(src, dst)\n', (2733, 2743), False, 'import shutil\n'), ((3450, 3479), 'os.path.dirname', 'os.path.dirname', (['results_file'], {}), '(results_file)\n', (3465, 3479), False, 'import os\n'), ((3987, 4010), 'os.remove', 'os.remove', (['results_file'], {}), '(results_file)\n', (3996, 4010), False, 'import os\n'), ((4394, 4406), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (4403, 4406), False, 'import dill\n'), ((11052, 11081), 'matplotlib.colors.TABLEAU_COLORS.keys', 'mcolors.TABLEAU_COLORS.keys', ([], {}), '()\n', (11079, 11081), True, 'import matplotlib.colors as mcolors\n'), ((2677, 2702), 'os.path.dirname', 'os.path.dirname', (['exp_path'], {}), '(exp_path)\n', (2692, 2702), False, 'import os\n'), ((3539, 3552), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3550, 3552), False, 'from collections import OrderedDict\n'), ((7725, 7743), 'numpy.sqrt', 'np.sqrt', (['n_configs'], {}), '(n_configs)\n', (7732, 7743), True, 'import numpy as np\n'), ((11310, 11332), 'itertools.cycle', 'itertools.cycle', (['color'], {}), '(color)\n', (11325, 11332), False, 'import itertools\n'), ((3747, 3777), 'os.path.exists', 'os.path.exists', (['folder_for_var'], {}), '(folder_for_var)\n', (3761, 3777), False, 'import os\n'), ((3799, 3826), 'os.makedirs', 'os.makedirs', (['folder_for_var'], {}), '(folder_for_var)\n', (3810, 3826), False, 'import os\n'), ((3952, 3974), 'dill.dump', 'dill.dump', (['new_dict', 'f'], {}), '(new_dict, f)\n', (3961, 3974), False, 'import dill\n'), ((7763, 7787), 'numpy.ceil', 'np.ceil', (['(n_configs / n_h)'], {}), '(n_configs / n_h)\n', (7770, 7787), True, 'import numpy as np\n'), ((12117, 12133), 'numpy.log', 'np.log', (['y_values'], {}), '(y_values)\n', (12123, 12133), True, 'import numpy as np\n'), ((14652, 14664), 'numpy.mean', 'np.mean', (['rep'], {}), '(rep)\n', (14659, 14664), True, 'import numpy as np\n'), ((14666, 14677), 'numpy.std', 'np.std', 
(['rep'], {}), '(rep)\n', (14672, 14677), True, 'import numpy as np\n'), ((14725, 14737), 'numpy.mean', 'np.mean', (['rep'], {}), '(rep)\n', (14732, 14737), True, 'import numpy as np\n'), ((14739, 14750), 'numpy.std', 'np.std', (['rep'], {}), '(rep)\n', (14745, 14750), True, 'import numpy as np\n'), ((1540, 1564), 'os.path.basename', 'os.path.basename', (['config'], {}), '(config)\n', (1556, 1564), False, 'import os\n'), ((12496, 12512), 'numpy.log', 'np.log', (['y_values'], {}), '(y_values)\n', (12502, 12512), True, 'import numpy as np\n'), ((17536, 17566), 'numpy.argmax', 'np.argmax', (['model_proba'], {'axis': '(1)'}), '(model_proba, axis=1)\n', (17545, 17566), True, 'import numpy as np\n')] |
import numpy as np
import argparse
import scipy.linalg as la
import time
from . import leapUtils
import scipy.linalg.blas as blas
from . import leapMain
# Pretty-print numpy output: 3 decimal places, 200-char lines so matrices stay on one row
np.set_printoptions(precision=3, linewidth=200)
def eigenDecompose(bed, kinshipFile=None, outFile=None, ignore_neig=False):
    """Eigendecompose a kinship matrix.

    The kinship matrix is either loaded from `kinshipFile` or computed from the
    genotypes in `bed` as the symmetrized cross-product X X^T / #SNPs.

    Parameters
    ----------
    bed : plink Bed object (ignored when `kinshipFile` is given)
    kinshipFile : str, optional -- text file with a precomputed kinship matrix
    outFile : str, optional -- if given, save U, S and XXT as a compressed npz
    ignore_neig : bool -- if True, negative eigenvalues are ignored

    Returns
    -------
    dict with keys 'XXT' (kinship), 'arr_0' (eigenvectors U), 'arr_1' (eigenvalues S)
    """
    if kinshipFile is not None:
        XXT = np.loadtxt(kinshipFile)
    else:
        # Compute the kinship matrix from the genotype matrix
        bed = leapUtils._fixupBed(bed)
        t0 = time.time()
        print('Computing kinship matrix...')
        XXT = leapUtils.symmetrize(blas.dsyrk(1.0, bed.val, lower=1)) / bed.val.shape[1]
        print('Done in %0.2f' % (time.time() - t0), 'seconds')
    # Compute the eigendecomposition
    S, U = leapUtils.eigenDecompose(XXT, ignore_neig)
    if outFile is not None:
        np.savez_compressed(outFile, arr_0=U, arr_1=S, XXT=XXT)
    return {'XXT': XXT, 'arr_0': U, 'arr_1': S}
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the eigendecomposition.
    parser = argparse.ArgumentParser()
    parser.add_argument('--bfilesim', metavar='bfilesim', default=None, help='Binary plink file')
    parser.add_argument('--kinship', metavar='kinship', default=None, help='A kinship matrix represented in a text file. Note that this matrix must correspond exactly to the phenotypes file, unlike the bfilesim file option.')
    parser.add_argument('--extractSim', metavar='extractSim', default=None, help='SNPs subset to use')
    parser.add_argument('--out', metavar='out', default=None, help='output file')
    parser.add_argument('--pheno', metavar='pheno', default=None, help='Phenotypes file (optional), only used for identifying unphenotyped individuals')
    parser.add_argument('--missingPhenotype', metavar='missingPhenotype', default='-9', help='identifier for missing values (default: -9)')
    parser.add_argument('--ignore_neig', metavar='ignore_neig', type=int, default=0, help='if set to 1, negative eigenvalues will be set to 0 and consequently ignored.')
    args = parser.parse_args()
    # Exactly one kinship source must be supplied, and an output path is required.
    if args.bfilesim is None and args.kinship is None:
        raise Exception('bfilesim or kinship must be supplied')
    if args.bfilesim is not None and args.kinship is not None:
        raise Exception('bfilesim and kinship cannot both be supplied')
    if args.out is None:
        raise Exception('output file name must be supplied')
    # Load genotypes only when a precomputed kinship matrix was not given
    if args.bfilesim is not None:
        bed, _ = leapUtils.loadData(args.bfilesim, args.extractSim, args.pheno, args.missingPhenotype, loadSNPs=True)
    else:
        bed = None
    leapMain.eigenDecompose(bed, kinshipFile=args.kinship, outFile=args.out, ignore_neig=args.ignore_neig > 0)
| [
"numpy.set_printoptions",
"argparse.ArgumentParser",
"time.time",
"numpy.savez_compressed",
"numpy.loadtxt",
"scipy.linalg.blas.dsyrk"
] | [((153, 200), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'linewidth': '(200)'}), '(precision=3, linewidth=200)\n', (172, 200), True, 'import numpy as np\n'), ((899, 924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (922, 924), False, 'import argparse\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((575, 598), 'numpy.loadtxt', 'np.loadtxt', (['kinshipFile'], {}), '(kinshipFile)\n', (585, 598), True, 'import numpy as np\n'), ((706, 761), 'numpy.savez_compressed', 'np.savez_compressed', (['outFile'], {'arr_0': 'U', 'arr_1': 'S', 'XXT': 'XXT'}), '(outFile, arr_0=U, arr_1=S, XXT=XXT)\n', (725, 761), True, 'import numpy as np\n'), ((453, 486), 'scipy.linalg.blas.dsyrk', 'blas.dsyrk', (['(1.0)', 'bed.val'], {'lower': '(1)'}), '(1.0, bed.val, lower=1)\n', (463, 486), True, 'import scipy.linalg.blas as blas\n'), ((532, 543), 'time.time', 'time.time', ([], {}), '()\n', (541, 543), False, 'import time\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Optional
import numpy as np
from ax.models.random.base import RandomModel
from scipy.stats import uniform
class UniformGenerator(RandomModel):
    """Random generator that draws points i.i.d. from the uniform distribution.

    Being model-free, it implements neither the fit nor the predict methods.

    Attributes:
        seed: An optional seed value for the underlying PRNG.
    """

    def __init__(self, deduplicate: bool = False, seed: Optional[int] = None) -> None:
        super().__init__(deduplicate=deduplicate, seed=seed)
        # Dedicated RandomState so repeated generations are reproducible per seed
        self._rs = np.random.RandomState(seed=seed)

    def _gen_samples(self, n: int, tunable_d: int) -> np.ndarray:
        """Draw points from the scipy uniform distribution.

        Args:
            n: Number of samples to generate.
            tunable_d: Dimension of samples to generate.

        Returns:
            An (n x tunable_d) array of random points.
        """
        samples = uniform.rvs(size=(n, tunable_d), random_state=self._rs)  # pyre-ignore
        return samples
| [
"scipy.stats.uniform.rvs",
"numpy.random.RandomState"
] | [((696, 728), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (717, 728), True, 'import numpy as np\n'), ((1081, 1136), 'scipy.stats.uniform.rvs', 'uniform.rvs', ([], {'size': '(n, tunable_d)', 'random_state': 'self._rs'}), '(size=(n, tunable_d), random_state=self._rs)\n', (1092, 1136), False, 'from scipy.stats import uniform\n')] |
from collections import defaultdict
import numpy as np
import random
from amplification.tasks.core import idk, Task, sequences
def random_tree(a, b, point_to=None):
    """Yield the edges of a random spanning tree on the integer range [a, b).

    A single-node range (b == a + 1) yields no edges.  When `point_to` is
    given, every yielded edge (x, y) has y closer to `point_to`, i.e. edges
    are oriented towards that node.

    Raises:
        ValueError: if a >= b (raised lazily, on first iteration).
    """
    if a >= b:
        raise ValueError()
    if a + 1 == b:
        return  # single node: nothing to connect
    # Split the range, connect a random node from each half, then recurse
    split = np.random.randint(a + 1, b)
    left = np.random.randint(a, split)
    right = np.random.randint(split, b)
    if point_to is None:
        yield (left, right)
        yield from random_tree(a, split)
        yield from random_tree(split, b)
        return
    towards_left = point_to < split
    yield (right, left) if towards_left else (left, right)
    yield from random_tree(a, split, point_to if towards_left else left)
    yield from random_tree(split, b, right if towards_left else point_to)
def dfs(neighbors_dict, start_node):
    """Traverse the graph from `start_node`; return (parents, depths) dicts.

    The start node is recorded as its own parent.  Depths count edges from
    `start_node` along the discovery path, so they are exact distances on
    trees/forests (the intended input here).
    """
    parents = {start_node: start_node}
    depths = {start_node: 0}
    pending = [start_node]
    while pending:
        current = pending.pop()
        for neighbor in neighbors_dict[current]:
            if neighbor in depths:
                continue
            depths[neighbor] = depths[current] + 1
            parents[neighbor] = current
            pending.append(neighbor)
    return parents, depths
class EqualsTask(Task):
value_query = 1
simple_value_query = 2
neighbor_query = 3
parent_query = 4
depth_query = 5
fixed_vocab = 6
interaction_length = 9
simple_question_tokens = [simple_value_query, neighbor_query]
def repr_symbol(self, x):
if x in self.chars: return "abcdefghijklmnopqrstuv"[x]
if x in self.vals: return str(x)
if x in self.depths: return str(x - self.zero)
return {self.value_query: "V",
self.neighbor_query: "N",
self.simple_value_query: "S",
self.parent_query: "P",
self.depth_query: "D"}.get(x, "?")
def __init__(self, nchars=8, length=2, num_vals=None, easy=False):
self.nchars = nchars
self.length = length
self.num_vars = nchars ** length
self.num_vals = num_vals or int(np.sqrt(self.num_vars))
self.nvocab = self.fixed_vocab
self.chars = self.allocate(self.nchars)
self.min_char = self.chars[0]
self.max_char = self.chars[-1]
self.vars = list(sequences(self.chars, self.length))
self.vals = self.allocate(self.num_vals)
self.max_d = self.num_vars - self.num_vals
self.depths = self.allocate(self.max_d + 1)
self.zero = self.depths[0]
self.largest_d = self.depths[-1]
self.easy = easy
self.fact_length = 2 * length
self.answer_length = length
self.question_length = 2 * length
def encode_n(self, n):
return np.minimum(self.zero + n, self.largest_d)
def are_simple(self, Qs):
return np.logical_or(
np.isin(Qs[:,0], self.simple_question_tokens),
np.logical_and(
np.isin(Qs[:,0], self.vars),
np.isin(Qs[:,1], self.vars)
)
)
def make_simple_value_query(self, x):
return self.pad((self.simple_value_query,) + tuple(x), self.question_length)
def make_parent_query(self, x):
return self.pad((self.parent_query,) + tuple(x), self.question_length)
def make_value_query(self, x):
return self.pad((self.value_query,) + tuple(x), self.question_length)
def make_neighbor_query(self, x):
return self.pad((self.neighbor_query,) + tuple(x), self.question_length)
def make_depth_query(self, x):
return self.pad((self.depth_query,) + tuple(x), self.question_length)
def make_edge_query(self, x, y):
return self.pad(tuple(x) + tuple(y), self.question_length)
def are_chars(self, x):
return np.logical_and(np.all(x >= self.min_char, axis=-1), np.all(x <= self.max_char, axis=-1))
def recursive_answer(self, Q):
    """Answer question Q by delegating subquestions to a sub-answerer.

    Generator protocol visible from the yields below: yielding
    ``(answer, None)`` terminates with that answer, while yielding
    ``(None, subquestion)`` requests the sub-answer, which the driver
    sends back into the generator via ``.send()``.
    """
    Q = tuple(Q)
    # First token is the query type; the next `length` tokens name a variable.
    x = Q[1:1+self.length]
    if Q[0] == self.value_query:
        # Resolve a variable's value: try its direct assignment first,
        # otherwise follow one parent link and ask for that value.
        if not self.are_chars(x):
            yield self.pad(idk), None
            return
        simple_value = (yield None, self.make_simple_value_query(x))[0]
        if simple_value in self.vals:
            yield self.pad(simple_value), None
            return
        y = (yield None, self.make_parent_query(x))[:self.length]
        if not self.are_chars(y):
            yield self.pad(idk), None
            return
        val = (yield None, self.make_value_query(y))[0]
        if val not in self.vals:
            yield self.pad(idk), None
            return
        yield self.pad(val), None
    elif Q[0] == self.depth_query:
        # Depth is zero for directly-assigned variables, else parent depth + 1.
        if not self.are_chars(x):
            yield self.pad(idk), None
            return
        simple_val = (yield None, self.make_simple_value_query(x))[0]
        if simple_val in self.vals:
            yield self.pad(self.zero), None
            return
        y = (yield None, self.make_parent_query(x))[:self.length]
        if not self.are_chars(y):
            yield self.pad(idk), None
            return
        d = (yield None, self.make_depth_query(y))[0]
        if d not in self.depths:
            yield self.pad(idk), None
            return
        yield self.pad(self.encode_n(d - self.zero + 1)), None
    elif Q[0] == self.parent_query:
        # Sample a few candidate parents/neighbors and keep the shallowest.
        if not self.are_chars(x):
            # NOTE(review): other branches pad their answer; this one yields
            # the raw idk token -- confirm whether padding was intended here.
            yield idk, None
            return
        simple_val = (yield None, self.make_simple_value_query(x))[0]
        if simple_val in self.vals:
            yield x, None
            return
        # test_var: x and y are linked iff the edge query answers `zero`.
        def test_var(y): return self.zero == (yield None, self.make_edge_query(x, y))[0]
        def get_neighbor(): return (yield None, self.make_neighbor_query(x))[:self.length]
        def get_parent():
            y = (yield None, self.make_parent_query(x))[:self.length]
            if self.are_chars(y) and (yield from test_var(y)):
                return y
            return None
        candidates = []
        def addc(y):
            if y is not None and self.are_chars(y): candidates.append(y)
        # Two parent samples plus one neighbor sample.
        addc((yield from get_parent()))
        addc((yield from get_parent()))
        addc((yield from get_neighbor()))
        best = idk
        best_d = self.largest_d
        for y in candidates:
            d = (yield None, self.make_depth_query(y))[0]
            if d in self.depths and d<= best_d:
                best_d = d
                best = y
        yield self.pad(best), None
    else:
        # Unknown query type.
        yield self.pad(idk), None
def make_dbs(self, difficulty=float('inf')):
    """Generate a random equivalence-chain database.

    Returns ``(facts, fast_db)``: ``facts`` is an array of padded
    direct-value and equivalence facts, ``fast_db`` holds the lookup maps
    (values, givens, neighbors, parents, depths) used by :meth:`answer`.
    Higher ``difficulty`` uses more variables and values.
    """
    difficulty = min(self.num_vars, difficulty)
    num_used_vars = min(difficulty + 10, self.num_vars)
    num_used_vals = min(int(np.sqrt(difficulty+10)), self.num_vals)
    used_vars = random.sample(self.vars, num_used_vars)
    used_vals = np.random.choice(self.vals, num_used_vals, replace=False)
    value_list = np.random.choice(used_vals, num_used_vars, replace=True)
    state = {}               # var -> its true value
    given_values = {}        # root var of each class -> directly-given value
    given_equivalences = []  # (var, var) equality facts
    neighbors = defaultdict(list)
    equivalence_classes = defaultdict(list)
    for var, val in zip(used_vars, value_list):
        equivalence_classes[val].append(var)
        state[var] = val
    for val, vs in equivalence_classes.items():
        # One variable per class gets its value stated directly; the rest
        # are connected to it through a random tree of equivalences.
        root = random.choice(vs)
        given_values[root] = val
        # NOTE(review): `point_to` receives the root *variable tuple* while
        # the returned tree edges are index pairs -- confirm random_tree's
        # contract expects that.
        tree = random_tree(0, len(vs), point_to=root if self.easy else None)
        given_equivalences.extend([(vs[i], vs[j]) for i, j in tree])
    for x, y in given_equivalences:
        neighbors[x].append(y)
        neighbors[y].append(x)
    facts = np.array(
        [self.pad(tuple(var) + (val,), self.fact_length) for var, val in given_values.items()] +
        [self.pad(tuple(var1) + tuple(var2), self.fact_length) for var1, var2 in given_equivalences])
    # Derive parent links and depths by searching outward from each root.
    parents = {}
    depths = {}
    for y in given_values:
        new_parents, new_depths = dfs(neighbors, y)
        parents.update(new_parents)
        depths.update(new_depths)
    fast_db = {"givens": given_values, "neighbors": neighbors, "values": state, "inverse": equivalence_classes,
               'depths': depths, 'parents':parents,
               "used_vars":used_vars}
    return facts, fast_db
def classify_question(self, Q, fast_db):
    """Label Q by its query symbol, suffixed with the variable's depth for compound queries."""
    Q = tuple(Q)
    symbol = self.repr_symbol(Q[0])
    if Q[0] in self.simple_question_tokens:
        return symbol
    depth = fast_db['depths'][Q[1:1 + self.length]]
    return "{}{}".format(symbol, depth)
def make_q(self, fast_db):
    """Sample a random (value/parent/depth) question about a used variable."""
    query_token = random.choice([self.value_query, self.parent_query, self.depth_query])
    variable = random.choice(fast_db["used_vars"])
    return self.pad((query_token,) + variable, self.question_length)
def answer(self, Q, fast_db):
    """Ground-truth answer for question Q against a database from make_dbs."""
    Q = tuple(Q)
    # First token is the query type; the next `length` tokens name a variable.
    x = Q[1:1+self.length]
    if Q[0] == self.simple_value_query:
        # Directly-given value (defined only for class roots).
        return self.pad(fast_db["givens"].get(x, idk))
    elif Q[0] == self.neighbor_query:
        neighbors = fast_db["neighbors"].get(x, [])
        if neighbors:
            # Any neighbor is acceptable, so sample one.
            return self.pad(random.choice(neighbors))
        else:
            return self.pad(idk)
    elif Q[0] == self.depth_query:
        if x in fast_db["depths"]:
            return self.pad(self.encode_n(fast_db["depths"][x]))
        else:
            return self.pad(idk)
    elif Q[0] == self.parent_query:
        return self.pad(fast_db["parents"].get(x, idk))
    elif Q[0] == self.value_query:
        return self.pad(fast_db["values"].get(x, idk))
    else:
        # Edge query: Q holds two variables; answer `zero` iff they are linked.
        if Q[:self.length] in fast_db["neighbors"]:
            if Q[self.length:2*self.length] in fast_db["neighbors"][Q[:self.length]]:
                return self.pad(self.zero)
        return self.pad(idk)
def all_questions(self, fast_db):
    """Yield one value-query per variable in the task vocabulary."""
    yield from ([self.value_query, variable] for variable in self.vars)
| [
"numpy.isin",
"numpy.minimum",
"random.sample",
"random.choice",
"collections.defaultdict",
"numpy.random.randint",
"numpy.random.choice",
"amplification.tasks.core.sequences",
"numpy.all",
"numpy.sqrt"
] | [((339, 366), 'numpy.random.randint', 'np.random.randint', (['(a + 1)', 'b'], {}), '(a + 1, b)\n', (356, 366), True, 'import numpy as np\n'), ((377, 404), 'numpy.random.randint', 'np.random.randint', (['a', 'split'], {}), '(a, split)\n', (394, 404), True, 'import numpy as np\n'), ((417, 444), 'numpy.random.randint', 'np.random.randint', (['split', 'b'], {}), '(split, b)\n', (434, 444), True, 'import numpy as np\n'), ((2860, 2901), 'numpy.minimum', 'np.minimum', (['(self.zero + n)', 'self.largest_d'], {}), '(self.zero + n, self.largest_d)\n', (2870, 2901), True, 'import numpy as np\n'), ((7104, 7143), 'random.sample', 'random.sample', (['self.vars', 'num_used_vars'], {}), '(self.vars, num_used_vars)\n', (7117, 7143), False, 'import random\n'), ((7164, 7221), 'numpy.random.choice', 'np.random.choice', (['self.vals', 'num_used_vals'], {'replace': '(False)'}), '(self.vals, num_used_vals, replace=False)\n', (7180, 7221), True, 'import numpy as np\n'), ((7243, 7299), 'numpy.random.choice', 'np.random.choice', (['used_vals', 'num_used_vars'], {'replace': '(True)'}), '(used_vals, num_used_vars, replace=True)\n', (7259, 7299), True, 'import numpy as np\n'), ((7397, 7414), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7408, 7414), False, 'from collections import defaultdict\n'), ((7445, 7462), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7456, 7462), False, 'from collections import defaultdict\n'), ((8955, 9025), 'random.choice', 'random.choice', (['[self.value_query, self.parent_query, self.depth_query]'], {}), '([self.value_query, self.parent_query, self.depth_query])\n', (8968, 9025), False, 'import random\n'), ((9038, 9073), 'random.choice', 'random.choice', (["fast_db['used_vars']"], {}), "(fast_db['used_vars'])\n", (9051, 9073), False, 'import random\n'), ((2412, 2446), 'amplification.tasks.core.sequences', 'sequences', (['self.chars', 'self.length'], {}), '(self.chars, self.length)\n', (2421, 2446), False, 'from 
amplification.tasks.core import idk, Task, sequences\n'), ((2975, 3021), 'numpy.isin', 'np.isin', (['Qs[:, 0]', 'self.simple_question_tokens'], {}), '(Qs[:, 0], self.simple_question_tokens)\n', (2982, 3021), True, 'import numpy as np\n'), ((3919, 3954), 'numpy.all', 'np.all', (['(x >= self.min_char)'], {'axis': '(-1)'}), '(x >= self.min_char, axis=-1)\n', (3925, 3954), True, 'import numpy as np\n'), ((3956, 3991), 'numpy.all', 'np.all', (['(x <= self.max_char)'], {'axis': '(-1)'}), '(x <= self.max_char, axis=-1)\n', (3962, 3991), True, 'import numpy as np\n'), ((7664, 7681), 'random.choice', 'random.choice', (['vs'], {}), '(vs)\n', (7677, 7681), False, 'import random\n'), ((2199, 2221), 'numpy.sqrt', 'np.sqrt', (['self.num_vars'], {}), '(self.num_vars)\n', (2206, 2221), True, 'import numpy as np\n'), ((3066, 3094), 'numpy.isin', 'np.isin', (['Qs[:, 0]', 'self.vars'], {}), '(Qs[:, 0], self.vars)\n', (3073, 3094), True, 'import numpy as np\n'), ((3111, 3139), 'numpy.isin', 'np.isin', (['Qs[:, 1]', 'self.vars'], {}), '(Qs[:, 1], self.vars)\n', (3118, 3139), True, 'import numpy as np\n'), ((7044, 7068), 'numpy.sqrt', 'np.sqrt', (['(difficulty + 10)'], {}), '(difficulty + 10)\n', (7051, 7068), True, 'import numpy as np\n'), ((9476, 9500), 'random.choice', 'random.choice', (['neighbors'], {}), '(neighbors)\n', (9489, 9500), False, 'import random\n')] |
import time
import numpy as np
from multiprocessing import Pool, cpu_count
from KosarajuSCC import Node, Graph
# Variation of Papadimitriou's 2SAT algorithm with less time complexity.
def papadimitriou(n_vars: int, clause_array: np.ndarray, variable_dict: dict) -> "list | None":
    """Randomized local-search 2-SAT solver (variation of Papadimitriou's algorithm).

    Literal encoding: index i (0 <= i < n_vars) is variable x_{i+1}; index
    i + n_vars is its negation.

    Args:
        n_vars: number of boolean variables.
        clause_array: (n_clauses, 2) integer array of literal indices.
        variable_dict: variable index -> set of clause indices involving it.

    Returns:
        A satisfying assignment (list of bools, one per variable), or None if
        none was found within 2 * n_vars ** 2 random-walk steps.
    """
    # Random initial assignment; the second half mirrors the negations.
    assignment = np.random.choice(a=[False, True], size=n_vars, replace=True)
    assignment = np.append(assignment, np.logical_not(assignment))
    # Clauses with index <= checked_until are known satisfied, except those in
    # clause_to_check (they involve a recently flipped variable).
    # BUGFIX: start at -1, not 0 -- otherwise clause 0 is never checked.
    checked_until = -1
    clause_to_check = set()
    for count in range(2 * n_vars ** 2):
        satisfy = True  # remains True iff every (re-)checked clause held
        for count_2, clause in enumerate(clause_array):
            # Only (re-)check not-yet-verified clauses and those that may
            # have become false after a recent flip.
            if count_2 > checked_until or count_2 in clause_to_check:
                # Clause unsatisfied: flip one of its variables at random.
                if not np.sum(assignment[clause]):
                    var_to_change = np.random.choice(a=clause) % n_vars
                    temp = assignment[var_to_change]
                    assignment[var_to_change] = not temp
                    assignment[var_to_change + n_vars] = temp
                    satisfy = False
                    if count_2 - 1 > checked_until:
                        # Boundary advanced: everything before count_2 is now
                        # verified; re-initialise the re-check list.
                        checked_until = count_2 - 1
                        # BUGFIX: copy the set -- assigning the reference
                        # aliased variable_dict's entry, which the .update()
                        # call below would then silently mutate.
                        clause_to_check = set(variable_dict[var_to_change])
                        if checked_until % 1000 == 0:
                            print(f"Checked until = {checked_until}, count = {count}")
                    else:
                        # Boundary unchanged: add the newly-affected clauses.
                        clause_to_check.update(variable_dict[var_to_change])
                    break
        if satisfy:
            # Sanity check: every clause must hold under the assignment.
            for idx, clause in enumerate(clause_array):
                assert np.sum(assignment[clause]), \
                    f"Clause {idx} ({clause_array[idx]}) failed with {assignment[clause]}"
            print("Assignment found, terminating all processes")
            return assignment[:n_vars].tolist()
    return None
def quit_processes():
    """Callback intended to terminate the worker pool once a result arrives.

    The termination call is commented out, so this is currently a no-op.
    NOTE(review): as an ``apply_async`` callback it would be invoked with the
    result as an argument, which this zero-argument signature cannot accept --
    confirm before re-enabling the multiprocessing path.
    """
    # p.terminate() # kill all pool workers
    pass
if __name__ == '__main__':
    # Method 1: reduce 2SAT to the determination of SCCs.
    datasets = range(1, 7, 1)
    for dataset in datasets:
        print(f"Dataset {dataset} begins:")
        with open(f"AssignmentData/Data_2SAT_{str(dataset)}.txt", 'r') as f:
            for i, line in enumerate(f):
                if i == 0:
                    # Build graph: first line holds the number of variables.
                    nb_vars = int(line.strip())
                    nb_nodes = 2*nb_vars
                    graph = Graph(nb_nodes)
                    for j in range(nb_nodes):
                        graph.add_node(Node(j))
                else:
                    # Add edges
                    # Variable x1, x2, ..., xn correspond respectively to node 0, 1, ..., n - 1
                    # Variable not x1, not x2, ..., not xn correspond respectively to node n, n + 1, ..., 2n - 1
                    node1, node2 = [int(node) for node in line.strip().split(' ')]
                    # Edge 1: not node 1 -> node 2, edge 2: not node 2 -> node 1
                    edges = [[-node1, node2], [-node2, node1]]
                    # Map 1-based signed literals onto the 0-based node ids above.
                    for a in range(2):
                        for b in range(2):
                            if edges[a][b] < 0:
                                edges[a][b] = abs(edges[a][b]) + nb_vars - 1
                            else:
                                edges[a][b] -= 1
                    for edge in edges:
                        graph.vertices[edge[0]].neighbors.append(graph.vertices[edge[1]])
                        graph.vertices[edge[1]].neighbors_reversed.append(graph.vertices[edge[0]])
        start = time.time()
        leaders = graph.kosaraju(recursion_dfs=False)
        satisfiable = True
        # Unsatisfiable iff some variable shares an SCC with its own negation.
        for var in range(nb_vars):
            if leaders[var] == leaders[var + nb_vars]:
                satisfiable = False
                break
        print(f"Satisfiable = {satisfiable}, time consumed = {round(time.time() - start, 2)}s")
    # Method 2: Papadimitriou's 2-SAT algorithm
    with open(f"AssignmentData/Data_2SAT_6.txt", 'r') as f:
        var_dict = dict()  # Only to suppress automatic code inspection errors
        for i, line in enumerate(f):
            if i == 0:
                nb_vars = int(line.strip())
                # NOTE(review): allocates nb_vars rows, i.e. assumes the file
                # has exactly as many clauses as variables -- confirm.
                clauses = np.empty((nb_vars, 2), dtype=np.dtype('i4'))
                # var_dict: variable (not their negation) index: [indices of clauses that involving this variable]
                var_dict = dict(zip(range(nb_vars), [[] for _ in range(nb_vars)]))
            else:
                # Variable x1, x2, ..., xn correspond respectively to node 0, 1, ..., n - 1
                # Variable not x1, not x2, ..., not xn correspond respectively to node n, n + 1, ..., 2n - 1
                vars_involved = [abs(int(var)) - 1 for var in line.strip().split(' ')]
                var_dict[vars_involved[0]].append(i - 1)
                var_dict[vars_involved[1]].append(i - 1)
                clauses[i - 1] = [abs(int(var)) + nb_vars - 1 if var[0] == '-' else int(var) - 1
                                  for var in line.strip().split(' ')]
        # Transform dict values from list to set, to avoid duplicate later
        for key in var_dict.keys():
            var_dict[key] = set(var_dict[key])
        start = time.time()
        # nb_workers = cpu_count()
        # with Pool(nb_workers) as p:
        #     for i in range(nb_workers):
        #         result = p.apply_async(papadimitriou, args=(nb_vars, clauses, var_dict), callback=quit_processes)
        #     print(f"Workers: {nb_workers}, result = {result.get()}, time consumed = {round(time.time() - start, 2)}s")
        result = papadimitriou(nb_vars, clauses, var_dict)
        print(f"Result = {result}, time consumed = {round(time.time() - start, 2)}s")
| [
"numpy.sum",
"numpy.logical_not",
"numpy.dtype",
"KosarajuSCC.Graph",
"time.time",
"KosarajuSCC.Node",
"numpy.random.choice"
] | [((338, 398), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[False, True]', 'size': 'n_vars', 'replace': '(True)'}), '(a=[False, True], size=n_vars, replace=True)\n', (354, 398), True, 'import numpy as np\n'), ((5777, 5788), 'time.time', 'time.time', ([], {}), '()\n', (5786, 5788), False, 'import time\n'), ((438, 464), 'numpy.logical_not', 'np.logical_not', (['assignment'], {}), '(assignment)\n', (452, 464), True, 'import numpy as np\n'), ((4120, 4131), 'time.time', 'time.time', ([], {}), '()\n', (4129, 4131), False, 'import time\n'), ((2171, 2197), 'numpy.sum', 'np.sum', (['assignment[clause]'], {}), '(assignment[clause])\n', (2177, 2197), True, 'import numpy as np\n'), ((1084, 1110), 'numpy.sum', 'np.sum', (['assignment[clause]'], {}), '(assignment[clause])\n', (1090, 1110), True, 'import numpy as np\n'), ((2985, 3000), 'KosarajuSCC.Graph', 'Graph', (['nb_nodes'], {}), '(nb_nodes)\n', (2990, 3000), False, 'from KosarajuSCC import Node, Graph\n'), ((1148, 1174), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'clause'}), '(a=clause)\n', (1164, 1174), True, 'import numpy as np\n'), ((4804, 4818), 'numpy.dtype', 'np.dtype', (['"""i4"""'], {}), "('i4')\n", (4812, 4818), True, 'import numpy as np\n'), ((6230, 6241), 'time.time', 'time.time', ([], {}), '()\n', (6239, 6241), False, 'import time\n'), ((3086, 3093), 'KosarajuSCC.Node', 'Node', (['j'], {}), '(j)\n', (3090, 3093), False, 'from KosarajuSCC import Node, Graph\n'), ((4429, 4440), 'time.time', 'time.time', ([], {}), '()\n', (4438, 4440), False, 'import time\n')] |
import math
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import keras_cv
from keras_cv.metrics import coco
def produce_random_data(include_confidence=False, num_images=128, num_classes=20):
    """Generates a fake batch of bounding boxes for use in this benchmark.

    Returns:
        a tensor of size [num_images, 25, 5/6]. This represents num_images
        images, 25 bboxes and 5/6 dimensions per bbox depending on whether
        include_confidence is set.
    """
    images = []
    for _ in range(num_images):
        # Each image gets a random number of boxes in [0, 24].
        num_boxes = math.floor(25 * random.uniform(0, 1))
        classes = np.floor(np.random.rand(num_boxes, 1) * num_classes)
        bboxes = np.random.rand(num_boxes, 4)
        # Box layout: 4 coords + class [+ confidence].
        boxes = np.concatenate([bboxes, classes], axis=-1)
        if include_confidence:
            confidence = np.random.rand(num_boxes, 1)
            boxes = np.concatenate([boxes, confidence], axis=-1)
        images.append(
            keras_cv.utils.bounding_box.xywh_to_corners(
                tf.constant(boxes, dtype=tf.float32)
            )
        )
    # Pad every image to exactly 25 boxes so the batch can be stacked.
    images = [
        keras_cv.bounding_box.pad_batch_to_shape(x, [25, images[0].shape[1]])
        for x in images
    ]
    return tf.stack(images, axis=0)
# Initial data (overwritten per-size inside the loop below).
y_true = produce_random_data()
y_pred = produce_random_data(include_confidence=True)
class_ids = list(range(20))

# Dataset sizes to benchmark.
n_images = [128, 256, 512, 512 + 256, 1024]

update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []

for images in n_images:
    y_true = produce_random_data(num_images=images)
    y_pred = produce_random_data(num_images=images, include_confidence=True)
    metric = coco.COCOMeanAveragePrecision(class_ids)
    # warm up (avoids measuring one-time graph/tracing costs)
    metric.update_state(y_true, y_pred)
    metric.result()

    # Time update_state() and result() separately.
    start = time.time()
    metric.update_state(y_true, y_pred)
    update_state_done = time.time()
    r = metric.result()
    end = time.time()

    update_state_runtimes.append(update_state_done - start)
    result_runtimes.append(end - update_state_done)
    end_to_end_runtimes.append(end - start)

print("end_to_end_runtimes", end_to_end_runtimes)

# Collect the measurements and plot runtime versus dataset size.
data = pd.DataFrame(
    {
        "n_images": n_images,
        "update_state_runtimes": update_state_runtimes,
        "result_runtimes": result_runtimes,
        "end_to_end_runtimes": end_to_end_runtimes,
    }
)

sns.lineplot(data=data, x="n_images", y="update_state_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()

sns.lineplot(data=data, x="n_images", y="result_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()

sns.lineplot(data=data, x="n_images", y="end_to_end_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
| [
"pandas.DataFrame",
"seaborn.lineplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"keras_cv.bounding_box.pad_batch_to_shape",
"random.uniform",
"keras_cv.metrics.coco.COCOMeanAveragePrecision",
"tensorflow.constant",
"time.time",
"tensorflow.stack",
"numpy.random.rand",
"matplotlib.... | [((2191, 2363), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_images': n_images, 'update_state_runtimes': update_state_runtimes,\n 'result_runtimes': result_runtimes, 'end_to_end_runtimes':\n end_to_end_runtimes}"], {}), "({'n_images': n_images, 'update_state_runtimes':\n update_state_runtimes, 'result_runtimes': result_runtimes,\n 'end_to_end_runtimes': end_to_end_runtimes})\n", (2203, 2363), True, 'import pandas as pd\n'), ((2402, 2466), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': '"""n_images"""', 'y': '"""update_state_runtimes"""'}), "(data=data, x='n_images', y='update_state_runtimes')\n", (2414, 2466), True, 'import seaborn as sns\n'), ((2467, 2497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Images"""'], {}), "('Number of Images')\n", (2477, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2544), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""update_state() runtime (seconds)"""'], {}), "('update_state() runtime (seconds)')\n", (2508, 2544), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2583), 'matplotlib.pyplot.title', 'plt.title', (['"""Runtime of update_state()"""'], {}), "('Runtime of update_state()')\n", (2554, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2654), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': '"""n_images"""', 'y': '"""result_runtimes"""'}), "(data=data, x='n_images', y='result_runtimes')\n", (2608, 2654), True, 'import seaborn as sns\n'), ((2655, 2685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Images"""'], {}), "('Number of Images')\n", (2665, 2685), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2726), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""result() runtime (seconds)"""'], {}), "('result() runtime (seconds)')\n", (2696, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2759), 
'matplotlib.pyplot.title', 'plt.title', (['"""Runtime of result()"""'], {}), "('Runtime of result()')\n", (2736, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2760, 2770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2768, 2770), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2834), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': '"""n_images"""', 'y': '"""end_to_end_runtimes"""'}), "(data=data, x='n_images', y='end_to_end_runtimes')\n", (2784, 2834), True, 'import seaborn as sns\n'), ((2835, 2865), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Images"""'], {}), "('Number of Images')\n", (2845, 2865), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""End to end runtime (seconds)"""'], {}), "('End to end runtime (seconds)')\n", (2876, 2908), True, 'import matplotlib.pyplot as plt\n'), ((2909, 2969), 'matplotlib.pyplot.title', 'plt.title', (['"""Runtimes of update_state() followed by result()"""'], {}), "('Runtimes of update_state() followed by result()')\n", (2918, 2969), True, 'import matplotlib.pyplot as plt\n'), ((2970, 2980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2978, 2980), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1307), 'tensorflow.stack', 'tf.stack', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (1291, 1307), True, 'import tensorflow as tf\n'), ((1709, 1749), 'keras_cv.metrics.coco.COCOMeanAveragePrecision', 'coco.COCOMeanAveragePrecision', (['class_ids'], {}), '(class_ids)\n', (1738, 1749), False, 'from keras_cv.metrics import coco\n'), ((1837, 1848), 'time.time', 'time.time', ([], {}), '()\n', (1846, 1848), False, 'import time\n'), ((1913, 1924), 'time.time', 'time.time', ([], {}), '()\n', (1922, 1924), False, 'import time\n'), ((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((753, 781), 'numpy.random.rand', 'np.random.rand', (['num_boxes', '(4)'], {}), 
'(num_boxes, 4)\n', (767, 781), True, 'import numpy as np\n'), ((798, 840), 'numpy.concatenate', 'np.concatenate', (['[bboxes, classes]'], {'axis': '(-1)'}), '([bboxes, classes], axis=-1)\n', (812, 840), True, 'import numpy as np\n'), ((1172, 1241), 'keras_cv.bounding_box.pad_batch_to_shape', 'keras_cv.bounding_box.pad_batch_to_shape', (['x', '[25, images[0].shape[1]]'], {}), '(x, [25, images[0].shape[1]])\n', (1212, 1241), False, 'import keras_cv\n'), ((897, 925), 'numpy.random.rand', 'np.random.rand', (['num_boxes', '(1)'], {}), '(num_boxes, 1)\n', (911, 925), True, 'import numpy as np\n'), ((946, 990), 'numpy.concatenate', 'np.concatenate', (['[boxes, confidence]'], {'axis': '(-1)'}), '([boxes, confidence], axis=-1)\n', (960, 990), True, 'import numpy as np\n'), ((643, 663), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (657, 663), False, 'import random\n'), ((692, 720), 'numpy.random.rand', 'np.random.rand', (['num_boxes', '(1)'], {}), '(num_boxes, 1)\n', (706, 720), True, 'import numpy as np\n'), ((1087, 1123), 'tensorflow.constant', 'tf.constant', (['boxes'], {'dtype': 'tf.float32'}), '(boxes, dtype=tf.float32)\n', (1098, 1123), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by <NAME>
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: <NAME>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import time
class mcmc(_algorithm):
    """
    This class holds the MarkovChainMonteCarlo (MCMC) algorithm, based on:
    <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (1953)
    Equation of state calculations by fast computing machines, J. Chem. Phys.
    """

    def __init__(self, *args, **kwargs):
        """
        Input
        ----------
        spot_setup: class
            model: function
                Should be callable with a parameter combination of the parameter-function
                and return an list of simulation results (as long as evaluation list)
            parameter: function
                When called, it should return a random parameter combination. Which can
                be e.g. uniform or Gaussian
            objectivefunction: function
                Should return the objectivefunction for a given list of a model simulation and
                observation.
            evaluation: function
                Should return the true values as return by the model.

        dbname: str
            * Name of the database where parameter, objectivefunction value and simulation results will be saved.

        dbformat: str
            * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
            * csv: A csv file will be created, which you can import afterwards.

        parallel: str
            * seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
            * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).

        save_sim: boolean
            * True: Simulation results will be saved
            * False: Simulation results will not be saved
        """
        # MCMC maximizes the likelihood-like objective function.
        kwargs['optimization_direction'] = 'maximize'
        kwargs['algorithm_name'] = 'Markov Chain Monte Carlo (MCMC) sampler'
        super(mcmc, self).__init__(*args, **kwargs)

    def check_par_validity(self, par):
        """Clip every parameter into [min_bound, max_bound] (mutates and returns par)."""
        if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
            for i in range(len(par)):
                if par[i] < self.min_bound[i]:
                    par[i] = self.min_bound[i]
                if par[i] > self.max_bound[i]:
                    par[i] = self.max_bound[i]
        else:
            print('ERROR: Bounds have not the same lenghts as Parameterarray')
        return par

    def check_par_validity_reflect(self, par):
        """Reflect out-of-bound parameters back into [min_bound, max_bound], then clip any leftover overshoot."""
        if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
            for i in range(len(par)):
                if par[i] < self.min_bound[i]:
                    par[i] = self.min_bound[i] + (self.min_bound[i]- par[i])
                elif par[i] > self.max_bound[i]:
                    par[i] = self.max_bound[i] - (par[i] - self.max_bound[i])
            # Postprocessing if reflecting jumped out of bounds
            for i in range(len(par)):
                if par[i] < self.min_bound[i]:
                    par[i] = self.min_bound[i]
                if par[i] > self.max_bound[i]:
                    par[i] = self.max_bound[i]
        else:
            print('ERROR: Bounds have not the same lenghts as Parameterarray')
        return par

    def get_new_proposal_vector(self,old_par):
        """Draw a Gaussian jump around old_par (scale = per-parameter stepsizes) reflected into the bounds."""
        new_par = np.random.normal(loc=old_par, scale=self.stepsizes)
        #new_par = self.check_par_validity(new_par)
        new_par = self.check_par_validity_reflect(new_par)
        return new_par

    def update_mcmc_status(self,par,like,sim,cur_chain):
        """Record par/like/sim as the current accepted state of chain cur_chain."""
        self.bestpar[cur_chain]=par
        self.bestlike[cur_chain]=like
        self.bestsim[cur_chain]=sim

    def sample(self, repetitions,nChains=1):
        """Run the Metropolis random walk for `repetitions` model evaluations across nChains chains."""
        self.set_repetiton(repetitions)
        print('Starting the MCMC algotrithm with '+str(repetitions)+ ' repetitions...')
        # Prepare storing MCMC chain as array of arrays.
        self.nChains = int(nChains)
        #Ensure initialisation of chains and database
        self.burnIn = self.nChains
        # define stepsize of MCMC.
        self.stepsizes = self.parameter()['step']  # array of stepsizes
        # Metropolis-Hastings iterations: per-chain best state trackers.
        self.bestpar=np.array([[np.nan]*len(self.stepsizes)]*self.nChains)
        self.bestlike=[[-np.inf]]*self.nChains
        self.bestsim=[[np.nan]]*self.nChains
        self.accepted=np.zeros(self.nChains)
        self.nChainruns=[[0]]*self.nChains
        self.min_bound, self.max_bound = self.parameter(
        )['minbound'], self.parameter()['maxbound']
        print('Initialize ', self.nChains, ' chain(s)...')
        self.iter=0
        # One random start point per chain.
        param_generator = ((curChain,self.parameter()['random']) for curChain in range(int(self.nChains)))
        for curChain,randompar,simulations in self.repeat(param_generator):
            # A function that calculates the fitness of the run and the manages the database
            like = self.postprocessing(self.iter, randompar, simulations, chains=curChain)
            self.update_mcmc_status(randompar, like, simulations, curChain)
            self.iter+=1
        intervaltime = time.time()
        print('Beginn of Random Walk')
        # Walk through chains
        while self.iter <= repetitions - self.burnIn:
            param_generator = ((curChain,self.get_new_proposal_vector(self.bestpar[curChain])) for curChain in range(int(self.nChains)))
            for cChain,randompar,simulations in self.repeat(param_generator):
                # A function that calculates the fitness of the run and the manages the database
                like = self.postprocessing(self.iter, randompar, simulations, chains=cChain)
                logMetropHastRatio = np.abs(self.bestlike[cChain])/np.abs(like)
                # NOTE(review): a textbook Metropolis step draws u ~ U(0, 1);
                # low=0.3 biases the acceptance test -- confirm intended.
                u = np.random.uniform(low=0.3, high=1)
                if logMetropHastRatio>1.0 or logMetropHastRatio>u:
                    self.update_mcmc_status(randompar,like,simulations,cChain)
                    self.accepted[cChain] += 1  # monitor acceptance
                self.iter+=1
                # Progress bar
                acttime = time.time()
                #Refresh MCMC progressbar every two second
                if acttime - intervaltime >= 2 and self.iter >=2:
                    # NOTE(review): this first `text` value is immediately
                    # overwritten and never printed -- confirm intended.
                    text = '%i of %i (best like=%g)' % (
                        self.iter + self.burnIn, repetitions, self.status.objectivefunction_max)
                    text = "Acceptance rates [%] =" +str(np.around((self.accepted)/float(((self.iter-self.burnIn)/self.nChains)),decimals=4)*100).strip('array([])')
                    print(text)
                    intervaltime = time.time()
        self.final_call()
| [
"numpy.random.uniform",
"numpy.abs",
"numpy.zeros",
"time.time",
"numpy.random.normal"
] | [((3632, 3683), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'old_par', 'scale': 'self.stepsizes'}), '(loc=old_par, scale=self.stepsizes)\n', (3648, 3683), True, 'import numpy as np\n'), ((4704, 4726), 'numpy.zeros', 'np.zeros', (['self.nChains'], {}), '(self.nChains)\n', (4712, 4726), True, 'import numpy as np\n'), ((5467, 5478), 'time.time', 'time.time', ([], {}), '()\n', (5476, 5478), False, 'import time\n'), ((6140, 6174), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.3)', 'high': '(1)'}), '(low=0.3, high=1)\n', (6157, 6174), True, 'import numpy as np\n'), ((6508, 6519), 'time.time', 'time.time', ([], {}), '()\n', (6517, 6519), False, 'import time\n'), ((7003, 7014), 'time.time', 'time.time', ([], {}), '()\n', (7012, 7014), False, 'import time\n'), ((6077, 6106), 'numpy.abs', 'np.abs', (['self.bestlike[cChain]'], {}), '(self.bestlike[cChain])\n', (6083, 6106), True, 'import numpy as np\n'), ((6107, 6119), 'numpy.abs', 'np.abs', (['like'], {}), '(like)\n', (6113, 6119), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from IPython.display import display
# Fix the RNG seed so the generated data is reproducible.
np.random.seed(100)
# setting up a 9 x 4 matrix of standard-normal samples
rows = 9
cols = 4
a = np.random.randn(rows,cols)
df = pd.DataFrame(a)
display(df)
# Column-wise mean and standard deviation.
print(df.mean())
print(df.std())
display(df**2)
# Give the columns and index meaningful labels.
df.columns = ['First', 'Second', 'Third', 'Fourth']
df.index = np.arange(9)
display(df)
print(df['Second'].mean() )
print(df.info())
print(df.describe())
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# Line plot of the cumulative sums of each column.
df.cumsum().plot(lw=2.0, figsize=(10,6))
#plt.show()
df.plot.bar(figsize=(10,6), rot=15)
#plt.show()
# A second frame built from a 4x4 integer range.
b = np.arange(16).reshape((4,4))
print(b)
df1 = pd.DataFrame(b)
| [
"pandas.DataFrame",
"numpy.random.seed",
"numpy.random.randn",
"IPython.display.display",
"numpy.arange",
"pylab.plt.style.use"
] | [((75, 94), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (89, 94), True, 'import numpy as np\n'), ((145, 172), 'numpy.random.randn', 'np.random.randn', (['rows', 'cols'], {}), '(rows, cols)\n', (160, 172), True, 'import numpy as np\n'), ((177, 192), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (189, 192), True, 'import pandas as pd\n'), ((193, 204), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (200, 204), False, 'from IPython.display import display\n'), ((238, 254), 'IPython.display.display', 'display', (['(df ** 2)'], {}), '(df ** 2)\n', (245, 254), False, 'from IPython.display import display\n'), ((318, 330), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (327, 330), True, 'import numpy as np\n'), ((332, 343), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (339, 343), False, 'from IPython.display import display\n'), ((438, 462), 'pylab.plt.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (451, 462), False, 'from pylab import plt, mpl\n'), ((656, 671), 'pandas.DataFrame', 'pd.DataFrame', (['b'], {}), '(b)\n', (668, 671), True, 'import pandas as pd\n'), ((611, 624), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (620, 624), True, 'import numpy as np\n')] |
import ROOT as R
from gna.bindings import patchROOTClass
import numpy as N
@patchROOTClass(R.DataType.Hist('DataType'), '__str__')
def DataType__Hist____str__(self):
    """Human-readable summary of a 1d/2d histogram DataType."""
    dt=self.cast()
    if len(dt.shape)==1:
        edges = N.asanyarray(dt.edges)
        if edges.size<2:
            return 'hist, {:3d} bins, edges undefined'.format(dt.shape[0])
        width = edges[1:]-edges[:-1]
        # Constant bin width is printed explicitly, otherwise marked variable.
        if N.allclose(width, width[0]):
            suffix='width {}'.format(width[0])
        else:
            suffix='variable width'
        return 'hist, {:3d} bins, edges {}->{}, {}'.format(dt.shape[0], edges[0], edges[-1], suffix)
    elif len(dt.shape)==2:
        edges1 = N.asanyarray(dt.edgesNd[0])
        edges2 = N.asanyarray(dt.edgesNd[1])
        if edges1.size<2 or edges2.size<2:
            # NOTE(review): message mentions only dt.shape[0] for a 2d hist -- confirm intended.
            return 'hist, {:3d} bins, edges undefined'.format(dt.shape[0])
        # width1 = edges1[1:]-edges1[:-1]
        # width2 = edges2[1:]-edges2[:-1]
        # if N.allclose(width, width[0]):
        # suffix='width {}'.format(width[0])
        # else:
        # suffix='variable width'
        return 'hist2d, {:d}x{:d}={:d} bins, edges {}->{} and {}->{}'.format(
            dt.shape[0], dt.shape[1], dt.size(), edges1[0], edges1[-1], edges2[0], edges2[-1])
    return 'histogram, undefined'
@patchROOTClass(R.DataType.Points('DataType'), '__str__')
def DataType__Points____str__(self):
    """Human-readable summary of an array (points) DataType."""
    dt = self.cast()
    if not len(dt.shape):
        return 'array, undefined'
    dims = 'x'.join(str(d) for d in dt.shape)
    return 'array {}d, shape {}, size {:3d}'.format(len(dt.shape), dims, dt.size())
@patchROOTClass
def DataType____str__(self):
    """Dispatch to the points/hist printer based on the type kind."""
    if not self.defined():
        return 'datatype, undefined'
    if self.kind==1:
        return str(self.points())
    if self.kind==2:
        return str(self.hist())
    return 'datatype, unsupported'
@patchROOTClass
def DataType__isHist(self):
    """True when the type is defined and of histogram kind (kind==2)."""
    if not self.defined():
        return False
    return self.kind==2
@patchROOTClass
def DataType__isPoints(self):
    """True when the type is defined and of points/array kind (kind==1)."""
    if not self.defined():
        return False
    return self.kind==1
@patchROOTClass
def DataType____eq__(self, other):
    """Structural comparison: kind, shape and (for histograms) bin edges."""
    if self.kind!=other.kind:
        return False
    if list(self.shape)!=list(other.shape):
        return False
    if self.kind!=2:
        # Non-histogram types match on kind and shape alone.
        return True
    # Histograms must also agree on every axis' bin edges (tight tolerance).
    return all(
        N.allclose(N.asanyarray(e1), N.asanyarray(e2), rtol=0, atol=1.e-14)
        for (e1, e2) in zip(self.edgesNd, other.edgesNd)
    )
@patchROOTClass
def DataType____ne__(self, other):
    """Negation of DataType____eq__ so that `!=` stays consistent with `==`."""
    is_equal = DataType____eq__(self, other)
    return not is_equal
| [
"ROOT.DataType.Hist",
"numpy.asanyarray",
"ROOT.DataType.Points",
"numpy.allclose"
] | [((93, 120), 'ROOT.DataType.Hist', 'R.DataType.Hist', (['"""DataType"""'], {}), "('DataType')\n", (108, 120), True, 'import ROOT as R\n'), ((1323, 1352), 'ROOT.DataType.Points', 'R.DataType.Points', (['"""DataType"""'], {}), "('DataType')\n", (1340, 1352), True, 'import ROOT as R\n'), ((228, 250), 'numpy.asanyarray', 'N.asanyarray', (['dt.edges'], {}), '(dt.edges)\n', (240, 250), True, 'import numpy as N\n'), ((400, 427), 'numpy.allclose', 'N.allclose', (['width', 'width[0]'], {}), '(width, width[0])\n', (410, 427), True, 'import numpy as N\n'), ((2324, 2340), 'numpy.asanyarray', 'N.asanyarray', (['e1'], {}), '(e1)\n', (2336, 2340), True, 'import numpy as N\n'), ((2358, 2374), 'numpy.asanyarray', 'N.asanyarray', (['e2'], {}), '(e2)\n', (2370, 2374), True, 'import numpy as N\n'), ((672, 699), 'numpy.asanyarray', 'N.asanyarray', (['dt.edgesNd[0]'], {}), '(dt.edgesNd[0])\n', (684, 699), True, 'import numpy as N\n'), ((717, 744), 'numpy.asanyarray', 'N.asanyarray', (['dt.edgesNd[1]'], {}), '(dt.edgesNd[1])\n', (729, 744), True, 'import numpy as N\n'), ((2391, 2437), 'numpy.allclose', 'N.allclose', (['edges1', 'edges2'], {'rtol': '(0)', 'atol': '(1e-14)'}), '(edges1, edges2, rtol=0, atol=1e-14)\n', (2401, 2437), True, 'import numpy as N\n')] |
from abc import ABC, abstractmethod
import numpy as np
from gym.envs.mujoco import MujocoEnv
class MujocoWrapper(ABC, MujocoEnv):
    """A MujocoEnv that can be reset to an arbitrary observation,
    enabling single-step "oracle" queries of the true dynamics."""

    @abstractmethod
    def qposvel_from_obs(self, obs):
        """Split an observation into (qpos, qvel) arrays for `set_state`."""
        pass

    def set_state_from_obs(self, obs):
        """Overwrite the simulator state with the one encoded in `obs`."""
        qpos, qvel = self.qposvel_from_obs(obs)
        self.set_state(qpos, qvel)

    def oracle_step(self, state, action):
        """Step the true dynamics from `state` (not the env's own state).

        Returns (next_state, reward, done, info); on simulator failure it
        returns NaNs, done=True and flags the instability in `info`.
        """
        try:
            self.set_state_from_obs(state)
            next_state, reward, done, info = self.step(action)
        except Exception:
            # best-effort catch: mujoco raises various errors on divergent
            # states; was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit
            next_state = np.full_like(state, np.nan)
            reward = np.nan
            done = True
            info = {'Unstable dynamics': True}
        return next_state, reward, done, info

    def oracle_dynamics(self, state, action):
        """Like `oracle_step` but return only (next_state, reward)."""
        next_state, reward, _, _ = self.oracle_step(state, action)
        return next_state, reward
"numpy.full_like"
] | [((529, 556), 'numpy.full_like', 'np.full_like', (['state', 'np.nan'], {}), '(state, np.nan)\n', (541, 556), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
"""
General math module for crystal orientation related calculation.
Most of the conventions used in this module is based on:
D Rowenhorst et al.
Consistent representations of and conversions between 3D rotations
10.1088/0965-0393/23/8/083501
with the exceptions:
1. An orientation is always attached to a frame, and all calculations
between orientations can only be done when all of them are converted
to the same frame.
2. Always prefer SI units.
Conversion chain:
Rodrigues (angle,axis) <-> quaternion <-> Euler angles(ZXZ) <-> rotation matrix
"""
import concurrent.futures as cf
from dataclasses import dataclass, field
from typing import Union

import numpy as np

from hexomap.npmath import norm
from hexomap.npmath import normalize
from hexomap.npmath import random_three_vector
from hexomap.utility import isone
from hexomap.utility import iszero
from hexomap.utility import methdispatch
from hexomap.utility import standarize_euler
@dataclass
class Eulers:
    """
    Euler angles representation of an orientation (Bunge convention,
    z -> x -> z rotation order).

    Attributes
    ----------
    phi1, phi, phi2:
        The three Euler angles; standardized after construction to
        phi1 in [0, 2pi), phi in [0, pi], phi2 in [0, 2pi).
    in_radians:
        Whether the input angles are in radians; always True after
        __post_init__ standardization.
    """
    phi1: float  # [0, 2pi)
    phi: float   # [0, pi]
    phi2: float  # [0, 2pi)
    in_radians: bool = True
    order: str = 'zxz'
    convention: str = 'Bunge'

    def __post_init__(self):
        # force the Euler angles into the standard range (and radians)
        _euler = np.array([self.phi1, self.phi, self.phi2])
        self.phi1, self.phi, self.phi2 = standarize_euler(_euler,
                                                         self.in_radians,
                                                         )
        self.in_radians = True

    @property
    def as_array(self) -> np.ndarray:
        """Angles as a (3,) numpy array."""
        return np.array([self.phi1, self.phi, self.phi2])

    @property
    def as_matrix(self) -> np.ndarray:
        """
        Return the active rotation matrix.

        NOTE: Euler angles are inherently a passive description; the
        matrix is provided for backward compatibility only.
        """
        c1, s1 = np.cos(self.phi1), np.sin(self.phi1)
        c, s = np.cos(self.phi), np.sin(self.phi)
        c2, s2 = np.cos(self.phi2), np.sin(self.phi2)
        return np.array([
            [c1*c2 - s1*c*s2, -c1*s2 - s1*c*c2,  s1*s],
            [s1*c2 + c1*c*s2, -s1*s2 + c1*c*c2, -c1*s],
            [s*s2,             s*c2,                c],
        ])

    @staticmethod
    def from_matrix(m: np.ndarray) -> 'Eulers':
        """
        Initialize an Euler angle triplet from a given rotation matrix.

        Parameters
        ----------
        m: np.ndarray
            input rotation matrix (3x3)

        Returns
        -------
        Eulers
        """
        if isone(m[2, 2]**2):
            # degenerate case: rotation purely about z
            return Eulers(0.0, 0.0, np.arctan2(m[1, 0], m[0, 0]))
        return Eulers(
            np.arctan2(m[0, 2], -m[1, 2]),
            np.arccos(m[2, 2]),
            np.arctan2(m[2, 0], m[2, 1]),
        )

    @staticmethod
    def eulers_to_matrices(eulers: np.ndarray) -> np.ndarray:
        """
        Vectorized batch conversion from Bunge Euler angles to rotation
        matrices.

        Parameters
        ----------
        eulers: np.ndarray
            Euler angles, shape (n, 3) (or anything reshapeable to it)

        Returns
        -------
        np.ndarray
            rotation matrices, shape (n, 3, 3)

        Raises
        ------
        ValueError
            if the input cannot be reshaped to (n, 3)
        """
        try:
            eulers = eulers.reshape((-1, 3))
        except (AttributeError, ValueError) as err:
            # AttributeError: not an ndarray; ValueError: incompatible size
            raise ValueError("Euler angles must be ROW/horizontally stacked") from err
        c1, s1 = np.cos(eulers[:, 0]), np.sin(eulers[:, 0])
        c, s = np.cos(eulers[:, 1]), np.sin(eulers[:, 1])
        c2, s2 = np.cos(eulers[:, 2]), np.sin(eulers[:, 2])
        m = np.zeros((eulers.shape[0], 3, 3))
        m[:, 0, 0], m[:, 0, 1], m[:, 0, 2] = c1*c2 - s1*c*s2, -c1*s2 - s1*c*c2, s1*s
        m[:, 1, 0], m[:, 1, 1], m[:, 1, 2] = s1*c2 + c1*c*s2, -s1*s2 + c1*c*c2, -c1*s
        m[:, 2, 0], m[:, 2, 1], m[:, 2, 2] = s*s2, s*c2, c
        return m

    @staticmethod
    def matrices_to_eulers(matrices: np.ndarray) -> np.ndarray:
        """
        Vectorized batch conversion from stacked rotation matrices to
        Bunge Euler angles.

        Parameters
        ----------
        matrices: np.ndarray
            rotation matrices, shape (n, 3, 3)

        Returns
        -------
        np.ndarray
            Euler angles, shape (n, 3), each angle wrapped into [0, 2pi)

        Raises
        ------
        ValueError
            if the input cannot be reshaped to (n, 3, 3)
        """
        try:
            matrices = matrices.reshape((-1, 3, 3))
        except (AttributeError, ValueError) as err:
            raise ValueError("Please stack rotation matrices along 1st-axis") from err
        eulers = np.zeros((matrices.shape[0], 3))
        # degenerate cases first: m[2,2] == +/-1 (rotation about z only)
        _idx = np.isclose(matrices[:, 2, 2]**2, 1)
        eulers[_idx, 2] = np.arctan2(matrices[_idx, 1, 0], matrices[_idx, 0, 0])
        # then the general cases (spurious trailing commas that wrapped the
        # right-hand sides into 1-tuples are removed here)
        _idx = ~_idx
        eulers[_idx, 0] = np.arctan2(matrices[_idx, 0, 2], -matrices[_idx, 1, 2])
        eulers[_idx, 1] = np.arccos(matrices[_idx, 2, 2])
        eulers[_idx, 2] = np.arctan2(matrices[_idx, 2, 0], matrices[_idx, 2, 1])
        return eulers % (2*np.pi)
@dataclass
class Rodrigues:
    """
    Rodrigues-Frank vector: ([n_1, n_2, n_3], tan(omega/2))
    """
    r1: float
    r2: float
    r3: float

    @property
    def as_array(self) -> np.ndarray:
        """Vector components as a (3,) numpy array."""
        return np.array([self.r1, self.r2, self.r3])

    @property
    def rot_axis(self) -> np.ndarray:
        """Unit rotation axis."""
        return normalize(self.as_array)

    @property
    def rot_ang(self) -> float:
        """
        Rotation angle in radians.

        NOTE: the angle is restricted to [0, pi], so the tan term is
        always non-negative.
        """
        return np.arctan(norm(self.as_array))*2

    @staticmethod
    def rodrigues_from_quaternions(quats: np.ndarray) -> np.ndarray:
        """
        Vectorized batch conversion from unitary quaternions to
        Rodrigues vectors.

        Parameters
        ----------
        quats: np.ndarray
            quaternions (w, x, y, z) row-stacked along the first axis

        Returns
        -------
        np.ndarray
            Rodrigues vectors stacked along the first axis, shape (n, 3)

        Raises
        ------
        ValueError
            if the input cannot be reshaped to (n, 4)
        """
        try:
            quats = quats.reshape(-1, 4)
        except (AttributeError, ValueError) as err:
            # previously a bare `except:` that masked the real failure
            raise ValueError("Row stack input quaternions") from err
        return quats[:, 1:4]/quats[:, 0][:, None]
@dataclass
class Quaternion:
    """
    Unitary quaternion representation of rotation.
        q = w + x i + y j + z k
    reference:
        http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/
    Note:
        No conversion methods to other representations are provided in this
        class as the conversion requires the knowledge of a reference frame,
        whereas a quaternion itself does not have a frame (an abstract concept).
    """
    w: float  # cos(theta/2)
    x: float  # sin(theta/2) * rotation_axis_x
    y: float  # sin(theta/2) * rotation_axis_y
    z: float  # sin(theta/2) * rotation_axis_z
    normalized: bool = False

    def __post_init__(self) -> None:
        # standardize the quaternion:
        # 1. rotation angle range: [0, pi] -> self.w >= 0
        # 2. |q| === 1
        self.standardize()

    def standardize(self) -> None:
        """Scale to unit length and flip the sign so that w >= 0."""
        _sgn = -1 if self.w < 0 else 1
        _norm = norm([self.w, self.x, self.y, self.z]) * _sgn
        self.w /= _norm
        self.x /= _norm
        self.y /= _norm
        self.z /= _norm
        self.normalized = True

    @property
    def as_array(self) -> np.ndarray:
        """(w, x, y, z) as a (4,) numpy array."""
        return np.array([self.w, self.x, self.y, self.z])

    @property
    def as_rodrigues(self) -> 'Rodrigues':
        """Equivalent Rodrigues vector (imag/real; bare imag when real == 0)."""
        _r = self.imag if iszero(self.real) else self.imag/self.real
        return Rodrigues(*_r)

    @property
    def as_eulers(self) -> 'Eulers':
        """
        Quaternion to Bunge Euler angles.
        """
        qu = self.as_array
        q03 = qu[0]**2 + qu[3]**2
        q12 = qu[1]**2 + qu[2]**2
        chi = np.sqrt(q03*q12)
        if iszero(chi):
            # degenerate cases: rotation axis along z (q12 == 0) or in
            # the x-y plane (q03 == 0)
            if iszero(q12):
                eu = np.array([
                    np.arctan2(-1*2.0*qu[0]*qu[3], qu[0]**2 - qu[3]**2),
                    0.0,
                    0.0,
                ])
            else:
                eu = np.array([
                    np.arctan2(2.0*qu[1]*qu[2], qu[1]**2 - qu[2]**2),
                    np.pi,
                    0.0,
                ])
        else:
            eu = np.array([
                np.arctan2((-1*qu[0]*qu[2] + qu[1]*qu[3])*chi, (-1*qu[0]*qu[1] - qu[2]*qu[3])*chi),
                np.arctan2(2.0*chi, q03 - q12),
                np.arctan2((1*qu[0]*qu[2] + qu[1]*qu[3])*chi, (-1*qu[0]*qu[1] + qu[2]*qu[3])*chi),
            ])
        # Eulers.__post_init__ reduces the angles to the definition range
        return Eulers(*eu)

    @property
    def as_matrix(self) -> np.ndarray:
        """Return the rotation matrix"""
        return self.as_eulers.as_matrix

    @property
    def real(self):
        # scalar (w) part
        return self.w

    @property
    def imag(self):
        # vector (x, y, z) part
        return np.array([self.x, self.y, self.z])

    @property
    def norm(self) -> float:
        return np.linalg.norm(self.as_array)

    @property
    def rot_angle(self):
        """Rotation angle in radians; [0, pi] for a standardized quaternion."""
        # np.arccos already returns values in [0, pi]; the former abs() was a no-op
        return np.arccos(self.w)*2

    @property
    def rot_axis(self):
        """Unit rotation axis."""
        # NOTE: np.arccos(w) can never be negative, so the original
        # `-normalize(imag) if arccos(w) < 0` branch was unreachable
        return normalize(self.imag)

    @property
    def conjugate(self) -> 'Quaternion':
        """Conjugate quaternion (the inverse rotation for a unit quaternion)."""
        return Quaternion(self.w, -self.x, -self.y, -self.z)

    def __add__(self, other: 'Quaternion') -> 'Quaternion':
        # NOTE:
        # Adding quaternions has no physical meaning unless the result is
        # averaged to approximate the intermediate state, provided that the
        # two rotations are infinitely small.
        return Quaternion(*(self.as_array + other.as_array))

    def __sub__(self, other: 'Quaternion') -> 'Quaternion':
        return Quaternion(*(self.as_array - other.as_array))

    def __neg__(self) -> 'Quaternion':
        return Quaternion(*(-self.as_array))

    @methdispatch
    def __mul__(self, other: 'Quaternion') -> 'Quaternion':
        """
        Hamilton product (similar to complex number multiplication).
        """
        real = self.real*other.real - np.dot(self.imag, other.imag)
        imag = self.real*other.imag \
             + other.real*self.imag \
             + np.cross(self.imag, other.imag)
        return Quaternion(real, *imag)

    @__mul__.register(int)
    @__mul__.register(float)
    def _(self, other: Union[int, float]) -> None:
        # scaling would break the |q| == 1 invariant
        raise ValueError("Scale a unitary quaternion is meaningless!")

    @staticmethod
    def combine_two(q2: 'Quaternion', q1: 'Quaternion') -> 'Quaternion':
        """
        Return the single rotation equivalent to rotating by q1 first,
        then by q2.

        Parameters
        ----------
        q1: Quaternion
            first active rotation
        q2: Quaternion
            second active rotation

        Returns
        -------
        Quaternion
            reduced (single-step) rotation
        """
        # combining two operations into one is simply their product
        return q2*q1

    @staticmethod
    def average_quaternions(qs: list) -> 'Quaternion':
        """
        Return the average quaternion based on the algorithm published in
        Markley et al., Averaging Quaternions, doi: 10.2514/1.28949.

        Parameters
        ----------
        qs: list
            list of quaternions to average

        Returns
        -------
        Quaternion
            average quaternion of the given list

        Note:
            This method only provides an approximation, with about 1% error.
        """
        _sum = np.sum([np.outer(q.as_array, q.as_array) for q in qs], axis=0)
        _eigval, _eigvec = np.linalg.eig(_sum/len(qs))
        return Quaternion(*np.real(_eigvec.T[_eigval.argmax()]))

    @staticmethod
    def from_angle_axis(angle: float, axis: np.ndarray) -> 'Quaternion':
        """
        Return a unitary quaternion for the given rotation angle
        (radians, not the half-angle) and rotation axis (any length).
        """
        if iszero(angle):
            return Quaternion(1, 0, 0, 0)
        axis = normalize(axis)
        return Quaternion(np.cos(angle/2), *(np.sin(angle/2)*axis))

    @staticmethod
    def from_eulers(euler: 'Eulers') -> 'Quaternion':
        """Return a quaternion for the given Euler angles (Eulers or ndarray)."""
        # NOTE:
        # single-dispatch polymorphism does not work for static methods,
        # hence the try/except to accept both ndarray and Eulers input;
        # scalar multiplication on an Eulers instance raises TypeError
        try:
            ee = 0.5*euler
        except TypeError:
            ee = 0.5*euler.as_array
        cPhi = np.cos(ee[1])
        sPhi = np.sin(ee[1])
        return Quaternion(
            +cPhi*np.cos(ee[0]+ee[2]),
            -sPhi*np.cos(ee[0]-ee[2]),
            -sPhi*np.sin(ee[0]-ee[2]),
            -cPhi*np.sin(ee[0]+ee[2]),
        )

    @staticmethod
    def from_rodrigues(ro: 'Rodrigues') -> 'Quaternion':
        """Construct an equivalent quaternion from a given Rodrigues vector."""
        if not isinstance(ro, Rodrigues):
            ro = Rodrigues(*ro)
        return Quaternion.from_angle_axis(ro.rot_ang, ro.rot_axis)

    @staticmethod
    def from_matrix(m: np.ndarray) -> 'Quaternion':
        """Construct quaternion from rotation matrix"""
        return Quaternion.from_eulers(Eulers.from_matrix(m))

    @staticmethod
    def from_random() -> 'Quaternion':
        """Random unit quaternion (uniformity of the sampling not verified)."""
        return Quaternion.from_angle_axis(np.random.random()*np.pi,
                                           random_three_vector(),
                                           )

    @staticmethod
    def quatrotate(q: 'Quaternion', v: np.ndarray) -> np.ndarray:
        """
        Actively rotate vector v by unit quaternion q:
            v' = q * v_as_quaternion * q^-1

        Parameters
        ----------
        q: Quaternion
            quaternion representation of the active rotation
        v: np.ndarray
            vector to rotate

        Returns
        -------
        np.ndarray
            rotated vector
        """
        return (q.real**2 - sum(q.imag**2))*v \
             + 2*np.dot(q.imag, v)*q.imag \
             + 2*q.real*np.cross(q.imag, v)
@dataclass(frozen=True)
class Frame:
    """
    Reference frame represented as three base vectors and an origin.

    NOTE:
        Mathematically, a frame transformation could also include scaling
        and even skewing, as the process is only related to how the base
        is defined. Those are uncommon in materials science and therefore
        not implemented here; extend via inheritance (or dynamic typing)
        if needed.
    """
    # default_factory keeps each instance's arrays independent; a plain
    # `np.array(...)` default would be a single shared (mutable) object
    # across every default-constructed Frame
    e1: np.ndarray = field(default_factory=lambda: np.array([1, 0, 0]))
    e2: np.ndarray = field(default_factory=lambda: np.array([0, 1, 0]))
    e3: np.ndarray = field(default_factory=lambda: np.array([0, 0, 1]))
    o: np.ndarray = field(default_factory=lambda: np.array([0, 0, 0]))
    name: str = "lab"

    @property
    def origin(self) -> np.ndarray:
        """Origin of the frame."""
        return self.o

    @property
    def base(self) -> tuple:
        """Base vectors (e1, e2, e3)."""
        return (self.e1, self.e2, self.e3)

    @staticmethod
    def transformation_matrix(f1: 'Frame', f2: 'Frame') -> np.ndarray:
        """
        Return the 3D transformation matrix (4x4) that translates
        covariance from frame f1 to frame f2.
        ref: http://www.continuummechanics.org/coordxforms.html

        Parameters
        ----------
        f1: Frame
            original frame
        f2: Frame
            target/destination frame

        Returns
        -------
        np.ndarray
            4x4 transformation matrix
        """
        _m = np.zeros((4, 4))
        # rotation part: dot products between new and old base vectors
        _m[0:3, 0:3] = np.array([[np.dot(new_e, old_e) for old_e in f1.base]
                                  for new_e in f2.base
                                  ])
        # NOTE(review): the origin shift sits in the last ROW, so it does
        # not affect the matrix @ [p, 1] product used by transform_point —
        # confirm the intended translation convention
        _m[3, 0:3] = f1.o - f2.o
        _m[3, 3] = 1
        return _m

    @staticmethod
    def transform_point(p_old: np.ndarray,
                        f_old: "Frame", f_new: "Frame") -> np.ndarray:
        """
        Transform the covariance of the given point from the old frame
        to the new frame.

        Parameters
        ----------
        p_old: np.ndarray
            covariance of the point in the old frame (f_old)
        f_old: Frame
            old frame
        f_new: Frame
            new frame

        Returns
        -------
        np.ndarray
            covariance of the point in the new frame (f_new)
        """
        return np.dot(
            Frame.transformation_matrix(f_old, f_new),
            np.append(p_old, 1),
        )[:3]

    # TODO:
    # An Einstein-summation based implementation would unify vectors and
    # n-rank tensors; currently everything is restricted to standard R^3.
    @staticmethod
    def transform_vector(v_old: np.ndarray,
                         f_old: "Frame", f_new: "Frame") -> np.ndarray:
        """
        Transform the covariance of the given vector from the old frame
        f_old to the new frame f_new.

        Parameters
        ----------
        v_old: np.ndarray
            covariance of the vector in the old frame, f_old
        f_old: Frame
            old frame
        f_new: Frame
            new frame

        Returns
        -------
        np.ndarray
            covariance of the vector in the new frame, f_new
        """
        return np.dot(
            Frame.transformation_matrix(f_old, f_new)[0:3, 0:3],
            v_old,
        )

    @staticmethod
    def transform_tensor(t_old: np.ndarray,
                         f_old: "Frame", f_new: "Frame") -> np.ndarray:
        """
        Transform the covariance of the given (rank-2) tensor from the
        old frame f_old to the new frame f_new.

        Parameters
        ----------
        t_old: np.ndarray
            covariance of the given tensor in the old frame f_old
        f_old: Frame
            old frame
        f_new: Frame
            new frame

        Returns
        -------
        np.ndarray
            covariance of the given tensor in the new frame f_new
        """
        _m = Frame.transformation_matrix(f_old, f_new)[0:3, 0:3]
        return np.dot(_m, np.dot(t_old, _m.T))
@dataclass
class Orientation:
    """
    Orientation describes a given object's attitude relative to a given
    reference frame; more specifically, the orientation of a crystal is
    described as a rotation of the reference frame (the sample frame is
    a common choice) to coincide with the crystal's reference frame.

    It is worth pointing out that the pose of a rigid body contains both
    attitude and position, the descriptions of which are both closely
    tied to the reference frame.
    """
    q: Quaternion  # attitude as a unit quaternion
    f: Frame       # reference frame the quaternion is expressed in

    @property
    def frame(self) -> 'Frame':
        """Reference frame this orientation is expressed in."""
        return self.f

    @frame.setter
    def frame(self, new_frame: Frame) -> None:
        # frame update: re-express each column of the orientation matrix
        # in the new frame, then rebuild the quaternion from it
        _m = self.q.as_matrix
        for i in range(3):
            _m[:,i] = Frame.transform_vector(_m[:,i], self.frame, new_frame)
        self.q = Quaternion.from_matrix(_m)
        self.f = new_frame

    @property
    def as_quaternion(self) -> 'Quaternion':
        """Quaternion representation."""
        return self.q

    @property
    def as_rodrigues(self) -> 'Rodrigues':
        """Rodrigues vector representation."""
        return self.q.as_rodrigues

    @property
    def as_eulers(self) -> 'Eulers':
        """Bunge Euler angles representation."""
        return self.q.as_eulers

    @property
    def as_matrix(self) -> np.ndarray:
        """Rotation matrix representation."""
        return self.q.as_matrix

    def misorientation(self, other: 'Orientation', lattice: str) -> tuple:
        """
        Description
        -----------
        Calculate the misorientation between self and other assuming the
        given lattice (symmetry)

        Parameters
        ----------
        other: Orientation
            the other orientation instance
        lattice: str
            symmetry name

        Returns
        -------
        tuple
            Return the (angle, axis) pair
        """
        # Step_1: get the symmetry operators
        sym_ops = sym_operator(lattice)
        # Step_2: make sure both are in the same frame
        # (NOTE: frames are compared by name only; mutates `other`)
        if self.f.name != other.f.name:
            other.frame = self.f
        # Step_3: calculate misorientations among all possible pairs
        # NOTE:
        #   1. Quaternion multiplication q2*q1 means rotate by q1, then q2,
        #      which is why the symmetry operator is always on the right
        #   2. To calculate disorientation other -> me, we need to do the
        #      conjugate of other to bring ? to reference frame, then from
        #      reference frame to me, hence other.conjugate * me
        #   3. Symmetry operators are required for both, fortunately the
        #      quaternion based calculation is really cheap.
        _drs = [
            (other.q*symop_tu).conjugate * (self.q*symop_mi)
            for symop_mi in sym_ops
            for symop_tu in sym_ops
        ]
        # Step_4: Locate the one pair with the smallest rotation angle
        _dr = _drs[np.argmin([me.rot_angle for me in _drs])]
        return (_dr.rot_angle, _dr.rot_axis)

    def misorientations(self,
                        others: list,
                        lattice: str,
                        ncores: int=2,
        ) -> list:
        """
        Batch version of the single misorientation calculation, fanned out
        over a multi-processing pool (concurrent.futures
        ProcessPoolExecutor with `ncores` workers).
        """
        tmp = []
        with cf.ProcessPoolExecutor(ncores) as e:
            for other in others:
                tmp.append(e.submit(self.misorientation, other, lattice))
        # collect results in submission order (blocks until all done)
        return [me.result() for me in tmp]

    @staticmethod
    def random_orientations(n: int, frame: Frame) -> list:
        """Return n random orientations represented in the given frame"""
        # NOTE:
        # Whether this provides a uniform sampling of an orientation space
        # is not tested yet.
        return [
            Orientation(Quaternion.from_random(), frame) for _ in range(n)
        ]
def sym_operator(lattice: str) -> list:
    """
    Description
    -----------
    Return a list of symmetry operators in quaternions based on the given
    lattice structure. These quaternions are meant to operate on vectors
    in the crystal frame.

    Parameters
    ----------
    lattice: str
        lattice name (None, 'orthorhombic'/'ortho', 'tetragonal'/'tet',
        'hexagonal'/'hcp'/'hex', 'cubic'/'bcc'/'fcc')

    Returns
    -------
    list
        list of quaternions as symmetry operators

    Raises
    ------
    ValueError
        for an unrecognized lattice name

    NOTE
    ----
    This function only provides a list, which is not associated with a
    frame. Therefore, keep in mind that these operators are meant for
    vectors in the crystal frame.
    """
    if lattice is None:
        # no symmetry: identity only
        return [Quaternion(1,0,0,0)]
    elif lattice.lower() in ['orthorhombic', 'ortho']:
        # 4 operators: identity + three 180-degree rotations
        return [
            Quaternion(*me) for me in [
                [ 1.0, 0.0, 0.0, 0.0 ],
                [ 0.0, 1.0, 0.0, 0.0 ],
                [ 0.0, 0.0, 1.0, 0.0 ],
                [ 0.0, 0.0, 0.0, 1.0 ],
            ]
        ]
    elif lattice.lower() in ['tetragonal', 'tet']:
        # 8 operators
        sqrt2 = np.sqrt(2)
        return [
            Quaternion(*me) for me in [
                [ 1.0, 0.0, 0.0, 0.0 ],
                [ 0.0, 1.0, 0.0, 0.0 ],
                [ 0.0, 0.0, 1.0, 0.0 ],
                [ 0.0, 0.0, 0.0, 1.0 ],
                [ 0.0, 0.5*sqrt2, 0.5*sqrt2, 0.0 ],
                [ 0.0, -0.5*sqrt2, 0.5*sqrt2, 0.0 ],
                [ 0.5*sqrt2, 0.0, 0.0, 0.5*sqrt2 ],
                [-0.5*sqrt2, 0.0, 0.0, 0.5*sqrt2 ],
            ]
        ]
    elif lattice.lower() in ['hexagonal', 'hcp', 'hex']:
        # 12 operators
        sqrt3 = np.sqrt(3)
        return [
            Quaternion(*me) for me in [
                [ 1.0, 0.0, 0.0, 0.0 ],
                [-0.5*sqrt3, 0.0, 0.0, -0.5 ],
                [ 0.5, 0.0, 0.0, 0.5*sqrt3 ],
                [ 0.0, 0.0, 0.0, 1.0 ],
                [-0.5, 0.0, 0.0, 0.5*sqrt3 ],
                [-0.5*sqrt3, 0.0, 0.0, 0.5 ],
                [ 0.0, 1.0, 0.0, 0.0 ],
                [ 0.0, -0.5*sqrt3, 0.5, 0.0 ],
                [ 0.0, 0.5, -0.5*sqrt3, 0.0 ],
                [ 0.0, 0.0, 1.0, 0.0 ],
                [ 0.0, -0.5, -0.5*sqrt3, 0.0 ],
                [ 0.0, 0.5*sqrt3, 0.5, 0.0 ],
            ]
        ]
    elif lattice.lower() in ['cubic', 'bcc', 'fcc']:
        # 24 operators
        sqrt2 = np.sqrt(2)
        return [
            Quaternion(*me) for me in [
                [ 1.0, 0.0, 0.0, 0.0 ],
                [ 0.0, 1.0, 0.0, 0.0 ],
                [ 0.0, 0.0, 1.0, 0.0 ],
                [ 0.0, 0.0, 0.0, 1.0 ],
                [ 0.0, 0.0, 0.5*sqrt2, 0.5*sqrt2 ],
                [ 0.0, 0.0, 0.5*sqrt2, -0.5*sqrt2 ],
                [ 0.0, 0.5*sqrt2, 0.0, 0.5*sqrt2 ],
                [ 0.0, 0.5*sqrt2, 0.0, -0.5*sqrt2 ],
                [ 0.0, 0.5*sqrt2, -0.5*sqrt2, 0.0 ],
                [ 0.0, -0.5*sqrt2, -0.5*sqrt2, 0.0 ],
                [ 0.5, 0.5, 0.5, 0.5 ],
                [-0.5, 0.5, 0.5, 0.5 ],
                [-0.5, 0.5, 0.5, -0.5 ],
                [-0.5, 0.5, -0.5, 0.5 ],
                [-0.5, -0.5, 0.5, 0.5 ],
                [-0.5, -0.5, 0.5, -0.5 ],
                [-0.5, -0.5, -0.5, 0.5 ],
                [-0.5, 0.5, -0.5, -0.5 ],
                [-0.5*sqrt2, 0.0, 0.0, 0.5*sqrt2 ],
                [ 0.5*sqrt2, 0.0, 0.0, 0.5*sqrt2 ],
                [-0.5*sqrt2, 0.0, 0.5*sqrt2, 0.0 ],
                [-0.5*sqrt2, 0.0, -0.5*sqrt2, 0.0 ],
                [-0.5*sqrt2, 0.5*sqrt2, 0.0, 0.0 ],
                [-0.5*sqrt2, -0.5*sqrt2, 0.0, 0.0 ],
            ]
        ]
    else:
        raise ValueError(f"Unknown lattice structure {lattice}")
if __name__ == "__main__":
    # Example_1:
    # reduce multi-step active rotations (unitary quaternions) into a
    # single one
    from functools import reduce
    from pprint import pprint
    print("Example_1: combine multiple rotations")
    n_cases = 5
    angs = np.random.random(n_cases) * np.pi
    qs = [Quaternion.from_angle_axis(me, random_three_vector()) for me in angs]
    pprint(qs)
    print("Reduced to:")
    pprint(reduce(Quaternion.combine_two, qs))
    print()
    # Example_2: actively rotate a vector with a quaternion
    print("Example_2: rotate a vector")
    ang = 120
    quat = Quaternion.from_angle_axis(np.radians(ang), np.array([1,1,1]))
    vec = np.array([1,0,0])
    print(f"rotate {vec} by {quat} ({ang} deg) results in:")
    print(Quaternion.quatrotate(quat, vec))
    print()
    # Example_3: quaternion product == composition of rotations
    print("Example_3: sequential rotation is just multiplication")
    q1 = Quaternion.from_random()
    q2 = Quaternion.from_random()
    print(q1*q2)
    print(Quaternion.combine_two(q1, q2))
    print()
    # scaling a unitary quaternion is disallowed (raises ValueError)
    # q1 = q1 * 5
    # Example_4: transformation matrix between two frames
    print("Example_4: calc transformation matrix")
    f1 = Frame(np.array([1, 0, 0]),
               np.array([0, 1, 0]),
               np.array([0, 0, 1]),
               np.array([0, 0, 0]),
               'old',
        )
    sqr2 = np.sqrt(2)
    # frame rotated 45 degrees about z
    f2 = Frame(np.array([ 1/sqr2, 1/sqr2, 0]),
               np.array([-1/sqr2, 1/sqr2, 0]),
               np.array([0, 0, 1]),
               np.array([0, 0, 0]),
               'r_z_45',
        )
    print("original frame:")
    pprint(f1)
    print("target frame:")
    pprint(f2)
    print("transformation matrix is:")
    print(Frame.transformation_matrix(f1, f2))
    print()
| [
"numpy.arctan2",
"concurrent.futures.ProcessPoolExecutor",
"hexomap.npmath.normalize",
"numpy.argmin",
"numpy.isclose",
"numpy.sin",
"numpy.linalg.norm",
"pprint.pprint",
"hexomap.npmath.norm",
"hexomap.utility.iszero",
"hexomap.utility.isone",
"numpy.append",
"hexomap.utility.standarize_eul... | [((16477, 16499), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (16486, 16499), False, 'from dataclasses import dataclass\n'), ((17109, 17128), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (17117, 17128), True, 'import numpy as np\n'), ((17150, 17169), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (17158, 17169), True, 'import numpy as np\n'), ((17191, 17210), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (17199, 17210), True, 'import numpy as np\n'), ((17232, 17251), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (17240, 17251), True, 'import numpy as np\n'), ((29829, 29839), 'pprint.pprint', 'pprint', (['qs'], {}), '(qs)\n', (29835, 29839), False, 'from pprint import pprint\n'), ((30080, 30099), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (30088, 30099), True, 'import numpy as np\n'), ((30770, 30780), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (30777, 30780), True, 'import numpy as np\n'), ((31020, 31030), 'pprint.pprint', 'pprint', (['f1'], {}), '(f1)\n', (31026, 31030), False, 'from pprint import pprint\n'), ((31062, 31072), 'pprint.pprint', 'pprint', (['f2'], {}), '(f2)\n', (31068, 31072), False, 'from pprint import pprint\n'), ((1533, 1575), 'numpy.array', 'np.array', (['[self.phi1, self.phi, self.phi2]'], {}), '([self.phi1, self.phi, self.phi2])\n', (1541, 1575), True, 'import numpy as np\n'), ((1617, 1658), 'hexomap.utility.standarize_euler', 'standarize_euler', (['_euler', 'self.in_radians'], {}), '(_euler, self.in_radians)\n', (1633, 1658), False, 'from hexomap.utility import standarize_euler\n'), ((1875, 1917), 'numpy.array', 'np.array', (['[self.phi1, self.phi, self.phi2]'], {}), '([self.phi1, self.phi, self.phi2])\n', (1883, 1917), True, 'import numpy as np\n'), ((2510, 2660), 'numpy.array', 'np.array', (['[[c1 * c2 - s1 * c * s2, -c1 * s2 - s1 * c * c2, s1 * s], [s1 * c2 + c1 
* c *\n s2, -s1 * s2 + c1 * c * c2, -c1 * s], [s * s2, s * c2, c]]'], {}), '([[c1 * c2 - s1 * c * s2, -c1 * s2 - s1 * c * c2, s1 * s], [s1 * c2 +\n c1 * c * s2, -s1 * s2 + c1 * c * c2, -c1 * s], [s * s2, s * c2, c]])\n', (2518, 2660), True, 'import numpy as np\n'), ((3051, 3070), 'hexomap.utility.isone', 'isone', (['(m[2, 2] ** 2)'], {}), '(m[2, 2] ** 2)\n', (3056, 3070), False, 'from hexomap.utility import isone\n'), ((4692, 4725), 'numpy.zeros', 'np.zeros', (['(eulers.shape[0], 3, 3)'], {}), '((eulers.shape[0], 3, 3))\n', (4700, 4725), True, 'import numpy as np\n'), ((6032, 6064), 'numpy.zeros', 'np.zeros', (['(matrices.shape[0], 3)'], {}), '((matrices.shape[0], 3))\n', (6040, 6064), True, 'import numpy as np\n'), ((6124, 6161), 'numpy.isclose', 'np.isclose', (['(matrices[:, 2, 2] ** 2)', '(1)'], {}), '(matrices[:, 2, 2] ** 2, 1)\n', (6134, 6161), True, 'import numpy as np\n'), ((6185, 6239), 'numpy.arctan2', 'np.arctan2', (['matrices[_idx, 1, 0]', 'matrices[_idx, 0, 0]'], {}), '(matrices[_idx, 1, 0], matrices[_idx, 0, 0])\n', (6195, 6239), True, 'import numpy as np\n'), ((6335, 6390), 'numpy.arctan2', 'np.arctan2', (['matrices[_idx, 0, 2]', '(-matrices[_idx, 1, 2])'], {}), '(matrices[_idx, 0, 2], -matrices[_idx, 1, 2])\n', (6345, 6390), True, 'import numpy as np\n'), ((6796, 6833), 'numpy.array', 'np.array', (['[self.r1, self.r2, self.r3]'], {}), '([self.r1, self.r2, self.r3])\n', (6804, 6833), True, 'import numpy as np\n'), ((6930, 6954), 'hexomap.npmath.normalize', 'normalize', (['self.as_array'], {}), '(self.as_array)\n', (6939, 6954), False, 'from hexomap.npmath import normalize\n'), ((9152, 9194), 'numpy.array', 'np.array', (['[self.w, self.x, self.y, self.z]'], {}), '([self.w, self.x, self.y, self.z])\n', (9160, 9194), True, 'import numpy as np\n'), ((9573, 9591), 'numpy.sqrt', 'np.sqrt', (['(q03 * q12)'], {}), '(q03 * q12)\n', (9580, 9591), True, 'import numpy as np\n'), ((9602, 9613), 'hexomap.utility.iszero', 'iszero', (['chi'], {}), '(chi)\n', (9608, 
9613), False, 'from hexomap.utility import iszero\n'), ((10676, 10710), 'numpy.array', 'np.array', (['[self.x, self.y, self.z]'], {}), '([self.x, self.y, self.z])\n', (10684, 10710), True, 'import numpy as np\n'), ((10770, 10799), 'numpy.linalg.norm', 'np.linalg.norm', (['self.as_array'], {}), '(self.as_array)\n', (10784, 10799), True, 'import numpy as np\n'), ((14303, 14316), 'hexomap.utility.iszero', 'iszero', (['angle'], {}), '(angle)\n', (14309, 14316), False, 'from hexomap.utility import iszero\n'), ((14922, 14935), 'numpy.cos', 'np.cos', (['ee[1]'], {}), '(ee[1])\n', (14928, 14935), True, 'import numpy as np\n'), ((14951, 14964), 'numpy.sin', 'np.sin', (['ee[1]'], {}), '(ee[1])\n', (14957, 14964), True, 'import numpy as np\n'), ((18127, 18143), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (18135, 18143), True, 'import numpy as np\n'), ((29711, 29736), 'numpy.random.random', 'np.random.random', (['n_cases'], {}), '(n_cases)\n', (29727, 29736), True, 'import numpy as np\n'), ((29876, 29910), 'functools.reduce', 'reduce', (['Quaternion.combine_two', 'qs'], {}), '(Quaternion.combine_two, qs)\n', (29882, 29910), False, 'from functools import reduce\n'), ((30034, 30049), 'numpy.radians', 'np.radians', (['ang'], {}), '(ang)\n', (30044, 30049), True, 'import numpy as np\n'), ((30051, 30070), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (30059, 30070), True, 'import numpy as np\n'), ((30592, 30611), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (30600, 30611), True, 'import numpy as np\n'), ((30629, 30648), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (30637, 30648), True, 'import numpy as np\n'), ((30666, 30685), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (30674, 30685), True, 'import numpy as np\n'), ((30702, 30721), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (30710, 30721), True, 'import numpy as np\n'), ((30796, 30829), 'numpy.array', 'np.array', 
(['[1 / sqr2, 1 / sqr2, 0]'], {}), '([1 / sqr2, 1 / sqr2, 0])\n', (30804, 30829), True, 'import numpy as np\n'), ((30844, 30878), 'numpy.array', 'np.array', (['[-1 / sqr2, 1 / sqr2, 0]'], {}), '([-1 / sqr2, 1 / sqr2, 0])\n', (30852, 30878), True, 'import numpy as np\n'), ((30891, 30910), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (30899, 30910), True, 'import numpy as np\n'), ((30927, 30946), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (30935, 30946), True, 'import numpy as np\n'), ((2350, 2367), 'numpy.cos', 'np.cos', (['self.phi1'], {}), '(self.phi1)\n', (2356, 2367), True, 'import numpy as np\n'), ((2369, 2386), 'numpy.sin', 'np.sin', (['self.phi1'], {}), '(self.phi1)\n', (2375, 2386), True, 'import numpy as np\n'), ((2404, 2420), 'numpy.cos', 'np.cos', (['self.phi'], {}), '(self.phi)\n', (2410, 2420), True, 'import numpy as np\n'), ((2423, 2439), 'numpy.sin', 'np.sin', (['self.phi'], {}), '(self.phi)\n', (2429, 2439), True, 'import numpy as np\n'), ((2458, 2475), 'numpy.cos', 'np.cos', (['self.phi2'], {}), '(self.phi2)\n', (2464, 2475), True, 'import numpy as np\n'), ((2477, 2494), 'numpy.sin', 'np.sin', (['self.phi2'], {}), '(self.phi2)\n', (2483, 2494), True, 'import numpy as np\n'), ((4514, 4534), 'numpy.cos', 'np.cos', (['eulers[:, 0]'], {}), '(eulers[:, 0])\n', (4520, 4534), True, 'import numpy as np\n'), ((4535, 4555), 'numpy.sin', 'np.sin', (['eulers[:, 0]'], {}), '(eulers[:, 0])\n', (4541, 4555), True, 'import numpy as np\n'), ((4572, 4592), 'numpy.cos', 'np.cos', (['eulers[:, 1]'], {}), '(eulers[:, 1])\n', (4578, 4592), True, 'import numpy as np\n'), ((4593, 4613), 'numpy.sin', 'np.sin', (['eulers[:, 1]'], {}), '(eulers[:, 1])\n', (4599, 4613), True, 'import numpy as np\n'), ((4630, 4650), 'numpy.cos', 'np.cos', (['eulers[:, 2]'], {}), '(eulers[:, 2])\n', (4636, 4650), True, 'import numpy as np\n'), ((4651, 4671), 'numpy.sin', 'np.sin', (['eulers[:, 2]'], {}), '(eulers[:, 2])\n', (4657, 4671), True, 'import 
numpy as np\n'), ((6413, 6444), 'numpy.arccos', 'np.arccos', (['matrices[_idx, 2, 2]'], {}), '(matrices[_idx, 2, 2])\n', (6422, 6444), True, 'import numpy as np\n'), ((6470, 6524), 'numpy.arctan2', 'np.arctan2', (['matrices[_idx, 2, 0]', 'matrices[_idx, 2, 1]'], {}), '(matrices[_idx, 2, 0], matrices[_idx, 2, 1])\n', (6480, 6524), True, 'import numpy as np\n'), ((8911, 8949), 'hexomap.npmath.norm', 'norm', (['[self.w, self.x, self.y, self.z]'], {}), '([self.w, self.x, self.y, self.z])\n', (8915, 8949), False, 'from hexomap.npmath import norm\n'), ((9283, 9300), 'hexomap.utility.iszero', 'iszero', (['self.real'], {}), '(self.real)\n', (9289, 9300), False, 'from hexomap.utility import iszero\n'), ((9630, 9641), 'hexomap.utility.iszero', 'iszero', (['q12'], {}), '(q12)\n', (9636, 9641), False, 'from hexomap.utility import iszero\n'), ((10990, 11010), 'hexomap.npmath.normalize', 'normalize', (['self.imag'], {}), '(self.imag)\n', (10999, 11010), False, 'from hexomap.npmath import normalize\n'), ((11870, 11899), 'numpy.dot', 'np.dot', (['self.imag', 'other.imag'], {}), '(self.imag, other.imag)\n', (11876, 11899), True, 'import numpy as np\n'), ((11989, 12020), 'numpy.cross', 'np.cross', (['self.imag', 'other.imag'], {}), '(self.imag, other.imag)\n', (11997, 12020), True, 'import numpy as np\n'), ((14393, 14408), 'hexomap.npmath.normalize', 'normalize', (['axis'], {}), '(axis)\n', (14402, 14408), False, 'from hexomap.npmath import normalize\n'), ((15786, 15807), 'hexomap.npmath.random_three_vector', 'random_three_vector', ([], {}), '()\n', (15805, 15807), False, 'from hexomap.npmath import random_three_vector\n'), ((21059, 21078), 'numpy.dot', 'np.dot', (['t_old', '_m.T'], {}), '(t_old, _m.T)\n', (21065, 21078), True, 'import numpy as np\n'), ((23945, 23985), 'numpy.argmin', 'np.argmin', (['[me.rot_angle for me in _drs]'], {}), '([me.rot_angle for me in _drs])\n', (23954, 23985), True, 'import numpy as np\n'), ((24377, 24407), 'concurrent.futures.ProcessPoolExecutor', 
'cf.ProcessPoolExecutor', (['ncores'], {}), '(ncores)\n', (24399, 24407), True, 'import concurrent.futures as cf\n'), ((29786, 29807), 'hexomap.npmath.random_three_vector', 'random_three_vector', ([], {}), '()\n', (29805, 29807), False, 'from hexomap.npmath import random_three_vector\n'), ((3154, 3182), 'numpy.arctan2', 'np.arctan2', (['m[1, 0]', 'm[0, 0]'], {}), '(m[1, 0], m[0, 0])\n', (3164, 3182), True, 'import numpy as np\n'), ((3254, 3283), 'numpy.arctan2', 'np.arctan2', (['m[0, 2]', '(-m[1, 2])'], {}), '(m[0, 2], -m[1, 2])\n', (3264, 3283), True, 'import numpy as np\n'), ((3299, 3317), 'numpy.arccos', 'np.arccos', (['m[2, 2]'], {}), '(m[2, 2])\n', (3308, 3317), True, 'import numpy as np\n'), ((3334, 3362), 'numpy.arctan2', 'np.arctan2', (['m[2, 0]', 'm[2, 1]'], {}), '(m[2, 0], m[2, 1])\n', (3344, 3362), True, 'import numpy as np\n'), ((7250, 7269), 'hexomap.npmath.norm', 'norm', (['self.as_array'], {}), '(self.as_array)\n', (7254, 7269), False, 'from hexomap.npmath import norm\n'), ((10859, 10876), 'numpy.arccos', 'np.arccos', (['self.w'], {}), '(self.w)\n', (10868, 10876), True, 'import numpy as np\n'), ((10965, 10982), 'numpy.arccos', 'np.arccos', (['self.w'], {}), '(self.w)\n', (10974, 10982), True, 'import numpy as np\n'), ((10941, 10961), 'hexomap.npmath.normalize', 'normalize', (['self.imag'], {}), '(self.imag)\n', (10950, 10961), False, 'from hexomap.npmath import normalize\n'), ((13653, 13685), 'numpy.outer', 'np.outer', (['q.as_array', 'q.as_array'], {}), '(q.as_array, q.as_array)\n', (13661, 13685), True, 'import numpy as np\n'), ((14439, 14456), 'numpy.cos', 'np.cos', (['(angle / 2)'], {}), '(angle / 2)\n', (14445, 14456), True, 'import numpy as np\n'), ((15010, 15031), 'numpy.cos', 'np.cos', (['(ee[0] + ee[2])'], {}), '(ee[0] + ee[2])\n', (15016, 15031), True, 'import numpy as np\n'), ((15049, 15070), 'numpy.cos', 'np.cos', (['(ee[0] - ee[2])'], {}), '(ee[0] - ee[2])\n', (15055, 15070), True, 'import numpy as np\n'), ((15088, 15109), 'numpy.sin', 
'np.sin', (['(ee[0] - ee[2])'], {}), '(ee[0] - ee[2])\n', (15094, 15109), True, 'import numpy as np\n'), ((15127, 15148), 'numpy.sin', 'np.sin', (['(ee[0] + ee[2])'], {}), '(ee[0] + ee[2])\n', (15133, 15148), True, 'import numpy as np\n'), ((15717, 15735), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (15733, 15735), True, 'import numpy as np\n'), ((16454, 16473), 'numpy.cross', 'np.cross', (['q.imag', 'v'], {}), '(q.imag, v)\n', (16462, 16473), True, 'import numpy as np\n'), ((19290, 19309), 'numpy.append', 'np.append', (['p_old', '(1)'], {}), '(p_old, 1)\n', (19299, 19309), True, 'import numpy as np\n'), ((25989, 25999), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (25996, 25999), True, 'import numpy as np\n'), ((10084, 10186), 'numpy.arctan2', 'np.arctan2', (['((-1 * qu[0] * qu[2] + qu[1] * qu[3]) * chi)', '((-1 * qu[0] * qu[1] - qu[2] * qu[3]) * chi)'], {}), '((-1 * qu[0] * qu[2] + qu[1] * qu[3]) * chi, (-1 * qu[0] * qu[1] -\n qu[2] * qu[3]) * chi)\n', (10094, 10186), True, 'import numpy as np\n'), ((10181, 10213), 'numpy.arctan2', 'np.arctan2', (['(2.0 * chi)', '(q03 - q12)'], {}), '(2.0 * chi, q03 - q12)\n', (10191, 10213), True, 'import numpy as np\n'), ((10230, 10331), 'numpy.arctan2', 'np.arctan2', (['((1 * qu[0] * qu[2] + qu[1] * qu[3]) * chi)', '((-1 * qu[0] * qu[1] + qu[2] * qu[3]) * chi)'], {}), '((1 * qu[0] * qu[2] + qu[1] * qu[3]) * chi, (-1 * qu[0] * qu[1] +\n qu[2] * qu[3]) * chi)\n', (10240, 10331), True, 'import numpy as np\n'), ((18177, 18197), 'numpy.dot', 'np.dot', (['new_e', 'old_e'], {}), '(new_e, old_e)\n', (18183, 18197), True, 'import numpy as np\n'), ((26690, 26700), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (26697, 26700), True, 'import numpy as np\n'), ((9695, 9756), 'numpy.arctan2', 'np.arctan2', (['(-1 * 2.0 * qu[0] * qu[3])', '(qu[0] ** 2 - qu[3] ** 2)'], {}), '(-1 * 2.0 * qu[0] * qu[3], qu[0] ** 2 - qu[3] ** 2)\n', (9705, 9756), True, 'import numpy as np\n'), ((9894, 9950), 'numpy.arctan2', 'np.arctan2', 
(['(2.0 * qu[1] * qu[2])', '(qu[1] ** 2 - qu[2] ** 2)'], {}), '(2.0 * qu[1] * qu[2], qu[1] ** 2 - qu[2] ** 2)\n', (9904, 9950), True, 'import numpy as np\n'), ((14458, 14475), 'numpy.sin', 'np.sin', (['(angle / 2)'], {}), '(angle / 2)\n', (14464, 14475), True, 'import numpy as np\n'), ((16404, 16421), 'numpy.dot', 'np.dot', (['q.imag', 'v'], {}), '(q.imag, v)\n', (16410, 16421), True, 'import numpy as np\n'), ((27655, 27665), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (27662, 27665), True, 'import numpy as np\n')] |
import math
import numpy as np
from radon_server.radon_thread import RadonTransformThread
class DSSRadon(RadonTransformThread):
    """Discrete slant-stack (DSS) Radon transform.

    For every (distance, angle) bin the image is integrated along the
    corresponding straight line; the two pixels bracketing the line are
    combined with linear-interpolation weights.
    """

    def get_algorithm_name(self):
        """Return the short identifier used to register this algorithm."""
        return "dss"

    def run_transform(self, image, n, variant=None):
        """Compute an ``n x n`` sinogram of ``image``.

        :param image: 2-D array of pixel intensities.
            NOTE(review): the bound checks below compare both axes
            against ``N`` (and mix ``M``/``N`` when shifting), so the
            implementation is only correct for square images (M == N).
        :param n: number of distance (rows) and angle (columns) samples.
        :param variant: unused; kept for interface compatibility.
        :return: the ``n x n`` Radon transform (also stored in ``self.radon``).
        """
        M = int(np.shape(image)[0])
        N = int(np.shape(image)[1])
        self.radon = np.zeros((n, n), dtype='float64')
        for h in range(0, n):
            # Mostly-horizontal lines (theta in [pi/4, 3*pi/4)): iterate
            # over x and solve the line equation for y.
            for k in range(0, int(n / 2)):
                theta = math.pi * 0.25 + (k * math.pi) / n
                # Signed distance of the line from the image center.
                r = h - n / 2
                x = np.array(range(int(-M / 2), int(M / 2)))
                # Line equation: x*cos(theta) + y*sin(theta) = r.
                y = (r - x * np.cos(theta)) / np.sin(theta)
                # Shift from centered to array coordinates.
                x += int(M / 2)
                y += int(N / 2)
                # Linear-interpolation weights between the two columns
                # bracketing the (generally fractional) y coordinate.
                y1 = y.astype(int)
                w1 = 1 - (y - y1)
                y2 = (y + 1).astype(int)
                w2 = (y - y1)
                # Drop samples whose lower neighbor is below the image.
                x = x[np.where(y1 >= 0)]
                w1 = w1[np.where(y1 >= 0)]
                w2 = w2[np.where(y1 >= 0)]
                y2 = y2[np.where(y1 >= 0)]
                y1 = y1[np.where(y1 >= 0)]
                # Drop samples whose upper neighbor is past the image.
                x = x[np.where(y2 < N)]
                w1 = w1[np.where(y2 < N)]
                w2 = w2[np.where(y2 < N)]
                y1 = y1[np.where(y2 < N)]
                y2 = y2[np.where(y2 < N)]
                self.radon[h, k] = (image[x, y1] * w1).sum() + (image[x, y2] * w2).sum()
            # Mostly-vertical lines (theta in [3*pi/4, 5*pi/4)): iterate
            # over y and solve the line equation for x.
            for k in range(0, int(n / 2)):
                theta = math.pi * 0.75 + (k * math.pi) / n
                r = h - n / 2
                y = np.array(range(int(-N / 2), int(N / 2)))
                x = (r - y * np.sin(theta)) / np.cos(theta)
                x += int(N / 2)
                y += int(M / 2)
                # Linear-interpolation weights between the two rows
                # bracketing the fractional x coordinate.
                x1 = x.astype(int)
                w1 = 1 - (x - x1)
                x2 = (x + 1).astype(int)
                w2 = (x - x1)
                # Drop out-of-bounds samples (lower bound).
                y = y[np.where(x1 >= 0)]
                w1 = w1[np.where(x1 >= 0)]
                w2 = w2[np.where(x1 >= 0)]
                x2 = x2[np.where(x1 >= 0)]
                x1 = x1[np.where(x1 >= 0)]
                # Drop out-of-bounds samples (upper bound; uses N — only
                # valid for square images, see docstring).
                y = y[np.where(x2 < N)]
                w1 = w1[np.where(x2 < N)]
                w2 = w2[np.where(x2 < N)]
                x1 = x1[np.where(x2 < N)]
                x2 = x2[np.where(x2 < N)]
                self.radon[h, int(n / 2 + k)] = (image[x1, y] * w1).sum() + (image[x2, y] * w2).sum()
            self.update_progress(h, n)
        return self.radon

    # UNUSED - dss using cartesian coordinates
    def discrete_slant_stacking_cart(self, image, steps):
        """Slant stacking parameterized by slope/intercept (p, tau).

        Kept for reference; ``run_transform`` is the production path.
        Writes into ``self.radon``, which must already be allocated.
        """
        H = steps
        K = steps
        pmin = -1
        tmin = 0
        dp = 0.02
        dt = 1
        # NOTE: ``np.float`` was removed in NumPy 1.24; use the builtin.
        p = np.arange(pmin, pmin + dp * H, dp, dtype=float)
        tau = np.arange(tmin, tmin + dt * K, dt, dtype=float)
        dx = 1
        dy = 1
        xmin = -math.floor(steps / 2)
        ymin = 0
        M = np.shape(image)[0]
        N = np.shape(image)[1]
        for k in range(0, K):
            for h in range(0, H):
                # Line in slope/intercept form: n = alpha * m + beta.
                alpha = p[k] * dx / dy
                beta = (p[k] * xmin + tau[h] - ymin) / dy
                line_sum = 0
                for m in range(0, M):
                    n = int(round(alpha * m + beta))
                    if 0 <= n < N:
                        line_sum = line_sum + image[n, m]
                self.radon[H - h - 1, K - k - 1] = line_sum
| [
"math.floor",
"numpy.zeros",
"numpy.shape",
"numpy.sin",
"numpy.arange",
"numpy.where",
"numpy.cos"
] | [((333, 366), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': '"""float64"""'}), "((n, n), dtype='float64')\n", (341, 366), True, 'import numpy as np\n'), ((4432, 4476), 'numpy.arange', 'np.arange', (['pmin', '(pmin + dp * H)', 'dp', 'np.float'], {}), '(pmin, pmin + dp * H, dp, np.float)\n', (4441, 4476), True, 'import numpy as np\n'), ((4491, 4535), 'numpy.arange', 'np.arange', (['tmin', '(tmin + dt * K)', 'dt', 'np.float'], {}), '(tmin, tmin + dt * K, dt, np.float)\n', (4500, 4535), True, 'import numpy as np\n'), ((4582, 4603), 'math.floor', 'math.floor', (['(steps / 2)'], {}), '(steps / 2)\n', (4592, 4603), False, 'import math\n'), ((4634, 4649), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (4642, 4649), True, 'import numpy as np\n'), ((4665, 4680), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (4673, 4680), True, 'import numpy as np\n'), ((256, 271), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (264, 271), True, 'import numpy as np\n'), ((292, 307), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (300, 307), True, 'import numpy as np\n'), ((744, 757), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (750, 757), True, 'import numpy as np\n'), ((1118, 1135), 'numpy.where', 'np.where', (['(y1 >= 0)'], {}), '(y1 >= 0)\n', (1126, 1135), True, 'import numpy as np\n'), ((1161, 1178), 'numpy.where', 'np.where', (['(y1 >= 0)'], {}), '(y1 >= 0)\n', (1169, 1178), True, 'import numpy as np\n'), ((1204, 1221), 'numpy.where', 'np.where', (['(y1 >= 0)'], {}), '(y1 >= 0)\n', (1212, 1221), True, 'import numpy as np\n'), ((1247, 1264), 'numpy.where', 'np.where', (['(y1 >= 0)'], {}), '(y1 >= 0)\n', (1255, 1264), True, 'import numpy as np\n'), ((1290, 1307), 'numpy.where', 'np.where', (['(y1 >= 0)'], {}), '(y1 >= 0)\n', (1298, 1307), True, 'import numpy as np\n'), ((1362, 1378), 'numpy.where', 'np.where', (['(y2 < N)'], {}), '(y2 < N)\n', (1370, 1378), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.where', 'np.where', 
(['(y2 < N)'], {}), '(y2 < N)\n', (1412, 1420), True, 'import numpy as np\n'), ((1446, 1462), 'numpy.where', 'np.where', (['(y2 < N)'], {}), '(y2 < N)\n', (1454, 1462), True, 'import numpy as np\n'), ((1488, 1504), 'numpy.where', 'np.where', (['(y2 < N)'], {}), '(y2 < N)\n', (1496, 1504), True, 'import numpy as np\n'), ((1530, 1546), 'numpy.where', 'np.where', (['(y2 < N)'], {}), '(y2 < N)\n', (1538, 1546), True, 'import numpy as np\n'), ((2609, 2622), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2615, 2622), True, 'import numpy as np\n'), ((2983, 3000), 'numpy.where', 'np.where', (['(x1 >= 0)'], {}), '(x1 >= 0)\n', (2991, 3000), True, 'import numpy as np\n'), ((3026, 3043), 'numpy.where', 'np.where', (['(x1 >= 0)'], {}), '(x1 >= 0)\n', (3034, 3043), True, 'import numpy as np\n'), ((3069, 3086), 'numpy.where', 'np.where', (['(x1 >= 0)'], {}), '(x1 >= 0)\n', (3077, 3086), True, 'import numpy as np\n'), ((3112, 3129), 'numpy.where', 'np.where', (['(x1 >= 0)'], {}), '(x1 >= 0)\n', (3120, 3129), True, 'import numpy as np\n'), ((3155, 3172), 'numpy.where', 'np.where', (['(x1 >= 0)'], {}), '(x1 >= 0)\n', (3163, 3172), True, 'import numpy as np\n'), ((3227, 3243), 'numpy.where', 'np.where', (['(x2 < N)'], {}), '(x2 < N)\n', (3235, 3243), True, 'import numpy as np\n'), ((3269, 3285), 'numpy.where', 'np.where', (['(x2 < N)'], {}), '(x2 < N)\n', (3277, 3285), True, 'import numpy as np\n'), ((3311, 3327), 'numpy.where', 'np.where', (['(x2 < N)'], {}), '(x2 < N)\n', (3319, 3327), True, 'import numpy as np\n'), ((3353, 3369), 'numpy.where', 'np.where', (['(x2 < N)'], {}), '(x2 < N)\n', (3361, 3369), True, 'import numpy as np\n'), ((3395, 3411), 'numpy.where', 'np.where', (['(x2 < N)'], {}), '(x2 < N)\n', (3403, 3411), True, 'import numpy as np\n'), ((727, 740), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (733, 740), True, 'import numpy as np\n'), ((2592, 2605), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2598, 2605), True, 'import numpy as np\n')] |
import multiprocessing
import numpy as np
from multi_mesh.components.interpolator import inverse_transform
from multi_mesh.components.interpolator import get_coefficients
from pykdtree.kdtree import KDTree
from tqdm import tqdm
def map_to_ellipse(base_mesh, mesh):
    """Takes a base mesh with ellipticity topography and
    stretches the mesh to have the same ellipticity.

    Both meshes are modified in place: ``mesh.points`` is stretched
    radially, while ``base_mesh`` is restored to its original
    coordinates before returning.

    # TODO, this could also be merged with interpolate functions, such
    # TODO that weights do not need to be computed twice
    """
    # Get radial ratio for each element node
    r_earth = 6371000
    # Normalized radius of every point of the (elliptic) base mesh.
    r = np.sqrt(np.sum(base_mesh.points ** 2, axis=1)) / r_earth
    # First occurrence of each node index maps the element-nodal
    # "z_node_1D" field to one value per mesh point.
    _, i = np.unique(base_mesh.connectivity, return_index=True)
    rad_1d_values = base_mesh.element_nodal_fields["z_node_1D"].flatten()[i]
    # Ratio between the actual (elliptic) radius and the 1D reference radius.
    r_ratio = r / rad_1d_values
    r_ratio_element_nodal_base = r_ratio[base_mesh.connectivity]
    # Map to sphere and store original points
    orig_old_elliptic_mesh_points = np.copy(base_mesh.points)
    map_to_sphere(base_mesh)
    map_to_sphere(mesh)
    # For each point in new mesh find nearest elements centroids in old mesh
    elem_centroid = base_mesh.get_element_centroid()
    centroid_tree = KDTree(elem_centroid)
    gll_points = base_mesh.points[base_mesh.connectivity]
    # Get elements and interpolation coefficients for new_points
    print("Retrieving interpolation weigts")
    elem_indices, coeffs = get_element_weights(
        gll_points, centroid_tree, mesh.points
    )
    # get_element_weights marks points without an enclosing element as -1;
    # unlike interpolate_to_points, this routine treats that as fatal.
    num_failed = len(np.where(elem_indices == -1)[0])
    if num_failed > 0:
        raise Exception(
            f"{num_failed} points could not find an enclosing element."
        )
    # Interpolate the radial ratio onto the new mesh points and stretch
    # them radially by that factor.
    mesh_point_r_ratio = np.sum(
        coeffs * r_ratio_element_nodal_base[elem_indices], axis=1
    )
    mesh.points = np.array(mesh_point_r_ratio * mesh.points.T).T
    # Restore the original (elliptic) coordinates of the base mesh.
    base_mesh.points = orig_old_elliptic_mesh_points
def map_to_sphere(mesh):
    """Project a salvus mesh onto a perfect sphere.

    Scales every point radially so that its radius matches the scaled
    1D reference radius stored in the ``z_node_1D`` element-nodal
    field. ``mesh.points`` is modified in place; nothing is returned.
    Points exactly at the core (radius zero) are left untouched.
    """
    earth_radius = 6371000
    # One "z_node_1D" value per mesh point, taken from the first
    # occurrence of each node index in the connectivity.
    _, first_occurrence = np.unique(mesh.connectivity, return_index=True)
    rad_1d = mesh.element_nodal_fields["z_node_1D"].flatten()[first_occurrence]
    # Columns of ``mesh.points`` as writable views.
    x, y, z = mesh.points.T
    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    off_center = r > 0
    for axis in (x, y, z):
        axis[off_center] = (
            axis[off_center] * earth_radius * rad_1d[off_center] / r[off_center]
        )
def get_element_weights(gll_points, centroid_tree, points):
    """
    A function to figure out inside which element the point to be
    interpolated is. In addition, it gives the interpolation coefficients
    for the respective element.
    Returns -1 and no coeffs when nothing is found.

    :param gll_points: All GLL nodes from old_mesh,
        shape (n_elements, nodes_per_element, 3)
    :param centroid_tree: KD-tree initialized with the
        centroids of the elements of old_mesh
    :param points: List of points that require interpolation
    :return: the enclosing elements and interpolation weights
    """
    # The worker below must live at module scope so that
    # multiprocessing can pickle it; hence the ``global`` declaration.
    global _get_coeffs
    # Number of candidate elements (nearest centroids) checked per point.
    nelem_to_search = 25
    nodes_per_element = np.shape(gll_points)[1]
    # 27 nodes -> order-2 hex elements, 8 nodes -> order-1.
    if nodes_per_element == 27:
        order = 2
    elif nodes_per_element == 8:
        order = 1
    else:
        import sys
        sys.exit('Not implemented error')

    def _get_coeffs(point_indices):
        # Worker: locate the enclosing element and interpolation
        # coefficients for a chunk of point indices.
        _, nearest_elements = centroid_tree.query(
            points[point_indices], k=nelem_to_search
        )
        element_num = np.arange(len(point_indices))

        def check_inside(index, element_num):
            """
            returns the element_id and coefficients for new_points[index]
            returns -1 for index when nothing is found
            """
            for element in nearest_elements[element_num]:
                # get element gll_points (Fortran order for the
                # compiled inverse_transform routine)
                gll_points_elem = np.asfortranarray(
                    gll_points[element, :, :], dtype=np.float64
                )
                point = np.asfortranarray(points[index])
                # Map the physical point into the element's reference
                # coordinates; NaN means the mapping did not converge.
                ref_coord = inverse_transform(
                    point, gll_points=gll_points_elem, dimension=3
                )
                if np.any(np.isnan(ref_coord)):
                    continue
                # tolerance of 5%: accept points slightly outside the
                # reference cube [-1, 1]^3
                if np.all(np.abs(ref_coord) < 1.05):
                    coeffs = get_coefficients(
                        order,
                        0,
                        0,
                        np.asfortranarray(ref_coord, dtype=np.float64),
                        3,
                    )
                    return element, coeffs
            # return weights zero if nothing found
            return -1, np.zeros(nodes_per_element)

        # Vectorize over the chunk: each call yields a scalar element id
        # and a length-n coefficient vector.
        a = np.vectorize(
            check_inside, signature="(),()->(),(n)", otypes=[int, float]
        )
        return a(point_indices, element_num)

    # Split array in chunks (several chunks per process keeps the
    # progress bar responsive and the load balanced)
    num_processes = multiprocessing.cpu_count()
    n = 50 * num_processes
    task_list = np.array_split(np.arange(len(points)), n)
    elems = []
    coeffs = []
    with multiprocessing.Pool(num_processes) as pool:
        with tqdm(
            total=len(task_list),
            bar_format="{l_bar}{bar}[{elapsed}<{remaining},"
            " '{rate_fmt}{postfix}]",
        ) as pbar:
            # imap preserves chunk order, so results concatenate back
            # into the original point order.
            for i, r in enumerate(pool.imap(_get_coeffs, task_list)):
                elem_in, coeff = r
                pbar.update()
                elems.append(elem_in)
                coeffs.append(coeff)
        pool.close()
        pool.join()
    elems = np.concatenate(elems)
    coeffs = np.concatenate(coeffs)
    return elems, coeffs
def interpolate_to_points(mesh, points, params_to_interp,
                          make_spherical=False, centroid_tree=None):
    """
    Interpolate element-nodal fields from a mesh onto a point cloud.

    Points for which no enclosing element can be found receive zero.

    :param mesh: mesh from which field values are taken
    :param points: np.array of query points
    :param params_to_interp: names of the fields to interpolate
    :param make_spherical: if True, map ``mesh`` onto a sphere first.
        Careful: this alters the passed mesh object.
    :param centroid_tree: optional pre-built KD-tree over the element
        centroids of ``mesh``; supplying it avoids rebuilding the tree
        when this function is called in a loop.
    :return: array of shape (npoints, nparams_to_interp)
    """
    if make_spherical:
        map_to_sphere(mesh)
    if not centroid_tree:
        # Build the centroid lookup tree on demand.
        print("Initializing KDtree...")
        centroid_tree = KDTree(mesh.get_element_centroid())
    # GLL node coordinates per element of the source mesh.
    nodal_coordinates = mesh.points[mesh.connectivity]
    print("Retrieving interpolation weigts")
    enclosing_elements, weights = get_element_weights(
        nodal_coordinates, centroid_tree, points
    )
    n_missed = len(np.where(enclosing_elements == -1)[0])
    if n_missed > 0:
        print(
            n_missed,
            "points could not find an enclosing element. "
            "These points will be set to zero. "
            "Please check your domain or the interpolation tuning parameters",
        )
    print("Interpolating fields...")
    interpolated = np.zeros((len(points), len(params_to_interp)))
    for column, field_name in enumerate(params_to_interp):
        nodal_values = mesh.element_nodal_fields[field_name]
        # Weighted sum of the enclosing element's nodal values;
        # missed points (-1) carry zero weights and thus stay zero.
        interpolated[:, column] = np.sum(
            weights * nodal_values[enclosing_elements], axis=1
        )
    return interpolated
def interpolate_to_mesh(
    old_mesh, new_mesh, params_to_interp=("VSV", "VSH", "VPV", "VPH")
):
    """
    Interpolate element-nodal fields from ``old_mesh`` onto ``new_mesh``.

    Both meshes are temporarily mapped onto a sphere so their coordinate
    systems agree, the listed fields are interpolated, and the original
    (elliptic) coordinates are restored before returning. Values that
    cannot be located in ``old_mesh`` are set to zero.

    :param old_mesh: source mesh carrying the fields
    :param new_mesh: target mesh; its fields are overwritten in place
    :param params_to_interp: names of the fields to transfer.
        The default is a tuple rather than a list to avoid the
        mutable-default-argument pitfall; any sequence of names works.
    """
    # Store original point locations so they can be restored afterwards.
    orig_old_elliptic_mesh_points = np.copy(old_mesh.points)
    orig_new_elliptic_mesh_points = np.copy(new_mesh.points)
    # Map both meshes to a sphere so they share a reference frame.
    map_to_sphere(old_mesh)
    map_to_sphere(new_mesh)
    vals = interpolate_to_points(old_mesh, new_mesh.points, params_to_interp)
    for i, param in enumerate(params_to_interp):
        # Scatter the per-point values back to element-nodal layout.
        new_element_nodal_vals = vals[:, i][new_mesh.connectivity]
        new_mesh.element_nodal_fields[param][:] = new_element_nodal_vals
    # Restore original coordinates.
    old_mesh.points = orig_old_elliptic_mesh_points
    new_mesh.points = orig_new_elliptic_mesh_points
| [
"numpy.sum",
"numpy.concatenate",
"numpy.copy",
"numpy.vectorize",
"numpy.abs",
"numpy.zeros",
"numpy.asfortranarray",
"multi_mesh.components.interpolator.inverse_transform",
"numpy.isnan",
"multiprocessing.cpu_count",
"numpy.shape",
"pykdtree.kdtree.KDTree",
"numpy.where",
"numpy.array",
... | [((657, 709), 'numpy.unique', 'np.unique', (['base_mesh.connectivity'], {'return_index': '(True)'}), '(base_mesh.connectivity, return_index=True)\n', (666, 709), True, 'import numpy as np\n'), ((967, 992), 'numpy.copy', 'np.copy', (['base_mesh.points'], {}), '(base_mesh.points)\n', (974, 992), True, 'import numpy as np\n'), ((1197, 1218), 'pykdtree.kdtree.KDTree', 'KDTree', (['elem_centroid'], {}), '(elem_centroid)\n', (1203, 1218), False, 'from pykdtree.kdtree import KDTree\n'), ((1700, 1765), 'numpy.sum', 'np.sum', (['(coeffs * r_ratio_element_nodal_base[elem_indices])'], {'axis': '(1)'}), '(coeffs * r_ratio_element_nodal_base[elem_indices], axis=1)\n', (1706, 1765), True, 'import numpy as np\n'), ((2031, 2078), 'numpy.unique', 'np.unique', (['mesh.connectivity'], {'return_index': '(True)'}), '(mesh.connectivity, return_index=True)\n', (2040, 2078), True, 'import numpy as np\n'), ((2203, 2236), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (2210, 2236), True, 'import numpy as np\n'), ((4941, 4968), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4966, 4968), False, 'import multiprocessing\n'), ((5575, 5596), 'numpy.concatenate', 'np.concatenate', (['elems'], {}), '(elems)\n', (5589, 5596), True, 'import numpy as np\n'), ((5610, 5632), 'numpy.concatenate', 'np.concatenate', (['coeffs'], {}), '(coeffs)\n', (5624, 5632), True, 'import numpy as np\n'), ((7983, 8007), 'numpy.copy', 'np.copy', (['old_mesh.points'], {}), '(old_mesh.points)\n', (7990, 8007), True, 'import numpy as np\n'), ((8044, 8068), 'numpy.copy', 'np.copy', (['new_mesh.points'], {}), '(new_mesh.points)\n', (8051, 8068), True, 'import numpy as np\n'), ((1798, 1842), 'numpy.array', 'np.array', (['(mesh_point_r_ratio * mesh.points.T)'], {}), '(mesh_point_r_ratio * mesh.points.T)\n', (1806, 1842), True, 'import numpy as np\n'), ((3142, 3162), 'numpy.shape', 'np.shape', (['gll_points'], {}), '(gll_points)\n', (3150, 3162), 
True, 'import numpy as np\n'), ((4750, 4824), 'numpy.vectorize', 'np.vectorize', (['check_inside'], {'signature': '"""(),()->(),(n)"""', 'otypes': '[int, float]'}), "(check_inside, signature='(),()->(),(n)', otypes=[int, float])\n", (4762, 4824), True, 'import numpy as np\n'), ((5095, 5130), 'multiprocessing.Pool', 'multiprocessing.Pool', (['num_processes'], {}), '(num_processes)\n', (5115, 5130), False, 'import multiprocessing\n'), ((6638, 6659), 'pykdtree.kdtree.KDTree', 'KDTree', (['elem_centroid'], {}), '(elem_centroid)\n', (6644, 6659), False, 'from pykdtree.kdtree import KDTree\n'), ((7497, 7558), 'numpy.sum', 'np.sum', (['(coeffs * old_element_nodal_vals[elem_indices])'], {'axis': '(1)'}), '(coeffs * old_element_nodal_vals[elem_indices], axis=1)\n', (7503, 7558), True, 'import numpy as np\n'), ((597, 634), 'numpy.sum', 'np.sum', (['(base_mesh.points ** 2)'], {'axis': '(1)'}), '(base_mesh.points ** 2, axis=1)\n', (603, 634), True, 'import numpy as np\n'), ((1511, 1539), 'numpy.where', 'np.where', (['(elem_indices == -1)'], {}), '(elem_indices == -1)\n', (1519, 1539), True, 'import numpy as np\n'), ((3304, 3337), 'sys.exit', 'sys.exit', (['"""Not implemented error"""'], {}), "('Not implemented error')\n", (3312, 3337), False, 'import sys\n'), ((6973, 7001), 'numpy.where', 'np.where', (['(elem_indices == -1)'], {}), '(elem_indices == -1)\n', (6981, 7001), True, 'import numpy as np\n'), ((3882, 3944), 'numpy.asfortranarray', 'np.asfortranarray', (['gll_points[element, :, :]'], {'dtype': 'np.float64'}), '(gll_points[element, :, :], dtype=np.float64)\n', (3899, 3944), True, 'import numpy as np\n'), ((4007, 4039), 'numpy.asfortranarray', 'np.asfortranarray', (['points[index]'], {}), '(points[index])\n', (4024, 4039), True, 'import numpy as np\n'), ((4069, 4134), 'multi_mesh.components.interpolator.inverse_transform', 'inverse_transform', (['point'], {'gll_points': 'gll_points_elem', 'dimension': '(3)'}), '(point, gll_points=gll_points_elem, dimension=3)\n', (4086, 
4134), False, 'from multi_mesh.components.interpolator import inverse_transform\n'), ((4709, 4736), 'numpy.zeros', 'np.zeros', (['nodes_per_element'], {}), '(nodes_per_element)\n', (4717, 4736), True, 'import numpy as np\n'), ((4200, 4219), 'numpy.isnan', 'np.isnan', (['ref_coord'], {}), '(ref_coord)\n', (4208, 4219), True, 'import numpy as np\n'), ((4312, 4329), 'numpy.abs', 'np.abs', (['ref_coord'], {}), '(ref_coord)\n', (4318, 4329), True, 'import numpy as np\n'), ((4495, 4541), 'numpy.asfortranarray', 'np.asfortranarray', (['ref_coord'], {'dtype': 'np.float64'}), '(ref_coord, dtype=np.float64)\n', (4512, 4541), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import numpy as np
from typing import Optional
from pyannote.core import Annotation
from pyannote.core import Timeline
from pyannote.core.utils.numpy import one_hot_decoding
from pyannote.pipeline import Pipeline
from pyannote.audio.features import Precomputed
from pyannote.pipeline.blocks.clustering import HierarchicalAgglomerativeClustering
from pyannote.pipeline.blocks.clustering import AffinityPropagationClustering
from .utils import assert_string_labels
from pyannote.audio.features.wrapper import Wrapper, Wrappable
class SpeechTurnClustering(Pipeline):
    """Speech turn clustering

    Parameters
    ----------
    embedding : Wrappable, optional
        Describes how raw speaker embeddings should be obtained.
        See pyannote.audio.features.wrapper.Wrapper documentation for details.
        Defaults to "@emb" that indicates that protocol files provide
        the scores in the "emb" key.
    metric : {'euclidean', 'cosine', 'angular'}, optional
        Metric used for comparing embeddings. Defaults to 'cosine'.
    method : {'pool', 'affinity_propagation'}
        Set method used for clustering. "pool" stands for agglomerative
        hierarchical clustering with embedding pooling. "affinity_propagation"
        is for clustering based on affinity propagation. Defaults to "pool".
    window_wise : `bool`, optional
        Set `window_wise` to True to apply clustering on embedding extracted
        using the built-in sliding window. Defaults to apply clustering at
        speech turn level (one average embedding per speech turn).
    """

    def __init__(
        self,
        embedding: Wrappable = None,
        metric: Optional[str] = "cosine",
        method: Optional[str] = "pool",
        window_wise: Optional[bool] = False,
    ):
        super().__init__()
        if embedding is None:
            embedding = "@emb"
        self.embedding = embedding
        self._embedding = Wrapper(self.embedding)
        self.metric = metric
        self.method = method
        # Choose the clustering backend according to `method`.
        if self.method == "affinity_propagation":
            self.clustering = AffinityPropagationClustering(metric=self.metric)
            # sklearn documentation: Preferences for each point - points with
            # larger values of preferences are more likely to be chosen as
            # exemplars. The number of exemplars, ie of clusters, is influenced by
            # the input preferences value. If the preferences are not passed as
            # arguments, they will be set to the median of the input similarities.
            # NOTE one could set the preference value of each speech turn
            # according to their duration. longer speech turns are expected to
            # have more accurate embeddings, therefore should be prefered for
            # exemplars
        else:
            self.clustering = HierarchicalAgglomerativeClustering(
                method=self.method, metric=self.metric, use_threshold=True
            )
        self.window_wise = window_wise

    def _window_level(self, current_file: dict, speech_regions: Timeline) -> Annotation:
        """Apply clustering at window level

        Every sliding-window embedding within `speech_regions` is
        clustered individually (no per-turn averaging).

        Parameters
        ----------
        current_file : `dict`
            File as provided by a pyannote.database protocol.
        speech_regions : `Timeline`
            Speech regions.

        Returns
        -------
        hypothesis : `pyannote.core.Annotation`
            Clustering result.
        """
        # load embeddings
        embedding = self._embedding(current_file)
        window = embedding.sliding_window
        # extract and stack embeddings of speech regions
        X = np.vstack(
            [
                embedding.crop(segment, mode="center", fixed=segment.duration)
                for segment in speech_regions
            ]
        )
        # apply clustering
        y_pred = self.clustering(X)
        # reconstruct a frame-level label sequence over the whole file
        # (0 = non-speech, cluster ids elsewhere)
        y = np.zeros(len(embedding), dtype=np.int8)
        # n = total number of "speech" embeddings
        # s_pred = current position in y_pred
        s_pred, n = 0, len(y_pred)
        for segment in speech_regions:
            # get indices of current speech segment
            # (`fixed=segment.duration` makes crop() return the same
            # number of frames that were stacked into X above)
            ((s, e),) = window.crop(
                segment, mode="center", fixed=segment.duration, return_ranges=True
            )
            # hack for the very last segment that might overflow by 1
            e_pred = min(s_pred + e - s, n - 1)
            e = s + (e_pred - s_pred)
            # assign y_pred to the corresponding speech regions
            y[s:e] = y_pred[s_pred:e_pred]
            # increment current position in y_pred
            s_pred += e - s
        # reconstruct hypothesis
        return one_hot_decoding(y, window)

    def _turn_level(self, current_file: dict, speech_turns: Annotation) -> Annotation:
        """Apply clustering at speech turn level

        Each speech turn is represented by the average of its
        embeddings, and those averages are clustered.

        Parameters
        ----------
        current_file : `dict`
            File as provided by a pyannote.database protocol.
        speech_turns : `Annotation`
            Speech turns. Should only contain `str` labels.

        Returns
        -------
        hypothesis : `pyannote.core.Annotation`
            Clustering result.
        """
        assert_string_labels(speech_turns, "speech_turns")
        embedding = self._embedding(current_file)
        labels = speech_turns.labels()
        X, clustered_labels, skipped_labels = [], [], []
        for l, label in enumerate(labels):
            timeline = speech_turns.label_timeline(label, copy=False)
            # be more and more permissive until we have
            # at least one embedding for current speech turn
            for mode in ["strict", "center", "loose"]:
                x = embedding.crop(timeline, mode=mode)
                if len(x) > 0:
                    break
            # skip labels so small we don't have any embedding for it
            if len(x) < 1:
                skipped_labels.append(label)
                continue
            clustered_labels.append(label)
            # one average embedding per speech turn
            X.append(np.mean(x, axis=0))
        # apply clustering of label embeddings
        clusters = self.clustering(np.vstack(X))
        # map each clustered label to its cluster (between 1 and N_CLUSTERS)
        mapping = {label: k for label, k in zip(clustered_labels, clusters)}
        # map each skipped label to its own cluster
        # (between -1 and -N_SKIPPED_LABELS)
        for l, label in enumerate(skipped_labels):
            mapping[label] = -(l + 1)
        # do the actual mapping
        return speech_turns.rename_labels(mapping=mapping)

    def __call__(
        self, current_file: dict, speech_turns: Optional[Annotation] = None
    ) -> Annotation:
        """Apply speech turn clustering

        Parameters
        ----------
        current_file : `dict`
            File as provided by a pyannote.database protocol.
        speech_turns : `Annotation`, optional
            Speech turns. Should only contain `str` labels.
            Defaults to `current_file['speech_turns']`.

        Returns
        -------
        speech_turns : `pyannote.core.Annotation`
            Clustered speech turns (or windows in case `window_wise` is True)
        """
        if speech_turns is None:
            speech_turns = current_file["speech_turns"]
        # Dispatch to window-level or turn-level clustering.
        if self.window_wise:
            return self._window_level(
                current_file, speech_turns.get_timeline().support()
            )
        return self._turn_level(current_file, speech_turns)
| [
"pyannote.pipeline.blocks.clustering.HierarchicalAgglomerativeClustering",
"pyannote.core.utils.numpy.one_hot_decoding",
"numpy.mean",
"pyannote.pipeline.blocks.clustering.AffinityPropagationClustering",
"pyannote.audio.features.wrapper.Wrapper",
"numpy.vstack"
] | [((3129, 3152), 'pyannote.audio.features.wrapper.Wrapper', 'Wrapper', (['self.embedding'], {}), '(self.embedding)\n', (3136, 3152), False, 'from pyannote.audio.features.wrapper import Wrapper, Wrappable\n'), ((5917, 5944), 'pyannote.core.utils.numpy.one_hot_decoding', 'one_hot_decoding', (['y', 'window'], {}), '(y, window)\n', (5933, 5944), False, 'from pyannote.core.utils.numpy import one_hot_decoding\n'), ((3293, 3342), 'pyannote.pipeline.blocks.clustering.AffinityPropagationClustering', 'AffinityPropagationClustering', ([], {'metric': 'self.metric'}), '(metric=self.metric)\n', (3322, 3342), False, 'from pyannote.pipeline.blocks.clustering import AffinityPropagationClustering\n'), ((4044, 4143), 'pyannote.pipeline.blocks.clustering.HierarchicalAgglomerativeClustering', 'HierarchicalAgglomerativeClustering', ([], {'method': 'self.method', 'metric': 'self.metric', 'use_threshold': '(True)'}), '(method=self.method, metric=self.metric,\n use_threshold=True)\n', (4079, 4143), False, 'from pyannote.pipeline.blocks.clustering import HierarchicalAgglomerativeClustering\n'), ((7377, 7389), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (7386, 7389), True, 'import numpy as np\n'), ((7274, 7292), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (7281, 7292), True, 'import numpy as np\n')] |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from sklearn.cross_validation import StratifiedKFold
NUM_BIZ_TRAIN = 2000
NUM_BIZ_TEST = 10000
def makeKFold(n_folds, y, reps):
    """Yield (train_ix, test_ix) row-index arrays for stratified K-fold CV.

    `y` contains `reps` consecutive copies of each sample's label; the split
    is stratified on one label per group, then group indices are expanded
    back into row indices.
    """
    assert y.shape[0] % reps == 0

    def expand(group_indices):
        # map group index i to its `reps` consecutive row indices
        return np.array(
            [np.arange(reps * i, reps * i + reps) for i in group_indices]
        ).flatten()

    y_compact = y[range(0, y.shape[0], reps)]
    folds = StratifiedKFold(y_compact, n_folds=n_folds, shuffle=True)
    for train_index, test_index in folds:
        yield (expand(train_index), expand(test_index))
def vote_by_majority(pred_list):
    """Binarize a group of probabilities: 1 iff strictly more than half are > 0.5."""
    n_votes = pred_list.size
    n_positive = np.sum(pred_list > 0.5)
    half = int(np.floor(n_votes / 2))
    return 1 if n_positive > half else 0
def vote_by_mean(pred_list):
    """Binarize a group of probabilities by thresholding their mean at 0.5."""
    return 1 if np.mean(pred_list) > 0.5 else 0
def mean_pool(pred_list):
    """Aggregate a group of predictions by their arithmetic mean."""
    return np.asarray(pred_list).mean()
def agg_preds(preds, reps, vote_func):
    """Aggregate per-repetition predictions into one value per sample.

    `preds` holds `reps` consecutive predictions per sample; `vote_func`
    is applied to each group of `reps` values.
    """
    assert preds.shape[0] % reps == 0
    grouped = preds.reshape(-1, reps)
    return np.apply_along_axis(vote_func, axis=1, arr=grouped)
| [
"numpy.sum",
"numpy.floor",
"numpy.apply_along_axis",
"numpy.mean",
"numpy.arange",
"numpy.reshape",
"sklearn.cross_validation.StratifiedKFold"
] | [((394, 451), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (['y_compact'], {'n_folds': 'n_folds', 'shuffle': '(True)'}), '(y_compact, n_folds=n_folds, shuffle=True)\n', (409, 451), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((854, 877), 'numpy.sum', 'np.sum', (['(pred_list > 0.5)'], {}), '(pred_list > 0.5)\n', (860, 877), True, 'import numpy as np\n'), ((975, 993), 'numpy.mean', 'np.mean', (['pred_list'], {}), '(pred_list)\n', (982, 993), True, 'import numpy as np\n'), ((1068, 1086), 'numpy.mean', 'np.mean', (['pred_list'], {}), '(pred_list)\n', (1075, 1086), True, 'import numpy as np\n'), ((1224, 1260), 'numpy.reshape', 'np.reshape', (['preds', '(n_samples, reps)'], {}), '(preds, (n_samples, reps))\n', (1234, 1260), True, 'import numpy as np\n'), ((1272, 1323), 'numpy.apply_along_axis', 'np.apply_along_axis', (['vote_func'], {'axis': '(1)', 'arr': 'preds_r'}), '(vote_func, axis=1, arr=preds_r)\n', (1291, 1323), True, 'import numpy as np\n'), ((905, 923), 'numpy.floor', 'np.floor', (['(reps / 2)'], {}), '(reps / 2)\n', (913, 923), True, 'import numpy as np\n'), ((524, 560), 'numpy.arange', 'np.arange', (['(reps * i)', '(reps * i + reps)'], {}), '(reps * i, reps * i + reps)\n', (533, 560), True, 'import numpy as np\n'), ((651, 687), 'numpy.arange', 'np.arange', (['(reps * i)', '(reps * i + reps)'], {}), '(reps * i, reps * i + reps)\n', (660, 687), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import os
from tqdm import tqdm
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import matplotlib
# Global matplotlib font configuration applied to every figure in this script.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 10}
matplotlib.rc('font', **font)
def get_args(argv=None):
    """Parse command-line options.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; defaults to ``sys.argv[1:]`` when None,
        which preserves the original no-argument call behavior.

    Returns
    -------
    argparse.Namespace
        Namespace with an integer ``kmers`` attribute (default 7).
    """
    parser = argparse.ArgumentParser()
    # use a real int default instead of '7': avoids relying on argparse's
    # implicit re-parsing of string defaults through `type`
    parser.add_argument('--kmers', type=int, default=7, help='kmer')
    opts = parser.parse_args(argv)
    return opts
# Parse CLI options at import time (script-style usage).
opts=get_args()
# Index-to-nucleotide lookup table: integer codes 0..3 map to A/T/G/C.
nts=[
"A",
"T",
"G",
"C"]
def int2nucleotide(nt_sequence, target_length=None):
    """Decode a sequence of integer codes (0..3) into a nucleotide string.

    Parameters
    ----------
    nt_sequence : iterable of int
        Integer codes indexing into the module-level ``nts`` table.
    target_length : unused
        Kept only for backward compatibility with existing callers.

    Returns
    -------
    str
        The decoded nucleotide string, e.g. [0, 1] -> "AT".
    """
    # str.join is linear-time, unlike repeated string concatenation
    return ''.join(nts[nt] for nt in nt_sequence)
# Load the cached model outputs (sequences, attention weights, labels, preds).
with open("prediction_dict.p","rb") as f:
    prediction_dict=pickle.load(f)
# NOTE(review): df is created here but never populated below — presumably
# left over from an earlier version; confirm before removing.
df=pd.DataFrame(columns=['index','sequence'])
def get_kmers(sequence, k):
    """Return every length-k substring of `sequence`, in order of appearance."""
    return [sequence[i:i + k] for i in range(len(sequence) - k + 1)]
os.system('mkdir aw_visualized')
# number of k-mers to show in the final bar chart
top=10
count=0
sequences=[]
# parallel lists: distinct top-attended k-mers and their appearance counts
top_kmers=[]
top_k_count=[]
# For every correctly classified positive sample, record the 3 k-mers with
# the highest (summed) attention weight.
for i in tqdm(range(len(prediction_dict['sequences']))):
    count+=1
    sequence=int2nucleotide(prediction_dict['sequences'][i])
    sequences.append(sequence)
    attention_weights=prediction_dict['attention_weights'][i]
    ground_truth=prediction_dict['ground_truths'][i]
    prediction=prediction_dict['predictions'][i]
    kmers=np.asarray(get_kmers(sequence,opts.kmers))
    # collapse the last attention layer by summing over heads/rows
    attention_weights=attention_weights[-1].sum(0)
    #attention_weights=attention_weights/attention_weights.sum()
    # plt.imshow(attention_weights.reshape(1,-1).astype('float32'))
    # plt.show()
    #exit()
    if ground_truth==1:
        state='positive'
    else:
        state='negative'
    if ground_truth==prediction:
        eval='correct'
    else:
        eval='wrong'
    if state=='positive' and eval=='correct':
        # k-mers at the 3 largest attention weights (ascending sort, take tail)
        sorted_indices=np.argsort(attention_weights)
        #print(attention_weights[sorted_indices][-3:])
        top_k=kmers[sorted_indices][-3:]
        for kmer in top_k:
            if kmer not in top_kmers:
                top_kmers.append(kmer)
                top_k_count.append(1)
            else:
                top_k_count[top_kmers.index(kmer)]=top_k_count[top_kmers.index(kmer)]+1
        #exit()
top_kmers=np.asarray(top_kmers)
top_k_count=np.asarray(top_k_count)
#exit()
# indices of k-mers sorted by descending appearance count
top_indices=np.flip(np.argsort(top_k_count))
fig, ax = plt.subplots()
x=np.arange(top)
width=0.4
bar=ax.bar(x,top_k_count[top_indices[:top]],edgecolor='k',linewidth=2)
ax.set_ylabel('Num of appearancesin top 3',fontsize=10)
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
ax.set_xticklabels(top_kmers[top_indices[:top]])
plt.setp(ax.get_xticklabels(), rotation=30, ha="right",
         rotation_mode="anchor")
ax.legend()
plt.savefig('promoter_motifs.eps')
#plt.show()
| [
"pandas.DataFrame",
"matplotlib.rc",
"argparse.ArgumentParser",
"numpy.asarray",
"os.system",
"numpy.argsort",
"pickle.load",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((245, 274), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (258, 274), False, 'import matplotlib\n'), ((730, 773), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['index', 'sequence']"}), "(columns=['index', 'sequence'])\n", (742, 773), True, 'import pandas as pd\n'), ((917, 949), 'os.system', 'os.system', (['"""mkdir aw_visualized"""'], {}), "('mkdir aw_visualized')\n", (926, 949), False, 'import os\n'), ((2284, 2305), 'numpy.asarray', 'np.asarray', (['top_kmers'], {}), '(top_kmers)\n', (2294, 2305), True, 'import numpy as np\n'), ((2319, 2342), 'numpy.asarray', 'np.asarray', (['top_k_count'], {}), '(top_k_count)\n', (2329, 2342), True, 'import numpy as np\n'), ((2415, 2429), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2427, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2447), 'numpy.arange', 'np.arange', (['top'], {}), '(top)\n', (2442, 2447), True, 'import numpy as np\n'), ((2806, 2840), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""promoter_motifs.eps"""'], {}), "('promoter_motifs.eps')\n", (2817, 2840), True, 'import matplotlib.pyplot as plt\n'), ((308, 333), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (331, 333), False, 'import argparse\n'), ((707, 721), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (718, 721), False, 'import pickle\n'), ((2377, 2400), 'numpy.argsort', 'np.argsort', (['top_k_count'], {}), '(top_k_count)\n', (2387, 2400), True, 'import numpy as np\n'), ((1876, 1905), 'numpy.argsort', 'np.argsort', (['attention_weights'], {}), '(attention_weights)\n', (1886, 1905), True, 'import numpy as np\n')] |
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_py import build_py
try:
from numpy import get_include
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy==1.19.2"])
from numpy import get_include
try:
from Cython.Build import cythonize
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "Cython==0.29.22"])
from Cython.Build import cythonize
class CustomBuild(build_py):  # type: ignore
    """Custom build command that compiles PortAudio before the normal build."""

    def run(self) -> None:
        """Build PortAudio (platform permitting), install PyAudio, then build."""
        platform = sys.platform
        if platform in ("win32", "win64"):
            # win32/64 users should install the PyAudio wheel or Conda package
            build_command = None
        elif platform == "mingw":
            # build with MinGW for windows
            build_command = ["./configure && make && make install"]
        else:
            # macos or linux
            build_command = ["./configure && make"]

        if build_command:
            # build PortAudio with the system specific command
            subprocess.run(
                build_command,
                shell=True,
                check=True,
                cwd="spokestack/extensions/portaudio",
            )
            # install PyAudio once PortAudio has been built
            subprocess.run(
                [sys.executable, "-m", "pip", "install", "pyaudio"],
                shell=True,
                check=True,
            )

        # run the normal build process
        build_py.run(self)
# C sources from the vendored WebRTC filter_audio tree, resolved relative to
# the extension package directory; compiled into every extension below.
SOURCES = [
    os.path.join("spokestack/extensions/webrtc", source)
    for source in [
        "filter_audio/other/complex_bit_reverse.c",
        "filter_audio/other/complex_fft.c",
        "filter_audio/other/copy_set_operations.c",
        "filter_audio/other/cross_correlation.c",
        "filter_audio/other/division_operations.c",
        "filter_audio/other/dot_product_with_scale.c",
        "filter_audio/other/downsample_fast.c",
        "filter_audio/other/energy.c",
        "filter_audio/other/get_scaling_square.c",
        "filter_audio/other/min_max_operations.c",
        "filter_audio/other/real_fft.c",
        "filter_audio/other/resample_by_2.c",
        "filter_audio/other/resample_by_2_internal.c",
        "filter_audio/other/resample_fractional.c",
        "filter_audio/other/resample_48khz.c",
        "filter_audio/other/spl_init.c",
        "filter_audio/other/spl_sqrt.c",
        "filter_audio/other/spl_sqrt_floor.c",
        "filter_audio/other/vector_scaling_operations.c",
        "filter_audio/vad/vad_core.c",
        "filter_audio/vad/vad_filterbank.c",
        "filter_audio/vad/vad_gmm.c",
        "filter_audio/vad/vad_sp.c",
        "filter_audio/vad/webrtc_vad.c",
        "filter_audio/agc/analog_agc.c",
        "filter_audio/agc/digital_agc.c",
        "filter_audio/ns/nsx_core.c",
        "filter_audio/ns/nsx_core_c.c",
        "filter_audio/ns/noise_suppression_x.c",
    ]
]
# Cython extension modules (AGC, noise suppression, VAD), each linked against
# the full set of WebRTC SOURCES.
# NOTE(review): the `vad` extension points include_dirs at the agc headers —
# looks like it should be the vad include directory; confirm before changing.
EXTENSIONS = [
    Extension(
        "spokestack.extensions.webrtc.agc",
        ["spokestack/extensions/webrtc/agc.pyx"] + SOURCES,
        include_dirs=["filter_audio/agc/include/"],
    ),
    Extension(
        "spokestack.extensions.webrtc.nsx",
        ["spokestack/extensions/webrtc/nsx.pyx"] + SOURCES,
        include_dirs=["filter_audio/ns/include/"],
    ),
    Extension(
        "spokestack.extensions.webrtc.vad",
        ["spokestack/extensions/webrtc/vad.pyx"] + SOURCES,
        include_dirs=["filter_audio/agc/include/"],
    ),
]
# Long description for PyPI is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()


setup(
    name="spokestack",
    version="0.0.23",
    author="Spokestack",
    author_email="<EMAIL>",
    description="Spokestack Library for Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/spokestack/spokestack-python",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    setup_requires=["setuptools", "wheel", "numpy==1.19.2", "Cython>=0.29.22"],
    install_requires=[
        "numpy==1.19.2",
        "Cython>=0.29.22",
        "websocket_client",
        "tokenizers",
        "requests",
    ],
    # compile the .pyx extensions and expose numpy headers to the C compiler
    ext_modules=cythonize(EXTENSIONS),
    include_dirs=[get_include()],
    # hook the PortAudio build (CustomBuild) into the normal build step
    cmdclass={"build_py": CustomBuild},
    zip_safe=False,
)
| [
"subprocess.run",
"setuptools.Extension",
"subprocess.check_call",
"Cython.Build.cythonize",
"numpy.get_include",
"setuptools.command.build_py.build_py.run",
"os.path.join",
"setuptools.find_packages"
] | [((1675, 1727), 'os.path.join', 'os.path.join', (['"""spokestack/extensions/webrtc"""', 'source'], {}), "('spokestack/extensions/webrtc', source)\n", (1687, 1727), False, 'import os\n'), ((3108, 3259), 'setuptools.Extension', 'Extension', (['"""spokestack.extensions.webrtc.agc"""', "(['spokestack/extensions/webrtc/agc.pyx'] + SOURCES)"], {'include_dirs': "['filter_audio/agc/include/']"}), "('spokestack.extensions.webrtc.agc', [\n 'spokestack/extensions/webrtc/agc.pyx'] + SOURCES, include_dirs=[\n 'filter_audio/agc/include/'])\n", (3117, 3259), False, 'from setuptools import Extension, find_packages, setup\n'), ((3286, 3436), 'setuptools.Extension', 'Extension', (['"""spokestack.extensions.webrtc.nsx"""', "(['spokestack/extensions/webrtc/nsx.pyx'] + SOURCES)"], {'include_dirs': "['filter_audio/ns/include/']"}), "('spokestack.extensions.webrtc.nsx', [\n 'spokestack/extensions/webrtc/nsx.pyx'] + SOURCES, include_dirs=[\n 'filter_audio/ns/include/'])\n", (3295, 3436), False, 'from setuptools import Extension, find_packages, setup\n'), ((3463, 3614), 'setuptools.Extension', 'Extension', (['"""spokestack.extensions.webrtc.vad"""', "(['spokestack/extensions/webrtc/vad.pyx'] + SOURCES)"], {'include_dirs': "['filter_audio/agc/include/']"}), "('spokestack.extensions.webrtc.vad', [\n 'spokestack/extensions/webrtc/vad.pyx'] + SOURCES, include_dirs=[\n 'filter_audio/agc/include/'])\n", (3472, 3614), False, 'from setuptools import Extension, find_packages, setup\n'), ((209, 294), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', 'numpy==1.19.2']"], {}), "([sys.executable, '-m', 'pip', 'install', 'numpy==1.19.2']\n )\n", (230, 294), False, 'import subprocess\n'), ((393, 479), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', 'Cython==0.29.22']"], {}), "([sys.executable, '-m', 'pip', 'install',\n 'Cython==0.29.22'])\n", (414, 479), False, 'import subprocess\n'), ((1638, 1656), 
'setuptools.command.build_py.build_py.run', 'build_py.run', (['self'], {}), '(self)\n', (1650, 1656), False, 'from setuptools.command.build_py import build_py\n'), ((4024, 4039), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (4037, 4039), False, 'from setuptools import Extension, find_packages, setup\n'), ((4498, 4519), 'Cython.Build.cythonize', 'cythonize', (['EXTENSIONS'], {}), '(EXTENSIONS)\n', (4507, 4519), False, 'from Cython.Build import cythonize\n'), ((1197, 1288), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'cwd': '"""spokestack/extensions/portaudio"""'}), "(command, shell=True, check=True, cwd=\n 'spokestack/extensions/portaudio')\n", (1211, 1288), False, 'import subprocess\n'), ((1436, 1532), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'pip', 'install', 'pyaudio']"], {'shell': '(True)', 'check': '(True)'}), "([sys.executable, '-m', 'pip', 'install', 'pyaudio'], shell=\n True, check=True)\n", (1450, 1532), False, 'import subprocess\n'), ((4539, 4552), 'numpy.get_include', 'get_include', ([], {}), '()\n', (4550, 4552), False, 'from numpy import get_include\n')] |
import sys
import numpy as np
def main() -> int:
    """Print axis-wise min/max/mean of a small demo matrix; return exit code 0."""
    a = np.array([[1, 2, ], [3, 4, ]], dtype=np.float32)
    print(a)
    for axis in (0, 1):
        print(f"np.min(a, axis={axis}): ", np.min(a, axis=axis))
        print(f"np.max(a, axis={axis}): ", np.max(a, axis=axis))
    for axis in (0, 1):
        print(f"np.mean(a, axis={axis}): ", np.mean(a, axis=axis))
    return 0


if __name__ == "__main__":
    sys.exit(main())
| [
"numpy.mean",
"numpy.min",
"numpy.max",
"numpy.array"
] | [((60, 104), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {'dtype': 'np.float32'}), '([[1, 2], [3, 4]], dtype=np.float32)\n', (68, 104), True, 'import numpy as np\n'), ((156, 173), 'numpy.min', 'np.min', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (162, 173), True, 'import numpy as np\n'), ((208, 225), 'numpy.max', 'np.max', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (214, 225), True, 'import numpy as np\n'), ((261, 278), 'numpy.min', 'np.min', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (267, 278), True, 'import numpy as np\n'), ((313, 330), 'numpy.max', 'np.max', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (319, 330), True, 'import numpy as np\n'), ((367, 385), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (374, 385), True, 'import numpy as np\n'), ((421, 439), 'numpy.mean', 'np.mean', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (428, 439), True, 'import numpy as np\n')] |
# Copyright 2021 by <NAME>. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.nexus module."""
import unittest
from io import StringIO
from Bio.Align.nexus import AlignmentIterator, AlignmentWriter
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install numpy if you want to use Bio.Align.nexus."
) from None
class TestNexusReading(unittest.TestCase):
    """Parsing of Nexus alignment files and write/read round-trips."""

    def check_reading_writing(self, path):
        """Write the parsed alignment back out and verify it reparses identically."""
        alignments = AlignmentIterator(path)
        stream = StringIO()
        writer = AlignmentWriter(stream)
        n = writer.write_file(alignments)
        self.assertEqual(n, 1)
        originals = list(AlignmentIterator(path))
        stream.seek(0)
        reparsed = list(AlignmentIterator(stream))
        self.assertEqual(len(originals), len(reparsed))
        alignment, saved_alignment = originals[0], reparsed[0]
        for i, (sequence, saved_sequence) in enumerate(
            zip(alignment.sequences, saved_alignment.sequences)
        ):
            self.assertEqual(sequence.id, saved_sequence.id)
            self.assertEqual(sequence.seq, saved_sequence.seq)
            self.assertEqual(sequence.annotations, saved_sequence.annotations)
            self.assertEqual(alignment[i], saved_alignment[i])
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, saved_alignment.coordinates)
        )

    def test_nexus1(self):
        """Parse a 9-sequence DNA alignment with awkward taxon names."""
        path = "Nexus/test_Nexus_input.nex"
        with open(path) as stream:
            parsed = AlignmentIterator(stream)
        alignments = list(parsed)
        self.assertEqual(len(alignments), 1)
        alignment = alignments[0]
        self.assertEqual(len(alignment), 9)
        self.assertEqual(alignment.shape, (9, 46))
        expected_ids = [
            "t1",
            "t2 the name",
            "isn'that [a] strange name?",
            "one should be punished, for (that)!",
            "t5",
            "t6",
            "t7",
            "t8",
            "t9",
        ]
        expected_seqs = [
            "ACGTcgtgtgtgctctttacgtgtgtgctcttt",
            "ACGcTcgtgtctttacacgtgtcttt",
            "ACcGcTcgtgtgtgctacacacgtgtgtgct",
            "ACGT",
            "AC?GT?acgt???????????acgt????????",
            "AcCaGtTc?aaaaaaaaaaacgactac?aaaaaaaaaa",
            "A?CGgTgggggggggggggg???gggggggggggggggg",
            "AtCtGtTtttttttttttt??ttttttttttttttttttt??",
            "cccccccccccccccccccNcccccccccccccccccccccNcc",
        ]
        for record, expected_id, expected_seq in zip(
            alignment.sequences, expected_ids, expected_seqs
        ):
            self.assertEqual(record.id, expected_id)
            self.assertEqual(record.annotations, {"molecule_type": "DNA"})
            self.assertEqual(record.seq, expected_seq)
        expected_coordinates = numpy.array(
            [
                [0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 8, 12, 13, 14, 16, 16, 17,
                 17, 18, 18, 18, 18, 19, 20, 21, 23, 27, 28, 29, 31, 31, 32, 32, 33],
                [0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 9, 9, 9, 10, 12, 12, 13,
                 13, 14, 14, 14, 16, 17, 18, 19, 21, 21, 21, 22, 24, 24, 25, 25, 26],
                [0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 14, 15, 16, 16, 16, 16,
                 16, 16, 16, 18, 20, 21, 22, 23, 25, 29, 30, 31, 31, 31, 31, 31, 31],
                [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
                 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                [0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 8, 12, 12, 13, 15, 15, 16,
                 17, 18, 18, 20, 20, 20, 21, 21, 23, 27, 27, 28, 30, 30, 31, 32, 33],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 13, 14, 15, 17, 17, 18,
                 18, 19, 21, 23, 25, 26, 27, 28, 28, 32, 33, 34, 36, 36, 37, 37, 38],
                [0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 10, 14, 15, 16, 18, 18, 19,
                 19, 20, 22, 22, 24, 25, 26, 27, 29, 33, 34, 35, 37, 37, 38, 38, 39],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 19, 20,
                 20, 21, 23, 25, 27, 28, 29, 30, 32, 36, 37, 38, 40, 40, 41, 41, 42],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 21,
                 21, 22, 24, 26, 28, 29, 30, 31, 33, 37, 38, 39, 41, 42, 43, 43, 44],
            ]
        )
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, expected_coordinates)
        )
        expected_rows = [
            "A-C-G-Tcgtgtgtgctct-t-t------acgtgtgtgctct-t-t",
            "A-C-GcTcgtg-----tct-t-t----acacgtg-----tct-t-t",
            "A-CcGcTcgtgtgtgct--------acacacgtgtgtgct------",
            "A-C-G-T---------------------------------------",
            "A-C?G-T?-acgt??-???-???--??---?-acgt??-???-???",
            "AcCaGtTc?--aaaaaaaa-a-aacgactac?--aaaaaaaa-a-a",
            "A?C-GgTgggggggggggg-g-g??--?gggggggggggggg-g-g",
            "AtCtGtTtttttttttttt-?-?ttttttttttttttttttt-?-?",
            "cccccccccccccccccccNc-ccccccccccccccccccccNc-c",
        ]
        for i, expected_row in enumerate(expected_rows):
            self.assertEqual(alignment[i], expected_row)
        self.check_reading_writing(path)

    def test_nexus2(self):
        """Parse a 2-sequence alignment that declares codon position sets."""
        path = "Nexus/codonposset.nex"
        with open(path) as stream:
            parsed = AlignmentIterator(stream)
        alignments = list(parsed)
        self.assertEqual(len(alignments), 1)
        alignment = alignments[0]
        self.assertEqual(len(alignment), 2)
        self.assertEqual(alignment.shape, (2, 22))
        self.assertEqual(alignment.sequences[0].id, "Aegotheles")
        self.assertEqual(alignment.sequences[1].id, "Aerodramus")
        for record in alignment.sequences:
            self.assertEqual(record.annotations, {"molecule_type": "DNA"})
        self.assertEqual(alignment.sequences[0].seq, "AAAAAGGCATTGTGGTGGGAAT")
        self.assertEqual(alignment.sequences[1].seq, "?????????TTGTGGTGGGAAT")
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, numpy.array([[0, 22], [0, 22]]))
        )
        self.assertEqual(alignment[0], "AAAAAGGCATTGTGGTGGGAAT")
        self.assertEqual(alignment[1], "?????????TTGTGGTGGGAAT")
        self.check_reading_writing(path)
class TestNexusBasic(unittest.TestCase):
    """Edge-case behaviour of the Nexus alignment parser."""

    def test_empty(self):
        """An empty stream must raise ValueError('Empty file.')."""
        import io

        empty_stream = io.StringIO()
        with self.assertRaisesRegex(ValueError, "Empty file."):
            AlignmentIterator(empty_stream)
if __name__ == "__main__":
    # verbosity=2 prints one line per test case
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| [
"unittest.main",
"io.StringIO",
"unittest.TextTestRunner",
"Bio.Align.nexus.AlignmentWriter",
"Bio.MissingPythonDependencyError",
"Bio.Align.nexus.AlignmentIterator",
"numpy.array",
"numpy.array_equal"
] | [((17090, 17126), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (17113, 17126), False, 'import unittest\n'), ((17131, 17163), 'unittest.main', 'unittest.main', ([], {'testRunner': 'runner'}), '(testRunner=runner)\n', (17144, 17163), False, 'import unittest\n'), ((467, 553), 'Bio.MissingPythonDependencyError', 'MissingPythonDependencyError', (['"""Install numpy if you want to use Bio.Align.nexus."""'], {}), "(\n 'Install numpy if you want to use Bio.Align.nexus.')\n", (495, 553), False, 'from Bio import MissingPythonDependencyError\n'), ((682, 705), 'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['path'], {}), '(path)\n', (699, 705), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((723, 733), 'io.StringIO', 'StringIO', ([], {}), '()\n', (731, 733), False, 'from io import StringIO\n'), ((751, 774), 'Bio.Align.nexus.AlignmentWriter', 'AlignmentWriter', (['stream'], {}), '(stream)\n', (766, 774), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((869, 892), 'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['path'], {}), '(path)\n', (886, 892), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((1015, 1040), 'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['stream'], {}), '(stream)\n', (1032, 1040), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((16932, 16945), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (16943, 16945), False, 'import io\n'), ((1860, 1885), 'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['stream'], {}), '(stream)\n', (1877, 1885), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((15829, 15854), 'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['stream'], {}), '(stream)\n', (15846, 15854), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((17022, 17047), 
'Bio.Align.nexus.AlignmentIterator', 'AlignmentIterator', (['stream'], {}), '(stream)\n', (17039, 17047), False, 'from Bio.Align.nexus import AlignmentIterator, AlignmentWriter\n'), ((1644, 1713), 'numpy.array_equal', 'numpy.array_equal', (['alignment.coordinates', 'saved_alignment.coordinates'], {}), '(alignment.coordinates, saved_alignment.coordinates)\n', (1661, 1713), False, 'import numpy\n'), ((4541, 5741), 'numpy.array', 'numpy.array', (['[[0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 8, 12, 13, 14, 16, 16, 17, 17, 18, 18, 18, \n 18, 19, 20, 21, 23, 27, 28, 29, 31, 31, 32, 32, 33], [0, 1, 1, 2, 2, 3,\n 4, 5, 6, 7, 9, 9, 9, 10, 12, 12, 13, 13, 14, 14, 14, 16, 17, 18, 19, 21,\n 21, 21, 22, 24, 24, 25, 25, 26], [0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 14,\n 15, 16, 16, 16, 16, 16, 16, 16, 18, 20, 21, 22, 23, 25, 29, 30, 31, 31,\n 31, 31, 31, 31], [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [0, 1, 1, 2, 3, 4, 4, \n 5, 6, 6, 8, 12, 12, 13, 15, 15, 16, 17, 18, 18, 20, 20, 20, 21, 21, 23,\n 27, 27, 28, 30, 30, 31, 32, 33], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 13, \n 14, 15, 17, 17, 18, 18, 19, 21, 23, 25, 26, 27, 28, 28, 32, 33, 34, 36,\n 36, 37, 37, 38], [0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 10, 14, 15, 16, 18, 18,\n 19, 19, 20, 22, 22, 24, 25, 26, 27, 29, 33, 34, 35, 37, 37, 38, 38, 39],\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 19, 20, 20, 21, 23, \n 25, 27, 28, 29, 30, 32, 36, 37, 38, 40, 40, 41, 41, 42], [0, 1, 2, 3, 4,\n 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 21, 21, 22, 24, 26, 28, 29, 30, \n 31, 33, 37, 38, 39, 41, 42, 43, 43, 44]]'], {}), '([[0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 8, 12, 13, 14, 16, 16, 17, 17, \n 18, 18, 18, 18, 19, 20, 21, 23, 27, 28, 29, 31, 31, 32, 32, 33], [0, 1,\n 1, 2, 2, 3, 4, 5, 6, 7, 9, 9, 9, 10, 12, 12, 13, 13, 14, 14, 14, 16, 17,\n 18, 19, 21, 21, 21, 22, 24, 24, 25, 25, 26], [0, 1, 1, 2, 3, 4, 5, 6, 7,\n 8, 10, 14, 15, 16, 16, 16, 16, 16, 16, 16, 18, 20, 21, 22, 23, 25, 29, \n 30, 31, 31, 31, 
31, 31, 31], [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [0, 1, 1, \n 2, 3, 4, 4, 5, 6, 6, 8, 12, 12, 13, 15, 15, 16, 17, 18, 18, 20, 20, 20,\n 21, 21, 23, 27, 27, 28, 30, 30, 31, 32, 33], [0, 1, 2, 3, 4, 5, 6, 7, 8,\n 9, 9, 13, 14, 15, 17, 17, 18, 18, 19, 21, 23, 25, 26, 27, 28, 28, 32, \n 33, 34, 36, 36, 37, 37, 38], [0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 10, 14, 15,\n 16, 18, 18, 19, 19, 20, 22, 22, 24, 25, 26, 27, 29, 33, 34, 35, 37, 37,\n 38, 38, 39], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 19, 20,\n 20, 21, 23, 25, 27, 28, 29, 30, 32, 36, 37, 38, 40, 40, 41, 41, 42], [0,\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 21, 21, 22, 24, 26, \n 28, 29, 30, 31, 33, 37, 38, 39, 41, 42, 43, 43, 44]])\n', (4552, 5741), False, 'import numpy\n'), ((16613, 16644), 'numpy.array', 'numpy.array', (['[[0, 22], [0, 22]]'], {}), '([[0, 22], [0, 22]])\n', (16624, 16644), False, 'import numpy\n')] |
from data_loader.bw_data_loader import MyDataLoader
from models.bw_model import MyModel
from trainers.my_trainer import MyModelTrainer
from utils.config import process_config
from utils.dirs import create_dirs
from utils.utils import get_args
import numpy as np
from matplotlib import pyplot as plt
plt.ion()


def _plot_samples(fig_num, images, landmarks, model, config, out_path):
    """Plot 4 random samples with true (green) vs predicted (red) landmarks.

    Parameters
    ----------
    fig_num: int, matplotlib figure number to (re)use
    images: array of input images, indexed as images[idx, :, :, :]
    landmarks: array of normalized landmark targets, interleaved (x0, y0, x1, y1, ...)
    model: trained Keras-style model with a predict() method
    config: experiment config providing config.data.IMAGE_SIZE
    out_path: path of the PNG file to save
    """
    fig = plt.figure(fig_num)
    fig.clf()
    rand_idx = np.random.choice(images.shape[0], size=4)
    y_pred = model.predict(images[rand_idx, :, :, :])
    for i, idx in enumerate(rand_idx):
        img = images[idx, :, :, :]
        ax = fig.add_subplot(2, 2, i + 1)
        # images are stored transposed; swap the first two axes for display
        ax.imshow(np.transpose(img, axes=[1, 0, 2]).squeeze())
        # landmarks are normalized to [0, 1]; rescale to pixel coordinates
        ax.plot(landmarks[idx, 0::2] * config.data.IMAGE_SIZE,
                landmarks[idx, 1::2] * config.data.IMAGE_SIZE, 'gx')
        ax.plot(y_pred[i, 0::2] * config.data.IMAGE_SIZE,
                y_pred[i, 1::2] * config.data.IMAGE_SIZE, 'rx')
    fig.savefig(out_path)


def main():
    """Build the model from the JSON config and plot sample predictions."""
    # capture the config path from the run arguments
    # then process the json configuration file
    except_types = (SystemExit, Exception)
    try:
        args = get_args()
        config = process_config(args.config)
    except except_types:  # keep legacy catch-all behavior (argparse raises SystemExit)
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir])

    print('Create the data generator.')
    data_loader = MyDataLoader(config)

    print('Create the model.')
    model = MyModel(config)

    print('Create the trainer')
    trainer = MyModelTrainer(model.model,
                             (data_loader.get_train_data(),
                              data_loader.get_test_data()),
                             config)

    print('Start training the model.')
    # NOTE: training is currently disabled; the message above is informational only.
    #trainer.train()

    print('Plotting random samples.')
    _plot_samples(1, data_loader.X_train, data_loader.y_train,
                  model.model, config, 'training_samples.png')
    _plot_samples(2, data_loader.X_test, data_loader.y_test,
                  model.model, config, 'test_samples.png')


if __name__ == '__main__':
    main()
| [
"numpy.transpose",
"models.bw_model.MyModel",
"utils.dirs.create_dirs",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"utils.config.process_config",
"data_loader.bw_data_loader.MyDataLoader",
"numpy.random.choice",
"utils.utils.get_args"
] | [((300, 309), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (307, 309), True, 'from matplotlib import pyplot as plt\n'), ((616, 705), 'utils.dirs.create_dirs', 'create_dirs', (['[config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir]'], {}), '([config.callbacks.tensorboard_log_dir, config.callbacks.\n checkpoint_dir])\n', (627, 705), False, 'from utils.dirs import create_dirs\n'), ((760, 780), 'data_loader.bw_data_loader.MyDataLoader', 'MyDataLoader', (['config'], {}), '(config)\n', (772, 780), False, 'from data_loader.bw_data_loader import MyDataLoader\n'), ((825, 840), 'models.bw_model.MyModel', 'MyModel', (['config'], {}), '(config)\n', (832, 840), False, 'from models.bw_model import MyModel\n'), ((1217, 1230), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1227, 1230), True, 'from matplotlib import pyplot as plt\n'), ((1262, 1316), 'numpy.random.choice', 'np.random.choice', (['data_loader.X_train.shape[0]'], {'size': '(4)'}), '(data_loader.X_train.shape[0], size=4)\n', (1278, 1316), True, 'import numpy as np\n'), ((2107, 2120), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2117, 2120), True, 'from matplotlib import pyplot as plt\n'), ((2152, 2205), 'numpy.random.choice', 'np.random.choice', (['data_loader.X_test.shape[0]'], {'size': '(4)'}), '(data_loader.X_test.shape[0], size=4)\n', (2168, 2205), True, 'import numpy as np\n'), ((447, 457), 'utils.utils.get_args', 'get_args', ([], {}), '()\n', (455, 457), False, 'from utils.utils import get_args\n'), ((475, 502), 'utils.config.process_config', 'process_config', (['args.config'], {}), '(args.config)\n', (489, 502), False, 'from utils.config import process_config\n'), ((1635, 1668), 'numpy.transpose', 'np.transpose', (['img'], {'axes': '[1, 0, 2]'}), '(img, axes=[1, 0, 2])\n', (1647, 1668), True, 'import numpy as np\n'), ((2520, 2553), 'numpy.transpose', 'np.transpose', (['img'], {'axes': '[1, 0, 2]'}), '(img, axes=[1, 0, 2])\n', (2532, 2553), 
True, 'import numpy as np\n')] |
import logging
import anndata
import igraph as ig
import leidenalg
import numpy as np
import scanpy
from anndata import AnnData
from sklearn.cluster import (DBSCAN, AgglomerativeClustering, Birch, KMeans,
SpectralClustering)
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import kneighbors_graph
from ..log import setup_logger
from ..methods import KMedoids
from ..utils.exceptions import InvalidArgument
from ..utils.validation import _validate_clu_n_clusters
from ._cluster_multiple import cluster_multiple
from ._evaluation import Eval_Silhouette
from ._unit import Unit
from ..methods import knn_auto
default_eval_obj = Eval_Silhouette()  # shared default scorer (silhouette)


def _get_wrapper(x, obj_def, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj,
                 n_jobs=None, attribute_name='n_clusters', **kwargs):
    """
    Wrapper function for those classes which specify the number of clusters
    in advance and also have fit_predict implemented. Classes include:
    KMedoids, KMeans, SpectralClustering, AgglomerativeClustering, Birch.

    Parameters
    __________
    x: array, shape (n_samples, n_features)
        The data array.
    obj_def: object name
        Object to be instantiated in this function.
    n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
        Array containing the different values of clusters to try,
        or single int specifying the number of clusters,
        or tuple of the form (a, b, c) which specifies a range
        for (x=a; x<b; x+=c)
    eval_obj: Eval or None, default None
        Evaluation object to compare performance of different trials.
    n_jobs: int or None, default None
        Number of jobs to use if multithreading. See
        https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
    attribute_name: string, default 'n_clusters'
        Name of the obj.attribute_name that corresponds to n_clusters.
    **kwargs: dictionary
        Dictionary of parameters that will get passed to obj_def
        when instantiating it.

    Returns
    _______
    y: array, shape (n_samples,)
        List of labels that correspond to the best clustering k, as
        evaluated by eval_obj.
    score: float
        Evaluation score of the returned clustering.
    """
    # Determine type of n_clusters passed; k is either an int or a
    # list/array of ints after validation.
    k = _validate_clu_n_clusters(n_clusters, x.shape[0])
    # If n_clusters determined to be single integer
    if isinstance(k, int):
        logger = setup_logger('Cluster.Single')
        # e.g. n_clusters=k for KMeans, n_components=k for GaussianMixture
        kwargs[attribute_name] = k
        y = obj_def(**kwargs).fit_predict(x)
        if eval_obj is None:
            # An explicit eval_obj=None falls back to the module default.
            eval_obj = default_eval_obj
        score = eval_obj.get(x, y)
        logger.info(
            "Finished clustering with k={0}. Score={1:.2f}.".format(k,
                                                                    score))
        return y, score
    # If n_clusters determined to be a list of integers, run every k and
    # let cluster_multiple pick the best according to eval_obj.
    elif isinstance(k, (list, np.ndarray)):
        return cluster_multiple(
            x, obj_def=obj_def, k_list=k, attribute_name=attribute_name,
            eval_obj=eval_obj, method_name='fit_predict',
            n_jobs=n_jobs, **kwargs)
class Clu_KMedoids(Unit):
    """
    K-Medoids clustering wrapper.

    See src.methods._k_medoids
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Array containing the different values of clusters to try,
            or single int specifying the number of clusters,
            or tuple of the form (a, b, c) which specifies a range
            for (x=a; x<b; x+=c)
        eval_obj: Eval or None, default None
            Evaluation object to compare performance of different trials.
        n_jobs: int or None, default None
            Number of jobs to use if multithreading. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('KMedoids')
        self.n_clusters = n_clusters
        self.eval_obj = eval_obj
        self.n_jobs = n_jobs
        self.kwargs = kwargs  # forwarded verbatim to KMedoids

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            List of labels that correspond to the best clustering k, as
            evaluated by eval_obj.
        score: float
            Evaluation score of the returned clustering.
        """
        self.logger.info("Initializing KMedoids.")
        # Delegates candidate-k search and scoring to _get_wrapper.
        return _get_wrapper(x, obj_def=KMedoids, n_clusters=self.n_clusters,
                            eval_obj=self.eval_obj, n_jobs=self.n_jobs,
                            **self.kwargs)
class Clu_KMeans(Unit):
    """
    Wrapper around scikit-learn's KMeans.

    See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Candidate numbers of clusters: a single int, an array of values
            to try, or a tuple (a, b, c) meaning the range
            (x=a; x<b; x+=c).
        eval_obj: Eval or None, default None
            Evaluation object used to rank the candidate clusterings.
        n_jobs: int or None, default None
            Number of parallel jobs when trying several k values. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Extra keyword arguments forwarded verbatim to KMeans.
        """
        self.logger = setup_logger('KMeans')
        self.kwargs = kwargs
        self.n_jobs = n_jobs
        self.eval_obj = eval_obj
        self.n_clusters = n_clusters

    def get(self, x):
        """
        Cluster the rows of x and return the best labeling.

        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            Labels of the best clustering, as ranked by eval_obj.
        """
        self.logger.info("Initializing KMeans.")
        return _get_wrapper(x, obj_def=KMeans, n_clusters=self.n_clusters,
                            eval_obj=self.eval_obj, n_jobs=self.n_jobs,
                            **self.kwargs)
class Clu_SpectralClustering(Unit):
    """
    Spectral clustering wrapper.

    See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Array containing the different values of clusters to try,
            or single int specifying the number of clusters,
            or tuple of the form (a, b, c) which specifies a range
            for (x=a; x<b; x+=c)
        eval_obj: Eval or None, default None
            Evaluation object to compare performance of different trials.
        n_jobs: int or None, default None
            Number of jobs to use if multithreading. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('Spectral Clustering')
        # Default to a nearest-neighbors affinity unless the caller
        # explicitly chose one.
        if 'affinity' not in kwargs:
            kwargs['affinity'] = 'nearest_neighbors'
        self.n_clusters = n_clusters
        self.eval_obj = eval_obj
        self.n_jobs = n_jobs
        self.kwargs = kwargs

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            List of labels that correspond to the best clustering k, as
            evaluated by eval_obj.
        score: float
            Evaluation score of the returned clustering.
        """
        self.logger.info("Initializing SpectralClustering.")
        return _get_wrapper(x, obj_def=SpectralClustering,
                            n_clusters=self.n_clusters, eval_obj=self.eval_obj,
                            n_jobs=self.n_jobs, **self.kwargs)
class Clu_Agglomerative(Unit):
    """
    Agglomerative (hierarchical) clustering wrapper.

    See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Array containing the different values of clusters to try,
            or single int specifying the number of clusters,
            or tuple of the form (a, b, c) which specifies a range
            for (x=a; x<b; x+=c)
        eval_obj: Eval or None, default None
            Evaluation object to compare performance of different trials.
        n_jobs: int or None, default None
            Number of jobs to use if multithreading. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('Agglomerative')
        self.n_clusters = n_clusters
        self.eval_obj = eval_obj
        self.n_jobs = n_jobs
        self.kwargs = kwargs  # forwarded verbatim to AgglomerativeClustering

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            List of labels that correspond to the best clustering k, as
            evaluated by eval_obj.
        score: float
            Evaluation score of the returned clustering.
        """
        self.logger.info("Initializing Agglomerative Clustering.")
        return _get_wrapper(x, obj_def=AgglomerativeClustering,
                            n_clusters=self.n_clusters, eval_obj=self.eval_obj,
                            n_jobs=self.n_jobs, **self.kwargs)
class Clu_Birch(Unit):
    """
    BIRCH clustering wrapper.

    See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.Birch.html
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Array containing the different values of clusters to try,
            or single int specifying the number of clusters,
            or tuple of the form (a, b, c) which specifies a range
            for (x=a; x<b; x+=c)
        eval_obj: Eval or None, default None
            Evaluation object to compare performance of different trials.
        n_jobs: int or None, default None
            Number of jobs to use if multithreading. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('Birch')
        self.n_clusters = n_clusters
        self.eval_obj = eval_obj
        self.n_jobs = n_jobs
        self.kwargs = kwargs  # forwarded verbatim to Birch

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            List of labels that correspond to the best clustering k, as
            evaluated by eval_obj.
        score: float
            Evaluation score of the returned clustering.
        """
        self.logger.info("Initializing Birch Clustering.")
        return _get_wrapper(x, obj_def=Birch, n_clusters=self.n_clusters,
                            eval_obj=self.eval_obj, n_jobs=self.n_jobs,
                            **self.kwargs)
class Clu_DBSCAN(Unit):
    """
    Density-based clustering (DBSCAN) wrapper. Unlike the other wrappers,
    DBSCAN determines the number of clusters itself.

    See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
    """

    def __init__(self, eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        eval_obj: Eval or None, default None
            Evaluation object to evaluate clustering.
        n_jobs: Ignored
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('DBSCAN')
        self.eval_obj = eval_obj
        self.kwargs = kwargs  # forwarded verbatim to DBSCAN

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            Cluster labels; DBSCAN assigns -1 to noisy samples.
        score: float or None
            Score computed by eval_obj, or None when eval_obj is None.
        """
        self.logger.info("Initializing DBSCAN.")
        y = DBSCAN(**self.kwargs).fit_predict(x)
        unqy = len(np.unique(y))
        noise = np.sum(y == -1)  # DBSCAN marks noise with label -1
        # BUG FIX: 'score' was previously only assigned inside the
        # `eval_obj is not None` branch, so the final `return y, score`
        # raised NameError whenever eval_obj was None.
        score = None
        if self.eval_obj is not None:
            score = self.eval_obj.get(x, y)
            self.logger.info(
                "Found {0} labels using DBSCAN.".format(unqy-(noise >= 1)) +
                "Score={0:.2f}.".format(score))
        else:
            self.logger.info("Found {0} labels using DBSCAN.".format(unqy))
        self.logger.info(
            "Found {0} noisy points. Assigning label -1.".format(noise))
        return y, score
class Clu_GaussianMixture(Unit):
    """
    Gaussian mixture model clustering wrapper.

    See https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html
    """

    def __init__(self, n_clusters=np.array([2, 4, 8, 16]),
                 eval_obj=default_eval_obj, n_jobs=None, **kwargs):
        """
        Parameters
        __________
        n_clusters: array or int or tuple, dtype int, default [2, 4, 8, 16]
            Array containing the different values of clusters to try,
            or single int specifying the number of clusters,
            or tuple of the form (a, b, c) which specifies a range
            for (x=a; x<b; x+=c)
        eval_obj: Eval or None, default None
            Evaluation object to compare performance of different trials.
        n_jobs: int or None, default None
            Number of jobs to use if multithreading. See
            https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.
        """
        self.logger = setup_logger('GaussianMixture')
        self.n_clusters = n_clusters
        self.eval_obj = eval_obj
        self.n_jobs = n_jobs
        self.kwargs = kwargs  # forwarded verbatim to GaussianMixture

    def get(self, x):
        """
        Parameters
        __________
        x: array, shape (n_samples, n_features)
            The data array.

        Returns
        _______
        y: array, shape (n_samples,)
            List of labels that correspond to the best clustering k, as
            evaluated by eval_obj.
        score: float
            Evaluation score of the returned clustering.
        """
        self.logger.info("Initializing Gaussian Mixture Model.")
        # GaussianMixture names its cluster-count parameter 'n_components',
        # hence the attribute_name override.
        return _get_wrapper(x, obj_def=GaussianMixture, n_clusters=self.n_clusters,
                            eval_obj=self.eval_obj, n_jobs=self.n_jobs,
                            attribute_name='n_components', **self.kwargs)
class Clu_Leiden(Unit):
    """
    Leiden community detection on a k-nearest-neighbors graph.

    See https://github.com/vtraag/leidenalg
    """

    def __init__(self, n_neighbors=15, n_clusters=None, resolution=1,
                 eval_obj=default_eval_obj, n_jobs=None,
                 n_iterations=-1, directed=True, **kwargs):
        """
        Parameters
        __________
        n_neighbors: int
            Number of neighbors to use when constructing neighbors graph.
        n_clusters: Ignored. Present for consistency.
        resolution: float, default 1
            Resolution parameter of the RB partition; must be positive.
        eval_obj: Ignored. Present for consistency.
        n_jobs: Ignored. Present for consistency.
        n_iterations: int, default -1
            Number of Leiden iterations; -1 means run until convergence.
        directed: bool, default True
            Whether the neighbors graph is built as a directed graph.
        **kwargs: dictionary
            Dictionary of parameters that will get passed to obj_def
            when instantiating it.

        Raises
        ______
        InvalidArgument
            If n_neighbors < 1 or resolution <= 0.
        """
        self.logger = setup_logger('Leiden')
        self.n_neighbors = int(n_neighbors)
        if self.n_neighbors < 1:
            raise InvalidArgument("Invalid number of neighbors.")
        self.eval_obj = eval_obj
        self.resolution = float(resolution)
        self.n_iterations = n_iterations
        self.directed = directed
        if self.resolution <= 0:
            raise InvalidArgument("Invalid resolution.")
        self.kwargs = kwargs

    def get(self, x):
        """
        Parameters
        __________
        x: array or AnnData, shape (n_samples, n_features)
            The data array; for AnnData, the 'x_emb' embedding is used.

        Returns
        _______
        y: array, shape (n_samples,)
            Community labels found by Leiden.
        score: int
            Always 0; Leiden does not use eval_obj.
        """
        self.logger.info("Initializing Leiden Clustering.")
        try:
            # Best-effort: silence numba performance warnings if numba is
            # installed and exposes this warning class.
            import warnings
            from numba.errors import NumbaPerformanceWarning
            warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
        except ImportError:
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit. Only the optional numba import
            # is expected to fail here (e.g. newer numba moved the module).
            pass
        is_anndata = isinstance(x, AnnData)
        if not is_anndata:
            x_to_use = x
        else:
            x_to_use = x.obsm['x_emb']
        sources, targets, weights = knn_auto(
            x_to_use, n_neighbors=self.n_neighbors, mode='distance')
        self.logger.info("Constructing neighbors graph.")
        gg = ig.Graph(directed=self.directed)
        gg.add_vertices(x_to_use.shape[0])
        gg.add_edges(list(zip(list(sources), list(targets))))
        self.logger.info("Running Leiden clustering.")
        part = leidenalg.find_partition(
            gg, leidenalg.RBConfigurationVertexPartition,
            weights=weights,
            n_iterations=self.n_iterations,
            resolution_parameter=self.resolution)
        return np.array(part.membership).astype(int), 0
| [
"sklearn.cluster.DBSCAN",
"numpy.sum",
"warnings.filterwarnings",
"igraph.Graph",
"leidenalg.find_partition",
"numpy.array",
"numpy.unique"
] | [((736, 759), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (744, 759), True, 'import numpy as np\n'), ((3278, 3301), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (3286, 3301), True, 'import numpy as np\n'), ((5100, 5123), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (5108, 5123), True, 'import numpy as np\n'), ((6940, 6963), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (6948, 6963), True, 'import numpy as np\n'), ((8907, 8930), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (8915, 8930), True, 'import numpy as np\n'), ((10763, 10786), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (10771, 10786), True, 'import numpy as np\n'), ((13542, 13557), 'numpy.sum', 'np.sum', (['(y == -1)'], {}), '(y == -1)\n', (13548, 13557), True, 'import numpy as np\n'), ((14189, 14212), 'numpy.array', 'np.array', (['[2, 4, 8, 16]'], {}), '([2, 4, 8, 16])\n', (14197, 14212), True, 'import numpy as np\n'), ((18048, 18080), 'igraph.Graph', 'ig.Graph', ([], {'directed': 'self.directed'}), '(directed=self.directed)\n', (18056, 18080), True, 'import igraph as ig\n'), ((18257, 18423), 'leidenalg.find_partition', 'leidenalg.find_partition', (['gg', 'leidenalg.RBConfigurationVertexPartition'], {'weights': 'weights', 'n_iterations': 'self.n_iterations', 'resolution_parameter': 'self.resolution'}), '(gg, leidenalg.RBConfigurationVertexPartition,\n weights=weights, n_iterations=self.n_iterations, resolution_parameter=\n self.resolution)\n', (18281, 18423), False, 'import leidenalg\n'), ((13512, 13524), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (13521, 13524), True, 'import numpy as np\n'), ((17608, 17675), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'NumbaPerformanceWarning'}), "('ignore', category=NumbaPerformanceWarning)\n", (17631, 17675), False, 'import warnings\n'), ((13456, 
13477), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {}), '(**self.kwargs)\n', (13462, 13477), False, 'from sklearn.cluster import DBSCAN, AgglomerativeClustering, Birch, KMeans, SpectralClustering\n'), ((18480, 18505), 'numpy.array', 'np.array', (['part.membership'], {}), '(part.membership)\n', (18488, 18505), True, 'import numpy as np\n')] |
import os
import numpy as np
try:
import matplotlib.cm as mplcm
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
pass
import openpifpaf
from .transforms import transform_skeleton
# The 24-keypoint car schema, in order; the trailing numbers are the
# 1-based keypoint ids used by the COCO-style skeleton definitions.
CAR_KEYPOINTS_24 = [
    'front_up_right',       # 1
    'front_up_left',        # 2
    'front_light_right',    # 3
    'front_light_left',     # 4
    'front_low_right',      # 5
    'front_low_left',       # 6
    'central_up_left',      # 7
    'front_wheel_left',     # 8
    'rear_wheel_left',      # 9
    'rear_corner_left',     # 10
    'rear_up_left',         # 11
    'rear_up_right',        # 12
    'rear_light_left',      # 13
    'rear_light_right',     # 14
    'rear_low_left',        # 15
    'rear_low_right',       # 16
    'central_up_right',     # 17
    'rear_corner_right',    # 18
    'rear_wheel_right',     # 19
    'front_wheel_right',    # 20
    'rear_plate_left',      # 21
    'rear_plate_right',     # 22
    'mirror_edge_left',     # 23
    'mirror_edge_right',    # 24
]

# Skeleton expressed in the original 66-keypoint index space; it is
# re-indexed into the 24-keypoint space via KPS_MAPPING below.
SKELETON_ORIG = [
    [49, 46], [49, 8], [49, 57], [8, 0], [8, 11], [57, 0],
    [57, 52], [0, 5], [52, 5], [5, 7],   # frontal
    [7, 20], [11, 23], [20, 23], [23, 25], [34, 32],
    [9, 11], [9, 7], [9, 20], [7, 0], [9, 0], [9, 8],  # L-lat
    [24, 33], [24, 25], [24, 11], [25, 32], [25, 28],
    [33, 32], [33, 46], [32, 29], [28, 29],  # rear
    [65, 64], [65, 25], [65, 28], [65, 20], [64, 29],
    [64, 32], [64, 37], [29, 37], [28, 20],  # new rear
    [34, 37], [34, 46], [37, 50], [50, 52], [46, 48], [48, 37],
    [48, 49], [50, 57], [48, 57], [48, 50]
]

# For each of the 24 keypoints (in order), its index in the 66-keypoint schema.
KPS_MAPPING = [49, 8, 57, 0, 52, 5, 11, 7, 20, 23, 24, 33, 25, 32, 28,
               29, 46, 34, 37, 50, 65, 64, 9, 48]

CAR_SKELETON_24 = transform_skeleton(SKELETON_ORIG, KPS_MAPPING)

CAR_SIGMAS_24 = [0.05] * len(KPS_MAPPING)  # uniform per-keypoint sigmas

# Score weights decrease by quartile of the keypoint list; the remainder
# (error) keypoints get the lowest weight.
split, error = divmod(len(CAR_KEYPOINTS_24), 4)
CAR_SCORE_WEIGHTS_24 = [10.0] * split + [3.0] * split + \
    [1.0] * split + [0.1] * split + [0.1] * error
assert len(CAR_SCORE_WEIGHTS_24) == len(CAR_KEYPOINTS_24)
# Horizontal-flip mapping for the 24-keypoint schema: each left keypoint
# name maps to its right counterpart and vice versa. Plate keypoints swap
# left/right; keypoints on the vehicle center line are absent.
HFLIP_24 = {
    'front_up_right': 'front_up_left',
    'front_light_right': 'front_light_left',
    'front_low_right': 'front_low_left',
    'central_up_left': 'central_up_right',
    'front_wheel_left': 'front_wheel_right',
    'rear_wheel_left': 'rear_wheel_right',
    'rear_corner_left': 'rear_corner_right',
    'rear_up_left': 'rear_up_right',
    'rear_light_left': 'rear_light_right',
    'rear_low_left': 'rear_low_right',
    'front_up_left': 'front_up_right',
    'front_light_left': 'front_light_right',
    'front_low_left': 'front_low_right',
    'central_up_right': 'central_up_left',
    'front_wheel_right': 'front_wheel_left',
    'rear_wheel_right': 'rear_wheel_left',
    'rear_corner_right': 'rear_corner_left',
    'rear_up_right': 'rear_up_left',
    'rear_light_right': 'rear_light_left',
    'rear_low_right': 'rear_low_left',
    'rear_plate_left': 'rear_plate_right',
    'rear_plate_right': 'rear_plate_left',
    'mirror_edge_left': 'mirror_edge_right',
    'mirror_edge_right': 'mirror_edge_left'
}

CAR_CATEGORIES_24 = ['car']

p = 0.25        # small positional offset used in the synthetic poses below
FRONT = -6.0    # z coordinate of the car front in the canonical pose
BACK = 4.5      # z coordinate of the car rear in the canonical pose

# CAR POSE is used for joint rescaling. x = [-3, 3] y = [0,4]
# Canonical 3D pose of the 24-keypoint car: rows are (x, y, z) per keypoint.
CAR_POSE_24 = np.array([
    [-2.9, 4.0, FRONT * 0.5],   # 'front_up_right',     # 1
    [2.9, 4.0, FRONT * 0.5],    # 'front_up_left',      # 2
    [-2.0, 2.0, FRONT],         # 'front_light_right',  # 3
    [2.0, 2.0, FRONT],          # 'front_light_left',   # 4
    [-2.5, 0.0, FRONT],         # 'front_low_right',    # 5
    [2.5, 0.0, FRONT],          # 'front_low_left',     # 6
    [2.6, 4.2, 0.0],            # 'central_up_left'     # 7
    [3.2, 0.2, FRONT * 0.7],    # 'front_wheel_left',   # 8
    [3.0, 0.3, BACK * 0.7],     # 'rear_wheel_left'     # 9
    [3.1, 2.1, BACK * 0.5],     # 'rear_corner_left',   # 10
    [2.4, 4.3, BACK * 0.35],    # 'rear_up_left',       # 11
    [-2.4, 4.3, BACK * 0.35],   # 'rear_up_right'       # 12
    [2.5, 2.2, BACK],           # 'rear_light_left',    # 13
    [-2.5, 2.2, BACK],          # 'rear_light_right',   # 14
    [2.1, 0.1, BACK],           # 'rear_low_left',      # 15
    [-2.1, 0.1, BACK],          # 'rear_low_right',     # 16
    [-2.6, 4.2, 0.0],           # 'central_up_right'    # 17
    [-3.1, 2.1, BACK * 0.5],    # 'rear_corner_right',  # 18
    [-3.0, 0.3, BACK * 0.7],    # 'rear_wheel_right'    # 19
    [-3.2, 0.2, FRONT * 0.7],   # 'front_wheel_right',  # 20
    [1.0, 1.3, BACK],           # 'rear_plate_left',    # 21
    [-1.0, 1.3, BACK],          # 'rear_plate_right',   # 22
    [2.8, 3, FRONT * 0.35],     # 'mirror_edge_left'    # 23
    [-2.8, 3, FRONT * 0.35],    # 'mirror_edge_right'   # 24
])
# Front-view pose (first 20 keypoints only): rows are (x, y, v) where the
# third column is a visibility flag (2 = visible, 1 = partly, 0 = hidden).
CAR_POSE_FRONT_24 = np.array([
    [-2.0, 4.0, 2.0],               # 'front_up_right',     # 1
    [2.0, 4.0, 2.0],                # 'front_up_left',      # 2
    [-1.3, 2.0, 2.0],               # 'front_light_right',  # 3
    [1.3, 2.0, 2.0],                # 'front_light_left',   # 4
    [-2.2, 0.0, 2.0],               # 'front_low_right',    # 5
    [2.2, 0.0, 2.0],                # 'front_low_left',     # 6
    [2.0 - p / 2, 4.0 + p, 1.0],    # 'central_up_left',    # 7
    [2.0 + p, 0.1 - p / 2, 1.0],    # 'front_wheel_left',   # 8
    [2, 0.1, 0.0],                  # 'rear_wheel_left',    # 9
    [2.6, 1.7, 0.0],                # 'rear_corner_left',   # 10
    [2.0, 4.1, 0.0],                # 'rear_up_left',       # 11
    [-2.0, 4.0, 0.0],               # 'rear_up_right',      # 12
    [2.1, 1.9, 0.0],                # 'rear_light_left',    # 13
    [-2.1, 1.9, 0.0],               # 'rear_right_right',   # 14
    [2.4, 0.1, 0.0],                # 'rear_low_left',      # 15
    [-2.4, 0.1, 0.0],               # 'rear_low_right',     # 16
    [-2.0 + p / 2, 4.0 + p, 1.0],   # 'central_up_right',   # 17
    [-2.6, 1.75, 0.0],              # 'rear_corner_right',  # 18
    [-2, 0.0, 0.0],                 # 'rear_wheel_right',   # 19
    [-2 - p, 0.0 - p / 2, 1.0],     # 'front_wheel_right',  # 20
])
# Rear-view pose (first 20 keypoints only): rows are (x, y, v) with a
# visibility flag in the third column.
CAR_POSE_REAR_24 = np.array([
    [-2.0, 4.0, 0.0],           # 'front_up_right',     # 1
    [2.0, 4.0, 0.0],            # 'front_up_left',      # 2
    [-1.3, 2.0, 0.0],           # 'front_light_right',  # 3
    [1.3, 2.0, 0.0],            # 'front_light_left',   # 4
    [-2.2, 0.0, 0.0],           # 'front_low_right',    # 5
    [2.2, 0.0, 0.0],            # 'front_low_left',     # 6
    [-2.0 + p, 4.0 + p, 2.0],   # 'central_up_left',    # 7
    [2, 0.0, 0.0],              # 'front_wheel_left',   # 8
    [2, 0.0, 0.0],              # 'rear_wheel_left',    # 9
    [-1.6 - p, 2.2 - p, 2.0],   # 'rear_corner_left',   # 10
    [-2.0, 4.0, 2.0],           # 'rear_up_left',       # 11
    [2.0, 4.0, 2.0],            # 'rear_up_right',      # 12
    [-1.6, 2.2, 2.0],           # 'rear_light_left',    # 13
    [1.6, 2.2, 2.0],            # 'rear_right_right',   # 14
    [-2.4, 0.0, 2.0],           # 'rear_low_left',      # 15
    [2.4, 0.0, 2.0],            # 'rear_low_right',     # 16
    [2.0 - p, 4.0 + p, 2.0],    # 'central_up_right',   # 17
    [1.6 + p, 2.2 - p, 2.0],    # 'rear_corner_right',  # 18
    [-2, 0.0, 0.0],             # 'rear_wheel_right',   # 19
    [-2, 0.0, 0.0],             # 'front_wheel_right',  # 20
])
# Left-side-view pose (first 20 keypoints only): rows are (x, y, v) with a
# visibility flag in the third column.
CAR_POSE_LEFT_24 = np.array([
    [-2.0, 4.0, 0.0],                   # 'front_up_right',     # 1
    [0 - 5 * p, 4.0 - p / 2, 2.0],      # 'front_up_left',      # 2
    [-1.3, 2.0, 0.0],                   # 'front_light_right',  # 3
    [1.3, 2.0, 0.0],                    # 'front_light_left',   # 4
    [-2.2, 0.0, 0.0],                   # 'front_low_right',    # 5
    [-4 - 3 * p, 0.0, 2.0],             # 'front_low_left',     # 6
    [0, 4.0, 2.0],                      # 'central_up_left',    # 7
    [-4, 0.0, 2.0],                     # 'front_wheel_left',   # 8
    [4, 0.0, 2.0],                      # 'rear_wheel_left',    # 9
    [5, 2, 2.0],                        # 'rear_corner_left',   # 10
    [0 + 5 * p, 4.0 - p / 2, 2.0],      # 'rear_up_left',       # 11
    [2.0, 4.0, 0.0],                    # 'rear_up_right',      # 12
    [5 + p, 2 + p, 1.0],                # 'rear_light_left',    # 13
    [1.6, 2.2, 0.0],                    # 'rear_right_right',   # 14
    [-2.4, 0.0, 0.0],                   # 'rear_low_left',      # 15
    [2.4, 0.0, 0.0],                    # 'rear_low_right',     # 16
    [2.0, 4.0, 0.0],                    # 'central_up_right',   # 17
    [1.6, 2.2, 0.0],                    # 'rear_corner_right',  # 18
    [-2, 0.0, 0.0],                     # 'rear_wheel_right',   # 19
    [-2, 0.0, 0.0],                     # 'front_wheel_right',  # 20
])
# Right-side-view pose (first 20 keypoints only): rows are (x, y, v) with a
# visibility flag in the third column.
CAR_POSE_RIGHT_24 = np.array([
    [0 + 5 * p, 4.0 - p / 2, 2.0],      # 'front_up_right',     # 1
    [0, 4.0, 0.0],                      # 'front_up_left',      # 2
    [-1.3, 2.0, 0.0],                   # 'front_light_right',  # 3
    [1.3, 2.0, 0.0],                    # 'front_light_left',   # 4
    [4 + 3 * p, 0.0, 2.0],              # 'front_low_right',    # 5
    [-4 - 3, 0.0, 0.0],                 # 'front_low_left',     # 6
    [0, 4.0, 0.0],                      # 'central_up_left',    # 7
    [-4, 0.0, 0.0],                     # 'front_wheel_left',   # 8
    [4, 0.0, 0.0],                      # 'rear_wheel_left',    # 9
    [5, 2, 0.0],                        # 'rear_corner_left',   # 10
    [0 + 5, 4.0, 0.0],                  # 'rear_up_left',       # 11
    [0 - 5 * p, 4.0 - p / 2, 2.0],      # 'rear_up_right',      # 12
    [5, 2, 0.0],                        # 'rear_light_left',    # 13
    [-5 - p, 2.0 + p, 2.0],             # 'rear_light_right',   # 14
    [-2.4, 0.0, 0.0],                   # 'rear_low_left',      # 15
    [2.4, 0.0, 0.0],                    # 'rear_low_right',     # 16
    [0.0, 4.0, 2.0],                    # 'central_up_right',   # 17
    [-5, 2.0, 2.0],                     # 'rear_corner_right',  # 18
    [-4, 0.0, 2.0],                     # 'rear_wheel_right',   # 19
    [4, 0.0, 2.0],                      # 'front_wheel_right',  # 20
])
# The 66-keypoint car schema, in order; the trailing numbers are the
# 0-based indices used by HFLIP_ids and the skeleton definitions below.
CAR_KEYPOINTS_66 = [
    "top_left_c_left_front_car_light",          # 0
    "bottom_left_c_left_front_car_light",       # 1
    "top_right_c_left_front_car_light",         # 2
    "bottom_right_c_left_front_car_light",      # 3
    "top_right_c_left_front_fog_light",         # 4
    "bottom_right_c_left_front_fog_light",      # 5
    "front_section_left_front_wheel",           # 6
    "center_left_front_wheel",                  # 7
    "top_right_c_front_glass",                  # 8
    "top_left_c_left_front_door",               # 9
    "bottom_left_c_left_front_door",            # 10
    "top_right_c_left_front_door",              # 11
    "middle_c_left_front_door",                 # 12
    "front_c_car_handle_left_front_door",       # 13
    "rear_c_car_handle_left_front_door",        # 14
    "bottom_right_c_left_front_door",           # 15
    "top_right_c_left_rear_door",               # 16
    "front_c_car_handle_left_rear_door",        # 17
    "rear_c_car_handle_left_rear_door",         # 18
    "bottom_right_c_left_rear_door",            # 19
    "center_left_rear_wheel",                   # 20
    "rear_section_left_rear_wheel",             # 21
    "top_left_c_left_rear_car_light",           # 22
    "bottom_left_c_left_rear_car_light",        # 23
    "top_left_c_rear_glass",                    # 24
    "top_right_c_left_rear_car_light",          # 25
    "bottom_right_c_left_rear_car_light",       # 26
    "bottom_left_c_trunk",                      # 27
    "Left_c_rear_bumper",                       # 28
    "Right_c_rear_bumper",                      # 29
    "bottom_right_c_trunk",                     # 30
    "bottom_left_c_right_rear_car_light",       # 31
    "top_left_c_right_rear_car_light",          # 32
    "top_right_c_rear_glass",                   # 33
    "bottom_right_c_right_rear_car_light",      # 34
    "top_right_c_right_rear_car_light",         # 35
    "rear_section_right_rear_wheel",            # 36
    "center_right_rear_wheel",                  # 37
    "bottom_left_c_right_rear_car_door",        # 38
    "rear_c_car_handle_right_rear_car_door",    # 39
    "front_c_car_handle_right_rear_car_door",   # 40
    "top_left_c_right_rear_car_door",           # 41
    "bottom_left_c_right_front_car_door",       # 42
    "rear_c_car_handle_right_front_car_door",   # 43
    "front_c_car_handle_right_front_car_door",  # 44
    "middle_c_right_front_car_door",            # 45
    "top_left_c_right_front_car_door",          # 46
    "bottom_right_c_right_front_car_door",      # 47
    "top_right_c_right_front_car_door",         # 48
    "top_left_c_front_glass",                   # 49
    "center_right_front_wheel",                 # 50
    "front_section_right_front_wheel",          # 51
    "bottom_left_c_right_fog_light",            # 52
    "top_left_c_right_fog_light",               # 53
    "bottom_left_c_right_front_car_light",      # 54
    "top_left_c_right_front_car_light",         # 55
    "bottom_right_c_right_front_car_light",     # 56
    "top_right_c_right_front_car_light",        # 57
    "top_right_c_front_lplate",                 # 58
    "top_left_c_front_lplate",                  # 59
    "bottom_right_c_front_lplate",              # 60
    "bottom_left_c_front_lplate",               # 61
    "top_left_c_rear_lplate",                   # 62
    "top_right_c_rear_lplate",                  # 63
    "bottom_right_c_rear_lplate",               # 64
    "bottom_left_c_rear_lplate", ]              # 65
# Horizontal-flip pairs for the 66-keypoint schema, expressed as index ->
# mirrored index. Each pair is listed once; the loop below expands it to a
# symmetric name-level mapping.
HFLIP_ids = {
    0: 57,
    1: 56,
    2: 55,
    3: 54,
    4: 53,
    5: 52,
    6: 51,
    7: 50,
    8: 49,
    9: 48,
    10: 47,
    11: 46,
    12: 45,
    13: 44,
    14: 43,
    15: 42,
    16: 41,
    17: 40,
    18: 39,
    19: 38,
    20: 37,
    21: 36,
    22: 35,
    23: 34,
    24: 33,
    25: 32,
    26: 31,
    27: 30,
    28: 29,
    59: 58,
    61: 60,
    62: 63,
    65: 64
}

# Expand the index pairs into a symmetric keypoint-name mapping, and track
# which indices are covered so completeness can be asserted.
HFLIP_66 = {}
checklist = []
for ind in HFLIP_ids:
    HFLIP_66[CAR_KEYPOINTS_66[ind]] = CAR_KEYPOINTS_66[HFLIP_ids[ind]]
    HFLIP_66[CAR_KEYPOINTS_66[HFLIP_ids[ind]]] = CAR_KEYPOINTS_66[ind]
    checklist.append(ind)
    checklist.append(HFLIP_ids[ind])
# Every keypoint must appear exactly once across all flip pairs.
assert sorted(checklist) == list(range(len(CAR_KEYPOINTS_66)))
assert len(HFLIP_66) == len(CAR_KEYPOINTS_66)
CAR_CATEGORIES_66 = ['car']

# Bones for the left half of the car, as 0-based index pairs.
SKELETON_LEFT = [
    [59, 61], [59, 1], [61, 5], [0, 1], [0, 2], [2, 3], [3, 1], [3, 4], [4, 5],  # front
    [5, 6], [6, 7], [4, 7], [2, 9], [9, 8], [8, 11], [7, 10], [6, 10], [9, 10],  # side front part
    [11, 12], [11, 24], [9, 12], [10, 15], [12, 15],
    [9, 13], [13, 14], [14, 12], [14, 15],  # side middle part
    [24, 16], [12, 16], [12, 17], [17, 18], [18, 16],
    [15, 19], [19, 20], [19, 18], [20, 21], [16, 21],  # side back part
    [16, 22], [21, 28], [22, 23], [23, 28], [22, 25], [25, 26],
    [23, 26], [26, 27], [25, 62], [27, 65], [62, 65], [28, 65]]

# Right-half bones are obtained by mirroring every left bone through HFLIP_ids.
SKELETON_RIGHT = [[HFLIP_ids[bone[0]], HFLIP_ids[bone[1]]] for bone in SKELETON_LEFT]

# Bones that connect the two halves across the car's center line.
SKELETON_CONNECT = [
    [28, 29], [62, 63], [65, 64], [24, 33], [46, 11],
    [48, 9], [59, 58], [60, 61], [0, 57], [49, 8]]

SKELETON_ALL = SKELETON_LEFT + SKELETON_RIGHT + SKELETON_CONNECT
CAR_SKELETON_66 = [(bone[0] + 1, bone[1] + 1) for bone in SKELETON_ALL]  # COCO style skeleton

CAR_SIGMAS_66 = [0.05] * len(CAR_KEYPOINTS_66)  # uniform per-keypoint sigmas

# Score weights decrease by quartile of the keypoint list; the remainder
# (error) keypoints get the lowest weight.
split, error = divmod(len(CAR_KEYPOINTS_66), 4)
CAR_SCORE_WEIGHTS_66 = [10.0] * split + [3.0] * split + \
    [1.0] * split + [0.1] * split + [0.1] * error
assert len(CAR_SCORE_WEIGHTS_66) == len(CAR_KEYPOINTS_66)

# number plate offsets
P_X = 0.3
P_Y_TOP = -0.2
P_Y_BOTTOM = -0.4

# z for front
FRONT_Z = -2.0
FRONT_Z_SIDE = -1.8
FRONT_Z_CORNER = -1.7
FRONT_Z_WHEEL = -1.4
FRONT_Z_DOOR = -1.0

# lights x offset
LIGHT_X_INSIDE = 0.8
X_OUTSIDE = 1.0

# y offsets
TOP_CAR = 0.5
BOTTOM_LINE = -0.75
TOP_LINE = 0.1

# z for the back
BACK_Z_WHEEL = 1.0
BACK_Z = 1.5
BACK_Z_SIDE = 1.3
# Canonical pose for the LEFT half of the 66-keypoint car. Rows 0-28 and
# the plate corners are defined explicitly; all right-side rows are NaN
# placeholders that get filled by mirroring (see the HFLIP loop that builds
# CAR_POSE_66).
CAR_POSE_HALF = np.array([
    [-LIGHT_X_INSIDE, 0.0, FRONT_Z],  # 0
    [-LIGHT_X_INSIDE, -0.2, FRONT_Z],  # 1
    [-X_OUTSIDE, 0.0, FRONT_Z_SIDE],  # 2
    [-X_OUTSIDE, -0.2, FRONT_Z_SIDE],  # 3
    [-X_OUTSIDE, P_Y_BOTTOM, FRONT_Z_SIDE],  # 4
    [-X_OUTSIDE, P_Y_BOTTOM - 0.2, FRONT_Z_SIDE],  # 5
    [-X_OUTSIDE, BOTTOM_LINE, FRONT_Z_CORNER],  # 6
    [-X_OUTSIDE, BOTTOM_LINE + 0.1, FRONT_Z_WHEEL],  # 7
    [-X_OUTSIDE + 0.1, TOP_CAR, FRONT_Z_DOOR + 0.5],  # 8
    [-X_OUTSIDE, TOP_LINE, FRONT_Z_DOOR],  # 9
    [-X_OUTSIDE, BOTTOM_LINE, FRONT_Z_DOOR],  # 10
    [-X_OUTSIDE + 0.1, TOP_CAR, 0.1],  # 11
    [-X_OUTSIDE, TOP_LINE, 0.05],  # 12
    [-X_OUTSIDE, 0.0, -0.1],  # 13
    [-X_OUTSIDE, 0.0, 0.0],  # 14
    [-X_OUTSIDE, BOTTOM_LINE, 0.0],  # 15
    [-X_OUTSIDE, TOP_LINE, BACK_Z_WHEEL],  # 16
    [-X_OUTSIDE, 0.0, BACK_Z_WHEEL * 0.8],  # 17
    [-X_OUTSIDE, 0.0, BACK_Z_WHEEL * 0.9],  # 18
    [-X_OUTSIDE, BOTTOM_LINE, BACK_Z_WHEEL * 0.6],  # 19
    [-X_OUTSIDE, BOTTOM_LINE + 0.1, BACK_Z_WHEEL],  # 20
    [-X_OUTSIDE, BOTTOM_LINE, BACK_Z_SIDE - 0.2],  # 21
    [-X_OUTSIDE, 0.0, BACK_Z_SIDE],  # 22
    [-X_OUTSIDE, -0.2, BACK_Z_SIDE],  # 23
    [-X_OUTSIDE + 0.1, TOP_CAR - 0.1, BACK_Z_WHEEL],  # 24
    [-LIGHT_X_INSIDE, 0.0, BACK_Z],  # 25
    [-LIGHT_X_INSIDE, -0.2, BACK_Z],  # 26
    [-LIGHT_X_INSIDE + 0.1, -0.3, BACK_Z],  # 27
    [-X_OUTSIDE + 0.1, BOTTOM_LINE, BACK_Z]] + \
    [[np.nan, np.nan, np.nan]] * 30 + \
    [[-P_X, P_Y_TOP, FRONT_Z]] + \
    [[np.nan, np.nan, np.nan]] + \
    [[-P_X, P_Y_BOTTOM, FRONT_Z],  # 61
     [-P_X, P_Y_TOP, BACK_Z]] + \
    [[np.nan, np.nan, np.nan]] * 2 + \
    [[-P_X, P_Y_BOTTOM, BACK_Z]])  # 65
# NOTE: this is an alias, not a copy -- the loop below fills the array in
# place, so CAR_POSE_HALF is completed at the same time.
CAR_POSE_66 = CAR_POSE_HALF
for key in HFLIP_ids:
    # Mirror every annotated left-side keypoint onto its right-side twin
    # (same y/z, negated x).
    CAR_POSE_66[HFLIP_ids[key], :] = CAR_POSE_HALF[key, :]
    CAR_POSE_66[HFLIP_ids[key], 0] = -CAR_POSE_HALF[key, 0]
# BUG FIX: `CAR_POSE_66 == np.nan` is always False because NaN never
# compares equal to anything, so the original assert was vacuous. Use
# np.isnan to actually verify the mirroring filled every placeholder row.
assert not np.any(np.isnan(CAR_POSE_66))
# Per-keypoint training weights, one per keypoint of the 66-kp schema.
# NOTE(review): the name suggests they were derived from each keypoint's
# local centrality in the skeleton graph -- verify against the training code.
training_weights_local_centrality = [
    0.890968488270775,
    0.716506138617812,
    1.05674590410869,
    0.764774195768455,
    0.637682585483328,
    0.686680807728366,
    0.955422595797394,
    0.936714585642375,
    1.34823795445326,
    1.38308992581967,
    1.32689945125819,
    1.38838655605483,
    1.18980184904613,
    1.02584355494795,
    0.90969156732068,
    1.24732068576104,
    1.11338768064342,
    0.933815217550391,
    0.852297518872114,
    1.04167641424727,
    1.01668968075247,
    1.34625964088011,
    0.911796331039028,
    0.866206536337413,
    1.55957820407853,
    0.730844382675724,
    0.651138644197359,
    0.758018559633786,
    1.31842501396691,
    1.32186116654782,
    0.744347016851606,
    0.636390683664723,
    0.715244950821949,
    1.63122349407032,
    0.849835699185461,
    0.910488007220499,
    1.44244151650561,
    1.14150437331681,
    1.19808610191343,
    0.960186788642886,
    1.05023623286937,
    1.19761709710598,
    1.3872216313401,
    1.01256700741214,
    1.1167909667759,
    1.27893496336199,
    1.54475684725655,
    1.40343733870633,
    1.45552060866114,
    1.47264222155031,
    0.970060423999993,
    0.944450314768933,
    0.623987071240172,
    0.5745237907704,
    0.66890646050993,
    0.978411632994504,
    0.587396395188292,
    0.76307999741129,
    0.609793563449648,
    0.67983566494545,
    0.685883538168462,
    0.753587600664775,
    0.770335133588157,
    0.764713638033368,
    0.792364155965385,
    0.796435233566833
]
def get_constants(num_kps):
    """Return the metadata bundle for the requested car skeleton size.

    Parameters
    ----------
    num_kps : int
        Number of keypoints; only 24 and 66 are supported.

    Returns
    -------
    list
        [keypoint names, skeleton, hflip map, sigmas, reference pose,
        categories, score weights] for the chosen skeleton.

    Raises
    ------
    ValueError
        If num_kps is neither 24 nor 66.

    Side effect: marks every keypoint of the module-level reference pose
    as visible (third coordinate set to 2.0), as the original code did.
    """
    if num_kps == 24:
        CAR_POSE_24[:, 2] = 2.0
        return [CAR_KEYPOINTS_24, CAR_SKELETON_24, HFLIP_24, CAR_SIGMAS_24,
                CAR_POSE_24, CAR_CATEGORIES_24, CAR_SCORE_WEIGHTS_24]
    if num_kps == 66:
        CAR_POSE_66[:, 2] = 2.0
        return [CAR_KEYPOINTS_66, CAR_SKELETON_66, HFLIP_66, CAR_SIGMAS_66,
                CAR_POSE_66, CAR_CATEGORIES_66, CAR_SCORE_WEIGHTS_66]
    # using no if-elif-else construction due to pylint no-else-return error
    # ValueError is more precise than bare Exception and is still caught by
    # any existing `except Exception` call sites.
    raise ValueError("Only poses with 24 or 66 keypoints are available.")
def draw_ann(ann, *, keypoint_painter, filename=None, margin=0.5, aspect=None, **kwargs):
    """Draw a single annotation on a fresh canvas cropped to its bbox.

    The axes are limited to the annotation's bounding box plus `margin`
    padding on every side; the figure is 5 units tall (5x5 when
    aspect='equal'). When `filename` is given the canvas is saved there.
    """
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    box = ann.bbox()
    xlim = (box[0] - margin, box[0] + box[2] + margin)
    ylim = (box[1] - margin, box[1] + box[3] + margin)
    if aspect == 'equal':
        fig_w = 5.0
    else:
        # keep the same pixel aspect: width scales with the x/y extent ratio
        fig_w = 5.0 / (ylim[1] - ylim[0]) * (xlim[1] - xlim[0])

    with show.canvas(filename, figsize=(fig_w, 5), nomargin=True, **kwargs) as ax:
        ax.set_axis_off()
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)
        if aspect is not None:
            ax.set_aspect(aspect)
        keypoint_painter.annotation(ax, ann)
def draw_skeletons(pose, sigmas, skel, kps, scr_weights):
    """Render the reference skeleton `pose` to docs/skeleton_car.png."""
    from openpifpaf.annotation import Annotation  # pylint: disable=import-outside-toplevel
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    # scale the per-keypoint sigmas by the geometric mean of the pose extents
    x_extent = np.max(pose[:, 0]) - np.min(pose[:, 0])
    y_extent = np.max(pose[:, 1]) - np.min(pose[:, 1])
    scale = np.sqrt(x_extent * y_extent)

    show.KeypointPainter.show_joint_scales = True
    painter = show.KeypointPainter()

    ann = Annotation(keypoints=kps, skeleton=skel, score_weights=scr_weights)
    ann.set(pose, np.array(sigmas) * scale)
    os.makedirs('docs', exist_ok=True)
    draw_ann(ann, filename='docs/skeleton_car.png', keypoint_painter=painter)
def plot3d_red(ax_2D, p3d, skeleton):
    """Plot keypoints `p3d` as a 3D skeleton on the figure of `ax_2D`.

    Returns a FuncAnimation that rotates the view through 360 degrees
    (one degree of azimuth per frame).
    """
    # skeleton pairs are 1-indexed keypoint ids; convert to 0-based indices
    skeleton = [(bone[0] - 1, bone[1] - 1) for bone in skeleton]

    # permutation matrix: swaps the Y and Z coordinates of every point
    rot_p90_x = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
    p3d = p3d @ rot_p90_x

    fig = ax_2D.get_figure()

    # add a 3D axes on top of the (hidden) 2D axes
    ax = Axes3D(fig, auto_add_to_figure=False)
    fig.add_axes(ax)
    ax.set_axis_off()
    ax_2D.set_axis_off()
    ax.view_init(azim=-90, elev=20)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # equal-aspect cube: half-edge is half the largest coordinate span
    max_range = np.array([p3d[:, 0].max() - p3d[:, 0].min(),
                          p3d[:, 1].max() - p3d[:, 1].min(),
                          p3d[:, 2].max() - p3d[:, 2].min()]).max() / 2.0
    mid_x = (p3d[:, 0].max() + p3d[:, 0].min()) * 0.5
    mid_y = (p3d[:, 1].max() + p3d[:, 1].min()) * 0.5
    mid_z = (p3d[:, 2].max() + p3d[:, 2].min()) * 0.5
    ax.set_xlim(mid_x - max_range, mid_x + max_range)
    ax.set_ylim(mid_y - max_range, mid_y + max_range)
    ax.set_zlim(mid_z - max_range, mid_z + max_range)  # pylint: disable=no-member

    for ci, bone in enumerate(skeleton):
        c = mplcm.get_cmap('tab20')((ci % 20 + 0.05) / 20)  # Same coloring as Pifpaf preds
        ax.plot(p3d[bone, 0], p3d[bone, 1], p3d[bone, 2], color=c)

    def animate(i):
        # one degree of azimuth per frame
        ax.view_init(elev=10., azim=i)
        return fig

    return FuncAnimation(fig, animate, frames=360, interval=100)
def print_associations():
    """Print every bone of both car skeletons as a keypoint-name pair."""
    for n_kps, skeleton, keypoints in (
            (24, CAR_SKELETON_24, CAR_KEYPOINTS_24),
            (66, CAR_SKELETON_66, CAR_KEYPOINTS_66)):
        print("\nAssociations of the car skeleton with %d keypoints" % n_kps)
        for j1, j2 in skeleton:
            # skeleton ids are 1-indexed into the keypoint-name lists
            print(keypoints[j1 - 1], '-', keypoints[j2 - 1])
def main():
    """Print skeleton associations and export rotating 3D pose GIFs."""
    print_associations()
    # The static 2D skeleton renderings (draw_skeletons for the 24- and
    # 66-keypoint poses) are intentionally not part of the default flow;
    # call draw_skeletons manually to regenerate docs/skeleton_car.png.
    for pose, skeleton, gif_name in (
            (CAR_POSE_66, CAR_SKELETON_66, 'CAR_66_Pose.gif'),
            (CAR_POSE_24, CAR_SKELETON_24, 'CAR_24_Pose.gif')):
        with openpifpaf.show.Canvas.blank(nomargin=True) as ax_2D:
            anim = plot3d_red(ax_2D, pose, skeleton)
            anim.save('openpifpaf/plugins/apollocar3d/docs/' + gif_name, fps=30)


if __name__ == '__main__':
    main()
| [
"os.makedirs",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.cm.get_cmap",
"openpifpaf.show.KeypointPainter",
"matplotlib.animation.FuncAnimation",
"numpy.any",
"openpifpaf.show.canvas",
"numpy.max",
"numpy.min",
"numpy.array",
"openpifpaf.show.Canvas.blank",
"openpifpaf.annotation.Annotation"
] | [((3250, 3815), 'numpy.array', 'np.array', (['[[-2.9, 4.0, FRONT * 0.5], [2.9, 4.0, FRONT * 0.5], [-2.0, 2.0, FRONT], [\n 2.0, 2.0, FRONT], [-2.5, 0.0, FRONT], [2.5, 0.0, FRONT], [2.6, 4.2, 0.0\n ], [3.2, 0.2, FRONT * 0.7], [3.0, 0.3, BACK * 0.7], [3.1, 2.1, BACK * \n 0.5], [2.4, 4.3, BACK * 0.35], [-2.4, 4.3, BACK * 0.35], [2.5, 2.2,\n BACK], [-2.5, 2.2, BACK], [2.1, 0.1, BACK], [-2.1, 0.1, BACK], [-2.6, \n 4.2, 0.0], [-3.1, 2.1, BACK * 0.5], [-3.0, 0.3, BACK * 0.7], [-3.2, 0.2,\n FRONT * 0.7], [1.0, 1.3, BACK], [-1.0, 1.3, BACK], [2.8, 3, FRONT * \n 0.35], [-2.8, 3, FRONT * 0.35]]'], {}), '([[-2.9, 4.0, FRONT * 0.5], [2.9, 4.0, FRONT * 0.5], [-2.0, 2.0,\n FRONT], [2.0, 2.0, FRONT], [-2.5, 0.0, FRONT], [2.5, 0.0, FRONT], [2.6,\n 4.2, 0.0], [3.2, 0.2, FRONT * 0.7], [3.0, 0.3, BACK * 0.7], [3.1, 2.1, \n BACK * 0.5], [2.4, 4.3, BACK * 0.35], [-2.4, 4.3, BACK * 0.35], [2.5, \n 2.2, BACK], [-2.5, 2.2, BACK], [2.1, 0.1, BACK], [-2.1, 0.1, BACK], [-\n 2.6, 4.2, 0.0], [-3.1, 2.1, BACK * 0.5], [-3.0, 0.3, BACK * 0.7], [-3.2,\n 0.2, FRONT * 0.7], [1.0, 1.3, BACK], [-1.0, 1.3, BACK], [2.8, 3, FRONT *\n 0.35], [-2.8, 3, FRONT * 0.35]])\n', (3258, 3815), True, 'import numpy as np\n'), ((4767, 5194), 'numpy.array', 'np.array', (['[[-2.0, 4.0, 2.0], [2.0, 4.0, 2.0], [-1.3, 2.0, 2.0], [1.3, 2.0, 2.0], [-\n 2.2, 0.0, 2.0], [2.2, 0.0, 2.0], [2.0 - p / 2, 4.0 + p, 1.0], [2.0 + p,\n 0.1 - p / 2, 1.0], [2, 0.1, 0.0], [2.6, 1.7, 0.0], [2.0, 4.1, 0.0], [-\n 2.0, 4.0, 0.0], [2.1, 1.9, 0.0], [-2.1, 1.9, 0.0], [2.4, 0.1, 0.0], [-\n 2.4, 0.1, 0.0], [-2.0 + p / 2, 4.0 + p, 1.0], [-2.6, 1.75, 0.0], [-2, \n 0.0, 0.0], [-2 - p, 0.0 - p / 2, 1.0]]'], {}), '([[-2.0, 4.0, 2.0], [2.0, 4.0, 2.0], [-1.3, 2.0, 2.0], [1.3, 2.0, \n 2.0], [-2.2, 0.0, 2.0], [2.2, 0.0, 2.0], [2.0 - p / 2, 4.0 + p, 1.0], [\n 2.0 + p, 0.1 - p / 2, 1.0], [2, 0.1, 0.0], [2.6, 1.7, 0.0], [2.0, 4.1, \n 0.0], [-2.0, 4.0, 0.0], [2.1, 1.9, 0.0], [-2.1, 1.9, 0.0], [2.4, 0.1, \n 0.0], [-2.4, 0.1, 0.0], [-2.0 + p / 2, 4.0 + p, 
1.0], [-2.6, 1.75, 0.0],\n [-2, 0.0, 0.0], [-2 - p, 0.0 - p / 2, 1.0]])\n', (4775, 5194), True, 'import numpy as np\n'), ((5924, 6333), 'numpy.array', 'np.array', (['[[-2.0, 4.0, 0.0], [2.0, 4.0, 0.0], [-1.3, 2.0, 0.0], [1.3, 2.0, 0.0], [-\n 2.2, 0.0, 0.0], [2.2, 0.0, 0.0], [-2.0 + p, 4.0 + p, 2.0], [2, 0.0, 0.0\n ], [2, 0.0, 0.0], [-1.6 - p, 2.2 - p, 2.0], [-2.0, 4.0, 2.0], [2.0, 4.0,\n 2.0], [-1.6, 2.2, 2.0], [1.6, 2.2, 2.0], [-2.4, 0.0, 2.0], [2.4, 0.0, \n 2.0], [2.0 - p, 4.0 + p, 2.0], [1.6 + p, 2.2 - p, 2.0], [-2, 0.0, 0.0],\n [-2, 0.0, 0.0]]'], {}), '([[-2.0, 4.0, 0.0], [2.0, 4.0, 0.0], [-1.3, 2.0, 0.0], [1.3, 2.0, \n 0.0], [-2.2, 0.0, 0.0], [2.2, 0.0, 0.0], [-2.0 + p, 4.0 + p, 2.0], [2, \n 0.0, 0.0], [2, 0.0, 0.0], [-1.6 - p, 2.2 - p, 2.0], [-2.0, 4.0, 2.0], [\n 2.0, 4.0, 2.0], [-1.6, 2.2, 2.0], [1.6, 2.2, 2.0], [-2.4, 0.0, 2.0], [\n 2.4, 0.0, 2.0], [2.0 - p, 4.0 + p, 2.0], [1.6 + p, 2.2 - p, 2.0], [-2, \n 0.0, 0.0], [-2, 0.0, 0.0]])\n', (5932, 6333), True, 'import numpy as np\n'), ((7047, 7450), 'numpy.array', 'np.array', (['[[-2.0, 4.0, 0.0], [0 - 5 * p, 4.0 - p / 2, 2.0], [-1.3, 2.0, 0.0], [1.3, \n 2.0, 0.0], [-2.2, 0.0, 0.0], [-4 - 3 * p, 0.0, 2.0], [0, 4.0, 2.0], [-4,\n 0.0, 2.0], [4, 0.0, 2.0], [5, 2, 2.0], [0 + 5 * p, 4.0 - p / 2, 2.0], [\n 2.0, 4.0, 0.0], [5 + p, 2 + p, 1.0], [1.6, 2.2, 0.0], [-2.4, 0.0, 0.0],\n [2.4, 0.0, 0.0], [2.0, 4.0, 0.0], [1.6, 2.2, 0.0], [-2, 0.0, 0.0], [-2,\n 0.0, 0.0]]'], {}), '([[-2.0, 4.0, 0.0], [0 - 5 * p, 4.0 - p / 2, 2.0], [-1.3, 2.0, 0.0],\n [1.3, 2.0, 0.0], [-2.2, 0.0, 0.0], [-4 - 3 * p, 0.0, 2.0], [0, 4.0, 2.0\n ], [-4, 0.0, 2.0], [4, 0.0, 2.0], [5, 2, 2.0], [0 + 5 * p, 4.0 - p / 2,\n 2.0], [2.0, 4.0, 0.0], [5 + p, 2 + p, 1.0], [1.6, 2.2, 0.0], [-2.4, 0.0,\n 0.0], [2.4, 0.0, 0.0], [2.0, 4.0, 0.0], [1.6, 2.2, 0.0], [-2, 0.0, 0.0],\n [-2, 0.0, 0.0]])\n', (7055, 7450), True, 'import numpy as np\n'), ((8163, 8567), 'numpy.array', 'np.array', (['[[0 + 5 * p, 4.0 - p / 2, 2.0], [0, 4.0, 0.0], [-1.3, 2.0, 0.0], [1.3, 
2.0,\n 0.0], [4 + 3 * p, 0.0, 2.0], [-4 - 3, 0.0, 0.0], [0, 4.0, 0.0], [-4, \n 0.0, 0.0], [4, 0.0, 0.0], [5, 2, 0.0], [0 + 5, 4.0, 0.0], [0 - 5 * p, \n 4.0 - p / 2, 2.0], [5, 2, 0.0], [-5 - p, 2.0 + p, 2.0], [-2.4, 0.0, 0.0\n ], [2.4, 0.0, 0.0], [0.0, 4.0, 2.0], [-5, 2.0, 2.0], [-4, 0.0, 2.0], [4,\n 0.0, 2.0]]'], {}), '([[0 + 5 * p, 4.0 - p / 2, 2.0], [0, 4.0, 0.0], [-1.3, 2.0, 0.0], [\n 1.3, 2.0, 0.0], [4 + 3 * p, 0.0, 2.0], [-4 - 3, 0.0, 0.0], [0, 4.0, 0.0\n ], [-4, 0.0, 0.0], [4, 0.0, 0.0], [5, 2, 0.0], [0 + 5, 4.0, 0.0], [0 - \n 5 * p, 4.0 - p / 2, 2.0], [5, 2, 0.0], [-5 - p, 2.0 + p, 2.0], [-2.4, \n 0.0, 0.0], [2.4, 0.0, 0.0], [0.0, 4.0, 2.0], [-5, 2.0, 2.0], [-4, 0.0, \n 2.0], [4, 0.0, 2.0]])\n', (8171, 8567), True, 'import numpy as np\n'), ((14918, 16325), 'numpy.array', 'np.array', (['([[-LIGHT_X_INSIDE, 0.0, FRONT_Z], [-LIGHT_X_INSIDE, -0.2, FRONT_Z], [-\n X_OUTSIDE, 0.0, FRONT_Z_SIDE], [-X_OUTSIDE, -0.2, FRONT_Z_SIDE], [-\n X_OUTSIDE, P_Y_BOTTOM, FRONT_Z_SIDE], [-X_OUTSIDE, P_Y_BOTTOM - 0.2,\n FRONT_Z_SIDE], [-X_OUTSIDE, BOTTOM_LINE, FRONT_Z_CORNER], [-X_OUTSIDE, \n BOTTOM_LINE + 0.1, FRONT_Z_WHEEL], [-X_OUTSIDE + 0.1, TOP_CAR, \n FRONT_Z_DOOR + 0.5], [-X_OUTSIDE, TOP_LINE, FRONT_Z_DOOR], [-X_OUTSIDE,\n BOTTOM_LINE, FRONT_Z_DOOR], [-X_OUTSIDE + 0.1, TOP_CAR, 0.1], [-\n X_OUTSIDE, TOP_LINE, 0.05], [-X_OUTSIDE, 0.0, -0.1], [-X_OUTSIDE, 0.0, \n 0.0], [-X_OUTSIDE, BOTTOM_LINE, 0.0], [-X_OUTSIDE, TOP_LINE,\n BACK_Z_WHEEL], [-X_OUTSIDE, 0.0, BACK_Z_WHEEL * 0.8], [-X_OUTSIDE, 0.0,\n BACK_Z_WHEEL * 0.9], [-X_OUTSIDE, BOTTOM_LINE, BACK_Z_WHEEL * 0.6], [-\n X_OUTSIDE, BOTTOM_LINE + 0.1, BACK_Z_WHEEL], [-X_OUTSIDE, BOTTOM_LINE, \n BACK_Z_SIDE - 0.2], [-X_OUTSIDE, 0.0, BACK_Z_SIDE], [-X_OUTSIDE, -0.2,\n BACK_Z_SIDE], [-X_OUTSIDE + 0.1, TOP_CAR - 0.1, BACK_Z_WHEEL], [-\n LIGHT_X_INSIDE, 0.0, BACK_Z], [-LIGHT_X_INSIDE, -0.2, BACK_Z], [-\n LIGHT_X_INSIDE + 0.1, -0.3, BACK_Z], [-X_OUTSIDE + 0.1, BOTTOM_LINE,\n BACK_Z]] + [[np.nan, np.nan, np.nan]] * 30 + [[-P_X, P_Y_TOP, 
FRONT_Z]] +\n [[np.nan, np.nan, np.nan]] + [[-P_X, P_Y_BOTTOM, FRONT_Z], [-P_X,\n P_Y_TOP, BACK_Z]] + [[np.nan, np.nan, np.nan]] * 2 + [[-P_X, P_Y_BOTTOM,\n BACK_Z]])'], {}), '([[-LIGHT_X_INSIDE, 0.0, FRONT_Z], [-LIGHT_X_INSIDE, -0.2, FRONT_Z],\n [-X_OUTSIDE, 0.0, FRONT_Z_SIDE], [-X_OUTSIDE, -0.2, FRONT_Z_SIDE], [-\n X_OUTSIDE, P_Y_BOTTOM, FRONT_Z_SIDE], [-X_OUTSIDE, P_Y_BOTTOM - 0.2,\n FRONT_Z_SIDE], [-X_OUTSIDE, BOTTOM_LINE, FRONT_Z_CORNER], [-X_OUTSIDE, \n BOTTOM_LINE + 0.1, FRONT_Z_WHEEL], [-X_OUTSIDE + 0.1, TOP_CAR, \n FRONT_Z_DOOR + 0.5], [-X_OUTSIDE, TOP_LINE, FRONT_Z_DOOR], [-X_OUTSIDE,\n BOTTOM_LINE, FRONT_Z_DOOR], [-X_OUTSIDE + 0.1, TOP_CAR, 0.1], [-\n X_OUTSIDE, TOP_LINE, 0.05], [-X_OUTSIDE, 0.0, -0.1], [-X_OUTSIDE, 0.0, \n 0.0], [-X_OUTSIDE, BOTTOM_LINE, 0.0], [-X_OUTSIDE, TOP_LINE,\n BACK_Z_WHEEL], [-X_OUTSIDE, 0.0, BACK_Z_WHEEL * 0.8], [-X_OUTSIDE, 0.0,\n BACK_Z_WHEEL * 0.9], [-X_OUTSIDE, BOTTOM_LINE, BACK_Z_WHEEL * 0.6], [-\n X_OUTSIDE, BOTTOM_LINE + 0.1, BACK_Z_WHEEL], [-X_OUTSIDE, BOTTOM_LINE, \n BACK_Z_SIDE - 0.2], [-X_OUTSIDE, 0.0, BACK_Z_SIDE], [-X_OUTSIDE, -0.2,\n BACK_Z_SIDE], [-X_OUTSIDE + 0.1, TOP_CAR - 0.1, BACK_Z_WHEEL], [-\n LIGHT_X_INSIDE, 0.0, BACK_Z], [-LIGHT_X_INSIDE, -0.2, BACK_Z], [-\n LIGHT_X_INSIDE + 0.1, -0.3, BACK_Z], [-X_OUTSIDE + 0.1, BOTTOM_LINE,\n BACK_Z]] + [[np.nan, np.nan, np.nan]] * 30 + [[-P_X, P_Y_TOP, FRONT_Z]] +\n [[np.nan, np.nan, np.nan]] + [[-P_X, P_Y_BOTTOM, FRONT_Z], [-P_X,\n P_Y_TOP, BACK_Z]] + [[np.nan, np.nan, np.nan]] * 2 + [[-P_X, P_Y_BOTTOM,\n BACK_Z]])\n', (14926, 16325), True, 'import numpy as np\n'), ((16752, 16781), 'numpy.any', 'np.any', (['(CAR_POSE_66 == np.nan)'], {}), '(CAR_POSE_66 == np.nan)\n', (16758, 16781), True, 'import numpy as np\n'), ((20011, 20033), 'openpifpaf.show.KeypointPainter', 'show.KeypointPainter', ([], {}), '()\n', (20031, 20033), False, 'from openpifpaf import show\n'), ((20044, 20111), 'openpifpaf.annotation.Annotation', 'Annotation', ([], {'keypoints': 'kps', 'skeleton': 
'skel', 'score_weights': 'scr_weights'}), '(keypoints=kps, skeleton=skel, score_weights=scr_weights)\n', (20054, 20111), False, 'from openpifpaf.annotation import Annotation\n'), ((20160, 20194), 'os.makedirs', 'os.makedirs', (['"""docs"""'], {'exist_ok': '(True)'}), "('docs', exist_ok=True)\n", (20171, 20194), False, 'import os\n'), ((20404, 20447), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, 1], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 0, 1], [0, 1, 0]])\n', (20412, 20447), True, 'import numpy as np\n'), ((20513, 20550), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'auto_add_to_figure': '(False)'}), '(fig, auto_add_to_figure=False)\n', (20519, 20550), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((21567, 21620), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'animate'], {'frames': '(360)', 'interval': '(100)'}), '(fig, animate, frames=360, interval=100)\n', (21580, 21620), False, 'from matplotlib.animation import FuncAnimation\n'), ((19314, 19380), 'openpifpaf.show.canvas', 'show.canvas', (['filename'], {'figsize': '(fig_w, 5)', 'nomargin': '(True)'}), '(filename, figsize=(fig_w, 5), nomargin=True, **kwargs)\n', (19325, 19380), False, 'from openpifpaf import show\n'), ((22527, 22570), 'openpifpaf.show.Canvas.blank', 'openpifpaf.show.Canvas.blank', ([], {'nomargin': '(True)'}), '(nomargin=True)\n', (22555, 22570), False, 'import openpifpaf\n'), ((22740, 22783), 'openpifpaf.show.Canvas.blank', 'openpifpaf.show.Canvas.blank', ([], {'nomargin': '(True)'}), '(nomargin=True)\n', (22768, 22783), False, 'import openpifpaf\n'), ((20130, 20146), 'numpy.array', 'np.array', (['sigmas'], {}), '(sigmas)\n', (20138, 20146), True, 'import numpy as np\n'), ((21329, 21352), 'matplotlib.cm.get_cmap', 'mplcm.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (21343, 21352), True, 'import matplotlib.cm as mplcm\n'), ((19838, 19856), 'numpy.max', 'np.max', (['pose[:, 0]'], {}), '(pose[:, 0])\n', (19844, 19856), True, 'import numpy as np\n'), ((19859, 
19877), 'numpy.min', 'np.min', (['pose[:, 0]'], {}), '(pose[:, 0])\n', (19865, 19877), True, 'import numpy as np\n'), ((19890, 19908), 'numpy.max', 'np.max', (['pose[:, 1]'], {}), '(pose[:, 1])\n', (19896, 19908), True, 'import numpy as np\n'), ((19911, 19929), 'numpy.min', 'np.min', (['pose[:, 1]'], {}), '(pose[:, 1])\n', (19917, 19929), True, 'import numpy as np\n')] |
# ActivitySim
# See full license in LICENSE.txt.
import sys
import os
import logging
import yaml
import numpy as np
import pandas as pd
from activitysim.abm.models.util import tour_frequency as tf
from activitysim.core.util import reindex
# Module logger: DEBUG level with a plain console handler so inference
# progress and data problems are visible when this runs as a script.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler; NOTE(review): the original comment said "higher log level"
# but no level is set on the handler, so it inherits the logger's DEBUG level
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(ch)
# Shared constants (e.g. PEMPLOY_FULL/PEMPLOY_PART looked up by
# patch_tour_ids) -- presumably populated elsewhere from a settings file;
# TODO confirm where this dict gets filled.
CONSTANTS = {}

# Column names for the original survey ids, kept distinct from the
# activitysim-style ids so the two id spaces cannot be confused.
SURVEY_TOUR_ID = 'survey_tour_id'
SURVEY_PARENT_TOUR_ID = 'survey_parent_tour_id'
SURVEY_PARTICIPANT_ID = 'survey_participant_id'
# Column names for the inferred activitysim ids.
ASIM_TOUR_ID = 'tour_id'
ASIM_PARENT_TOUR_ID = 'parent_tour_id'

# Input survey tables: file name and optional index column per table.
# read_tables() adds a 'table' entry holding the loaded DataFrame.
survey_tables = {
    'households': {
        'file_name': 'survey_households.csv',
        'index': 'household_id'
    },
    'persons': {
        'file_name': 'survey_persons.csv',
        'index': 'person_id'
    },
    'tours': {
        'file_name': 'survey_tours.csv'
    },
    'joint_tour_participants': {
        'file_name': 'survey_joint_tour_participants.csv'
    },
}

# Output file names for the override tables written by this script.
outputs = {
    'households': 'override_households.csv',
    'persons': 'override_persons.csv',
    'tours': 'override_tours.csv',
    'joint_tour_participants': 'override_joint_tour_participants.csv'
}

# Control tables (final_* activitysim outputs) compared against the survey
# tables by check_controls().
control_tables = {
    'households': {
        'file_name': 'final_households.csv',
        'index': 'household_id'
    },
    'persons': {
        'file_name': 'final_persons.csv',
        'index': 'person_id'
    },
    'tours': {
        'file_name': 'final_tours.csv'
    },
    'joint_tour_participants': {
        'file_name': 'final_joint_tour_participants.csv'
    },
}

# Toggle for the control checks; skip_controls is kept as the complement.
apply_controls = True
skip_controls = not apply_controls
def mangle_ids(ids):
    """Scale survey ids by 10 so they cannot be confused with asim ids."""
    scaled = ids * 10
    return scaled
def unmangle_ids(ids):
    """Invert mangle_ids: floor-divide the mangled ids by 10."""
    original = ids // 10
    return original
def infer_cdap_activity(persons, tours, joint_tour_participants):
    """Classify each person's daily activity pattern as M, N, or H.

    'M' when the person has any mandatory (work/school) tour, otherwise
    'N' when they have any non-mandatory tour or participate in a joint
    tour, otherwise 'H' (home).
    """
    MANDATORY_TYPES = ['work', 'school']
    NON_MANDATORY_TYPES = ['escort', 'shopping', 'othmaint', 'othdiscr', 'eatout', 'social']

    def _per_person_counts(frame):
        # tour (or participation) counts aligned to the persons index
        return frame.groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)

    n_mandatory = _per_person_counts(tours[tours.tour_type.isin(MANDATORY_TYPES)])
    n_non_mandatory = _per_person_counts(tours[tours.tour_type.isin(NON_MANDATORY_TYPES)])
    n_joint = _per_person_counts(joint_tour_participants)

    # joint participation counts toward the non-mandatory total
    n_non_mandatory = n_non_mandatory + n_joint

    activity = pd.Series('H', index=persons.index)
    activity[n_mandatory > 0] = 'M'
    activity[(activity != 'M') & (n_non_mandatory > 0)] = 'N'

    return activity
def infer_mandatory_tour_frequency(persons, tours):
    """Label each person with their mandatory_tour_frequency alternative.

    Encodes the per-person tour counts as work_count + 10 * school_count
    and maps that code to the alternative nickname ('' means no mandatory
    tours; unrecognized combinations map to NaN).
    """
    def _count_tours(tour_type):
        return (tours[tours.tour_type == tour_type]
                .groupby('person_id').size()
                .reindex(persons.index).fillna(0).astype(np.int8))

    code_to_alt = {
        0: '',
        1: 'work1',
        2: 'work2',
        10: 'school1',
        20: 'school2',
        11: 'work_and_school',
    }
    frequency_code = _count_tours('work') + _count_tours('school') * 10
    return frequency_code.map(code_to_alt)
def infer_non_mandatory_tour_frequency(configs_dir, persons, tours):
    """Infer each person's non_mandatory_tour_frequency alternative id.

    Counts the person's non-mandatory tours by tour type, clips the counts
    to the per-type maximums representable by the model alternatives
    (activitysim's tour-extension step can create more tours than the alts
    table envisions), and looks up the alternative id matching the clipped
    counts.

    Parameters
    ----------
    configs_dir : str
        Directory containing non_mandatory_tour_frequency_alternatives.csv.
    persons : pandas.DataFrame
        Persons table indexed by person_id.
    tours : pandas.DataFrame
        Tours table; only rows with tour_category == 'non_mandatory' are used.

    Returns
    -------
    pandas.DataFrame
        Indexed like persons: one '_<tour_type>' column per tour type with
        the raw (unclipped) counts, plus 'non_mandatory_tour_frequency'
        holding the alternative id.

    Raises
    ------
    ValueError
        If a person has more than 4 clipped non-mandatory tours, or if a
        count combination matches no alternative.
    """
    def read_alts():
        # escort,shopping,othmaint,othdiscr,eatout,social
        # 0,0,0,0,0,0
        # 0,0,0,1,0,0, ...
        alts = pd.read_csv(
            os.path.join(configs_dir, 'non_mandatory_tour_frequency_alternatives.csv'),
            comment='#')
        alts = alts.astype(np.int8)  # - NARROW
        return alts

    tours = tours[tours.tour_category == 'non_mandatory']

    alts = read_alts()
    tour_types = list(alts.columns.values)
    # tour_frequency is the row index in the alts table
    alts['alt_id'] = alts.index

    # actual tour counts (may exceed counts envisioned by alts)
    unconstrained_tour_counts = pd.DataFrame(index=persons.index)
    for tour_type in tour_types:
        unconstrained_tour_counts[tour_type] = \
            tours[tours.tour_type == tour_type].\
            groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)

    # unextend tour counts: clip observed counts back to the per-type
    # maximums the alts table can encode
    max_tour_counts = alts[tour_types].max(axis=0)
    constrained_tour_counts = pd.DataFrame(index=persons.index)
    for tour_type in tour_types:
        constrained_tour_counts[tour_type] = \
            unconstrained_tour_counts[tour_type].clip(upper=max_tour_counts[tour_type])

    # persons whose tours were constrained by the clipping above
    has_constrained_tours = (unconstrained_tour_counts != constrained_tour_counts).any(axis=1)
    print("%s persons with constrained tours" % (has_constrained_tours.sum()))

    # BUG FIX: the original `has_constrained_tours & counts.sum(axis=1) > 4`
    # parsed as `(has_constrained_tours & sum) > 4` because `&` binds tighter
    # than `>`; parenthesize the comparison.
    too_many_tours = has_constrained_tours & (constrained_tour_counts.sum(axis=1) > 4)
    if too_many_tours.any():
        print("%s persons with too many tours" % (too_many_tours.sum()))
        print(constrained_tour_counts[too_many_tours])
        # BUG FIX: replaced `assert False` (silently stripped under -O)
        # with an explicit error
        raise ValueError(
            "infer_non_mandatory_tour_frequency: persons with more than 4 "
            "non_mandatory tours - not sure what to do about this.")

    # determine alt id corresponding to constrained_tour_counts;
    # index waltz needed because pd.merge doesn't preserve the index here
    alt_id = \
        pd.merge(constrained_tour_counts.reset_index(), alts,
                 left_on=tour_types, right_on=tour_types, how='left').set_index(persons.index.name).alt_id

    # did we end up with any tour frequencies not in alts?
    if alt_id.isna().any():
        bad_tour_frequencies = alt_id.isna()
        logger.warning("WARNING Bad joint tour frequencies\n\n")
        logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
                       constrained_tour_counts[bad_tour_frequencies])
        logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
                       tours[tours.person_id.isin(persons.index[bad_tour_frequencies])].sort_values('person_id'))
        # BUG FIX: `bug` was an undefined name used to force a NameError crash
        raise ValueError("infer_non_mandatory_tour_frequency: tour frequencies not in alts")

    # '_<tour_type>' columns carry the raw (unclipped) counts for reference;
    # renamed local (was `tf`) to avoid shadowing the module-level tf import
    result = unconstrained_tour_counts.rename(
        columns={tour_type: '_%s' % tour_type for tour_type in tour_types})
    result['non_mandatory_tour_frequency'] = alt_id

    return result
def infer_joint_tour_frequency(configs_dir, households, tours):
    """Infer each household's joint_tour_frequency alternative.

    Counts joint tours per household by tour type and matches the count
    vector against joint_tour_frequency_alternatives.csv (indexed by 'alt').

    Parameters
    ----------
    configs_dir : str
        Directory containing joint_tour_frequency_alternatives.csv.
    households : pandas.DataFrame
        Households table indexed by household_id.
    tours : pandas.DataFrame
        Tours table; only rows with tour_category == 'joint' are used.

    Returns
    -------
    pandas.Series
        The alternative label for each household, indexed like households.

    Raises
    ------
    ValueError
        If a household's tour-type counts match no alternative.
    """
    # bind the module logger locally so the function is self-contained
    logger = logging.getLogger(__name__)

    def read_alts():
        # right now this file just contains the start and end hour
        alts = pd.read_csv(
            os.path.join(configs_dir, 'joint_tour_frequency_alternatives.csv'),
            comment='#', index_col='alt')
        alts = alts.astype(np.int8)  # - NARROW
        return alts

    alts = read_alts()
    tour_types = list(alts.columns.values)

    # exactly one all-zero ("no joint tours") alternative is expected
    assert(len(alts.index[(alts == 0).all(axis=1)]) == 1)
    zero_tours_alt = alts.index[(alts == 0).all(axis=1)].values[0]

    alts['joint_tour_frequency'] = alts.index

    joint_tours = tours[tours.tour_category == 'joint']

    num_tours = pd.DataFrame(index=households.index)
    for tour_type in tour_types:
        joint_tour_is_tour_type = (joint_tours.tour_type == tour_type)
        if joint_tour_is_tour_type.any():
            num_tours[tour_type] = \
                joint_tours[joint_tour_is_tour_type].\
                groupby('household_id').size().\
                reindex(households.index).fillna(0)
        else:
            logger.warning("WARNING infer_joint_tour_frequency - no tours of type '%s'" % tour_type)
            num_tours[tour_type] = 0
    num_tours = num_tours.fillna(0).astype(np.int64)

    # need to do index waltz because pd.merge doesn't preserve index in this case
    jtf = pd.merge(num_tours.reset_index(), alts, left_on=tour_types, right_on=tour_types, how='left').\
        set_index(households.index.name)

    if jtf.joint_tour_frequency.isna().any():
        bad_tour_frequencies = jtf.joint_tour_frequency.isna()
        logger.warning("WARNING Bad joint tour frequencies\n\n")
        logger.warning("\nWARNING Bad joint tour frequencies: num_tours\n%s" %
                       num_tours[bad_tour_frequencies])
        logger.warning("\nWARNING Bad joint tour frequencies: num_tours\n%s" %
                       joint_tours[joint_tours.household_id.isin(households.index[bad_tour_frequencies])])
        # BUG FIX: `bug` was an undefined name used to force a NameError crash
        raise ValueError("infer_joint_tour_frequency: tour counts not in alts")

    logger.info("infer_joint_tour_frequency: %s households with joint tours",
                (jtf.joint_tour_frequency != zero_tours_alt).sum())

    return jtf.joint_tour_frequency
def infer_joint_tour_composition(persons, tours, joint_tour_participants):
    """
    assign joint_tours a 'composition' column ('adults', 'children', or 'mixed')
    depending on the composition of the joint_tour_participants
    """
    joint_tours = tours[tours.tour_category == 'joint'].copy()

    participants = pd.merge(joint_tour_participants, persons,
                            left_on='person_id', right_index=True, how='left')

    # FIXME - computed by asim annotate persons - not needed if embedded in asim and called just-in-time
    if 'adult' not in participants:
        participants['adult'] = (participants.age >= 18)

    def _tour_has_participant(mask):
        # True per joint tour if at least one participant matches `mask`
        return (participants[mask]
                .groupby(SURVEY_TOUR_ID).size()
                .reindex(joint_tours[SURVEY_TOUR_ID]).fillna(0) > 0)

    tour_has_adults = _tour_has_participant(participants.adult)
    tour_has_children = _tour_has_participant(~participants.adult)
    # every joint tour must have at least one participant
    assert (tour_has_adults | tour_has_children).all()

    joint_tours['composition'] = \
        np.where(tour_has_adults, np.where(tour_has_children, 'mixed', 'adults'), 'children')

    return joint_tours.composition.reindex(tours.index).fillna('').astype(str)
def infer_tour_scheduling(configs_dir, tours):
    """Map each tour's (start, end) periods to its tdd alternative index.

    Parameters
    ----------
    configs_dir : str
        Directory containing tour_departure_and_duration_alternatives.csv
        (columns: start, end).
    tours : pandas.DataFrame
        Tours with integer 'start' and 'end' period columns.

    Returns
    -------
    pandas.Series
        The tdd alternative index (row number in the alternatives file) for
        each tour, positionally aligned with `tours`.

    Raises
    ------
    AssertionError
        If a tour's start or end period never appears in the alternatives.
    ValueError
        If a (start, end) pair has no matching alternative.
    """
    def read_tdd_alts():
        # right now this file just contains the start and end hour
        tdd_alts = pd.read_csv(os.path.join(configs_dir, 'tour_departure_and_duration_alternatives.csv'))
        tdd_alts['duration'] = tdd_alts.end - tdd_alts.start
        tdd_alts = tdd_alts.astype(np.int8)  # - NARROW
        tdd_alts['tdd'] = tdd_alts.index
        return tdd_alts

    tdd_alts = read_tdd_alts()

    if not tours.start.isin(tdd_alts.start).all():
        print(tours[~tours.start.isin(tdd_alts.start)])
    assert tours.start.isin(tdd_alts.start).all(), "not all tour starts in tdd_alts"
    # BUG FIX: message said "starts" for the end-period check
    assert tours.end.isin(tdd_alts.end).all(), "not all tour ends in tdd_alts"

    tdds = pd.merge(tours[['start', 'end']], tdd_alts,
                    left_on=['start', 'end'], right_on=['start', 'end'], how='left')

    if tdds.tdd.isna().any():
        # BUG FIX: tdds has a fresh RangeIndex from the merge, so boolean
        # indexing into `tours` must be positional (.values), not aligned
        bad_tdds = tours[tdds.tdd.isna().values]
        print("Bad tour start/end times:")
        print(bad_tdds)
        # BUG FIX: `bug` was an undefined name used to force a NameError crash
        raise ValueError("infer_tour_scheduling: tours with unmatched start/end times")

    return tdds.tdd
def patch_tour_ids(persons, tours, joint_tour_participants):
    """Replace survey tour ids with canonical activitysim tour ids.

    Computes asim tour ids (which depend on person_id, tour_type, and the
    tour's ordinal among same-type tours) for mandatory, joint,
    non-mandatory, and atwork tours, and patches the corresponding id
    columns in the joint-tour-participants table and the atwork subtours'
    parent_tour_id.

    Returns (patched_tours, patched_joint_tour_participants).
    """
    def set_tour_index(tours, parent_tour_num_col, is_joint):
        # asim tour ids depend on the tour's ordinal among same-type tours,
        # so compute tour_type_num before delegating to tf.set_tour_index
        group_cols = ['person_id', 'tour_category', 'tour_type']
        if 'parent_tour_num' in tours:
            group_cols += ['parent_tour_num']
        tours['tour_type_num'] = \
            tours.sort_values(by=group_cols).groupby(group_cols).cumcount() + 1

        return tf.set_tour_index(tours, parent_tour_num_col=parent_tour_num_col, is_joint=is_joint)

    # mandatory_tour_frequency must have been inferred before this is called
    assert 'mandatory_tour_frequency' in persons

    # replace survey_tour ids with asim standard tour_ids (which are based on person_id and tour_type)
    # tours.insert(loc=0, column='legacy_index', value=tours.index)

    #####################
    # mandatory tours
    #####################
    mandatory_tours = \
        set_tour_index(tours[tours.tour_category == 'mandatory'], parent_tour_num_col=None, is_joint=False)
    assert mandatory_tours.index.name == 'tour_id'

    #####################
    # joint tours
    #####################

    # joint tours tour_id was assigned based on person_id of the first person in household (PNUM == 1)
    # because the actual point person for the tour is only identified later in joint_tour_participants
    temp_point_persons = persons.loc[persons.PNUM == 1, ['household_id']]
    temp_point_persons['person_id'] = temp_point_persons.index
    temp_point_persons.set_index('household_id', inplace=True)

    # patch person_id with value of temp_point_person_id and use it to set_tour_index
    # NOTE(review): joint_tours is a slice of tours; the column assignments
    # below may raise SettingWithCopyWarning -- confirm a .copy() isn't needed
    joint_tours = tours[tours.tour_category == 'joint']
    joint_tours['cache_point_person_id'] = joint_tours['person_id']
    joint_tours['person_id'] = reindex(temp_point_persons.person_id, joint_tours.household_id)
    joint_tours = set_tour_index(joint_tours, parent_tour_num_col=None, is_joint=True)
    # restore the real person_id now that the asim tour ids are assigned
    joint_tours['person_id'] = joint_tours['cache_point_person_id']
    del joint_tours['cache_point_person_id']

    # patch tour_id column in patched_joint_tour_participants
    patched_joint_tour_participants = joint_tour_participants.copy()
    asim_tour_id = pd.Series(joint_tours.index, index=joint_tours[SURVEY_TOUR_ID])
    patched_joint_tour_participants[ASIM_TOUR_ID] = \
        reindex(asim_tour_id, patched_joint_tour_participants[SURVEY_TOUR_ID])

    #####################
    # non_mandatory tours
    #####################
    non_mandatory_tours = \
        set_tour_index(tours[tours.tour_category == 'non_mandatory'], parent_tour_num_col=None, is_joint=False)

    #####################
    # atwork tours
    #####################
    atwork_tours = tours[tours.tour_category == 'atwork']

    # patch atwork tours parent_tour_id before assigning their tour_id
    # tours for workers with both work and school trips should have lower tour_num for work,
    # tours for students with both work and school trips should have lower tour_num for school
    # tours are already sorted, but schools comes before work (which is alphabetical, not the alternative id order),
    # so work_and_school tour_nums are correct for students (school=1, work=2) but workers need to be flipped
    mandatory_tour_frequency = \
        reindex(persons.mandatory_tour_frequency, mandatory_tours.person_id)
    is_worker = \
        reindex(persons.pemploy, mandatory_tours.person_id).\
        isin([CONSTANTS['PEMPLOY_FULL'], CONSTANTS['PEMPLOY_PART']])
    work_and_school_and_worker = (mandatory_tour_frequency == 'work_and_school') & is_worker

    # calculate tour_num for work tours (required to set_tour_index for atwork subtours)
    parent_tours = mandatory_tours[['survey_tour_id']]
    parent_tours['tour_num'] = \
        mandatory_tours.\
        sort_values(by=['person_id', 'tour_category', 'tour_type']).\
        groupby(['person_id', 'tour_category']).cumcount() + 1

    # flip tour_num (3 - n swaps 1 and 2) for working work_and_school persons
    parent_tours.tour_num = parent_tours.tour_num.where(~work_and_school_and_worker, 3 - parent_tours.tour_num)
    parent_tours = parent_tours.set_index('survey_tour_id', drop=True)

    # temporarily add parent_tour_num column to atwork tours, call set_tour_index, and then delete it
    atwork_tours['parent_tour_num'] = reindex(parent_tours.tour_num, atwork_tours[SURVEY_PARENT_TOUR_ID])
    atwork_tours = set_tour_index(atwork_tours, parent_tour_num_col='parent_tour_num', is_joint=False)
    del atwork_tours['parent_tour_num']

    # tours['household_id'] = reindex(persons.household_id, tours.person_id)

    # map each subtour's survey parent tour id onto the parent's new asim id
    asim_tour_id = pd.Series(mandatory_tours.index, index=mandatory_tours[SURVEY_TOUR_ID])
    atwork_tours[ASIM_PARENT_TOUR_ID] = reindex(asim_tour_id, atwork_tours[SURVEY_PARENT_TOUR_ID])

    #####################
    # concat tours
    #####################

    # only true for fake data
    assert (mandatory_tours.index == unmangle_ids(mandatory_tours.survey_tour_id)).all()
    assert (joint_tours.index == unmangle_ids(joint_tours.survey_tour_id)).all()
    assert (non_mandatory_tours.index == unmangle_ids(non_mandatory_tours.survey_tour_id)).all()

    patched_tours = pd.concat([mandatory_tours, joint_tours, non_mandatory_tours, atwork_tours])

    assert patched_tours.index.name == 'tour_id'
    patched_tours = patched_tours.reset_index().rename(columns={'tour_id': ASIM_TOUR_ID})
    del patched_tours['tour_type_num']

    assert ASIM_TOUR_ID in patched_tours
    assert ASIM_PARENT_TOUR_ID in patched_tours

    return patched_tours, patched_joint_tour_participants
def infer_atwork_subtour_frequency(configs_dir, tours):
    """Infer the atwork_subtour_frequency alternative for each work tour.

    Counts atwork subtours per parent work tour by tour type and matches
    the count vector against atwork_subtour_frequency_alternatives.csv.

    Parameters
    ----------
    configs_dir : str
        Directory containing atwork_subtour_frequency_alternatives.csv.
    tours : pandas.DataFrame
        Patched tours table (must carry ASIM_TOUR_ID and parent_tour_id).

    Returns
    -------
    pandas.Series
        The alternative nickname for each tour (positionally aligned with
        `tours`); '' for tours that are not work tours.

    Raises
    ------
    ValueError
        If a work tour's subtour counts match no alternative.
    """
    # first column is 'atwork_subtour_frequency' nickname, remaining columns are trip type counts
    alts = pd.read_csv(os.path.join(configs_dir, 'atwork_subtour_frequency_alternatives.csv'), comment='#')
    tour_types = list(alts.drop(columns=alts.columns[0]).columns)  # get trip_types, ignoring first column
    alts['alt_id'] = alts.index
    #              alt  eat  business  maint  alt_id
    # 0    no_subtours    0         0      0       0
    # 1            eat    1         0      0       1
    # 2      business1    0         1      0       2
    # 3          maint    0         0      1       3
    # 4      business2    0         2      0       4
    # 5   eat_business    1         1      0       5

    work_tours = tours[tours.tour_type == 'work']
    work_tours = work_tours[[ASIM_TOUR_ID]]

    subtours = tours[tours.tour_category == 'atwork']
    subtours = subtours[['tour_id', 'tour_type', 'parent_tour_id']]

    # actual tour counts (may exceed counts envisioned by alts)
    tour_counts = pd.DataFrame(index=work_tours[ASIM_TOUR_ID])
    for tour_type in tour_types:
        # count subtours of this type by parent_tour_id
        tour_type_count = subtours[subtours.tour_type == tour_type].groupby('parent_tour_id').size()
        # backfill with 0 count
        tour_counts[tour_type] = tour_type_count.reindex(tour_counts.index).fillna(0).astype(np.int8)

    # determine alt id corresponding to the counts;
    # index waltz needed because pd.merge doesn't preserve the index here
    tour_counts = \
        pd.merge(tour_counts.reset_index(), alts,
                 left_on=tour_types, right_on=tour_types, how='left').set_index(tour_counts.index.name)

    atwork_subtour_frequency = tour_counts.alt

    # did we end up with any subtour counts not in alts?
    if atwork_subtour_frequency.isna().any():
        bad_tour_frequencies = atwork_subtour_frequency.isna()
        logger.warning("WARNING Bad atwork subtour frequencies for %s work tours" % bad_tour_frequencies.sum())
        logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
                       tour_counts[bad_tour_frequencies])
        logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
                       subtours[subtours.parent_tour_id.isin(tour_counts[bad_tour_frequencies].index)].
                       sort_values('parent_tour_id'))
        # BUG FIX: `bug` was an undefined name used to force a NameError crash
        raise ValueError("infer_atwork_subtour_frequency: subtour counts not in alts")

    # broadcast from work tours back to all tours; non-work tours get ''
    atwork_subtour_frequency = reindex(atwork_subtour_frequency, tours[ASIM_TOUR_ID]).fillna('')

    return atwork_subtour_frequency
def read_tables(input_dir, tables):
    """Load every table described in the ``tables`` registry from CSV.

    Each entry of ``tables`` maps a table name to a spec dict with a
    'file_name' key and an optional 'index' key (column to use as the
    frame index).  The loaded frame is stored back into the spec under
    the 'table' key (the registry is mutated in place).

    Returns the four core tables: (households, persons, tours,
    joint_tour_participants).
    """
    for name, spec in tables.items():
        frame = pd.read_csv(os.path.join(input_dir, spec['file_name']),
                            index_col=spec.get('index'))
        # read_csv turns empty fields into NaN even in otherwise-string
        # columns; restore them as empty strings so dtypes stay uniform
        for col in frame.columns:
            if frame[col].dtype == 'object':
                print("##### converting", col, frame[col].dtype)
                frame[col] = frame[col].fillna('').astype(str)
        spec['table'] = frame
    return (tables['households'].get('table'),
            tables['persons'].get('table'),
            tables['tours'].get('table'),
            tables['joint_tour_participants'].get('table'))
def check_controls(table_name, column_name):
    """Compare one survey column (or the index) against the control copy.

    Looks up ``table_name`` in the module-level ``survey_tables`` and
    ``control_tables`` registries.  Returns True when every value matches;
    otherwise prints a diff summary and returns False.
    """
    survey = survey_tables[table_name].get('table')
    control = control_tables[table_name].get('table')
    if column_name == 'index':
        mismatch = (survey.index != control.index)
    else:
        mismatch = (survey[column_name] != control[column_name])
    if not mismatch.any():
        return True
    print("check_controls %s.%s: %s out of %s do not match" %
          (table_name, column_name, mismatch.sum(), len(survey)))
    print("control\n%s" % control[mismatch][survey.columns])
    print("survey\n%s" % survey[mismatch][survey.columns])
    return False
def infer(configs_dir, input_dir, output_dir):
    """Infer the choice-model columns implied by the survey tables and write
    the annotated tables to output_dir.

    Columns inferred: persons.cdap_activity, persons mandatory / non-mandatory
    tour frequencies, households.joint_tour_frequency, tours.composition,
    tours.tdd and tours.atwork_subtour_frequency.  Each deterministic column
    is asserted against the control tables unless the module-level
    skip_controls flag is set.
    """
    households, persons, tours, joint_tour_participants = read_tables(input_dir, survey_tables)
    # be explicit about all tour_ids to avoid confusion between asim and survey ids
    tours = tours.rename(columns={'tour_id': SURVEY_TOUR_ID, 'parent_tour_id': SURVEY_PARENT_TOUR_ID})
    joint_tour_participants = \
        joint_tour_participants.rename(columns={'tour_id': SURVEY_TOUR_ID, 'participant_id': SURVEY_PARTICIPANT_ID})
    # mangle survey tour ids to keep us honest
    tours[SURVEY_TOUR_ID] = mangle_ids(tours[SURVEY_TOUR_ID])
    tours[SURVEY_PARENT_TOUR_ID] = mangle_ids(tours[SURVEY_PARENT_TOUR_ID])
    joint_tour_participants[SURVEY_TOUR_ID] = mangle_ids(joint_tour_participants[SURVEY_TOUR_ID])
    joint_tour_participants[SURVEY_PARTICIPANT_ID] = mangle_ids(joint_tour_participants[SURVEY_PARTICIPANT_ID])
    # persons.cdap_activity
    persons['cdap_activity'] = infer_cdap_activity(persons, tours, joint_tour_participants)
    # check but don't assert as this is not deterministic
    skip_controls or check_controls('persons', 'cdap_activity')
    # persons.mandatory_tour_frequency
    persons['mandatory_tour_frequency'] = infer_mandatory_tour_frequency(persons, tours)
    assert skip_controls or check_controls('persons', 'mandatory_tour_frequency')
    # persons.non_mandatory_tour_frequency
    tour_frequency = infer_non_mandatory_tour_frequency(configs_dir, persons, tours)
    for c in tour_frequency.columns:
        print("assigning persons", c)
        persons[c] = tour_frequency[c]
    assert skip_controls or check_controls('persons', 'non_mandatory_tour_frequency')
    # patch_tour_ids; keep the shared table registry in sync with the patched frames
    tours, joint_tour_participants = patch_tour_ids(persons, tours, joint_tour_participants)
    survey_tables['tours']['table'] = tours
    survey_tables['joint_tour_participants']['table'] = joint_tour_participants
    assert skip_controls or check_controls('tours', 'index')
    assert skip_controls or check_controls('joint_tour_participants', 'index')
    # households.joint_tour_frequency
    households['joint_tour_frequency'] = infer_joint_tour_frequency(configs_dir, households, tours)
    assert skip_controls or check_controls('households', 'joint_tour_frequency')
    # tours.composition
    tours['composition'] = infer_joint_tour_composition(persons, tours, joint_tour_participants)
    assert skip_controls or check_controls('tours', 'composition')
    # tours.tdd
    tours['tdd'] = infer_tour_scheduling(configs_dir, tours)
    assert skip_controls or check_controls('tours', 'tdd')
    tours['atwork_subtour_frequency'] = infer_atwork_subtour_frequency(configs_dir, tours)
    assert skip_controls or check_controls('tours', 'atwork_subtour_frequency')
    # write output files
    households.to_csv(os.path.join(output_dir, outputs['households']), index=True)
    persons.to_csv(os.path.join(output_dir, outputs['persons']), index=True)
    tours.to_csv(os.path.join(output_dir, outputs['tours']), index=False)
    joint_tour_participants.to_csv(os.path.join(output_dir, outputs['joint_tour_participants']), index=False)
# usage: python infer.py <data_dir> <configs_dir>
args = sys.argv[1:]
assert len(args) == 2, "usage: python infer.py <data_dir> <configs_dir>"
data_dir = args[0]
configs_dir = args[1]
# load shared model constants from the configs (contents not shown here)
with open(os.path.join(configs_dir, 'constants.yaml')) as stream:
    CONSTANTS = yaml.load(stream, Loader=yaml.SafeLoader)
# survey inputs live under <data_dir>/survey_data/ and are annotated in place
input_dir = os.path.join(data_dir, 'survey_data/')
output_dir = input_dir
if apply_controls:
    read_tables(input_dir, control_tables)
infer(configs_dir, input_dir, output_dir)
| [
"pandas.DataFrame",
"yaml.load",
"pandas.merge",
"logging.StreamHandler",
"logging.Formatter",
"activitysim.abm.models.util.tour_frequency.set_tour_index",
"numpy.where",
"pandas.Series",
"activitysim.core.util.reindex",
"os.path.join",
"pandas.concat",
"logging.getLogger"
] | [((252, 279), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import logging\n'), ((366, 389), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (387, 389), False, 'import logging\n'), ((24632, 24670), 'os.path.join', 'os.path.join', (['data_dir', '"""survey_data/"""'], {}), "(data_dir, 'survey_data/')\n", (24644, 24670), False, 'import os\n'), ((406, 454), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (423, 454), False, 'import logging\n'), ((2612, 2647), 'pandas.Series', 'pd.Series', (['"""H"""'], {'index': 'persons.index'}), "('H', index=persons.index)\n", (2621, 2647), True, 'import pandas as pd\n'), ((4198, 4231), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'persons.index'}), '(index=persons.index)\n', (4210, 4231), True, 'import pandas as pd\n'), ((4775, 4808), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'persons.index'}), '(index=persons.index)\n', (4787, 4808), True, 'import pandas as pd\n'), ((7492, 7528), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'households.index'}), '(index=households.index)\n', (7504, 7528), True, 'import pandas as pd\n'), ((9336, 9434), 'pandas.merge', 'pd.merge', (['joint_tour_participants', 'persons'], {'left_on': '"""person_id"""', 'right_index': '(True)', 'how': '"""left"""'}), "(joint_tour_participants, persons, left_on='person_id', right_index\n =True, how='left')\n", (9344, 9434), True, 'import pandas as pd\n'), ((11114, 11226), 'pandas.merge', 'pd.merge', (["tours[['start', 'end']]", 'tdd_alts'], {'left_on': "['start', 'end']", 'right_on': "['start', 'end']", 'how': '"""left"""'}), "(tours[['start', 'end']], tdd_alts, left_on=['start', 'end'],\n right_on=['start', 'end'], how='left')\n", (11122, 11226), True, 'import pandas as pd\n'), ((13216, 13279), 'activitysim.core.util.reindex', 'reindex', (['temp_point_persons.person_id', 'joint_tours.household_id'], 
{}), '(temp_point_persons.person_id, joint_tours.household_id)\n', (13223, 13279), False, 'from activitysim.core.util import reindex\n'), ((13632, 13695), 'pandas.Series', 'pd.Series', (['joint_tours.index'], {'index': 'joint_tours[SURVEY_TOUR_ID]'}), '(joint_tours.index, index=joint_tours[SURVEY_TOUR_ID])\n', (13641, 13695), True, 'import pandas as pd\n'), ((13758, 13828), 'activitysim.core.util.reindex', 'reindex', (['asim_tour_id', 'patched_joint_tour_participants[SURVEY_TOUR_ID]'], {}), '(asim_tour_id, patched_joint_tour_participants[SURVEY_TOUR_ID])\n', (13765, 13828), False, 'from activitysim.core.util import reindex\n'), ((14709, 14777), 'activitysim.core.util.reindex', 'reindex', (['persons.mandatory_tour_frequency', 'mandatory_tours.person_id'], {}), '(persons.mandatory_tour_frequency, mandatory_tours.person_id)\n', (14716, 14777), False, 'from activitysim.core.util import reindex\n'), ((15683, 15750), 'activitysim.core.util.reindex', 'reindex', (['parent_tours.tour_num', 'atwork_tours[SURVEY_PARENT_TOUR_ID]'], {}), '(parent_tours.tour_num, atwork_tours[SURVEY_PARENT_TOUR_ID])\n', (15690, 15750), False, 'from activitysim.core.util import reindex\n'), ((15993, 16064), 'pandas.Series', 'pd.Series', (['mandatory_tours.index'], {'index': 'mandatory_tours[SURVEY_TOUR_ID]'}), '(mandatory_tours.index, index=mandatory_tours[SURVEY_TOUR_ID])\n', (16002, 16064), True, 'import pandas as pd\n'), ((16105, 16163), 'activitysim.core.util.reindex', 'reindex', (['asim_tour_id', 'atwork_tours[SURVEY_PARENT_TOUR_ID]'], {}), '(asim_tour_id, atwork_tours[SURVEY_PARENT_TOUR_ID])\n', (16112, 16163), False, 'from activitysim.core.util import reindex\n'), ((16555, 16631), 'pandas.concat', 'pd.concat', (['[mandatory_tours, joint_tours, non_mandatory_tours, atwork_tours]'], {}), '([mandatory_tours, joint_tours, non_mandatory_tours, atwork_tours])\n', (16564, 16631), True, 'import pandas as pd\n'), ((18031, 18075), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 
'work_tours[ASIM_TOUR_ID]'}), '(index=work_tours[ASIM_TOUR_ID])\n', (18043, 18075), True, 'import pandas as pd\n'), ((24577, 24618), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.SafeLoader'}), '(stream, Loader=yaml.SafeLoader)\n', (24586, 24618), False, 'import yaml\n'), ((10179, 10225), 'numpy.where', 'np.where', (['tour_has_children', '"""mixed"""', '"""adults"""'], {}), "(tour_has_children, 'mixed', 'adults')\n", (10187, 10225), True, 'import numpy as np\n'), ((11930, 12019), 'activitysim.abm.models.util.tour_frequency.set_tour_index', 'tf.set_tour_index', (['tours'], {'parent_tour_num_col': 'parent_tour_num_col', 'is_joint': 'is_joint'}), '(tours, parent_tour_num_col=parent_tour_num_col, is_joint=\n is_joint)\n', (11947, 12019), True, 'from activitysim.abm.models.util import tour_frequency as tf\n'), ((17141, 17211), 'os.path.join', 'os.path.join', (['configs_dir', '"""atwork_subtour_frequency_alternatives.csv"""'], {}), "(configs_dir, 'atwork_subtour_frequency_alternatives.csv')\n", (17153, 17211), False, 'import os\n'), ((24012, 24059), 'os.path.join', 'os.path.join', (['output_dir', "outputs['households']"], {}), "(output_dir, outputs['households'])\n", (24024, 24059), False, 'import os\n'), ((24092, 24136), 'os.path.join', 'os.path.join', (['output_dir', "outputs['persons']"], {}), "(output_dir, outputs['persons'])\n", (24104, 24136), False, 'import os\n'), ((24167, 24209), 'os.path.join', 'os.path.join', (['output_dir', "outputs['tours']"], {}), "(output_dir, outputs['tours'])\n", (24179, 24209), False, 'import os\n'), ((24259, 24319), 'os.path.join', 'os.path.join', (['output_dir', "outputs['joint_tour_participants']"], {}), "(output_dir, outputs['joint_tour_participants'])\n", (24271, 24319), False, 'import os\n'), ((24505, 24548), 'os.path.join', 'os.path.join', (['configs_dir', '"""constants.yaml"""'], {}), "(configs_dir, 'constants.yaml')\n", (24517, 24548), False, 'import os\n'), ((3717, 3791), 'os.path.join', 'os.path.join', 
(['configs_dir', '"""non_mandatory_tour_frequency_alternatives.csv"""'], {}), "(configs_dir, 'non_mandatory_tour_frequency_alternatives.csv')\n", (3729, 3791), False, 'import os\n'), ((6957, 7023), 'os.path.join', 'os.path.join', (['configs_dir', '"""joint_tour_frequency_alternatives.csv"""'], {}), "(configs_dir, 'joint_tour_frequency_alternatives.csv')\n", (6969, 7023), False, 'import os\n'), ((10537, 10610), 'os.path.join', 'os.path.join', (['configs_dir', '"""tour_departure_and_duration_alternatives.csv"""'], {}), "(configs_dir, 'tour_departure_and_duration_alternatives.csv')\n", (10549, 10610), False, 'import os\n'), ((14804, 14855), 'activitysim.core.util.reindex', 'reindex', (['persons.pemploy', 'mandatory_tours.person_id'], {}), '(persons.pemploy, mandatory_tours.person_id)\n', (14811, 14855), False, 'from activitysim.core.util import reindex\n'), ((19472, 19526), 'activitysim.core.util.reindex', 'reindex', (['atwork_subtour_frequency', 'tours[ASIM_TOUR_ID]'], {}), '(atwork_subtour_frequency, tours[ASIM_TOUR_ID])\n', (19479, 19526), False, 'from activitysim.core.util import reindex\n'), ((19681, 19723), 'os.path.join', 'os.path.join', (['input_dir', "info['file_name']"], {}), "(input_dir, info['file_name'])\n", (19693, 19723), False, 'import os\n')] |
import numpy as np
import pygame, OpenGL
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GLUT.freeglut import *
def setup_lighting():
    """Configure the OpenGL material and two-light setup for the scene.

    Applies a white base color, specular material, a bright primary light
    (GL_LIGHT0) plus a slightly negative secondary light (GL_LIGHT1), and
    enables lighting, color-material tracking and normal renormalization.
    """
    two_sided = False
    base_color = [1.0, 1.0, 1.0]
    glColor3fv(base_color)
    specular = [0.18, 0.18, 0.18, 0.18]
    shininess = [64]
    scene_ambient = [0.3, 0.3, 0.3, 0.05]
    light0_ambient = [0, 0, 0, 0]
    light0_diffuse = [0.85, 0.85, 0.8, 0.85]
    light1_diffuse = [-0.01, -0.01, -0.03, -0.03]
    light0_specular = [0.85, 0.85, 0.85, 0.85]
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, specular)
    glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, shininess)
    glLightfv(GL_LIGHT0, GL_AMBIENT, light0_ambient)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, light0_diffuse)
    glLightfv(GL_LIGHT0, GL_SPECULAR, light0_specular)
    glLightfv(GL_LIGHT1, GL_DIFFUSE, light1_diffuse)
    glLightModelfv(GL_LIGHT_MODEL_AMBIENT, scene_ambient)
    glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_FALSE)
    glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, two_sided)
    for capability in (GL_LIGHTING, GL_LIGHT0, GL_LIGHT1,
                       GL_COLOR_MATERIAL, GL_NORMALIZE):
        glEnable(capability)
class camera():
    # Camera state holder.
    # NOTE(review): this class body appears truncated at the end of the
    # visible chunk -- confirm against the full source file.
    class Ortho:
        # Orthographic projection parameters, stored class-level (shared
        # by every instance unless reassigned per instance).
        # left, right, bottom, top, near, far
        params=np.array([-1, 1, -1, 1, 1, -1], np.float32)
        # NOTE: bbox and nf are numpy *views* into params -- mutating params
        # (or either view) is visible through all three names.
        bbox=params[0:4]
        nf=params[4:] # near far
| [
"numpy.array"
] | [((1399, 1442), 'numpy.array', 'np.array', (['[-1, 1, -1, 1, 1, -1]', 'np.float32'], {}), '([-1, 1, -1, 1, 1, -1], np.float32)\n', (1407, 1442), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- animation -*-
"""
Animation of a double pendulum
"""
from numpy import sin, cos, pi, array
import time
import gr
try:
    from time import perf_counter
except ImportError:
    # Python 2 fallback: perf_counter was added in 3.3; time.clock is the
    # closest available high-resolution timer there.
    from time import clock as perf_counter
g = 9.8  # gravitational constant (m/s^2)
def rk4(x, h, y, f):
    """Advance one classical Runge-Kutta (RK4) step.

    x : current independent variable (e.g. time)
    h : step size
    y : current state (scalar or array)
    f : derivative function f(x, y)

    Returns the pair (x + h, updated y).
    """
    half = 0.5 * h
    s1 = h * f(x, y)
    s2 = h * f(x + half, y + 0.5 * s1)
    s3 = h * f(x + half, y + 0.5 * s2)
    s4 = h * f(x + h, y + s3)
    increment = (s1 + 2 * (s2 + s3) + s4) / 6.0
    return x + h, y + increment
def pendulum_derivs(t, state):
    """Right-hand side of the double-pendulum ODE.

    state is (theta1, omega1, theta2, omega2); the rod lengths l1/l2, bob
    masses m1/m2 and gravity g are module-level constants.  Returns the
    time derivative [omega1, alpha1, omega2, alpha2].

    Derivation: http://scienceworld.wolfram.com/physics/DoublePendulum.html
    """
    th1, om1, th2, om2 = state
    delta = th1 - th2
    cos_d = cos(delta)
    sin_d = sin(delta)
    # coefficients of the 2x2 linear system in the angular accelerations
    ca = (m1 + m2) * l1
    cb = m2 * l2 * cos_d
    cc = m2 * l1 * cos_d
    cd = m2 * l2
    ce = -m2 * l2 * om2**2 * sin_d - g * (m1 + m2) * sin(th1)
    cf = m2 * l1 * om1**2 * sin_d - m2 * g * sin(th2)
    det = ca * cd - cc * cb
    return array([om1, (ce * cd - cb * cf) / det, om2, (ca * cf - cc * ce) / det])
def pendulum(theta, length, mass):
    """Draw the double pendulum for one animation frame using gr.

    theta  : the two rod angles (radians, measured from straight down)
    length : the two rod lengths
    mass   : the two bob masses (controls marker size)
    """
    extent = length[0] + length[1]
    gr.clearws()
    gr.setviewport(0, 1, 0, 1)
    gr.setwindow(-extent, extent, -extent, extent)
    gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
    gr.setmarkercolorind(86)
    px, py = 0, 0.775  # pivot point
    gr.fillarea([-0.2, 0.2, 0.2, -0.2], [0.75, 0.75, 0.8, 0.8])
    for idx in (0, 1):
        # bob position relative to the current pivot
        bx = px + sin(theta[idx]) * length[idx]
        by = py - cos(theta[idx]) * length[idx]
        gr.polyline([px, bx], [py, by])   # draw rod
        gr.setmarkersize(3 * mass[idx])
        gr.polymarker([bx], [by])         # draw bob
        px, py = bx, by                   # next rod hangs off this bob
    gr.updatews()
l1 = 1.2   # length of rods
l2 = 1.0
m1 = 1.0   # weights of bobs
m2 = 1.5
t1 = 100.0  # initial angles (degrees)
t2 = -20.0
w1 = 0.0    # initial angular velocities
w2 = 0.0
t = 0
dt = 0.04  # integration / frame time step (seconds)
# pack the state vector and convert degrees to radians (the angular
# velocities are zero, so scaling them too is harmless)
state = array([t1, w1, t2, w2]) * pi / 180
now = perf_counter()
# run the animation for 30 simulated seconds
while t < 30:
    start = now
    t, state = rk4(t, dt, state, pendulum_derivs)
    t1, w1, t2, w2 = state
    pendulum([t1, t2], [l1, l2], [m1, m2])
    now = perf_counter()
    # real-time pacing: sleep away whatever is left of this frame
    if start + dt > now:
        time.sleep(start + dt - now)
| [
"gr.updatews",
"gr.setviewport",
"gr.setmarkertype",
"gr.polyline",
"time.clock",
"time.sleep",
"gr.setwindow",
"gr.fillarea",
"gr.polymarker",
"numpy.sin",
"numpy.array",
"gr.clearws",
"numpy.cos",
"gr.setmarkersize",
"gr.setmarkercolorind"
] | [((1878, 1892), 'time.clock', 'perf_counter', ([], {}), '()\n', (1890, 1892), True, 'from time import clock as perf_counter\n'), ((899, 988), 'numpy.array', 'array', (['[w1, (e * d - b * f) / (a * d - c * b), w2, (a * f - c * e) / (a * d - c * b)]'], {}), '([w1, (e * d - b * f) / (a * d - c * b), w2, (a * f - c * e) / (a * d -\n c * b)])\n', (904, 988), False, 'from numpy import sin, cos, pi, array\n'), ((1031, 1043), 'gr.clearws', 'gr.clearws', ([], {}), '()\n', (1041, 1043), False, 'import gr\n'), ((1048, 1074), 'gr.setviewport', 'gr.setviewport', (['(0)', '(1)', '(0)', '(1)'], {}), '(0, 1, 0, 1)\n', (1062, 1074), False, 'import gr\n'), ((1079, 1105), 'gr.setwindow', 'gr.setwindow', (['(-l)', 'l', '(-l)', 'l'], {}), '(-l, l, -l, l)\n', (1091, 1105), False, 'import gr\n'), ((1110, 1154), 'gr.setmarkertype', 'gr.setmarkertype', (['gr.MARKERTYPE_SOLID_CIRCLE'], {}), '(gr.MARKERTYPE_SOLID_CIRCLE)\n', (1126, 1154), False, 'import gr\n'), ((1159, 1183), 'gr.setmarkercolorind', 'gr.setmarkercolorind', (['(86)'], {}), '(86)\n', (1179, 1183), False, 'import gr\n'), ((1254, 1313), 'gr.fillarea', 'gr.fillarea', (['[-0.2, 0.2, 0.2, -0.2]', '[0.75, 0.75, 0.8, 0.8]'], {}), '([-0.2, 0.2, 0.2, -0.2], [0.75, 0.75, 0.8, 0.8])\n', (1265, 1313), False, 'import gr\n'), ((1640, 1653), 'gr.updatews', 'gr.updatews', ([], {}), '()\n', (1651, 1653), False, 'import gr\n'), ((2056, 2070), 'time.clock', 'perf_counter', ([], {}), '()\n', (2068, 2070), True, 'from time import clock as perf_counter\n'), ((703, 715), 'numpy.cos', 'cos', (['(t1 - t2)'], {}), '(t1 - t2)\n', (706, 715), False, 'from numpy import sin, cos, pi, array\n'), ((734, 746), 'numpy.cos', 'cos', (['(t1 - t2)'], {}), '(t1 - t2)\n', (737, 746), False, 'from numpy import sin, cos, pi, array\n'), ((1467, 1484), 'gr.polyline', 'gr.polyline', (['x', 'y'], {}), '(x, y)\n', (1478, 1484), False, 'import gr\n'), ((1522, 1551), 'gr.setmarkersize', 'gr.setmarkersize', (['(3 * mass[i])'], {}), '(3 * mass[i])\n', (1538, 1551), False, 
'import gr\n'), ((1560, 1589), 'gr.polymarker', 'gr.polymarker', (['[x[1]]', '[y[1]]'], {}), '([x[1]], [y[1]])\n', (1573, 1589), False, 'import gr\n'), ((1836, 1859), 'numpy.array', 'array', (['[t1, w1, t2, w2]'], {}), '([t1, w1, t2, w2])\n', (1841, 1859), False, 'from numpy import sin, cos, pi, array\n'), ((2104, 2132), 'time.sleep', 'time.sleep', (['(start + dt - now)'], {}), '(start + dt - now)\n', (2114, 2132), False, 'import time\n'), ((790, 802), 'numpy.sin', 'sin', (['(t1 - t2)'], {}), '(t1 - t2)\n', (793, 802), False, 'from numpy import sin, cos, pi, array\n'), ((821, 828), 'numpy.sin', 'sin', (['t1'], {}), '(t1)\n', (824, 828), False, 'from numpy import sin, cos, pi, array\n'), ((856, 868), 'numpy.sin', 'sin', (['(t1 - t2)'], {}), '(t1 - t2)\n', (859, 868), False, 'from numpy import sin, cos, pi, array\n'), ((880, 887), 'numpy.sin', 'sin', (['t2'], {}), '(t2)\n', (883, 887), False, 'from numpy import sin, cos, pi, array\n'), ((1371, 1384), 'numpy.sin', 'sin', (['theta[i]'], {}), '(theta[i])\n', (1374, 1384), False, 'from numpy import sin, cos, pi, array\n'), ((1432, 1445), 'numpy.cos', 'cos', (['theta[i]'], {}), '(theta[i])\n', (1435, 1445), False, 'from numpy import sin, cos, pi, array\n')] |
import networkx as nx
import numpy as np
from scipy.optimize import minimize
from cirq import PauliString, Pauli, Simulator, GridQubit
from .qaoa import QAOA
from .pauli_operations import CirqPauliSum, add_pauli_strings
def print_fun(x):
    """Display callback: print the given value (used as the VQE 'disp' option)."""
    print(x)
class CirqMaxCutSolver:
    """
    CirqMaxCutSolver creates the cost operators and the mixing operators for
    the input graph and returns a QAOA object that solves the Maxcut problem
    for the input graph.

    Parameters
    ----------
    qubit_pairs : (list of GridQubit pairs, or networkx.Graph) the edges of
        the graph on which Maxcut is to be solved
    steps : (int) number of mixing and cost function steps to use. Default=1
    minimizer_kwargs : (optional) (dict) arguments to pass to the minimizer.
        Defaults to Nelder-Mead with loose tolerances.
    vqe_option : (optional) (dict) arguments for VQE run.
    """

    def __init__(self, qubit_pairs, steps=1, minimizer_kwargs=None,
                 vqe_option=None):
        self.steps = steps
        self.graph = self.create_input_graph(qubit_pairs=qubit_pairs)
        self.cost_operators = self.create_cost_operators()
        self.driver_operators = self.create_driver_operators()
        self.minimizer_kwargs = minimizer_kwargs or {'method': 'Nelder-Mead',
                                                     'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2,
                                                                 'disp': False}}
        self.vqe_option = vqe_option or {'disp': print_fun, 'return_all': True}

    def create_input_graph(self, qubit_pairs):
        """
        Creates a graph from a list of GridQubit pairs, or copies an
        already-constructed networkx Graph.

        BUG FIX: the original only built the graph when qubit_pairs was a
        list and then unconditionally dereferenced the local variable, so
        passing an nx.Graph (or any non-list iterable) raised
        UnboundLocalError.

        Parameters
        ----------
        qubit_pairs : (list of GridQubit pairs or networkx.Graph) edges of
            the graph to be constructed

        Returns
        -------
        graph : (Graph object) graph containing the edges in qubit_pairs
        """
        if isinstance(qubit_pairs, nx.Graph):
            return qubit_pairs.copy()
        maxcut_graph = nx.Graph()
        for qubit_pair in qubit_pairs:
            maxcut_graph.add_edge(*qubit_pair)
        return maxcut_graph.copy()

    def create_cost_operators(self):
        """
        Creates the family of phase separation operators that depend on the
        objective function to be optimized: 0.5 * (Z_i Z_j - I) per edge.

        Returns
        -------
        cost_operators : (list) cost clauses for the graph on which Maxcut
            needs to be solved
        """
        cost_operators = []
        for i, j in self.graph.edges():
            qubit_map_i = {i: Pauli.by_index(2)}  # Z on qubit i
            qubit_map_j = {j: Pauli.by_index(2)}  # Z on qubit j
            pauli_z_term = PauliString(
                qubit_map_i, coefficient=0.5)*PauliString(qubit_map_j)
            pauli_identity_term = PauliString(coefficient=-0.5)
            cost_pauli_sum = add_pauli_strings(
                [pauli_z_term, pauli_identity_term])
            cost_operators.append(cost_pauli_sum)
        return cost_operators

    def create_driver_operators(self):
        """
        Creates the family of mixing operators that depend on the domain of
        the problem and its structure: -X_i for every node.

        Returns
        -------
        driver_operators : (list) mixing clauses for the graph on which
            Maxcut needs to be solved
        """
        driver_operators = []
        for i in self.graph.nodes():
            qubit_map_i = {i: Pauli.by_index(0)}  # X on qubit i
            driver_operators.append(CirqPauliSum(
                [PauliString(qubit_map_i, coefficient=-1.0)]))
        return driver_operators

    def solve_max_cut_qaoa(self):
        """
        Initializes a QAOA object with the required information for
        performing Maxcut on the input graph.

        Returns
        -------
        qaoa_inst : (QAOA object) represents all information for running the
            QAOA algorithm to find the ground state of the list of cost
            clauses.
        """
        qaoa_inst = QAOA(list(self.graph.nodes()), steps=self.steps, cost_ham=self.cost_operators,
                         ref_ham=self.driver_operators, minimizer=minimize,
                         minimizer_kwargs=self.minimizer_kwargs,
                         vqe_options=self.vqe_option)
        return qaoa_inst
def define_grid_qubits(size=2):
    """
    Defines qubits on a square grid of given size.

    Parameters
    ----------
    size : (int) size of the grid. Default=2, i.e. a grid containing four
        qubits (0,0), (0,1), (1,0) and (1,1)

    Returns
    -------
    a list of GridQubits defined on a grid of given size, in row-major order
    """
    qubits = []
    for row in range(size):
        for col in range(size):
            qubits.append(GridQubit(row, col))
    return qubits
def define_graph(qubits=None, number_of_vertices=2):
    """
    Creates a cycle graph as a list of qubit pairs for the given number of
    vertices.

    BUG FIX: the original used a mutable default argument built with
    GridQubit at import time; the default is now None and the fallback is
    constructed lazily per call (backward compatible for all callers).

    Parameters
    ----------
    qubits : (list of GridQubits). Default is one pair of qubits (0,0) and
        (0,1) representing a two-vertex graph
    number_of_vertices : (int) number of vertices the cycle graph must
        contain. Default=2

    Returns
    -------
    a list of qubit pairs representing a cycle graph containing the given
    number of vertices
    """
    if qubits is None:
        # built lazily so no qubits are constructed at module import time
        # and callers never share a mutable default list
        qubits = [(GridQubit(0, 0), GridQubit(0, 1))]
    if len(qubits) == 1:
        return qubits
    return [(qubits[i % number_of_vertices], qubits[(i + 1) % number_of_vertices])
            for i in range(number_of_vertices)]
def display_maxcut_results(qaoa_instance, maxcut_result):
"""
Displays results in the form of states and corresponding probabilities from solving
the maxcut problem using QAOA represented by the input qaoa_instance
Parameters
----------
qaoa_instance : (QAOA object) contains all information about the problem instance on which
QAOA is to be applied
maxcut_result : (SimulationTrialResults object) obtained from solving the maxcut problem on an input graph
"""
print("State\tProbability")
for state_index in range(qaoa_instance.number_of_states):
print(qaoa_instance.states[state_index], "\t", np.conj(
maxcut_result.final_state[state_index])*maxcut_result.final_state[state_index])
def solve_maxcut(qubit_pairs, steps=1):
    """
    Solves the maxcut problem on the input graph and prints the resulting
    state probabilities.

    Parameters
    ----------
    qubit_pairs : (list of GridQubit pairs) the graph on which maxcut is to
        be solved
    steps : (int) number of mixing and cost function steps to use. Default=1
    """
    solver = CirqMaxCutSolver(qubit_pairs=qubit_pairs, steps=steps)
    qaoa_instance = solver.solve_max_cut_qaoa()
    betas, gammas = qaoa_instance.get_angles()
    angles = np.hstack((betas, gammas))
    build_circuit = qaoa_instance.get_parameterized_circuit()
    circuit = build_circuit(angles)
    result = Simulator().simulate(circuit)
    display_maxcut_results(qaoa_instance, result)
| [
"numpy.conj",
"cirq.PauliString",
"cirq.GridQubit",
"cirq.Simulator",
"numpy.hstack",
"networkx.Graph",
"cirq.Pauli.by_index"
] | [((7135, 7161), 'numpy.hstack', 'np.hstack', (['(betas, gammas)'], {}), '((betas, gammas))\n', (7144, 7161), True, 'import numpy as np\n'), ((7277, 7288), 'cirq.Simulator', 'Simulator', ([], {}), '()\n', (7286, 7288), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((4721, 4736), 'cirq.GridQubit', 'GridQubit', (['i', 'j'], {}), '(i, j)\n', (4730, 4736), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((2076, 2086), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2084, 2086), True, 'import networkx as nx\n'), ((2848, 2877), 'cirq.PauliString', 'PauliString', ([], {'coefficient': '(-0.5)'}), '(coefficient=-0.5)\n', (2859, 2877), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((4808, 4823), 'cirq.GridQubit', 'GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (4817, 4823), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((4825, 4840), 'cirq.GridQubit', 'GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (4834, 4840), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((2635, 2652), 'cirq.Pauli.by_index', 'Pauli.by_index', (['(2)'], {}), '(2)\n', (2649, 2652), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((2684, 2701), 'cirq.Pauli.by_index', 'Pauli.by_index', (['(2)'], {}), '(2)\n', (2698, 2701), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((2730, 2771), 'cirq.PauliString', 'PauliString', (['qubit_map_i'], {'coefficient': '(0.5)'}), '(qubit_map_i, coefficient=0.5)\n', (2741, 2771), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((2789, 2813), 'cirq.PauliString', 'PauliString', (['qubit_map_j'], {}), '(qubit_map_j)\n', (2800, 2813), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((3458, 3475), 'cirq.Pauli.by_index', 'Pauli.by_index', (['(0)'], {}), '(0)\n', (3472, 3475), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n'), ((6447, 6494), 
'numpy.conj', 'np.conj', (['maxcut_result.final_state[state_index]'], {}), '(maxcut_result.final_state[state_index])\n', (6454, 6494), True, 'import numpy as np\n'), ((3544, 3586), 'cirq.PauliString', 'PauliString', (['qubit_map_i'], {'coefficient': '(-1.0)'}), '(qubit_map_i, coefficient=-1.0)\n', (3555, 3586), False, 'from cirq import PauliString, Pauli, Simulator, GridQubit\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: ls-checkpoint.py
# Author: <NAME> <<EMAIL>>
import tensorflow as tf
import numpy as np
import six
import sys
import pprint
from tensorpack.tfutils.varmanip import get_checkpoint_path
if __name__ == '__main__':
    # usage: ls-checkpoint.py <model-file>
    # Prints a {variable name: shape} mapping for an .npy / .npz / TF checkpoint.
    fpath = sys.argv[1]
    if fpath.endswith('.npy'):
        # .npy dump: a pickled dict mapping variable name -> ndarray
        # NOTE(review): numpy >= 1.16.3 defaults to allow_pickle=False, which
        # would make this np.load call raise -- confirm the pinned numpy version.
        params = np.load(fpath, encoding='latin1').item()
        dic = {k: v.shape for k, v in six.iteritems(params)}
    elif fpath.endswith('.npz'):
        # .npz archive: one ndarray entry per variable name
        params = dict(np.load(fpath))
        dic = {k: v.shape for k, v in six.iteritems(params)}
    else:
        # otherwise treat the argument as a TF checkpoint prefix/directory
        path = get_checkpoint_path(sys.argv[1])
        reader = tf.train.NewCheckpointReader(path)
        dic = reader.get_variable_to_shape_map()
    pprint.pprint(dic)
| [
"numpy.load",
"tensorflow.train.NewCheckpointReader",
"pprint.pprint",
"tensorpack.tfutils.varmanip.get_checkpoint_path",
"six.iteritems"
] | [((737, 755), 'pprint.pprint', 'pprint.pprint', (['dic'], {}), '(dic)\n', (750, 755), False, 'import pprint\n'), ((599, 631), 'tensorpack.tfutils.varmanip.get_checkpoint_path', 'get_checkpoint_path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (618, 631), False, 'from tensorpack.tfutils.varmanip import get_checkpoint_path\n'), ((649, 683), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['path'], {}), '(path)\n', (677, 683), True, 'import tensorflow as tf\n'), ((340, 373), 'numpy.load', 'np.load', (['fpath'], {'encoding': '"""latin1"""'}), "(fpath, encoding='latin1')\n", (347, 373), True, 'import numpy as np\n'), ((419, 440), 'six.iteritems', 'six.iteritems', (['params'], {}), '(params)\n', (432, 440), False, 'import six\n'), ((497, 511), 'numpy.load', 'np.load', (['fpath'], {}), '(fpath)\n', (504, 511), True, 'import numpy as np\n'), ((551, 572), 'six.iteritems', 'six.iteritems', (['params'], {}), '(params)\n', (564, 572), False, 'import six\n')] |
#%%
from tsbooster.cv import TimeseriesHoldout
import pandas as pd
import numpy as np
#%%
def test_holdout_cv():
    """TimeseriesHoldout should split 30 daily rows exactly at test_start."""
    frame = pd.DataFrame({
        "time": np.arange(0, 30),
        "vals": np.arange(10, 40),
        "dates": pd.date_range(
            pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-30"), freq="1 d"
        ).values,
    })
    features = frame.drop("vals", axis=1)
    target = frame["vals"]
    cutoff = pd.Timestamp("2020-01-16")
    splitter = TimeseriesHoldout(date_column="dates", test_start=cutoff)
    train_idx, test_idx = next(splitter.split(features, target))
    expected_train = np.arange(0, 15)
    expected_test = np.arange(15, 30)
    # Check lengths first to avoid hard-to-read elementwise comparison errors
    assert len(train_idx) == len(expected_train)
    assert len(test_idx) == len(expected_test)
    # The indices themselves are as expected
    assert (train_idx == expected_train).all()
    assert (test_idx == expected_test).all()
    # The dates split around the cutoff when indexing the features
    assert (features.iloc[train_idx]["dates"] < cutoff).all()
    assert (features.iloc[test_idx]["dates"] >= cutoff).all()
    # The target values line up with the indices
    assert (target[train_idx] == np.arange(10, 25)).all()
    assert (target[test_idx] == np.arange(25, 40)).all()
| [
"pandas.DataFrame",
"tsbooster.cv.TimeseriesHoldout",
"pandas.Timestamp",
"numpy.arange"
] | [((340, 358), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (352, 358), True, 'import pandas as pd\n'), ((428, 454), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-16"""'], {}), "('2020-01-16')\n", (440, 454), True, 'import pandas as pd\n'), ((465, 526), 'tsbooster.cv.TimeseriesHoldout', 'TimeseriesHoldout', ([], {'date_column': '"""dates"""', 'test_start': 'test_start'}), "(date_column='dates', test_start=test_start)\n", (482, 526), False, 'from tsbooster.cv import TimeseriesHoldout\n'), ((614, 630), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (623, 630), True, 'import numpy as np\n'), ((650, 667), 'numpy.arange', 'np.arange', (['(15)', '(30)'], {}), '(15, 30)\n', (659, 667), True, 'import numpy as np\n'), ((143, 159), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (152, 159), True, 'import numpy as np\n'), ((177, 194), 'numpy.arange', 'np.arange', (['(10)', '(40)'], {}), '(10, 40)\n', (186, 194), True, 'import numpy as np\n'), ((240, 266), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (252, 266), True, 'import pandas as pd\n'), ((268, 294), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-30"""'], {}), "('2020-01-30')\n", (280, 294), True, 'import pandas as pd\n'), ((1289, 1306), 'numpy.arange', 'np.arange', (['(10)', '(25)'], {}), '(10, 25)\n', (1298, 1306), True, 'import numpy as np\n'), ((1341, 1358), 'numpy.arange', 'np.arange', (['(25)', '(40)'], {}), '(25, 40)\n', (1350, 1358), True, 'import numpy as np\n')] |
import numpy as np
def iterative_mean(i_iter, current_mean, x):
    """Update a running mean with one new observation.

    Uses the incremental update rule
    (http://www.heikohoffmann.de/htmlthesis/node134.html), originally
    implemented in treeexplainer
    https://github.com/andosa/treeexplainer/pull/24.

    :param i_iter: [int > 0] Current iteration.
    :param current_mean: [ndarray] Current value of mean.
    :param x: [ndarray] New value to be added to mean.
    :return: [ndarray] Updated mean.
    """
    correction = (x - current_mean) / (i_iter + 1)
    return current_mean + correction
def divide0(a, b, replace_with):
    """Divide two numbers, substituting a fallback where the result is undefined.

    Any non-finite quotient (e.g. from division by zero) is replaced by
    ``replace_with``. No type-checking or agreement between dimensions is
    performed. Be careful!

    :param a: [ndarray or int or float] Numerator.
    :param b: [ndarray or int or float] Denominator.
    :param replace_with: [int or float] Value used where a/b is not defined.
    :return: [ndarray or int or float] Result of the division, cast by numpy
        to the best data type to hold it.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        quotient = np.true_divide(a, b)
    if isinstance(quotient, np.ndarray):
        # Patch every non-finite entry (inf, -inf, nan) in place.
        quotient[~np.isfinite(quotient)] = replace_with
    elif not np.isfinite(quotient):
        quotient = replace_with
    return quotient
| [
"numpy.true_divide",
"numpy.errstate",
"numpy.isfinite"
] | [((1079, 1125), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1090, 1125), True, 'import numpy as np\n'), ((1139, 1159), 'numpy.true_divide', 'np.true_divide', (['a', 'b'], {}), '(a, b)\n', (1153, 1159), True, 'import numpy as np\n'), ((1292, 1306), 'numpy.isfinite', 'np.isfinite', (['c'], {}), '(c)\n', (1303, 1306), True, 'import numpy as np\n'), ((1227, 1241), 'numpy.isfinite', 'np.isfinite', (['c'], {}), '(c)\n', (1238, 1241), True, 'import numpy as np\n')] |
import enum
import time
import gpflow as gpf
import numpy as np
import tensorflow as tf
from absl import flags
from absl.flags import FLAGS
from gpflow import config
from gpflow.kernels import SquaredExponential
from gpflow.models import GPR
from tensorflow_probability.python.experimental.mcmc import ProgressBarReducer, WithReductions, \
make_tqdm_progress_bar_fn
from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, \
NoUTurnSampler, sample_chain
from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic
from pssgp.model import StateSpaceGP
class MCMC(enum.Enum):
    """MCMC sampling algorithms supported by get_run_chain_fn."""
    HMC = "HMC"    # Hamiltonian Monte Carlo
    MALA = "MALA"  # Metropolis-adjusted Langevin algorithm
    NUTS = "NUTS"  # No-U-Turn sampler
class ModelEnum(enum.Enum):
    """GP model implementations supported by get_model."""
    GP = "GP"        # vanilla GPflow GPR
    SSGP = "SSGP"    # state-space GP (parallel=False)
    PSSGP = "PSSGP"  # state-space GP with parallel scan (parallel=True)
class CovarianceEnum(enum.Enum):
    """Covariance (kernel) functions supported by get_simple_covariance_function."""
    Matern12 = 'Matern12'
    Matern32 = 'Matern32'
    Matern52 = 'Matern52'
    RBF = "RBF"
    QP = "QP"  # quasi-periodic: Periodic kernel over a SquaredExponential base
flags.DEFINE_string("device", "/cpu:0", "Device on which to run")
def get_simple_covariance_function(covariance_enum, **kwargs):
    """Construct the covariance (kernel) object for the given covariance type.

    :param covariance_enum: a CovarianceEnum member or its string value.
    :param kwargs: keyword arguments forwarded to the kernel constructor. For
        QP, ``variance`` and ``lengthscales`` (both default 1.) configure the
        SquaredExponential base kernel; remaining kwargs go to Periodic.
    :return: the constructed kernel instance.
    :raises ValueError: if the covariance type has no constructor handler.
    """
    if not isinstance(covariance_enum, CovarianceEnum):
        covariance_enum = CovarianceEnum(covariance_enum)
    if covariance_enum == CovarianceEnum.Matern12:
        return Matern12(**kwargs)
    if covariance_enum == CovarianceEnum.Matern32:
        return Matern32(**kwargs)
    if covariance_enum == CovarianceEnum.Matern52:
        return Matern52(**kwargs)
    if covariance_enum == CovarianceEnum.RBF:
        return RBF(**kwargs)
    if covariance_enum == CovarianceEnum.QP:
        base_kernel = SquaredExponential(kwargs.pop("variance", 1.), kwargs.pop("lengthscales", 1.))
        return Periodic(base_kernel, **kwargs)
    # Previously this fell through and silently returned None when a new enum
    # member had no handler; fail loudly instead.
    raise ValueError(f"covariance not supported: {covariance_enum}")
def get_model(model_enum, data, noise_variance, covariance_function, max_parallel=10000):
    """Instantiate the requested GP model.

    :param model_enum: a ModelEnum member or its string value.
    :param data: (X, Y) training data passed to the model constructor.
    :param noise_variance: observation noise variance.
    :param covariance_function: kernel instance to use.
    :param max_parallel: maximum parallelism for the PSSGP variant only.
    :return: the constructed model.
    :raises ValueError: if the model type is not supported.
    """
    if not isinstance(model_enum, ModelEnum):
        model_enum = ModelEnum(model_enum)
    if model_enum == ModelEnum.GP:
        return GPR(data, covariance_function, None, noise_variance)
    if model_enum == ModelEnum.SSGP:
        return StateSpaceGP(data, covariance_function, noise_variance, parallel=False)
    if model_enum == ModelEnum.PSSGP:
        return StateSpaceGP(data, covariance_function, noise_variance, parallel=True,
                            max_parallel=max_parallel)
    raise ValueError("model not supported")
def run_one_mcmc(n_training, gp_model):
    """Run one MCMC chain over the model's hyperparameters and time it.

    :param n_training: number of training points (used only in the failure
        message).
    :param gp_model: gpflow model whose trainable parameters are sampled.
    :return: (wall-clock run time in seconds, dict mapping parameter paths to
        constrained sample arrays). On failure the run time is NaN and every
        parameter gets a NaN-filled sample array.
    """
    n_burnin = FLAGS.n_burnin
    n_samples = FLAGS.n_samples
    mcmc_helper, run_chain_fn = get_run_chain_fn(gp_model, n_samples, n_burnin)
    try:
        start = time.time()
        result, is_accepted = run_chain_fn()
        print(np.mean(is_accepted))
        elapsed = time.time() - start
        samples = mcmc_helper.convert_to_constrained_values(result)
    except Exception as e:  # noqa: It's not clear what the error returned by TF could be, so well...
        elapsed = float("nan")
        samples = [np.nan * np.ones((n_samples,), dtype=config.default_float())
                   for _ in gp_model.trainable_parameters]
        print(f"{FLAGS.model}-{FLAGS.cov} failed with n_training={n_training} and error: \n {e}")
    names = gpf.utilities.parameter_dict(gp_model)
    return elapsed, dict(zip(names, samples))
def get_run_chain_fn(gp_model, num_samples, num_burnin_steps):
    """Build the MCMC transition kernel and a compiled chain-running closure.

    The sampler type, step size and (for HMC) leapfrog count are read from
    the absl FLAGS at call time.

    :param gp_model: gpflow model whose log posterior density is targeted.
    :param num_samples: number of posterior samples to draw.
    :param num_burnin_steps: number of burn-in steps before sampling.
    :return: (mcmc_helper, run_chain_fn). mcmc_helper is the gpflow
        SamplingHelper exposing the unconstrained state; run_chain_fn is a
        tf.function returning (samples, is_accepted trace).
    :raises ValueError: if FLAGS.mcmc names an unsupported sampler.
    """
    # Expose the model's trainable parameters as an unconstrained state
    # with a matching target log-prob function.
    mcmc_helper = gpf.optimizers.SamplingHelper(
        gp_model.log_posterior_density, gp_model.trainable_parameters)
    if FLAGS.mcmc == MCMC.HMC.value:
        mcmc = HamiltonianMonteCarlo(
            target_log_prob_fn=mcmc_helper.target_log_prob_fn,
            num_leapfrog_steps=FLAGS.n_leapfrogs,
            step_size=FLAGS.step_size
        )
    elif FLAGS.mcmc == MCMC.MALA.value:
        mcmc = MetropolisAdjustedLangevinAlgorithm(
            target_log_prob_fn=mcmc_helper.target_log_prob_fn,
            step_size=FLAGS.step_size
        )
    elif FLAGS.mcmc == MCMC.NUTS.value:
        mcmc = NoUTurnSampler(
            target_log_prob_fn=mcmc_helper.target_log_prob_fn,
            step_size=FLAGS.step_size
        )
    else:
        raise ValueError(f"mcmc must be a {MCMC} enum, {FLAGS.mcmc} was passed")
    # Wrap the kernel so a tqdm progress bar advances over burn-in + sampling.
    pbar = ProgressBarReducer(num_samples + num_burnin_steps,
                              make_tqdm_progress_bar_fn(f"{FLAGS.model}-{FLAGS.mcmc}", True))
    pbar.initialize(None)
    mcmc = WithReductions(mcmc, pbar)
    @tf.function
    def run_chain_fn():
        # Trace whether each proposal was accepted (pkr: previous kernel results).
        return sample_chain(
            num_results=num_samples,
            num_burnin_steps=num_burnin_steps,
            current_state=mcmc_helper.current_state,
            kernel=mcmc,
            trace_fn=lambda _, pkr: pkr.inner_results.is_accepted
        )
    return mcmc_helper, run_chain_fn
| [
"gpflow.config.default_float",
"pssgp.kernels.Matern32",
"numpy.mean",
"pssgp.kernels.Matern52",
"pssgp.kernels.Periodic",
"tensorflow_probability.python.experimental.mcmc.make_tqdm_progress_bar_fn",
"tensorflow_probability.python.mcmc.HamiltonianMonteCarlo",
"tensorflow_probability.python.mcmc.sample... | [((925, 990), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""device"""', '"""/cpu:0"""', '"""Device on which to run"""'], {}), "('device', '/cpu:0', 'Device on which to run')\n", (944, 990), False, 'from absl import flags\n'), ((3350, 3447), 'gpflow.optimizers.SamplingHelper', 'gpf.optimizers.SamplingHelper', (['gp_model.log_posterior_density', 'gp_model.trainable_parameters'], {}), '(gp_model.log_posterior_density, gp_model.\n trainable_parameters)\n', (3379, 3447), True, 'import gpflow as gpf\n'), ((4359, 4385), 'tensorflow_probability.python.experimental.mcmc.WithReductions', 'WithReductions', (['mcmc', 'pbar'], {}), '(mcmc, pbar)\n', (4373, 4385), False, 'from tensorflow_probability.python.experimental.mcmc import ProgressBarReducer, WithReductions, make_tqdm_progress_bar_fn\n'), ((1236, 1254), 'pssgp.kernels.Matern12', 'Matern12', ([], {}), '(**kwargs)\n', (1244, 1254), False, 'from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic\n'), ((1321, 1339), 'pssgp.kernels.Matern32', 'Matern32', ([], {}), '(**kwargs)\n', (1329, 1339), False, 'from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic\n'), ((1406, 1424), 'pssgp.kernels.Matern52', 'Matern52', ([], {}), '(**kwargs)\n', (1414, 1424), False, 'from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic\n'), ((1486, 1499), 'pssgp.kernels.RBF', 'RBF', ([], {}), '(**kwargs)\n', (1489, 1499), False, 'from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic\n'), ((1661, 1692), 'pssgp.kernels.Periodic', 'Periodic', (['base_kernel'], {}), '(base_kernel, **kwargs)\n', (1669, 1692), False, 'from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic\n'), ((1928, 1980), 'gpflow.models.GPR', 'GPR', (['data', 'covariance_function', 'None', 'noise_variance'], {}), '(data, covariance_function, None, noise_variance)\n', (1931, 1980), False, 'from gpflow.models import GPR\n'), ((2574, 2585), 'time.time', 
'time.time', ([], {}), '()\n', (2583, 2585), False, 'import time\n'), ((3505, 3646), 'tensorflow_probability.python.mcmc.HamiltonianMonteCarlo', 'HamiltonianMonteCarlo', ([], {'target_log_prob_fn': 'mcmc_helper.target_log_prob_fn', 'num_leapfrog_steps': 'FLAGS.n_leapfrogs', 'step_size': 'FLAGS.step_size'}), '(target_log_prob_fn=mcmc_helper.target_log_prob_fn,\n num_leapfrog_steps=FLAGS.n_leapfrogs, step_size=FLAGS.step_size)\n', (3526, 3646), False, 'from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, NoUTurnSampler, sample_chain\n'), ((4257, 4319), 'tensorflow_probability.python.experimental.mcmc.make_tqdm_progress_bar_fn', 'make_tqdm_progress_bar_fn', (['f"""{FLAGS.model}-{FLAGS.mcmc}"""', '(True)'], {}), "(f'{FLAGS.model}-{FLAGS.mcmc}', True)\n", (4282, 4319), False, 'from tensorflow_probability.python.experimental.mcmc import ProgressBarReducer, WithReductions, make_tqdm_progress_bar_fn\n'), ((4443, 4632), 'tensorflow_probability.python.mcmc.sample_chain', 'sample_chain', ([], {'num_results': 'num_samples', 'num_burnin_steps': 'num_burnin_steps', 'current_state': 'mcmc_helper.current_state', 'kernel': 'mcmc', 'trace_fn': '(lambda _, pkr: pkr.inner_results.is_accepted)'}), '(num_results=num_samples, num_burnin_steps=num_burnin_steps,\n current_state=mcmc_helper.current_state, kernel=mcmc, trace_fn=lambda _,\n pkr: pkr.inner_results.is_accepted)\n', (4455, 4632), False, 'from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, NoUTurnSampler, sample_chain\n'), ((2039, 2110), 'pssgp.model.StateSpaceGP', 'StateSpaceGP', (['data', 'covariance_function', 'noise_variance'], {'parallel': '(False)'}), '(data, covariance_function, noise_variance, parallel=False)\n', (2051, 2110), False, 'from pssgp.model import StateSpaceGP\n'), ((2645, 2665), 'numpy.mean', 'np.mean', (['is_accepted'], {}), '(is_accepted)\n', (2652, 2665), True, 'import numpy as np\n'), ((2686, 2697), 
'time.time', 'time.time', ([], {}), '()\n', (2695, 2697), False, 'import time\n'), ((3744, 3862), 'tensorflow_probability.python.mcmc.MetropolisAdjustedLangevinAlgorithm', 'MetropolisAdjustedLangevinAlgorithm', ([], {'target_log_prob_fn': 'mcmc_helper.target_log_prob_fn', 'step_size': 'FLAGS.step_size'}), '(target_log_prob_fn=mcmc_helper.\n target_log_prob_fn, step_size=FLAGS.step_size)\n', (3779, 3862), False, 'from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, NoUTurnSampler, sample_chain\n'), ((2170, 2271), 'pssgp.model.StateSpaceGP', 'StateSpaceGP', (['data', 'covariance_function', 'noise_variance'], {'parallel': '(True)', 'max_parallel': 'max_parallel'}), '(data, covariance_function, noise_variance, parallel=True,\n max_parallel=max_parallel)\n', (2182, 2271), False, 'from pssgp.model import StateSpaceGP\n'), ((3207, 3245), 'gpflow.utilities.parameter_dict', 'gpf.utilities.parameter_dict', (['gp_model'], {}), '(gp_model)\n', (3235, 3245), True, 'import gpflow as gpf\n'), ((3947, 4044), 'tensorflow_probability.python.mcmc.NoUTurnSampler', 'NoUTurnSampler', ([], {'target_log_prob_fn': 'mcmc_helper.target_log_prob_fn', 'step_size': 'FLAGS.step_size'}), '(target_log_prob_fn=mcmc_helper.target_log_prob_fn, step_size\n =FLAGS.step_size)\n', (3961, 4044), False, 'from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, NoUTurnSampler, sample_chain\n'), ((2985, 3007), 'gpflow.config.default_float', 'config.default_float', ([], {}), '()\n', (3005, 3007), False, 'from gpflow import config\n')] |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rotor parameters."""
from makani.config import mconfig
from makani.control import system_types as m
import numpy as np
@mconfig.Config(deps={
    'flight_plan': 'common.flight_plan',
    'propellers': 'prop.propellers',
    'wing_serial': 'common.wing_serial',
})
def MakeParams(params):
  """Returns per-rotor parameters for each of the kite's motors.

  Each entry describes one rotor: its spin axis and body-to-rotor rotation
  matrix, local pressure coefficient, propeller version, spin direction,
  diameter [m], moment of inertia [kg-m^2], and position [m].
  """
  # Motor rotor moment-of-inertia [kg-m^2].
  yasa_rotor_moment_of_inertia = 0.33

  bottom_row = [m.kMotorSbo, m.kMotorSbi, m.kMotorPbi, m.kMotorPbo]

  # Assign propeller versions. Every crosswind-capable serial (including
  # serial 01) flies the full Rev4 set with mirrored spin directions;
  # hover-only serials replace the top-inner propellers (Pti, Sti) with
  # trimmed Rev1 propellers.
  rev4_versions = {
      m.kMotorSbo: m.kPropVersionRev4NegativeX,
      m.kMotorSbi: m.kPropVersionRev4PositiveX,
      m.kMotorPbi: m.kPropVersionRev4PositiveX,
      m.kMotorPbo: m.kPropVersionRev4NegativeX,
      m.kMotorPto: m.kPropVersionRev4PositiveX,
      m.kMotorPti: m.kPropVersionRev4NegativeX,
      m.kMotorSti: m.kPropVersionRev4NegativeX,
      m.kMotorSto: m.kPropVersionRev4PositiveX,
  }
  hover_versions = dict(rev4_versions)
  hover_versions[m.kMotorPti] = m.kPropVersionRev1Trimmed
  hover_versions[m.kMotorSti] = m.kPropVersionRev1Trimmed

  crosswind_serials = [
      m.kWingSerial01, m.kWingSerial04Crosswind, m.kWingSerial05Crosswind,
      m.kWingSerial06Crosswind, m.kWingSerial07Crosswind
  ]
  hover_serials = [
      m.kWingSerial04Hover, m.kWingSerial05Hover, m.kWingSerial06Hover,
      m.kWingSerial07Hover
  ]

  if params['wing_serial'] in crosswind_serials:
    versions_by_motor = rev4_versions
  elif params['wing_serial'] in hover_serials:
    versions_by_motor = hover_versions
  else:
    assert False, 'Unknown wing serial.'

  propeller_versions = [versions_by_motor[r] for r in range(m.kNumMotors)]

  rotors = [None for _ in range(m.kNumMotors)]
  for r in range(m.kNumMotors):
    rotors[r] = {
        # Normal vector to the propeller plane.
        'axis': [np.cos(np.deg2rad(3.0)), 0.0, np.sin(np.deg2rad(3.0))],

        # Direction cosine matrix from body to rotor frame.
        'dcm_b2r': {'d': [[np.cos(np.deg2rad(-3.0)), 0.0,
                           np.sin(np.deg2rad(-3.0))],
                          [0.0, 1.0, 0.0],
                          [-np.sin(np.deg2rad(-3.0)), 0.0,
                           np.cos(np.deg2rad(-3.0))]]},

        # Local pressure coefficient [#] at the rotor position. The
        # pressure coefficient, C_P, is related to local airspeed
        # through the equation:
        #
        #   C_P = 1 - (v / v_freestream)^2
        #
        # There is a significant difference in airspeeds between the top
        # and bottom propellers caused by the lift of the wing. These
        # pressure coefficients are derived from CFD with the slatted
        # kite at 4 deg alpha (https://goo.gl/yfkJJS)
        'local_pressure_coeff': 0.1448 if r in bottom_row else -0.1501,

        # The rotor direction, diameter [m] and moment of inertia [kg
        # m^2] are set from the corresponding propeller's information.
        'version': propeller_versions[r],
        'dir': params['propellers'][propeller_versions[r]]['dir'],
        'D': params['propellers'][propeller_versions[r]]['D'],
        'I': (yasa_rotor_moment_of_inertia +
              params['propellers'][propeller_versions[r]]['I']),
    }

    # We check that the rotor axis is normalized, because it is used
    # to determine the force-moment conversion matrix in
    # rotor_control.py.
    assert abs(np.linalg.norm(rotors[r]['axis']) - 1.0) < 1e-9

  # Rotor positions [m].
  #
  # Updated on 2015-01-22 based on the COM positions given by the Mass
  # and Balance spreadsheet.
  rotors[m.kMotorSbo]['pos'] = [1.613, 3.639, 1.597]
  rotors[m.kMotorSbi]['pos'] = [1.613, 1.213, 1.597]
  rotors[m.kMotorPbi]['pos'] = [1.613, -1.213, 1.597]
  rotors[m.kMotorPbo]['pos'] = [1.613, -3.639, 1.597]
  rotors[m.kMotorPto]['pos'] = [1.960, -3.639, -1.216]
  rotors[m.kMotorPti]['pos'] = [1.960, -1.213, -1.216]
  rotors[m.kMotorSti]['pos'] = [1.960, 1.213, -1.216]
  rotors[m.kMotorSto]['pos'] = [1.960, 3.639, -1.216]

  return rotors
| [
"makani.config.mconfig.Config",
"numpy.linalg.norm",
"numpy.deg2rad"
] | [((715, 847), 'makani.config.mconfig.Config', 'mconfig.Config', ([], {'deps': "{'flight_plan': 'common.flight_plan', 'propellers': 'prop.propellers',\n 'wing_serial': 'common.wing_serial'}"}), "(deps={'flight_plan': 'common.flight_plan', 'propellers':\n 'prop.propellers', 'wing_serial': 'common.wing_serial'})\n", (729, 847), False, 'from makani.config import mconfig\n'), ((6583, 6598), 'numpy.deg2rad', 'np.deg2rad', (['(3.0)'], {}), '(3.0)\n', (6593, 6598), True, 'import numpy as np\n'), ((6613, 6628), 'numpy.deg2rad', 'np.deg2rad', (['(3.0)'], {}), '(3.0)\n', (6623, 6628), True, 'import numpy as np\n'), ((8129, 8162), 'numpy.linalg.norm', 'np.linalg.norm', (["rotors[r]['axis']"], {}), "(rotors[r]['axis'])\n", (8143, 8162), True, 'import numpy as np\n'), ((6727, 6743), 'numpy.deg2rad', 'np.deg2rad', (['(-3.0)'], {}), '(-3.0)\n', (6737, 6743), True, 'import numpy as np\n'), ((6785, 6801), 'numpy.deg2rad', 'np.deg2rad', (['(-3.0)'], {}), '(-3.0)\n', (6795, 6801), True, 'import numpy as np\n'), ((6941, 6957), 'numpy.deg2rad', 'np.deg2rad', (['(-3.0)'], {}), '(-3.0)\n', (6951, 6957), True, 'import numpy as np\n'), ((6883, 6899), 'numpy.deg2rad', 'np.deg2rad', (['(-3.0)'], {}), '(-3.0)\n', (6893, 6899), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 17:47:09 2019
@author: avelinojaver
"""
import numpy as np
import tables
# Compression settings shared by every array/table written by this module:
# blosc:lz4 at level 5, with shuffling and fletcher32 checksums enabled.
_filters = tables.Filters(
                    complevel=5,
                    complib='blosc:lz4',
                    shuffle=True,
                    fletcher32=True)
def save_data(save_name, src_files, images, centroids=None, masks=None, contours=None):
    """Save a stack of images plus optional annotations into one HDF5 file.

    :param save_name: pathlib.Path of the output file; parent directories
        are created if missing.
    :param src_files: iterable of (file_id, file_name) pairs.
    :param images: sequence of images; stored chunked one image at a time.
    :param centroids: optional rows (file_id, nuclei_id, cx, cy[, type_id]).
    :param masks: optional sequence of masks, one per image.
    :param contours: optional rows (file_id, nuclei_id, x, y).
    """
    # Store file names as fixed-width byte strings sized to the longest name.
    max_length = max(len(x[1]) for x in src_files)
    src_files_rec = np.array(src_files, [('file_id', np.int32), ('file', f'S{max_length}')])

    save_name.parent.mkdir(exist_ok=True, parents=True)
    with tables.File(str(save_name), 'w') as fid:
        fid.create_carray('/', 'images', obj=images, chunkshape=(1, *images[0].shape), filters=_filters)
        fid.create_table('/', 'src_files', obj=src_files_rec, filters=_filters)

        if masks is not None:
            fid.create_carray('/', 'masks', obj=masks, chunkshape=(1, *masks[0].shape), filters=_filters)

        if centroids is not None:
            dtypes_centroids = [('file_id', np.int32), ('nuclei_id', np.int32), ('cx', np.float32), ('cy', np.float32)]
            # Rows may carry one optional trailing class label; detect it the
            # same way save_data_single does instead of hard-coding 5.
            if len(centroids[0]) == len(dtypes_centroids) + 1:
                dtypes_centroids += [('type_id', np.int32)]
            centroids_rec = np.array(centroids, dtype=dtypes_centroids)
            fid.create_table('/', 'localizations', obj=centroids_rec, filters=_filters)

        if contours is not None:
            contours_rec = np.array(contours, dtype=[('file_id', np.int32), ('nuclei_id', np.int32), ('x', np.float32), ('y', np.float32)])
            fid.create_table('/', 'contours', obj=contours_rec, filters=_filters)
def save_data_single(save_name, image, centroids=None, mask=None, contours=None):
    """Save one image plus optional annotations into an HDF5 file.

    :param save_name: pathlib.Path of the output file; parent directories
        are created if missing.
    :param image: image array, stored under '/img'.
    :param centroids: optional rows (nuclei_id, cx, cy[, type_id]).
    :param mask: optional mask array, stored under '/mask'.
    :param contours: optional rows (nuclei_id, x, y).
    """
    save_name.parent.mkdir(exist_ok=True, parents=True)
    with tables.File(str(save_name), 'w') as fid:
        fid.create_carray('/', 'img', obj=image, filters=_filters)
        if mask is not None:
            fid.create_carray('/', 'mask', obj=mask, filters=_filters)
        if centroids is not None:
            coord_dtypes = [('nuclei_id', np.int32), ('cx', np.float32), ('cy', np.float32)]
            # Rows may carry one optional trailing class label.
            if len(centroids[0]) == len(coord_dtypes) + 1:
                coord_dtypes.append(('type_id', np.int32))
            coord_rec = np.array(centroids, dtype=coord_dtypes)
            fid.create_table('/', 'coords', obj=coord_rec, filters=_filters)
        if contours is not None:
            contour_rec = np.array(contours,
                                   dtype=[('nuclei_id', np.int32), ('x', np.float32), ('y', np.float32)])
            fid.create_table('/', 'contours', obj=contour_rec, filters=_filters)
fid.create_table('/', 'contours', obj = contours_rec, filters = _filters) | [
"tables.Filters",
"numpy.array"
] | [((160, 239), 'tables.Filters', 'tables.Filters', ([], {'complevel': '(5)', 'complib': '"""blosc:lz4"""', 'shuffle': '(True)', 'fletcher32': '(True)'}), "(complevel=5, complib='blosc:lz4', shuffle=True, fletcher32=True)\n", (174, 239), False, 'import tables\n'), ((444, 516), 'numpy.array', 'np.array', (['src_files', "[('file_id', np.int32), ('file', f'S{max_length}')]"], {}), "(src_files, [('file_id', np.int32), ('file', f'S{max_length}')])\n", (452, 516), True, 'import numpy as np\n'), ((1303, 1346), 'numpy.array', 'np.array', (['centroids'], {'dtype': 'dtypes_centroids'}), '(centroids, dtype=dtypes_centroids)\n', (1311, 1346), True, 'import numpy as np\n'), ((1510, 1627), 'numpy.array', 'np.array', (['contours'], {'dtype': "[('file_id', np.int32), ('nuclei_id', np.int32), ('x', np.float32), ('y',\n np.float32)]"}), "(contours, dtype=[('file_id', np.int32), ('nuclei_id', np.int32), (\n 'x', np.float32), ('y', np.float32)])\n", (1518, 1627), True, 'import numpy as np\n'), ((2420, 2463), 'numpy.array', 'np.array', (['centroids'], {'dtype': 'dtypes_centroids'}), '(centroids, dtype=dtypes_centroids)\n', (2428, 2463), True, 'import numpy as np\n'), ((2620, 2713), 'numpy.array', 'np.array', (['contours'], {'dtype': "[('nuclei_id', np.int32), ('x', np.float32), ('y', np.float32)]"}), "(contours, dtype=[('nuclei_id', np.int32), ('x', np.float32), ('y',\n np.float32)])\n", (2628, 2713), True, 'import numpy as np\n')] |
def demo():
    '''
    Download and display the SDSS jpg image of the galaxy UGC 01962.
    '''
    SdssJpg(37.228, 0.37).show()
def simg(ra=37.228, dec=0.37,
         scale=0.396, width=512, height=512,
         savename=None, DR=14, init_download=True, show=True):
    '''
    Download an SDSS jpg cutout centred on (ra, dec) and optionally view it.
    Defaults point at UGC 01962.

    See SdssJpg for the meaning of the remaining parameters.
    '''
    jpg = SdssJpg(ra, dec, scale=scale, width=width, height=height,
                  savename=savename, DR=DR, init_download=init_download)
    # Test truthiness rather than `show == True`, which rejects truthy values.
    if show:
        jpg.show()
def DownFile(file, scale=0.396, width=512, height=512, DR=14, show=False):
    '''
    Batch-download SDSS jpg cutouts for every object listed in `file`.

    `file` is a whitespace-separated text file with columns: name, ra, dec.
    Each image is saved as <file-without-extension>/<name>.jpg.

    scale, width, height, DR are forwarded to simg / SdssJpg; if show is
    True, each image is also displayed as it is downloaded.
    '''
    import os
    import numpy as np

    # Output directory = input file name without its extension. splitext
    # (unlike file.split(".")) also handles paths containing extra dots.
    out_dir, ext = os.path.splitext(file)
    print(out_dir, ext)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    name, ra, dec = np.genfromtxt(file, delimiter="", unpack=True, dtype="U")
    name = name.astype(str)
    ra = ra.astype(float)
    dec = dec.astype(float)

    for idx, obj_name in enumerate(name):
        savename = os.path.join(out_dir, obj_name + ".jpg")
        simg(ra=ra[idx], dec=dec[idx], show=show, savename=savename,
             scale=scale, width=width, height=height, DR=DR)
class SdssJpg(object):
    '''
    Class for an SDSS jpg cutout image.
    See http://skyservice.pha.jhu.edu/dr10/imgcutout/imgcutout.asmx
    for more info.

    RA, DEC       - J2000, degrees
    SCALE         - plate scale in arcsec per pixel
    WIDTH, HEIGHT - size of image in pixels
    SAVENAME      - output jpg path; defaults to 'sdss.jpg'
    DR            - integer value for SDSS data release
    INIT_DOWNLOAD - if True (default), fetch the image on construction
    '''
    def __init__(self, ra, dec,
                 scale=0.3515625, width=512, height=512,
                 savename=None, DR=14, init_download=True):
        self.ra = ra
        self.dec = dec
        self.scale = scale
        self.width = width
        self.height = height
        self.DR = DR
        # `is None` (identity), not `== None`, per PEP 8.
        if savename is None:
            savename = 'sdss.jpg'
        self.savename = savename
        if init_download:
            self.download()

    def download(self):
        '''Fetch the jpg from the SDSS cutout service and load it as uint8 data.'''
        from urllib.request import urlretrieve
        from PIL import Image
        from numpy import array, uint8
        url = 'http://skyservice.pha.jhu.edu/dr%i/ImgCutout/getjpeg.aspx?' % self.DR
        url += 'ra=%0.5f&dec=%0.5f&' % (self.ra, self.dec)
        url += 'scale=%0.5f&' % self.scale
        url += 'width=%i&height=%i' % (self.width, self.height)
        print(url)
        urlretrieve(url, self.savename)
        self.data = array(Image.open(self.savename), dtype=uint8)

    def show(self):
        '''Display the downloaded image with matplotlib.'''
        import matplotlib.pylab as pl
        pl.imshow(self.data)
        pl.ion()
        pl.show()
def study25(filecsv='tmp.csv'):
    '''
    Dumb function for viewing images of 25 galaxies
    whose (RA, DEC) positions are listed in `filecsv`.
    '''
    from pandas.io.parsers import read_csv
    import matplotlib.pylab as pl

    pl.ion()
    table = read_csv(filecsv)
    print("tipo ", type(table))
    print("llaves ", table.keys())
    pl.figure(1)
    pl.clf()
    for idx in range(25):
        pl.subplot(5, 5, idx + 1)
        cutout = SdssJpg(table["ra"][idx], table["dec"][idx], width=64, height=64)
        pl.axis('off')
        pl.imshow(cutout.data)
| [
"pandas.io.parsers.read_csv",
"matplotlib.pylab.show",
"os.makedirs",
"matplotlib.pylab.subplot",
"matplotlib.pylab.imshow",
"os.path.exists",
"matplotlib.pylab.clf",
"numpy.genfromtxt",
"matplotlib.pylab.axis",
"PIL.Image.open",
"urllib.request.urlretrieve",
"matplotlib.pylab.ion",
"matplot... | [((826, 883), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""""""', 'unpack': '(True)', 'dtype': '"""U"""'}), "(file, delimiter='', unpack=True, dtype='U')\n", (839, 883), True, 'import numpy as np\n'), ((3034, 3042), 'matplotlib.pylab.ion', 'pl.ion', ([], {}), '()\n', (3040, 3042), True, 'import matplotlib.pylab as pl\n'), ((3049, 3066), 'pandas.io.parsers.read_csv', 'read_csv', (['filecsv'], {}), '(filecsv)\n', (3057, 3066), False, 'from pandas.io.parsers import read_csv\n'), ((3130, 3142), 'matplotlib.pylab.figure', 'pl.figure', (['(1)'], {}), '(1)\n', (3139, 3142), True, 'import matplotlib.pylab as pl\n'), ((3147, 3155), 'matplotlib.pylab.clf', 'pl.clf', ([], {}), '()\n', (3153, 3155), True, 'import matplotlib.pylab as pl\n'), ((759, 778), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (773, 778), False, 'import os\n'), ((788, 804), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (799, 804), False, 'import os\n'), ((2586, 2617), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'self.savename'], {}), '(url, self.savename)\n', (2597, 2617), False, 'from urllib.request import urlretrieve\n'), ((2751, 2771), 'matplotlib.pylab.imshow', 'pl.imshow', (['self.data'], {}), '(self.data)\n', (2760, 2771), True, 'import matplotlib.pylab as pl\n'), ((2780, 2788), 'matplotlib.pylab.ion', 'pl.ion', ([], {}), '()\n', (2786, 2788), True, 'import matplotlib.pylab as pl\n'), ((2797, 2806), 'matplotlib.pylab.show', 'pl.show', ([], {}), '()\n', (2804, 2806), True, 'import matplotlib.pylab as pl\n'), ((3188, 3211), 'matplotlib.pylab.subplot', 'pl.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (3198, 3211), True, 'import matplotlib.pylab as pl\n'), ((3281, 3295), 'matplotlib.pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (3288, 3295), True, 'import matplotlib.pylab as pl\n'), ((3305, 3324), 'matplotlib.pylab.imshow', 'pl.imshow', (['jpg.data'], {}), '(jpg.data)\n', (3314, 3324), True, 'import 
matplotlib.pylab as pl\n'), ((2644, 2669), 'PIL.Image.open', 'Image.open', (['self.savename'], {}), '(self.savename)\n', (2654, 2669), False, 'from PIL import Image\n')] |
# Assess the convergence of the harmonics as the integration domain increases
import numpy as np
from IPython import embed
import pickle
import matplotlib
import matplotlib.pyplot as plt
import itertools
# Name of transducer
transducer_name = 'H131'
power = 100
material = 'liver'
# How many harmonics have been computed
n_harms = 5
nPerLam = 20
# Set up plot
# Plotting convergence errors
matplotlib.rcParams.update({'font.size': 24})
plt.rc('font', family='serif')
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(10, 7))
# fig = plt.figure()
ax = fig.gca()
marker = itertools.cycle(('ro-', 'bs-', 'gd-', 'mp-'))
# marker = itertools.cycle(('ko-','ko--', 'bs-','bs--', 'rd-','rd--',
# 'mp-','mp--'))
# filename = 'results/Pierre_H101_water_harmonic1.pickle'
# filename = 'results/H101_water_harmonic1.pickle'
filename = 'results/' + transducer_name + '_power' + str(power) + \
'_' + material + '_harmonic1.pickle'
# filename = 'results/' + transducer_name + '_power' + str(power) + \
# '_' + material + '_harmonic1_nPerLam' + str(nPerLam) + '.pickle'
with open(filename, 'rb') as f:
bits = pickle.load(f)
line_1 = bits[0]
x_line = bits[1]
total = line_1
total_snip = np.zeros_like(total)
total_snip += total
norms = [np.linalg.norm(line_1)]
for i_harm in range(0, n_harms - 1):
# Load pickle file
# filename = 'results/H101_water_harmonic' + str(i_harm+2) + '.pickle'
# filename = 'results/Pierre_H101_water_harmonic' + str(i_harm+2) + '.pickle'
filename = 'results/' + transducer_name + '_power' + str(power) + \
'_' + material + '_harmonic' + str(i_harm + 2) + '.pickle'
# filename = 'results/' + transducer_name + '_power' + str(power) + \
# '_' + material + '_harmonic' + str(i_harm + 2) + '_nPerLam' + str(nPerLam) + '.pickle'
with open(filename, 'rb') as f:
VARS = pickle.load(f)
# Field along the central axis line
line = VARS[0]
# Add to total field
total += line[-1, :]
# Array of the tolerances considered in the convergence experiment
TOL = VARS[1]
xMinVals = VARS[2]
xMaxVals = VARS[3]
yMinVals = VARS[4]
yMaxVals = VARS[5]
roc = VARS[6]
print('ROC = ', roc)
k1 = VARS[7]
lam = 2 * np.pi / k1
# Preallocate array of relative errors to be computed
rel_errors = np.zeros(line.shape[0]-1)
# Distances
WX = np.zeros(line.shape[0]-1)
WY = np.zeros(line.shape[0]-1)
# Compute errors
count = 0
for i in range(line.shape[0]-1):
rel_errors[i] = np.linalg.norm(line[-1, :]-line[i, :]) / \
np.linalg.norm(line_1)
# rel_errors[i] = np.linalg.norm(line[-1, :]-line[i, :]) / \
# np.linalg.norm(line[-1, :])
# rel_errors[i] = np.abs(np.max(np.abs(line[-1, :]))-
# np.max(np.abs(line[i, :]))) / \
# np.max(np.abs(line[-1, :]))
# WX[i] = (roc - xMinVals[i]) / (roc - xMinVals[-1])
WX[i] = (xMaxVals[i] - xMinVals[i]) / (xMaxVals[-1] - xMinVals[-1])
WY[i] = yMinVals[i] / yMinVals[-1]
if (rel_errors[i] < 1e-2):
# if TOL[i] <2e-3:
if (count == 0):
count += 1
print('HARMONIC ',i_harm+2)
print('x coord of left edge of box:', xMinVals[i])
print('y coord of base of box:', yMinVals[i])
lammy = lam / (i_harm + 2)
print('x dist in wavelengths: ', (roc - xMinVals[i]) / lam)
print('y dist in wavelengths: ', (0 - yMinVals[i]) / lam)
total_snip += line[i, :]
norms.append(np.linalg.norm(line[i, :]))
print('Fraction of y domain = ', WY[i])
print('Fraction of x domain = ', WX[i])
# from IPython import embed;embed()
ROT = ((roc - xMinVals[-1]) * lammy / (lam/2) + xMaxVals[-1] - roc) / (xMaxVals[-1]-xMinVals[-1])
# ROT = ((roc - xMinVals[-1]) * lammy / (lam/2)) / (roc-xMinVals[-1])
print('Rule of thumb x = ', ROT)
plt.semilogy(np.flip(WX), 100*np.flip(rel_errors), next(marker), linewidth=2)
# plt.semilogy(np.flip(WY), 100*np.flip(rel_errors), next(marker), linewidth=2)
# plt.loglog(np.flip(TOL[:-1]), np.flip(rel_errors)*100, next(marker), linewidth=2)
# from IPython import embed;embed()
plt.grid(True)
# plt.loglog(np.flip(TOL[:-1]), norms[2]/norms[0]*100*(np.flip(TOL[:-1]))**0.5, 'k--', linewidth=2)
# plt.semilogy(np.flip(WX), 1e-4*np.flip(WX)**(-2), 'k--', linewidth=2)
# plt.ylim([7e-6, 1e0])
# plt.yticks([1e-3, 1e-2, 1e-1,1e0,1e1], ('0.001','0.01','0.1','1','10'))
plt.xlim([0, 1.01])
# plt.xticks(np.array([1,2,3,4,5]))
# plt.text(3e-4, 4e-1, r'$10\frac{|p_i|}{|p_1|}\sqrt{Q_0}$',
# {'color': 'k', 'fontsize': 20})
# legend
# plt.legend((r'$p_2$', r'$p_3$',r'$p_4$',r'$p_5$'),
# shadow=False, loc=(0.84, 0.03), handlelength=1.5, fontsize=20)
plt.legend((r'$p_2$', r'$p_3$',r'$p_4$',r'$p_5$'),
shadow=False, loc=(0.03, 0.03), handlelength=1.5, fontsize=20)
# plt.xlabel(r'$Q_0$')
plt.xlabel(r'Fraction of reference domain ($x$)')
plt.ylabel('Error (\%)')
filename = 'results/domain_convergence_space_x_' + material + transducer_name + '_power' + str(power) + '.pdf'
fig.savefig(filename)
plt.close()
# Plot harmonics
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(9, 7))
ax = fig.gca()
for i_harm in range(0, n_harms):
# Load pickle file
# filename = 'results/Pierre_H101_water_harmonic' + str(i_harm+1) + '.pickle'
# filename = 'results/H101_water_harmonic' + str(i_harm+2) + '.pickle'
filename = 'results/' + transducer_name + '_power' + str(power) + \
'_' + material + '_harmonic' + str(i_harm + 1) + '_nPerLam' + str(nPerLam) +'.pickle'
with open(filename, 'rb') as f:
VARS = pickle.load(f)
# Field along the central axis line
line = VARS[0]
# from IPython import embed; embed()
if (i_harm == 0):
plt.plot(x_line, np.abs(line))
else:
plt.plot(x_line, np.abs(line[-1, :]))
# plt.plot(x_line, np.abs(line[10, :]))
plt.grid(True)
fig.savefig('results/harms.png')
plt.close()
# Plot harmonics
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(14, 8))
ax = fig.gca()
for i_harm in range(0, 1):
# Load pickle file
# filename = 'results/Pierre_H101_water_harmonic' + str(i_harm+2) + '.pickle'
# filename = 'results/H101_water_harmonic' + str(i_harm+2) + '.pickle'
filename = 'results/' + transducer_name + '_power' + str(power) + \
'_' + material + '_harmonic' + str(i_harm + 2) + '_nPerLam' + str(nPerLam) +'.pickle'
with open(filename, 'rb') as f:
VARS = pickle.load(f)
# Field along the central axis line
line = VARS[0]
plt.plot(np.abs(line[-10:-1, :]).T)
fig.savefig('results/harm_conv.png')
plt.close()
# Plot total field
fig = plt.figure(figsize=(14, 8))
ax = fig.gca()
# plt.plot(x_line, np.real(total_snip))
plt.plot(x_line[600:], np.real(total[600:]))
fig.savefig('results/total.png')
plt.close()
# Compute error of total_snip
error_total = np.linalg.norm(total - total_snip) / np.linalg.norm(total)
print('Relative error of truncated domain approx = ', error_total)
| [
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"numpy.flip",
"numpy.abs",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.zeros",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.linalg.norm",
"matplotlib.pyplot.rc",
"... | [((393, 438), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 24}"], {}), "({'font.size': 24})\n", (419, 438), False, 'import matplotlib\n'), ((439, 469), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (445, 469), True, 'import matplotlib.pyplot as plt\n'), ((470, 497), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (476, 497), True, 'import matplotlib.pyplot as plt\n'), ((504, 531), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (514, 531), True, 'import matplotlib.pyplot as plt\n'), ((577, 622), 'itertools.cycle', 'itertools.cycle', (["('ro-', 'bs-', 'gd-', 'mp-')"], {}), "(('ro-', 'bs-', 'gd-', 'mp-'))\n", (592, 622), False, 'import itertools\n'), ((1235, 1255), 'numpy.zeros_like', 'np.zeros_like', (['total'], {}), '(total)\n', (1248, 1255), True, 'import numpy as np\n'), ((4451, 4465), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4459, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4737, 4756), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1.01]'], {}), '([0, 1.01])\n', (4745, 4756), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5154), 'matplotlib.pyplot.legend', 'plt.legend', (["('$p_2$', '$p_3$', '$p_4$', '$p_5$')"], {'shadow': '(False)', 'loc': '(0.03, 0.03)', 'handlelength': '(1.5)', 'fontsize': '(20)'}), "(('$p_2$', '$p_3$', '$p_4$', '$p_5$'), shadow=False, loc=(0.03, \n 0.03), handlelength=1.5, fontsize=20)\n", (5048, 5154), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of reference domain ($x$)"""'], {}), "('Fraction of reference domain ($x$)')\n", (5197, 5235), True, 'import matplotlib.pyplot as plt\n'), ((5237, 5262), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error (\\\\%)"""'], {}), "('Error (\\\\%)')\n", (5247, 5262), True, 'import 
matplotlib.pyplot as plt\n'), ((5396, 5407), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5405, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5427, 5454), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (5433, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5461, 5487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (5471, 5487), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6281), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6279, 6281), True, 'import matplotlib.pyplot as plt\n'), ((6300, 6327), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (6306, 6327), True, 'import matplotlib.pyplot as plt\n'), ((6334, 6361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (6344, 6361), True, 'import matplotlib.pyplot as plt\n'), ((6962, 6973), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6971, 6973), True, 'import matplotlib.pyplot as plt\n'), ((7001, 7028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (7011, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7163, 7174), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7172, 7174), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1170), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1167, 1170), False, 'import pickle\n'), ((1286, 1308), 'numpy.linalg.norm', 'np.linalg.norm', (['line_1'], {}), '(line_1)\n', (1300, 1308), True, 'import numpy as np\n'), ((2374, 2401), 'numpy.zeros', 'np.zeros', (['(line.shape[0] - 1)'], {}), '(line.shape[0] - 1)\n', (2382, 2401), True, 'import numpy as np\n'), ((2426, 2453), 'numpy.zeros', 'np.zeros', (['(line.shape[0] - 1)'], {}), '(line.shape[0] - 1)\n', (2434, 2453), True, 'import numpy as np\n'), ((2461, 2488), 'numpy.zeros', 'np.zeros', (['(line.shape[0] - 1)'], 
{}), '(line.shape[0] - 1)\n', (2469, 2488), True, 'import numpy as np\n'), ((6221, 6235), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6229, 6235), True, 'import matplotlib.pyplot as plt\n'), ((7108, 7128), 'numpy.real', 'np.real', (['total[600:]'], {}), '(total[600:])\n', (7115, 7128), True, 'import numpy as np\n'), ((7220, 7254), 'numpy.linalg.norm', 'np.linalg.norm', (['(total - total_snip)'], {}), '(total - total_snip)\n', (7234, 7254), True, 'import numpy as np\n'), ((7257, 7278), 'numpy.linalg.norm', 'np.linalg.norm', (['total'], {}), '(total)\n', (7271, 7278), True, 'import numpy as np\n'), ((1898, 1912), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1909, 1912), False, 'import pickle\n'), ((4173, 4184), 'numpy.flip', 'np.flip', (['WX'], {}), '(WX)\n', (4180, 4184), True, 'import numpy as np\n'), ((5939, 5953), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5950, 5953), False, 'import pickle\n'), ((6807, 6821), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6818, 6821), False, 'import pickle\n'), ((2584, 2624), 'numpy.linalg.norm', 'np.linalg.norm', (['(line[-1, :] - line[i, :])'], {}), '(line[-1, :] - line[i, :])\n', (2598, 2624), True, 'import numpy as np\n'), ((2651, 2673), 'numpy.linalg.norm', 'np.linalg.norm', (['line_1'], {}), '(line_1)\n', (2665, 2673), True, 'import numpy as np\n'), ((4190, 4209), 'numpy.flip', 'np.flip', (['rel_errors'], {}), '(rel_errors)\n', (4197, 4209), True, 'import numpy as np\n'), ((6103, 6115), 'numpy.abs', 'np.abs', (['line'], {}), '(line)\n', (6109, 6115), True, 'import numpy as np\n'), ((6152, 6171), 'numpy.abs', 'np.abs', (['line[-1, :]'], {}), '(line[-1, :])\n', (6158, 6171), True, 'import numpy as np\n'), ((6897, 6920), 'numpy.abs', 'np.abs', (['line[-10:-1, :]'], {}), '(line[-10:-1, :])\n', (6903, 6920), True, 'import numpy as np\n'), ((3715, 3741), 'numpy.linalg.norm', 'np.linalg.norm', (['line[i, :]'], {}), '(line[i, :])\n', (3729, 3741), True, 'import numpy as np\n')] |
"""Fitting routines."""
import dataclasses
from typing import Callable, Generic, Optional, Tuple, TypeVar
import numpy as np
from .npt_compat import ArrayLike, NDArray1D, NDArray2D
T = TypeVar("T")
@dataclasses.dataclass(frozen=True, repr=True)
class Model(Generic[T]):
"""Fitted model."""
f: Callable[..., T]
popt: NDArray1D
perr: NDArray1D
pcov: NDArray2D
chi2: float
ndf: int
p_value: float
def __call__(self, x: T) -> T:
"""Perform interpolation/extrapolation."""
return self.f(x, *self.popt)
def fit(
f: Callable[..., T],
xdata: ArrayLike,
ydata: ArrayLike,
yerr: ArrayLike,
*,
p0: Optional[ArrayLike] = None,
bounds: Optional[Tuple[ArrayLike, ArrayLike]] = (-np.inf, np.inf),
) -> Model[T]:
"""Fit a function to data."""
import scipy.optimize
import scipy.stats.distributions
popt, pcov = scipy.optimize.curve_fit(
f, xdata, ydata, p0=p0, sigma=yerr, absolute_sigma=True, bounds=bounds
)
perr = np.sqrt(np.diag(pcov))
chi2 = np.sum(((f(xdata, *popt) - ydata) / yerr) ** 2) # type: ignore[operator]
ndf = len(xdata) - len(popt) # type: ignore[arg-type]
p_value = scipy.stats.distributions.chi2.sf(chi2, ndf)
return Model(f, popt, perr, pcov, chi2, ndf, p_value)
| [
"numpy.diag",
"typing.TypeVar",
"dataclasses.dataclass"
] | [((189, 201), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (196, 201), False, 'from typing import Callable, Generic, Optional, Tuple, TypeVar\n'), ((205, 250), 'dataclasses.dataclass', 'dataclasses.dataclass', ([], {'frozen': '(True)', 'repr': '(True)'}), '(frozen=True, repr=True)\n', (226, 250), False, 'import dataclasses\n'), ((1032, 1045), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (1039, 1045), True, 'import numpy as np\n')] |
import os
import sys
import logging
import netCDF4 as nc
import numpy as np
import concurrent.futures
import pandas as pd
from skimage.draw import polygon
from pathlib import Path
from skimage.transform import resize
from sen3r import commons
dd = commons.DefaultDicts()
utils = commons.Utils()
class NcEngine:
"""
Provide methods to manipulate NetCDF4 data from Sentinel-3 OLCI products.
:input_nc_folder: This is the first param.
:parent_log: This is a second param.
"""
def __init__(self, input_nc_folder=None, parent_log=None, product='wfr'):
self.log = parent_log
self.nc_folder = Path(input_nc_folder)
self.nc_base_name = os.path.basename(input_nc_folder).split('.')[0]
self.product = product.lower()
self.netcdf_valid_band_list = self.get_valid_band_files(rad_only=False)
if self.product.lower() == 'wfr':
self.log.info(f'{os.getpid()} - Initializing geometries for: {self.nc_base_name}')
geo_coord = nc.Dataset(self.nc_folder / 'geo_coordinates.nc')
self.g_lat = geo_coord['latitude'][:]
self.g_lon = geo_coord['longitude'][:]
# Load and resize tie LON/LAT Bands using the geo_coordinates.nc file dimensions: (4091, 4865)
tie_geo = nc.Dataset(self.nc_folder / 'tie_geo_coordinates.nc')
self.t_lat = tie_geo['latitude'][:]
self.t_lat = resize(self.t_lat, (self.g_lat.shape[0], self.g_lat.shape[1]), anti_aliasing=False)
self.t_lon = tie_geo['longitude'][:]
self.t_lon = resize(self.t_lon, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=False)
# Load and resize Sun Geometry Angle Bands using the geo_coordinates.nc file dimensions: (4091, 4865)
t_geometries = nc.Dataset(self.nc_folder / 'tie_geometries.nc')
self.OAA = t_geometries['OAA'][:]
self.OAA = resize(self.OAA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=False)
self.OZA = t_geometries['OZA'][:]
self.OZA = resize(self.OZA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=False)
self.SAA = t_geometries['SAA'][:]
self.SAA = resize(self.SAA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=False)
self.SZA = t_geometries['SZA'][:]
self.SZA = resize(self.SZA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=False)
elif self.product.lower() == 'syn':
dsgeo = nc.Dataset(self.nc_folder / 'geolocation.nc')
self.g_lat = dsgeo['lat'][:]
self.g_lon = dsgeo['lon'][:]
else:
self.log.info(f'Invalid product: {self.product.upper()}.')
sys.exit(1)
def __repr__(self):
return f'{type(self.t_lat)}, ' \
f'{type(self.t_lon)}, ' \
f'{type(self.g_lat)}, ' \
f'{type(self.g_lon)}, ' \
f'{type(self.OAA)}, ' \
f'{type(self.OZA)}, ' \
f'{type(self.SAA)},' \
f'{type(self.SZA)},' \
f'nc_base_name:{self.nc_base_name}'
def get_valid_band_files(self, rad_only=True):
"""
Search inside the .SEN3 image folder for files ended with .nc; If rad_only is True,
only reflectance bands are returned, otherwise return everything ended with .nc extension.
"""
if self.nc_folder is None:
self.log.info('Unable to find files. NetCDF image folder is not defined during NcExplorer class instance.')
sys.exit(1)
sentinel_images_path = self.nc_folder
# retrieve all files in folder
files = os.listdir(sentinel_images_path)
# extract only NetCDFs from the file list
nc_files = [f for f in files if f.endswith('.nc')]
# extract only the radiometric bands from the NetCDF list
nc_bands = [b for b in nc_files if b.startswith('Oa')]
if rad_only:
return nc_bands
else:
return nc_files
def latlon_2_xy_poly(self, poly_path, go_parallel=True):
"""
Given an input polygon and image, return a dataframe containing
the data of the image that falls inside the polygon.
"""
self.log.info(f'Converting the polygon coordinates into a matrix x,y poly...')
# I) Convert the lon/lat polygon into a x/y poly:
xy_vert, ll_vert = self._lat_lon_2_xy(poly_path=poly_path, parallel=go_parallel)
return xy_vert, ll_vert
def _lat_lon_2_xy(self, poly_path, geojson=True, parallel=True):
"""
Takes in a polygon file and return a dataframe containing
the data in each band that falls inside the polygon.
"""
# self._test_initialized()
if parallel:
gpc = ParallelCoord()
xy_vertices = [gpc.parallel_get_xy_poly(self.g_lat, self.g_lon, vert) for vert in poly_path]
else:
xy_vertices = [utils.get_x_y_poly(self.g_lat, self.g_lon, vert) for vert in poly_path]
return xy_vertices, poly_path
def get_raster_mask(self, xy_vertices):
"""
Creates a boolean mask of 0 and 1 with the polygons using the nc resolution.
"""
# self._test_initialized()
# Generate extraction mask
img = np.zeros(self.g_lon.shape)
cc = np.ndarray(shape=(0,), dtype='int64')
rr = np.ndarray(shape=(0,), dtype='int64')
for vert in xy_vertices:
t_rr, t_cc = polygon(vert[:, 0], vert[:, 1], self.g_lon.shape)
img[t_rr, t_cc] = 1
cc = np.append(cc, t_cc)
rr = np.append(rr, t_rr)
return img, cc, rr
def get_rgb_from_poly(self, xy_vertices):
# II) Get the bounding box:
xmin, xmax, ymin, ymax = utils.bbox(xy_vertices)
# III) Get only the RGB bands:
if self.product.lower() == 'wfr':
ds = nc.Dataset(self.nc_folder / 'Oa08_reflectance.nc')
red = ds['Oa08_reflectance'][:]
ds = nc.Dataset(self.nc_folder / 'Oa06_reflectance.nc')
green = ds['Oa06_reflectance'][:]
ds = nc.Dataset(self.nc_folder / 'Oa03_reflectance.nc')
blue = ds['Oa03_reflectance'][:]
elif self.product.lower() == 'syn':
ds = nc.Dataset(self.nc_folder / 'Syn_Oa08_reflectance.nc')
red = ds['SDR_Oa08'][:]
ds = nc.Dataset(self.nc_folder / 'Syn_Oa06_reflectance.nc')
green = ds['SDR_Oa06'][:]
ds = nc.Dataset(self.nc_folder / 'Syn_Oa03_reflectance.nc')
blue = ds['SDR_Oa03'][:]
else:
self.log.info(f'Invalid product: {self.product.upper()}.')
sys.exit(1)
# IV) Subset the bands using the bbox:
red = red[ymin:ymax, xmin:xmax]
green = green[ymin:ymax, xmin:xmax]
blue = blue[ymin:ymax, xmin:xmax]
# V) Stack the bands vertically:
# https://stackoverflow.com/questions/10443295/combine-3-separate-numpy-arrays-to-an-rgb-image-in-python
rgb_uint8 = (np.dstack((red, green, blue)) * 255.999).astype(np.uint8)
return red, green, blue, rgb_uint8
class ParallelCoord:
@staticmethod
def vect_dist_subtraction(coord_pair, grid):
subtraction = coord_pair - grid
dist = np.linalg.norm(subtraction, axis=2)
result = np.where(dist == dist.min())
target_x_y = [result[0][0], result[1][0]]
return target_x_y
def parallel_get_xy_poly(self, lat_arr, lon_arr, polyline):
# Stack LAT and LON in the Z axis
grid = np.concatenate([lat_arr[..., None], lon_arr[..., None]], axis=2)
# Polyline is a GeoJSON coordinate array
polyline = polyline.squeeze() # squeeze removes one of the dimensions of the array
# https://numpy.org/doc/stable/reference/generated/numpy.squeeze.html
# Generate a list containing the lat, lon coordinates for each point of the input poly
coord_vect_pairs = []
for i in range(polyline.shape[0]):
coord_vect_pairs.append(np.array([polyline[i, 1], polyline[i, 0]]).reshape(1, 1, -1))
# for future reference
# https://stackoverflow.com/questions/6832554/multiprocessing-how-do-i-share-a-dict-among-multiple-processes
cores = utils.get_available_cores()
with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
try:
result = list(executor.map(self.vect_dist_subtraction, coord_vect_pairs, [grid]*len(coord_vect_pairs)))
except concurrent.futures.process.BrokenProcessPool as ex:
self.log.info(f"{ex} This might be caused by limited system resources. "
f"Try increasing system memory or disable concurrent processing. ")
return np.array(result)
class ParallelBandExtract:
def __init__(self, parent_log=None):
if parent_log:
self.log = parent_log
def _get_band_in_nc(self, file_n_band, rr, cc):
print(f'{os.getpid()} | Extracting band: {file_n_band[1]} from file: {file_n_band[0]}.\n')
# logging.info(f'{os.getpid()} | Extracting band: {file_n_band[1]} from file: {file_n_band[0]}.\n')
# self.log.info(f'{os.getpid()} | Extracting band: {file_n_band[1]} from file: {file_n_band[0]}.\n')
result = {}
# load NetCDF folder + nc_file_name
ds = nc.Dataset(file_n_band[0])
# load the nc_band_name as a matrix and unmask its values
band = ds[file_n_band[1]][:].data
# extract the values of the matrix and return as a dict entry
result[file_n_band[1]] = [band[x, y] for x, y in zip(rr, cc)]
return result
def nc_2_df(self, rr, cc, oaa, oza, saa, sza, lon, lat, nc_folder, wfr_files_p, parent_log=None):
"""
Given an input polygon and image, return a dataframe containing
the data of the image that falls inside the polygon.
"""
if parent_log:
self.log = logging.getLogger(name=parent_log)
wfr_files_p = [(os.path.join(nc_folder, nc_file), nc_band) for nc_file, nc_band in wfr_files_p]
# Generate initial df
custom_subset = {'x': rr, 'y': cc}
df = pd.DataFrame(custom_subset)
df['lat'] = [lat[x, y] for x, y in zip(df['x'], df['y'])]
df['lon'] = [lon[x, y] for x, y in zip(df['x'], df['y'])]
df['OAA'] = [oaa[x, y] for x, y in zip(df['x'], df['y'])]
df['OZA'] = [oza[x, y] for x, y in zip(df['x'], df['y'])]
df['SAA'] = [saa[x, y] for x, y in zip(df['x'], df['y'])]
df['SZA'] = [sza[x, y] for x, y in zip(df['x'], df['y'])]
cores = utils.get_available_cores()
# Populate the initial DF with the output from the other bands
with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
try:
list_of_bands = list(executor.map(
self._get_band_in_nc, wfr_files_p,
[rr] * len(wfr_files_p),
[cc] * len(wfr_files_p)
))
except concurrent.futures.process.BrokenProcessPool as ex:
self.log.info(f"{ex} This might be caused by limited system resources. "
f"Try increasing system memory or disable concurrent processing. ")
# For every returned dict inside the list, grab only the Key and append it at the final DF
for b in list_of_bands:
for key, val in b.items():
df[key] = val
# DROP NODATA
idx_names = df[df['Oa08_reflectance'] == 65535.0].index
df.drop(idx_names, inplace=True)
return df
| [
"sen3r.commons.DefaultDicts",
"pathlib.Path",
"numpy.linalg.norm",
"skimage.transform.resize",
"os.path.join",
"numpy.ndarray",
"netCDF4.Dataset",
"pandas.DataFrame",
"sen3r.commons.Utils",
"numpy.append",
"numpy.dstack",
"skimage.draw.polygon",
"os.path.basename",
"os.listdir",
"sys.exi... | [((249, 271), 'sen3r.commons.DefaultDicts', 'commons.DefaultDicts', ([], {}), '()\n', (269, 271), False, 'from sen3r import commons\n'), ((280, 295), 'sen3r.commons.Utils', 'commons.Utils', ([], {}), '()\n', (293, 295), False, 'from sen3r import commons\n'), ((630, 651), 'pathlib.Path', 'Path', (['input_nc_folder'], {}), '(input_nc_folder)\n', (634, 651), False, 'from pathlib import Path\n'), ((3702, 3734), 'os.listdir', 'os.listdir', (['sentinel_images_path'], {}), '(sentinel_images_path)\n', (3712, 3734), False, 'import os\n'), ((5362, 5388), 'numpy.zeros', 'np.zeros', (['self.g_lon.shape'], {}), '(self.g_lon.shape)\n', (5370, 5388), True, 'import numpy as np\n'), ((5402, 5439), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0,)', 'dtype': '"""int64"""'}), "(shape=(0,), dtype='int64')\n", (5412, 5439), True, 'import numpy as np\n'), ((5453, 5490), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0,)', 'dtype': '"""int64"""'}), "(shape=(0,), dtype='int64')\n", (5463, 5490), True, 'import numpy as np\n'), ((7375, 7410), 'numpy.linalg.norm', 'np.linalg.norm', (['subtraction'], {'axis': '(2)'}), '(subtraction, axis=2)\n', (7389, 7410), True, 'import numpy as np\n'), ((7655, 7719), 'numpy.concatenate', 'np.concatenate', (['[lat_arr[..., None], lon_arr[..., None]]'], {'axis': '(2)'}), '([lat_arr[..., None], lon_arr[..., None]], axis=2)\n', (7669, 7719), True, 'import numpy as np\n'), ((8896, 8912), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8904, 8912), True, 'import numpy as np\n'), ((9488, 9514), 'netCDF4.Dataset', 'nc.Dataset', (['file_n_band[0]'], {}), '(file_n_band[0])\n', (9498, 9514), True, 'import netCDF4 as nc\n'), ((10318, 10345), 'pandas.DataFrame', 'pd.DataFrame', (['custom_subset'], {}), '(custom_subset)\n', (10330, 10345), True, 'import pandas as pd\n'), ((1009, 1058), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'geo_coordinates.nc')"], {}), "(self.nc_folder / 'geo_coordinates.nc')\n", (1019, 1058), True, 'import 
netCDF4 as nc\n'), ((1290, 1343), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'tie_geo_coordinates.nc')"], {}), "(self.nc_folder / 'tie_geo_coordinates.nc')\n", (1300, 1343), True, 'import netCDF4 as nc\n'), ((1418, 1505), 'skimage.transform.resize', 'resize', (['self.t_lat', '(self.g_lat.shape[0], self.g_lat.shape[1])'], {'anti_aliasing': '(False)'}), '(self.t_lat, (self.g_lat.shape[0], self.g_lat.shape[1]),\n anti_aliasing=False)\n', (1424, 1505), False, 'from skimage.transform import resize\n'), ((1576, 1663), 'skimage.transform.resize', 'resize', (['self.t_lon', '(self.g_lon.shape[0], self.g_lon.shape[1])'], {'anti_aliasing': '(False)'}), '(self.t_lon, (self.g_lon.shape[0], self.g_lon.shape[1]),\n anti_aliasing=False)\n', (1582, 1663), False, 'from skimage.transform import resize\n'), ((1802, 1850), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'tie_geometries.nc')"], {}), "(self.nc_folder / 'tie_geometries.nc')\n", (1812, 1850), True, 'import netCDF4 as nc\n'), ((1921, 2007), 'skimage.transform.resize', 'resize', (['self.OAA', '(self.g_lon.shape[0], self.g_lon.shape[1])'], {'anti_aliasing': '(False)'}), '(self.OAA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=\n False)\n', (1927, 2007), False, 'from skimage.transform import resize\n'), ((2072, 2158), 'skimage.transform.resize', 'resize', (['self.OZA', '(self.g_lon.shape[0], self.g_lon.shape[1])'], {'anti_aliasing': '(False)'}), '(self.OZA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=\n False)\n', (2078, 2158), False, 'from skimage.transform import resize\n'), ((2223, 2309), 'skimage.transform.resize', 'resize', (['self.SAA', '(self.g_lon.shape[0], self.g_lon.shape[1])'], {'anti_aliasing': '(False)'}), '(self.SAA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=\n False)\n', (2229, 2309), False, 'from skimage.transform import resize\n'), ((2374, 2460), 'skimage.transform.resize', 'resize', (['self.SZA', '(self.g_lon.shape[0], self.g_lon.shape[1])'], 
{'anti_aliasing': '(False)'}), '(self.SZA, (self.g_lon.shape[0], self.g_lon.shape[1]), anti_aliasing=\n False)\n', (2380, 2460), False, 'from skimage.transform import resize\n'), ((3587, 3598), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3595, 3598), False, 'import sys\n'), ((5550, 5599), 'skimage.draw.polygon', 'polygon', (['vert[:, 0]', 'vert[:, 1]', 'self.g_lon.shape'], {}), '(vert[:, 0], vert[:, 1], self.g_lon.shape)\n', (5557, 5599), False, 'from skimage.draw import polygon\n'), ((5649, 5668), 'numpy.append', 'np.append', (['cc', 't_cc'], {}), '(cc, t_cc)\n', (5658, 5668), True, 'import numpy as np\n'), ((5686, 5705), 'numpy.append', 'np.append', (['rr', 't_rr'], {}), '(rr, t_rr)\n', (5695, 5705), True, 'import numpy as np\n'), ((5974, 6024), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Oa08_reflectance.nc')"], {}), "(self.nc_folder / 'Oa08_reflectance.nc')\n", (5984, 6024), True, 'import netCDF4 as nc\n'), ((6086, 6136), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Oa06_reflectance.nc')"], {}), "(self.nc_folder / 'Oa06_reflectance.nc')\n", (6096, 6136), True, 'import netCDF4 as nc\n'), ((6200, 6250), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Oa03_reflectance.nc')"], {}), "(self.nc_folder / 'Oa03_reflectance.nc')\n", (6210, 6250), True, 'import netCDF4 as nc\n'), ((10091, 10125), 'logging.getLogger', 'logging.getLogger', ([], {'name': 'parent_log'}), '(name=parent_log)\n', (10108, 10125), False, 'import logging\n'), ((2521, 2566), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'geolocation.nc')"], {}), "(self.nc_folder / 'geolocation.nc')\n", (2531, 2566), True, 'import netCDF4 as nc\n'), ((2747, 2758), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2755, 2758), False, 'import sys\n'), ((6358, 6412), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Syn_Oa08_reflectance.nc')"], {}), "(self.nc_folder / 'Syn_Oa08_reflectance.nc')\n", (6368, 6412), True, 'import netCDF4 as nc\n'), ((6466, 6520), 
'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Syn_Oa06_reflectance.nc')"], {}), "(self.nc_folder / 'Syn_Oa06_reflectance.nc')\n", (6476, 6520), True, 'import netCDF4 as nc\n'), ((6576, 6630), 'netCDF4.Dataset', 'nc.Dataset', (["(self.nc_folder / 'Syn_Oa03_reflectance.nc')"], {}), "(self.nc_folder / 'Syn_Oa03_reflectance.nc')\n", (6586, 6630), True, 'import netCDF4 as nc\n'), ((6765, 6776), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6773, 6776), False, 'import sys\n'), ((10151, 10183), 'os.path.join', 'os.path.join', (['nc_folder', 'nc_file'], {}), '(nc_folder, nc_file)\n', (10163, 10183), False, 'import os\n'), ((680, 713), 'os.path.basename', 'os.path.basename', (['input_nc_folder'], {}), '(input_nc_folder)\n', (696, 713), False, 'import os\n'), ((7127, 7156), 'numpy.dstack', 'np.dstack', (['(red, green, blue)'], {}), '((red, green, blue))\n', (7136, 7156), True, 'import numpy as np\n'), ((9112, 9123), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9121, 9123), False, 'import os\n'), ((919, 930), 'os.getpid', 'os.getpid', ([], {}), '()\n', (928, 930), False, 'import os\n'), ((8145, 8187), 'numpy.array', 'np.array', (['[polyline[i, 1], polyline[i, 0]]'], {}), '([polyline[i, 1], polyline[i, 0]])\n', (8153, 8187), True, 'import numpy as np\n')] |
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
from actions.action import Action
import actions.condition_ops as cond
# WaitYears
class WaitYears(Action):
    """Numeric action that advances the applicant's age by some number of years."""

    def __init__(self, features):
        super().__init__(
            name='WaitYears',
            description='Wait x amount of years',
            type='Numeric',
            features=features,
            target_features=['age_in_years'],
            init_p=[0],
        )

    def apply(self, instance, p, use_tensor=True):
        # Shift the age feature by the (possibly tensor-valued) action parameter.
        delta = self.get_param(p, use_tensor)
        return self.features['age_in_years'].change_feature_value(instance, delta, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Cost is the absolute number of years waited.
        age_feature = self.features['age_in_years']
        age_after = age_feature.get_feature_value(new_instance, use_tensor, space='x')
        age_before = age_feature.get_feature_value(instance, use_tensor, space='x')
        if use_tensor:
            cost = tf.abs(age_after - age_before)
        else:
            cost = np.abs(age_after - age_before)
        return self.return_cost(instance, new_instance, cost, use_tensor)

    def postcondition(self, instance, new_instance, use_tensor=True):
        # Valid outcome: age strictly increased and stays below 120 years.
        age_feature = self.features['age_in_years']
        age_after = age_feature.get_feature_value(new_instance, use_tensor, space='x')
        age_before = age_feature.get_feature_value(instance, use_tensor, space='x')
        grew_older = cond.op_gt(age_after, age_before, use_tensor, scale=100.)
        under_limit = cond.op_lt(age_after, 120, use_tensor, scale=100.)
        return cond.op_and(grew_older, under_limit, use_tensor)
# Categorical Action
class Naturalize(Action):
    """Categoric action: the applicant becomes a citizen (foreign_worker -> 1)."""

    def __init__(self, features):
        super().__init__(
            name='Naturalize',
            description='Become citizen',
            type='Categoric',
            num_params=0,
            features=features,
            target_features=['foreign_worker'],
            init_p=[],
        )

    def apply(self, instance, p, use_tensor=True):
        # Set foreign_worker to category 1; p is unused (num_params=0).
        return self.features['foreign_worker'].change_feature_value(instance, 1, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Flat cost for naturalizing.
        return self.return_cost(instance, new_instance, 5., use_tensor)

    def precondition(self, instance, use_tensor=True):
        # Only applicable when the applicant is not already in category 1.
        current = self.features['foreign_worker'].get_feature_value(instance, use_tensor)
        return cond.op_neq(current, 1., use_tensor)
class GetUnskilledJob(Action):
    """Categoric action: take an unskilled job (job category 1)."""

    def __init__(self, features):
        super().__init__(
            name='GetUnskilledJob',
            description='Get an unskilled job',
            type='Categoric',
            num_params=0,
            features=features,
            target_features=['job'],
            init_p=[],
        )

    def apply(self, instance, p, use_tensor=True):
        # Set the job feature to category 1; p is unused (num_params=0).
        return self.features['job'].change_feature_value(instance, 1, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Flat cost for taking the job.
        return self.return_cost(instance, new_instance, 5., use_tensor)

    def precondition(self, instance, use_tensor=True):
        # Must be unemployed: the current job may be none of categories 1, 2, 3.
        job = self.features['job'].get_feature_value(instance, use_tensor)
        not_cat1 = cond.op_neq(job, 1., use_tensor)
        not_cat2 = cond.op_neq(job, 2., use_tensor)
        not_cat3 = cond.op_neq(job, 3., use_tensor)
        return cond.op_and(cond.op_and(not_cat1, not_cat2, use_tensor), not_cat3, use_tensor)
# Categorical Action
class GetGuarantor(Action):
    """Categoric action: add a guarantor to the loan application."""

    def __init__(self, features):
        super().__init__(
            name='GetGuarantor',
            description='Get a guarantor',
            type='Categoric',
            num_params=0,
            features=features,
            target_features=['other_debtors_guarantors'],
            init_p=[],
        )

    def apply(self, instance, p, use_tensor=True):
        # Set other_debtors_guarantors to category 2; p is unused (num_params=0).
        feature = self.features['other_debtors_guarantors']
        return feature.change_feature_value(instance, 2, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Flat cost for finding a guarantor.
        return self.return_cost(instance, new_instance, 5., use_tensor)

    def precondition(self, instance, use_tensor=True):
        # Only applicable when there is no guarantor yet (category != 2).
        current = self.features['other_debtors_guarantors'].get_feature_value(instance, use_tensor)
        return cond.op_neq(current, 2., use_tensor)
class ChangeCreditAmount(Action):
    """Numeric action that adjusts the requested loan amount."""

    def __init__(self, features):
        super().__init__(
            name='ChangeCreditAmount',
            description='Changes Requested Loan Amount',
            type='Numeric',
            features=features,
            target_features=['credit_amount'],
            init_p=[0],
        )

    def apply(self, instance, p, use_tensor=True):
        # Shift the credit amount by the action parameter.
        delta = self.get_param(p, use_tensor)
        return self.features['credit_amount'].change_feature_value(instance, delta, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Cost is the squared relative change of the credit amount.
        feature = self.features['credit_amount']
        credit_after = feature.get_feature_value(new_instance, use_tensor, space='x')
        credit_before = feature.get_feature_value(instance, use_tensor, space='x')
        rel_change = (credit_after - credit_before) / credit_before
        if use_tensor:
            cost = tf.reduce_sum(tf.square(rel_change))
        else:
            cost = np.square(rel_change)
        return self.return_cost(instance, new_instance, cost, use_tensor)

    def precondition(self, instance, use_tensor=True):
        # Applicant must be older than 15 years.
        age = self.features['age_in_years'].get_feature_value(instance, use_tensor, space='x')
        return cond.op_gt(age, 15, use_tensor)

    def postcondition(self, instance, new_instance, use_tensor=True):
        # Valid outcome: new credit amount lies strictly between 0 and 100000.
        credit_after = self.features['credit_amount'].get_feature_value(new_instance, use_tensor,
                                                                        space='x')
        positive = cond.op_gt(credit_after, 0, use_tensor, scale=100000.)
        bounded = cond.op_lt(credit_after, 100000, use_tensor, scale=100000.)
        return cond.op_and(positive, bounded, use_tensor)
class ChangeLoanPeriod(Action):
    """Numeric action that adjusts the duration of the loan."""

    def __init__(self, features):
        super().__init__(
            name='ChangeLoanPeriod',
            description='Changes Loan Period',
            type='Numeric',
            features=features,
            target_features=['loan_duration'],
            init_p=[0],
        )

    def apply(self, instance, p, use_tensor=True):
        # Shift the loan duration by the action parameter.
        delta = self.get_param(p, use_tensor)
        return self.features['loan_duration'].change_feature_value(instance, delta, use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Cost is the squared relative change of the loan duration.
        feature = self.features['loan_duration']
        period_after = feature.get_feature_value(new_instance, use_tensor, space='x')
        period_before = feature.get_feature_value(instance, use_tensor, space='x')
        rel_change = (period_after - period_before) / period_before
        if use_tensor:
            cost = tf.reduce_sum(tf.square(rel_change))
        else:
            cost = np.square(rel_change)
        return self.return_cost(instance, new_instance, cost, use_tensor)

    def postcondition(self, instance, new_instance, use_tensor=True):
        # Valid outcome: new loan period lies strictly between 0 and 120 months.
        period_after = self.features['loan_duration'].get_feature_value(new_instance, use_tensor,
                                                                        space='x')
        positive = cond.op_gt(period_after, 0, use_tensor, scale=100.)
        bounded = cond.op_lt(period_after, 120, use_tensor, scale=100.)
        return cond.op_and(positive, bounded, use_tensor)
class AdjustLoanPeriod(Action):
    """Numeric action changing credit amount and loan period together.

    The action parameter is the change in credit amount (z-space); the loan
    duration is rescaled so that the credit/period ratio — and thus the
    per-period payment — stays the same.
    """

    def __init__(self, features):
        super().__init__(name='AdjustLoanPeriod',
                         description='Changes Loan Period but keeps total loan / period same',
                         type='Numeric',
                         features=features,
                         target_features=['credit_amount', 'loan_duration'],
                         init_p=[0])

    def apply(self, instance, p, use_tensor=True):
        # Current credit amount and loan period in original feature space (x).
        old_credit_x = self.features['credit_amount'].get_feature_value(instance, use_tensor,
                                                                        space='x')
        old_period_x = self.features['loan_duration'].get_feature_value(instance, use_tensor,
                                                                        space='x')
        # The parameter is a credit-amount delta in z-space; convert it to x-space
        # (add_mean=False because this is a delta, not an absolute value).
        change_in_credit_z = self.get_param(p, use_tensor)
        change_in_credit_x = self.features['credit_amount'].ztox(change_in_credit_z, add_mean=False)
        # Scale the period by the same factor as the credit so credit/period is preserved,
        # then convert the period delta back to z-space for the feature update.
        change_in_period_x = old_period_x * (old_credit_x + change_in_credit_x) / old_credit_x
        change_in_period_z = self.features['loan_duration'].xtoz(change_in_period_x, add_mean=False)
        instance = self.features['credit_amount'].change_feature_value(instance, change_in_credit_z,
                                                                       use_tensor)
        return self.features['loan_duration'].change_feature_value(instance, change_in_period_z,
                                                                   use_tensor)

    def get_cost(self, instance, new_instance, use_tensor=True):
        # Cost is the squared relative change of the credit amount (z-space values here).
        new_credit = self.features['credit_amount'].get_feature_value(new_instance, use_tensor)
        old_credit = self.features['credit_amount'].get_feature_value(instance, use_tensor)
        change = (new_credit - old_credit) / old_credit
        cost = tf.reduce_sum(tf.square(change)) if use_tensor else np.square(change)
        return self.return_cost(instance, new_instance, cost, use_tensor)

    def precondition(self, instance, use_tensor=True):
        # Only applicable for loans above 1000 (avoids dividing by a tiny credit amount).
        old_credit_x = self.features['credit_amount'].get_feature_value(instance, use_tensor,
                                                                        space='x')
        return cond.op_gt(old_credit_x, 1000, use_tensor, scale=100000.)

    def postcondition(self, instance, new_instance, use_tensor=True):
        # Valid outcome: period in (0, 120) and credit amount in (0, 100000).
        new_period_x = self.features['loan_duration'].get_feature_value(new_instance, use_tensor,
                                                                        space='x')
        new_credit_x = self.features['credit_amount'].get_feature_value(new_instance, use_tensor,
                                                                        space='x')
        return cond.op_and(cond.op_and(cond.op_gt(new_period_x, 0, use_tensor, scale=100.),
                                       cond.op_lt(new_period_x, 120, use_tensor, scale=100.),
                                       use_tensor),
                           cond.op_and(cond.op_gt(new_credit_x, 0, use_tensor, scale=100000.),
                                       cond.op_lt(new_credit_x, 100000, use_tensor, scale=100000.),
                                       use_tensor),
                           use_tensor)
# Registry of all action classes exposed by this module.
actions = [
    WaitYears,
    Naturalize,
    ChangeCreditAmount,
    ChangeLoanPeriod,
    AdjustLoanPeriod,
    GetGuarantor,
    GetUnskilledJob,
]
| [
"tensorflow.compat.v1.square",
"numpy.abs",
"actions.condition_ops.op_lt",
"numpy.square",
"actions.condition_ops.op_gt",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.disable_v2_behavior"
] | [((34, 58), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (56, 58), True, 'import tensorflow.compat.v1 as tf\n'), ((6229, 6260), 'actions.condition_ops.op_gt', 'cond.op_gt', (['age', '(15)', 'use_tensor'], {}), '(age, 15, use_tensor)\n', (6239, 6260), True, 'import actions.condition_ops as cond\n'), ((10596, 10654), 'actions.condition_ops.op_gt', 'cond.op_gt', (['old_credit_x', '(1000)', 'use_tensor'], {'scale': '(100000.0)'}), '(old_credit_x, 1000, use_tensor, scale=100000.0)\n', (10606, 10654), True, 'import actions.condition_ops as cond\n'), ((1051, 1076), 'tensorflow.compat.v1.abs', 'tf.abs', (['(new_age - old_age)'], {}), '(new_age - old_age)\n', (1057, 1076), True, 'import tensorflow.compat.v1 as tf\n'), ((1096, 1121), 'numpy.abs', 'np.abs', (['(new_age - old_age)'], {}), '(new_age - old_age)\n', (1102, 1121), True, 'import numpy as np\n'), ((1562, 1615), 'actions.condition_ops.op_gt', 'cond.op_gt', (['new_age', 'old_age', 'use_tensor'], {'scale': '(100.0)'}), '(new_age, old_age, use_tensor, scale=100.0)\n', (1572, 1615), True, 'import actions.condition_ops as cond\n'), ((1643, 1692), 'actions.condition_ops.op_lt', 'cond.op_lt', (['new_age', '(120)', 'use_tensor'], {'scale': '(100.0)'}), '(new_age, 120, use_tensor, scale=100.0)\n', (1653, 1692), True, 'import actions.condition_ops as cond\n'), ((5971, 5988), 'numpy.square', 'np.square', (['change'], {}), '(change)\n', (5980, 5988), True, 'import numpy as np\n'), ((6536, 6589), 'actions.condition_ops.op_gt', 'cond.op_gt', (['new_credit', '(0)', 'use_tensor'], {'scale': '(100000.0)'}), '(new_credit, 0, use_tensor, scale=100000.0)\n', (6546, 6589), True, 'import actions.condition_ops as cond\n'), ((6617, 6675), 'actions.condition_ops.op_lt', 'cond.op_lt', (['new_credit', '(100000)', 'use_tensor'], {'scale': '(100000.0)'}), '(new_credit, 100000, use_tensor, scale=100000.0)\n', (6627, 6675), True, 'import actions.condition_ops as cond\n'), ((7808, 7825), 
'numpy.square', 'np.square', (['change'], {}), '(change)\n', (7817, 7825), True, 'import numpy as np\n'), ((8175, 8225), 'actions.condition_ops.op_gt', 'cond.op_gt', (['new_period', '(0)', 'use_tensor'], {'scale': '(100.0)'}), '(new_period, 0, use_tensor, scale=100.0)\n', (8185, 8225), True, 'import actions.condition_ops as cond\n'), ((8253, 8305), 'actions.condition_ops.op_lt', 'cond.op_lt', (['new_period', '(120)', 'use_tensor'], {'scale': '(100.0)'}), '(new_period, 120, use_tensor, scale=100.0)\n', (8263, 8305), True, 'import actions.condition_ops as cond\n'), ((10256, 10273), 'numpy.square', 'np.square', (['change'], {}), '(change)\n', (10265, 10273), True, 'import numpy as np\n'), ((5933, 5950), 'tensorflow.compat.v1.square', 'tf.square', (['change'], {}), '(change)\n', (5942, 5950), True, 'import tensorflow.compat.v1 as tf\n'), ((7770, 7787), 'tensorflow.compat.v1.square', 'tf.square', (['change'], {}), '(change)\n', (7779, 7787), True, 'import tensorflow.compat.v1 as tf\n'), ((10218, 10235), 'tensorflow.compat.v1.square', 'tf.square', (['change'], {}), '(change)\n', (10227, 10235), True, 'import tensorflow.compat.v1 as tf\n'), ((11126, 11178), 'actions.condition_ops.op_gt', 'cond.op_gt', (['new_period_x', '(0)', 'use_tensor'], {'scale': '(100.0)'}), '(new_period_x, 0, use_tensor, scale=100.0)\n', (11136, 11178), True, 'import actions.condition_ops as cond\n'), ((11218, 11272), 'actions.condition_ops.op_lt', 'cond.op_lt', (['new_period_x', '(120)', 'use_tensor'], {'scale': '(100.0)'}), '(new_period_x, 120, use_tensor, scale=100.0)\n', (11228, 11272), True, 'import actions.condition_ops as cond\n'), ((11364, 11419), 'actions.condition_ops.op_gt', 'cond.op_gt', (['new_credit_x', '(0)', 'use_tensor'], {'scale': '(100000.0)'}), '(new_credit_x, 0, use_tensor, scale=100000.0)\n', (11374, 11419), True, 'import actions.condition_ops as cond\n'), ((11459, 11519), 'actions.condition_ops.op_lt', 'cond.op_lt', (['new_credit_x', '(100000)', 'use_tensor'], {'scale': 
'(100000.0)'}), '(new_credit_x, 100000, use_tensor, scale=100000.0)\n', (11469, 11519), True, 'import actions.condition_ops as cond\n')] |
import csv
import glob
import os
from pathlib import Path
from shutil import rmtree, copy
import numpy as np
import math
import ipdb
from embeddings import get_embeddings
def distance_(embeddings0):
    """Return the cosine-similarity score between the first two embeddings.

    Parameters
    ----------
    embeddings0 : np.ndarray
        Array of shape (n, d) with at least two embedding row vectors
        (assumed L2-normalised, so the dot product is the cosine similarity).

    Returns
    -------
    float
        Dot product of rows 0 and 1, clipped to [0, 1].
    """
    # Bug fix: use the argument instead of the module-level global
    # `embeddings`, which made the function ignore its input entirely.
    cos_similarity = np.dot(embeddings0, embeddings0.T)
    cos_similarity = cos_similarity.clip(min=0, max=1)
    return cos_similarity[0][1]
# Script: for each morph image named "<subject1>-vs-<subject2>.jpg", copy the two
# source face images into a per-pair temp folder, compute their embeddings, and
# write the comparison score to a CSV file.
# NOTE(review): `f` is never closed; consider a `with` block.
f = open('comparison_score_imposter_v1.csv', 'w', encoding='UTF8', newline='')
csv_save = csv.writer(f)
header = ['subject1', 'subject2', 'score']
csv_save.writerow(header)
folder_name = r'C:\Users\admin\PycharmProjects\pythonProject2\data\new_aligned\MIPGAN1vs2'
folder_name_2 = r'C:\Users\admin\PycharmProjects\pythonProject2\data\new_aligned\subject1'
folder_name_3 = r'C:\Users\admin\PycharmProjects\pythonProject2\data\new_aligned\subject2'
# Recreate a clean ./temp working directory on every run.
if os.path.exists(os.path.join(os.getcwd(), 'temp')):
    rmtree('temp')
os.mkdir('temp')
for index, file in enumerate(os.listdir(folder_name)):
    source = file
    # print(source)
    # Morph file names look like "<id1>-vs-<id2>.jpg".
    after_split = source.replace('.jpg', '').split('-vs-')
    print(after_split[0], after_split[1])
    # Find the subject-1 image whose basename matches the first id.
    # NOTE(review): if no file matches, `subject_1` is unbound on the first
    # iteration (NameError) or silently reuses the previous iteration's value.
    for file2 in os.listdir(folder_name_2):
        source2 = file2
        after_split_subject = source2.split('.JPG')[0]
        if after_split_subject == after_split[0]:
            subject_1 = source2
        # make directory for each images
    root = 'temp\/' + str(index)
    os.mkdir(root)
    f1 = 'temp\/' + str(index) + '\cat'
    os.mkdir(f1)
    img1_path = os.path.join(folder_name_2, subject_1)
    print(img1_path)
    img2_path = os.path.join(folder_name_3, after_split[1]+'.JPG')
    print(img2_path)
    copy(img1_path, f1)
    copy(img2_path, f1)
    input_size = [112, 112]
    embeddings = get_embeddings(
        data_root=root,
        model_root="checkpoint/backbone_ir50_ms1m_epoch120.pth",
        input_size=input_size,
    )
    # obj = DeepFace.verify(img1_path, img2_path, model_name='ArcFace', detector_backend='dlib', enforce_detection=False)
    # score = obj['distance']
    # NOTE(review): distance_() reads the module-level `embeddings` global
    # rather than its argument; it works here only because the global is
    # assigned just above.
    data = [after_split[0]+'.JPG', after_split[1]+'.JPG', distance_(embeddings)]
    print(data)
    csv_save.writerow(data)
"os.mkdir",
"csv.writer",
"shutil.rmtree",
"os.getcwd",
"embeddings.get_embeddings",
"numpy.dot",
"os.path.join",
"os.listdir",
"shutil.copy"
] | [((478, 491), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (488, 491), False, 'import csv\n'), ((909, 925), 'os.mkdir', 'os.mkdir', (['"""temp"""'], {}), "('temp')\n", (917, 925), False, 'import os\n'), ((263, 295), 'numpy.dot', 'np.dot', (['embeddings', 'embeddings.T'], {}), '(embeddings, embeddings.T)\n', (269, 295), True, 'import numpy as np\n'), ((894, 908), 'shutil.rmtree', 'rmtree', (['"""temp"""'], {}), "('temp')\n", (900, 908), False, 'from shutil import rmtree, copy\n'), ((955, 978), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (965, 978), False, 'import os\n'), ((1137, 1162), 'os.listdir', 'os.listdir', (['folder_name_2'], {}), '(folder_name_2)\n', (1147, 1162), False, 'import os\n'), ((867, 878), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (876, 878), False, 'import os\n'), ((1423, 1437), 'os.mkdir', 'os.mkdir', (['root'], {}), '(root)\n', (1431, 1437), False, 'import os\n'), ((1498, 1510), 'os.mkdir', 'os.mkdir', (['f1'], {}), '(f1)\n', (1506, 1510), False, 'import os\n'), ((1535, 1573), 'os.path.join', 'os.path.join', (['folder_name_2', 'subject_1'], {}), '(folder_name_2, subject_1)\n', (1547, 1573), False, 'import os\n'), ((1627, 1679), 'os.path.join', 'os.path.join', (['folder_name_3', "(after_split[1] + '.JPG')"], {}), "(folder_name_3, after_split[1] + '.JPG')\n", (1639, 1679), False, 'import os\n'), ((1719, 1738), 'shutil.copy', 'copy', (['img1_path', 'f1'], {}), '(img1_path, f1)\n', (1723, 1738), False, 'from shutil import rmtree, copy\n'), ((1751, 1770), 'shutil.copy', 'copy', (['img2_path', 'f1'], {}), '(img2_path, f1)\n', (1755, 1770), False, 'from shutil import rmtree, copy\n'), ((1833, 1948), 'embeddings.get_embeddings', 'get_embeddings', ([], {'data_root': 'root', 'model_root': '"""checkpoint/backbone_ir50_ms1m_epoch120.pth"""', 'input_size': 'input_size'}), "(data_root=root, model_root=\n 'checkpoint/backbone_ir50_ms1m_epoch120.pth', input_size=input_size)\n", (1847, 1948), False, 'from embeddings import 
get_embeddings\n')] |
import re
import cv2 as cv
import numpy as np
import requests
URL_REGEX = re.compile(r"http://|https://|ftp://")


def imread(uri, flags=1):
    """Decode an image from a local path, a URL, or raw bytes.

    flags follows cv2 semantics (0: grayscale, 1: color).
    """
    if isinstance(uri, bytes):
        # Raw encoded image bytes.
        return cv.imdecode(np.frombuffer(uri, np.uint8), flags)
    if isinstance(uri, str):
        if not URL_REGEX.match(uri):
            # Plain filesystem path.
            return cv.imread(uri, flags)
        # Remote image: download, then decode from memory.
        payload = requests.get(uri).content
        return cv.imdecode(np.frombuffer(payload, np.uint8), flags)
    raise Exception(f"{type(uri)} not supported")
def convert_color_factory(src, dst):
    """Build a converter mapping images from colour space *src* to *dst*.

    Resolves the matching cv2 ``COLOR_<SRC>2<DST>`` code once and closes
    over it.
    """
    conversion_code = getattr(cv, f"COLOR_{src.upper()}2{dst.upper()}")

    def convert_color(img):
        return cv.cvtColor(img, conversion_code)

    return convert_color


bgr2rgb = convert_color_factory("bgr", "rgb")

rgb2bgr = convert_color_factory("rgb", "bgr")
| [
"cv2.cvtColor",
"numpy.frombuffer",
"cv2.imdecode",
"cv2.imread",
"requests.get",
"re.compile"
] | [((76, 113), 're.compile', 're.compile', (['"""http://|https://|ftp://"""'], {}), "('http://|https://|ftp://')\n", (86, 113), False, 'import re\n'), ((400, 421), 'cv2.imread', 'cv.imread', (['uri', 'flags'], {}), '(uri, flags)\n', (409, 421), True, 'import cv2 as cv\n'), ((470, 498), 'numpy.frombuffer', 'np.frombuffer', (['uri', 'np.uint8'], {}), '(uri, np.uint8)\n', (483, 498), True, 'import numpy as np\n'), ((514, 539), 'cv2.imdecode', 'cv.imdecode', (['nparr', 'flags'], {}), '(nparr, flags)\n', (525, 539), True, 'import cv2 as cv\n'), ((738, 760), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'code'], {}), '(img, code)\n', (749, 760), True, 'import cv2 as cv\n'), ((308, 339), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'np.uint8'], {}), '(buffer, np.uint8)\n', (321, 339), True, 'import numpy as np\n'), ((359, 384), 'cv2.imdecode', 'cv.imdecode', (['nparr', 'flags'], {}), '(nparr, flags)\n', (370, 384), True, 'import cv2 as cv\n'), ((262, 279), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (274, 279), False, 'import requests\n')] |
import warnings
from pathlib import Path
from typing import Optional
import click
import joblib
import librosa
import numpy as np
from click.types import Choice
from click_option_group import RequiredAnyOptionGroup, optgroup
from matplotlib import pyplot as plt
from ertk.dataset import get_audio_paths, write_features
from ertk.utils import PathlibPath, TqdmParallel
def calculate_spectrogram(
    path: Path,
    channels: str = "mean",
    pre_emphasis: float = 0.95,
    skip: float = 0,
    length: Optional[float] = None,
    window_size: float = 0.025,
    window_shift: float = 0.01,
    n_mels: int = 240,
    clip: Optional[float] = None,
    fmin: float = 0,
    fmax: float = 8000,
):
    """Compute a dB-scaled mel spectrogram for one audio file.

    Loads the file at its native sample rate (optionally skipping `skip`
    seconds and reading at most `length` seconds), fuses the channels per
    `channels` ('left', 'right', 'mean', 'diff'), zero-pads to `length`,
    applies pre-emphasis, and returns the transposed dB mel spectrogram
    with shape (frames, n_mels).
    """
    audio, sr = librosa.core.load(
        path, sr=None, mono=False, offset=skip, duration=length
    )
    # Ensure a (channels, samples) layout even for mono files.
    if len(audio.shape) == 1:
        audio = np.expand_dims(audio, 0)
    window_samples = int(window_size * sr)
    stride_samples = int(window_shift * sr)
    # Channel fusion
    if channels == "left":
        audio = audio[0]
    elif channels == "right":
        audio = audio[1]
    elif channels == "mean":
        audio = np.mean(audio[:2], axis=0)
    elif channels == "diff":
        audio = audio[0] - audio[1]
    # Padding: zero-pad so every clip is exactly `length` seconds long.
    if length is not None and length > 0:
        length_samples = int(length * sr)
        if len(audio) < length_samples:
            audio = np.pad(audio, (0, length_samples - len(audio)))
        assert len(audio) == length_samples
    # Pre-emphasis
    if pre_emphasis > 0:
        audio = librosa.effects.preemphasis(audio, pre_emphasis)
    # Mel spectrogram (silence librosa's UserWarnings for this call only).
    warnings.simplefilter("ignore", UserWarning)
    # FFT size: next power of two >= window length.
    n_fft = 2 ** int(np.ceil(np.log2(window_samples)))
    melspec = librosa.feature.melspectrogram(
        audio,
        n_mels=n_mels,
        sr=sr,
        n_fft=n_fft,
        hop_length=stride_samples,
        win_length=window_samples,
        fmin=fmin,
        fmax=fmax,
    )
    warnings.simplefilter("default", UserWarning)
    # Convert power to dB relative to the peak; `clip` bounds the dynamic range.
    db_spectrogram = librosa.power_to_db(melspec, ref=np.max, top_db=clip)
    return db_spectrogram.T
@click.command()
@click.argument("corpus", type=str)
@click.argument("input", type=PathlibPath(exists=True, dir_okay=False))
@optgroup.group("Output format", cls=RequiredAnyOptionGroup)
@optgroup.option("--output", type=Path, help="Write dataset")
@optgroup.option("--preview", type=int, help="Preview spectrogram")
@optgroup.group("Spectrogram options")
@optgroup.option("--length", type=float, help="Optional max clip length")
@optgroup.option(
    "--skip", type=float, default=0, help="Optional amount to skip, in seconds"
)
@optgroup.option("--clip", type=float, help="Optional minimum power in dB")
@optgroup.option(
    "--window_size",
    type=float,
    default=0.025,
    show_default=True,
    help="Window size in seconds",
)
@optgroup.option(
    "--window_shift",
    type=float,
    default=0.010,
    show_default=True,
    help="Window shift in seconds",
)
@optgroup.option(
    "--mel_bands", type=int, default=240, show_default=True, help="Number of mel bands"
)
@optgroup.option(
    "--pre_emphasis",
    type=float,
    default=0.95,
    show_default=True,
    help="Pre-emphasis applied before processing",
)
@optgroup.option(
    "--channels",
    type=Choice(["left", "right", "mean", "diff"]),
    default="mean",
    show_default=True,
)
@optgroup.option(
    "--fmin", type=float, default=0, show_default=True, help="Min mel frequency"
)
@optgroup.option(
    "--fmax", type=float, default=8000, show_default=True, help="Max mel frequency"
)
def main(
    corpus: str,
    input: Path,
    output: Optional[Path],
    preview: Optional[int],
    length: Optional[float],
    skip: float,
    clip: Optional[float],
    window_size: float,
    window_shift: float,
    mel_bands: int,
    pre_emphasis: float,
    channels: str,
    fmin: float,
    fmax: float,
):
    """Extracts spectrograms from audio files listed in INPUT file and
    creates a netCDF4 dataset holding the data. CORPUS specifies the
    corpus.
    """
    paths = get_audio_paths(input)
    # Preview mode: show a single spectrogram and exit. A preview index of -1
    # (or any negative value) picks a random clip.
    if preview is not None:
        idx = preview if preview > -1 else np.random.default_rng().integers(len(paths))
        spectrogram = calculate_spectrogram(
            paths[idx],
            channels=channels,
            skip=skip,
            length=length,
            window_size=window_size,
            pre_emphasis=pre_emphasis,
            window_shift=window_shift,
            n_mels=mel_bands,
            clip=clip,
            fmin=fmin,
            fmax=fmax,
        )
        plt.figure()
        plt.title(f"Spectrogram for {paths[idx]}.")
        plt.imshow(spectrogram)
        plt.show()
        return
    if not output:
        raise ValueError("Must specify either --preview or --output options.")
    # Extract spectrograms for all clips in parallel across all cores.
    specs = TqdmParallel(
        total=len(paths), desc="Generating spectrograms", n_jobs=-1, verbose=1
    )(
        joblib.delayed(calculate_spectrogram)(
            path,
            channels=channels,
            skip=skip,
            length=length,
            window_size=window_size,
            pre_emphasis=pre_emphasis,
            window_shift=window_shift,
            n_mels=mel_bands,
            clip=clip,
            fmin=fmin,
            fmax=fmax,
        )
        for path in paths
    )
    filenames = [x.stem for x in paths]
    if output is not None:
        # Variable-length clips are stored flattened; `slices` records each
        # clip's frame count so the dataset can be split back per file.
        slices = [len(x) for x in specs]
        specs = np.concatenate(specs)
        feature_names = [f"meldB{i + 1}" for i in range(mel_bands)]
        write_features(
            output,
            corpus=corpus,
            names=filenames,
            slices=slices,
            features=specs,
            feature_names=feature_names,
        )
        print(f"Wrote dataset to {output}")


if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.title",
"numpy.random.default_rng",
"librosa.core.load",
"matplotlib.pyplot.figure",
"librosa.power_to_db",
"numpy.mean",
"ertk.dataset.get_audio_paths",
"librosa.feature.melspectrogram",
"ertk.utils.PathlibPath",
"warnings.simplefilter",
"matplotlib.pyplot.imshow",
"click.c... | [((2110, 2125), 'click.command', 'click.command', ([], {}), '()\n', (2123, 2125), False, 'import click\n'), ((2127, 2161), 'click.argument', 'click.argument', (['"""corpus"""'], {'type': 'str'}), "('corpus', type=str)\n", (2141, 2161), False, 'import click\n'), ((2235, 2294), 'click_option_group.optgroup.group', 'optgroup.group', (['"""Output format"""'], {'cls': 'RequiredAnyOptionGroup'}), "('Output format', cls=RequiredAnyOptionGroup)\n", (2249, 2294), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2296, 2356), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--output"""'], {'type': 'Path', 'help': '"""Write dataset"""'}), "('--output', type=Path, help='Write dataset')\n", (2311, 2356), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2358, 2424), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--preview"""'], {'type': 'int', 'help': '"""Preview spectrogram"""'}), "('--preview', type=int, help='Preview spectrogram')\n", (2373, 2424), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2426, 2463), 'click_option_group.optgroup.group', 'optgroup.group', (['"""Spectrogram options"""'], {}), "('Spectrogram options')\n", (2440, 2463), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2465, 2537), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--length"""'], {'type': 'float', 'help': '"""Optional max clip length"""'}), "('--length', type=float, help='Optional max clip length')\n", (2480, 2537), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2539, 2636), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--skip"""'], {'type': 'float', 'default': '(0)', 'help': '"""Optional amount to skip, in seconds"""'}), "('--skip', type=float, default=0, help=\n 'Optional amount to skip, in seconds')\n", (2554, 2636), False, 'from click_option_group import 
RequiredAnyOptionGroup, optgroup\n'), ((2639, 2713), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--clip"""'], {'type': 'float', 'help': '"""Optional minimum power in dB"""'}), "('--clip', type=float, help='Optional minimum power in dB')\n", (2654, 2713), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2715, 2829), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--window_size"""'], {'type': 'float', 'default': '(0.025)', 'show_default': '(True)', 'help': '"""Window size in seconds"""'}), "('--window_size', type=float, default=0.025, show_default=\n True, help='Window size in seconds')\n", (2730, 2829), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2849, 2964), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--window_shift"""'], {'type': 'float', 'default': '(0.01)', 'show_default': '(True)', 'help': '"""Window shift in seconds"""'}), "('--window_shift', type=float, default=0.01, show_default=\n True, help='Window shift in seconds')\n", (2864, 2964), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((2985, 3089), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--mel_bands"""'], {'type': 'int', 'default': '(240)', 'show_default': '(True)', 'help': '"""Number of mel bands"""'}), "('--mel_bands', type=int, default=240, show_default=True,\n help='Number of mel bands')\n", (3000, 3089), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((3093, 3223), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--pre_emphasis"""'], {'type': 'float', 'default': '(0.95)', 'show_default': '(True)', 'help': '"""Pre-emphasis applied before processing"""'}), "('--pre_emphasis', type=float, default=0.95, show_default=\n True, help='Pre-emphasis applied before processing')\n", (3108, 3223), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((3376, 3474), 
'click_option_group.optgroup.option', 'optgroup.option', (['"""--fmin"""'], {'type': 'float', 'default': '(0)', 'show_default': '(True)', 'help': '"""Min mel frequency"""'}), "('--fmin', type=float, default=0, show_default=True, help=\n 'Min mel frequency')\n", (3391, 3474), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((3477, 3578), 'click_option_group.optgroup.option', 'optgroup.option', (['"""--fmax"""'], {'type': 'float', 'default': '(8000)', 'show_default': '(True)', 'help': '"""Max mel frequency"""'}), "('--fmax', type=float, default=8000, show_default=True, help\n ='Max mel frequency')\n", (3492, 3578), False, 'from click_option_group import RequiredAnyOptionGroup, optgroup\n'), ((717, 791), 'librosa.core.load', 'librosa.core.load', (['path'], {'sr': 'None', 'mono': '(False)', 'offset': 'skip', 'duration': 'length'}), '(path, sr=None, mono=False, offset=skip, duration=length)\n', (734, 791), False, 'import librosa\n'), ((1619, 1663), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (1640, 1663), False, 'import warnings\n'), ((1733, 1885), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['audio'], {'n_mels': 'n_mels', 'sr': 'sr', 'n_fft': 'n_fft', 'hop_length': 'stride_samples', 'win_length': 'window_samples', 'fmin': 'fmin', 'fmax': 'fmax'}), '(audio, n_mels=n_mels, sr=sr, n_fft=n_fft,\n hop_length=stride_samples, win_length=window_samples, fmin=fmin, fmax=fmax)\n', (1763, 1885), False, 'import librosa\n'), ((1957, 2002), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""', 'UserWarning'], {}), "('default', UserWarning)\n", (1978, 2002), False, 'import warnings\n'), ((2024, 2077), 'librosa.power_to_db', 'librosa.power_to_db', (['melspec'], {'ref': 'np.max', 'top_db': 'clip'}), '(melspec, ref=np.max, top_db=clip)\n', (2043, 2077), False, 'import librosa\n'), ((4076, 4098), 'ertk.dataset.get_audio_paths', 'get_audio_paths', 
(['input'], {}), '(input)\n', (4091, 4098), False, 'from ertk.dataset import get_audio_paths, write_features\n'), ((852, 876), 'numpy.expand_dims', 'np.expand_dims', (['audio', '(0)'], {}), '(audio, 0)\n', (866, 876), True, 'import numpy as np\n'), ((1543, 1591), 'librosa.effects.preemphasis', 'librosa.effects.preemphasis', (['audio', 'pre_emphasis'], {}), '(audio, pre_emphasis)\n', (1570, 1591), False, 'import librosa\n'), ((4598, 4610), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4608, 4610), True, 'from matplotlib import pyplot as plt\n'), ((4619, 4662), 'matplotlib.pyplot.title', 'plt.title', (['f"""Spectrogram for {paths[idx]}."""'], {}), "(f'Spectrogram for {paths[idx]}.')\n", (4628, 4662), True, 'from matplotlib import pyplot as plt\n'), ((4671, 4694), 'matplotlib.pyplot.imshow', 'plt.imshow', (['spectrogram'], {}), '(spectrogram)\n', (4681, 4694), True, 'from matplotlib import pyplot as plt\n'), ((4703, 4713), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4711, 4713), True, 'from matplotlib import pyplot as plt\n'), ((5468, 5489), 'numpy.concatenate', 'np.concatenate', (['specs'], {}), '(specs)\n', (5482, 5489), True, 'import numpy as np\n'), ((5566, 5684), 'ertk.dataset.write_features', 'write_features', (['output'], {'corpus': 'corpus', 'names': 'filenames', 'slices': 'slices', 'features': 'specs', 'feature_names': 'feature_names'}), '(output, corpus=corpus, names=filenames, slices=slices,\n features=specs, feature_names=feature_names)\n', (5580, 5684), False, 'from ertk.dataset import get_audio_paths, write_features\n'), ((2192, 2232), 'ertk.utils.PathlibPath', 'PathlibPath', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (2203, 2232), False, 'from ertk.utils import PathlibPath, TqdmParallel\n'), ((3287, 3328), 'click.types.Choice', 'Choice', (["['left', 'right', 'mean', 'diff']"], {}), "(['left', 'right', 'mean', 'diff'])\n", (3293, 3328), False, 'from click.types import Choice\n'), 
((1139, 1165), 'numpy.mean', 'np.mean', (['audio[:2]'], {'axis': '(0)'}), '(audio[:2], axis=0)\n', (1146, 1165), True, 'import numpy as np\n'), ((1693, 1716), 'numpy.log2', 'np.log2', (['window_samples'], {}), '(window_samples)\n', (1700, 1716), True, 'import numpy as np\n'), ((4949, 4986), 'joblib.delayed', 'joblib.delayed', (['calculate_spectrogram'], {}), '(calculate_spectrogram)\n', (4963, 4986), False, 'import joblib\n'), ((4171, 4194), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (4192, 4194), True, 'import numpy as np\n')] |
# Built-in libaries
import argparse
import os
import logging
# External libraries
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pickle
#import rasterio
#import xarray as xr
# Local libraries
from oggm import cfg
from oggm.utils import entity_task
#from oggm.core.gis import rasterio_to_gdir
#from oggm.utils import ncDataset
import pygem.pygem_input as pygem_prms
import pygem.pygem_modelsetup as modelsetup
"""
TO-DO LIST:
- modify class_mbdata to work with shop
"""
# Module logger
log = logging.getLogger(__name__)
# Add the new name "mb_obs" to the list of things that the GlacierDirectory understands
# Register the 'mb_obs' basename once so GlacierDirectory can resolve it.
if 'mb_obs' not in cfg.BASENAMES:
    cfg.BASENAMES['mb_obs'] = ('mb_data.pkl', 'Mass balance observations')
def getparser():
    """
    Build the command-line argument parser for the pre-processing script.

    Command-line arguments
    ----------------------
    hugonnet2020_subset : int
        switch for processing the Hugonnet (2020) data set into an easier
        csv format (default = 0, i.e. no)

    Returns
    -------
    argparse.ArgumentParser
        parser with all pre-processing options registered
    """
    arg_parser = argparse.ArgumentParser(description="select pre-processing options")
    arg_parser.add_argument(
            '-hugonnet2020_subset', action='store', type=int, default=0,
            help='option to process hugonnet2020 data or not (1=yes, 0=no)')
    return arg_parser
@entity_task(log, writes=['mb_obs'])
def mb_df_to_gdir(gdir, mb_dataset='Hugonnet2020'):
    """Select specific mass balance and add observations to the given glacier directory
    (pickled under the 'mb_obs' basename).
    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    mb_dataset : str
        name of the geodetic mass balance dataset; only 'Hugonnet2020'
        is currently supported
    """
    # File location and column names come from the PyGEM parameter file.
    # NOTE(review): for any other mb_dataset value these names are never bound
    # and the assert below raises NameError rather than AssertionError.
    if mb_dataset in ['Hugonnet2020']:
        mbdata_fp = pygem_prms.hugonnet_fp
        mbdata_fn = pygem_prms.hugonnet_fn
        rgiid_cn = pygem_prms.hugonnet_rgi_glacno_cn
        mb_cn = pygem_prms.hugonnet_mb_cn
        mberr_cn = pygem_prms.hugonnet_mb_err_cn
        t1_cn = pygem_prms.hugonnet_time1_cn
        t2_cn = pygem_prms.hugonnet_time2_cn
    assert os.path.exists(mbdata_fp + mbdata_fn), "Error: mb dataset does not exist."
    mb_df = pd.read_csv(mbdata_fp + mbdata_fn)
    mb_df_rgiids = list(mb_df[rgiid_cn])
    # Glaciers absent from the dataset simply get no 'mb_obs' file.
    if gdir.rgi_id in mb_df_rgiids:
        # RGIId index
        rgiid_idx = np.where(gdir.rgi_id == mb_df[rgiid_cn])[0][0]
        # Glacier-wide mass balance
        mb_mwea = mb_df.loc[rgiid_idx, mb_cn]
        mb_mwea_err = mb_df.loc[rgiid_idx, mberr_cn]
        t1_str = mb_df.loc[rgiid_idx, t1_cn]
        t2_str = mb_df.loc[rgiid_idx, t2_cn]
        t1_datetime = pd.to_datetime(t1_str)
        t2_datetime = pd.to_datetime(t2_str)
#        t1_datetime = pd.to_datetime(pd.DataFrame({'year':[t1_str.split('-')[0]],
#                                                   'month':[t1_str.split('-')[1]],
#                                                   'day':[t1_str.split('-')[2]]}))[0]
#        t2_datetime = pd.to_datetime(pd.DataFrame({'year':[t2_str.split('-')[0]],
#                                                   'month':[t2_str.split('-')[1]],
#                                                   'day':[t2_str.split('-')[2]]}))[0]
        # remove one day from t2 datetime for proper indexing (ex. 2001-01-01 want to run through 2000-12-31)
        t2_datetime = t2_datetime - timedelta(days=1)
        # Number of years
        # (the day removed above is added back so nyears spans the full period)
        nyears = (t2_datetime + timedelta(days=1) - t1_datetime).days / 365.25
        # Record data
        mbdata = {'mb_mwea': mb_mwea,
                  'mb_mwea_err': mb_mwea_err,
                  't1_str': t1_str,
                  't2_str': t2_str,
                  't1_datetime': t1_datetime,
                  't2_datetime': t2_datetime,
                  'nyears': nyears}
        # Pickle the record into the glacier directory under 'mb_obs'.
        pkl_fn = gdir.get_filepath('mb_obs')
        with open(pkl_fn, 'wb') as f:
            pickle.dump(mbdata, f)
@entity_task(log, writes=['mb_obs'])
def mb_bins_to_glacierwide(gdir, mb_binned_fp=pygem_prms.mb_binned_fp):
    """Aggregate binned mass balance data to a single glacier-wide value and
    record it in the given glacier directory (pickled under 'mb_obs').

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    mb_binned_fp : str
        root directory with one folder of '<glacier>_mb_bins.csv' per region
    """
    assert os.path.exists(mb_binned_fp), "Error: mb_binned_fp does not exist."
    # Glacier string without the region's leading zero, e.g. '1.00570'
    glac_str_nolead = str(int(gdir.rgi_region)) + '.' + gdir.rgi_id.split('-')[1].split('.')[1]
    binned_csv = mb_binned_fp + gdir.rgi_region + '/' + glac_str_nolead + '_mb_bins.csv'
    # Only glaciers that have a binned mass balance file get an 'mb_obs' entry
    if not os.path.exists(binned_csv):
        return
    target_fn = gdir.get_filepath('mb_obs')
    binned_df = pd.read_csv(binned_csv)
    # Area-weighted mean over the bins with valid observations
    valid_area_km2 = binned_df['z1_bin_area_valid_km2'].sum()
    glacwide_mwea = (binned_df['z1_bin_area_valid_km2'] * binned_df['mb_bin_mean_mwea']).sum() / valid_area_km2
    mbdata = {'mb_mwea': glacwide_mwea,
              'mb_mwea_err': 0.3,
              't1': 2000,
              't2': 2018,
              'area_km2_valid': valid_area_km2}
    with open(target_fn, 'wb') as f:
        pickle.dump(mbdata, f)
#%%
def mb_bins_to_reg_glacierwide(mb_binned_fp=pygem_prms.mb_binned_fp, O1Regions=['01']):
    """Aggregate binned mass balance data to glacier-wide values for entire regions.

    For every region in O1Regions, each '<glacier>_mb_bins.csv' file is reduced
    to an area-weighted glacier-wide mass balance, outliers are flagged per O2
    subregion with a 1.5*IQR filter, and the per-glacier results are exported
    to '<region>_mb_glacwide_all.csv' inside mb_binned_fp.

    Fix: removed the leftover debug lines (marked "# Delete these") that
    overwrote both parameters with hard-coded values (forcing region '19'),
    so the arguments now actually take effect.

    Parameters
    ----------
    mb_binned_fp : str
        root directory containing one subdirectory of binned csv files per region
    O1Regions : list of str
        RGI order-1 region numbers to process (e.g. ['01'])
    """
    # NOTE(review): uncertainty is hard-coded until the mb providers supply it
    print('\n\n SPECIFYING UNCERTAINTY AS 0.3 mwea for model development - needs to be updated from mb providers!\n\n')
    reg_mb_mwea_err = 0.3
    # Observation period (decimal years) of the geodetic mb data by region
    mb_yrfrac_dict = {'01': [2000.419, 2018.419],
                      '02': [2000.128, 2012],
                      '03': [2000.419, 2018.419],
                      '04': [2000.419, 2018.419],
                      '05': [2000.419, 2018.419],
                      '06': [2000.419, 2018.419],
                      '07': [2000.419, 2018.419],
                      '08': [2000.419, 2018.419],
                      '09': [2000.419, 2018.419],
                      '10': [2000.128, 2012],
                      '11': [2000.128, 2013],
                      '12': [2000.128, 2012],
                      'HMA': [2000.419, 2018.419],
                      '16': [2000.128, 2013.128],
                      '17': [2000.128, 2013.128],
                      '18': [2000.128, 2013]}
    for reg in O1Regions:
        reg_fp = mb_binned_fp + reg + '/'
        main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=[reg], rgi_regionsO2='all', rgi_glac_number='all')
        # Binned mass balance files available for this region
        reg_binned_fns = sorted(i for i in os.listdir(reg_fp) if i.endswith('_mb_bins.csv'))
        print('Region ' + reg + ' has binned data for ' + str(len(reg_binned_fns)) + ' glaciers.')
        # Output table: one row per RGI glacier, nan where no binned data exists
        reg_mb_df_cns = ['RGIId', 'O1Region', 'O2Region', 'area_km2', 'mb_mwea', 'mb_mwea_err', 't1', 't2', 'perc_valid']
        reg_mb_df = pd.DataFrame(np.zeros((main_glac_rgi.shape[0], len(reg_mb_df_cns))), columns=reg_mb_df_cns)
        reg_mb_df.loc[:,:] = np.nan
        reg_mb_df.loc[:, 'RGIId'] = main_glac_rgi['RGIId']
        reg_mb_df.loc[:, 'O1Region'] = main_glac_rgi['O1Region']
        reg_mb_df.loc[:, 'O2Region'] = main_glac_rgi['O2Region']
        reg_mb_df.loc[:, 'area_km2'] = main_glac_rgi['Area']
        # Observation period is identical for every glacier in the region
        t1 = mb_yrfrac_dict[reg][0]
        t2 = mb_yrfrac_dict[reg][1]
        # Process binned files
        for nfn, reg_binned_fn in enumerate(reg_binned_fns):
            if nfn%500 == 0:
                print('  ', nfn, reg_binned_fn)
            mb_binned_df = pd.read_csv(reg_fp + reg_binned_fn)
            glac_str = reg_binned_fn.split('_')[0]
            glac_rgiid = 'RGI60-' + glac_str.split('.')[0].zfill(2) + '.' + glac_str.split('.')[1]
            rgi_idx = np.where(main_glac_rgi['RGIId'] == glac_rgiid)[0][0]
            # Area-weighted glacier-wide mass balance over the valid bins
            area_km2_valid = mb_binned_df['z1_bin_area_valid_km2'].sum()
            mb_mwea = (mb_binned_df['z1_bin_area_valid_km2'] * mb_binned_df['mb_bin_mean_mwea']).sum() / area_km2_valid
            perc_valid = area_km2_valid / reg_mb_df.loc[rgi_idx,'area_km2'] * 100
            reg_mb_df.loc[rgi_idx,'mb_mwea'] = mb_mwea
            reg_mb_df.loc[rgi_idx,'mb_mwea_err'] = reg_mb_mwea_err
            reg_mb_df.loc[rgi_idx,'t1'] = t1
            reg_mb_df.loc[rgi_idx,'t2'] = t2
            reg_mb_df.loc[rgi_idx,'perc_valid'] = perc_valid
        # Quality control: flag outliers per O2 subregion with a 1.5*IQR filter
        O2Regions = list(set(list(main_glac_rgi['O2Region'].values)))
        O2Regions_mb_mwea_dict = {}
        rgiid_outliers = []
        for O2Region in O2Regions:
            reg_mb_df_subset = reg_mb_df[reg_mb_df['O2Region'] == O2Region]
            reg_mb_df_subset = reg_mb_df_subset.dropna(subset=['mb_mwea'])
            # Use 1.5*IQR to remove outliers
            reg_mb_mwea_25 = np.percentile(reg_mb_df_subset['mb_mwea'], 25)
            reg_mb_mwea_50 = np.percentile(reg_mb_df_subset['mb_mwea'], 50)
            reg_mb_mwea_75 = np.percentile(reg_mb_df_subset['mb_mwea'], 75)
            reg_mb_mwea_iqr = reg_mb_mwea_75 - reg_mb_mwea_25
            print(np.round(reg_mb_mwea_25,2), np.round(reg_mb_mwea_50,2), np.round(reg_mb_mwea_75,2),
                  np.round(reg_mb_mwea_iqr,2))
            reg_mb_mwea_bndlow = reg_mb_mwea_25 - 1.5 * reg_mb_mwea_iqr
            reg_mb_mwea_bndhigh = reg_mb_mwea_75 + 1.5 * reg_mb_mwea_iqr
            # Record RGIIds that are outliers
            rgiid_outliers.extend(reg_mb_df_subset[(reg_mb_df_subset['mb_mwea'] < reg_mb_mwea_bndlow) |
                                                   (reg_mb_df_subset['mb_mwea'] > reg_mb_mwea_bndhigh)]['RGIId'].values)
            # Select non-outliers and record the subregion's mean mb
            reg_mb_df_subset_qc = reg_mb_df_subset[(reg_mb_df_subset['mb_mwea'] >= reg_mb_mwea_bndlow) &
                                                   (reg_mb_df_subset['mb_mwea'] <= reg_mb_mwea_bndhigh)]
            O2Regions_mb_mwea_dict[O2Region] = reg_mb_df_subset_qc['mb_mwea'].mean()
        print('CREATE DICTIONARY FOR RGIIDs with nan values or those that are outliers')
        # Export the regional glacier-wide results
        reg_mb_fn = reg + '_mb_glacwide_all.csv'
        reg_mb_df.to_csv(mb_binned_fp + reg_mb_fn, index=False)
        print('TO-DO LIST:')
        print('  - quality control based on 3-sigma filter like Shean')
        print('  - extrapolate for missing or outlier glaciers by region')
#%%
# Script entry point: optionally converts the raw Hugonnet (2020) per-glacier
# table into the simpler 20-yr csv used by mb_df_to_gdir.
if __name__ == '__main__':
    parser = getparser()
    args = parser.parse_args()
    if args.hugonnet2020_subset == 1:
        mbdata_fullfn = pygem_prms.hugonnet_fp + 'df_pergla_global_10yr_20yr.csv'
        mb_df = pd.read_csv(mbdata_fullfn)
        # Pre-process Hugonnet2020 data to easier format of data we want
        # (keep only the rows covering the full 2000-2020 period)
        df_20yr = mb_df[mb_df['period'] == '2000-01-01_2020-01-01'].copy()
        df_20yr['t1'] = np.nan
        df_20yr['t2'] = np.nan
        # Split 'YYYY-MM-DD_YYYY-MM-DD' into separate start/end date columns
        df_20yr['t1'] = [x.split('_')[0] for x in df_20yr['period'].values]
        df_20yr['t2'] = [x.split('_')[1] for x in df_20yr['period'].values]
        # Export results
        df_20yr_fn = 'df_pergla_global_20yr.csv'
df_20yr.to_csv(pygem_prms.hugonnet_fp + df_20yr_fn, index=False) | [
"pickle.dump",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.exists",
"pygem.pygem_modelsetup.selectglaciersrgitable",
"numpy.percentile",
"numpy.where",
"pandas.to_datetime",
"datetime.timedelta",
"oggm.utils.entity_task",
"numpy.round",
"os.listdir",
"logging.getLogger"
] | [((535, 562), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (552, 562), False, 'import logging\n'), ((1309, 1344), 'oggm.utils.entity_task', 'entity_task', (['log'], {'writes': "['mb_obs']"}), "(log, writes=['mb_obs'])\n", (1320, 1344), False, 'from oggm.utils import entity_task\n'), ((3837, 3872), 'oggm.utils.entity_task', 'entity_task', (['log'], {'writes': "['mb_obs']"}), "(log, writes=['mb_obs'])\n", (3848, 3872), False, 'from oggm.utils import entity_task\n'), ((1033, 1101), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""select pre-processing options"""'}), "(description='select pre-processing options')\n", (1056, 1101), False, 'import argparse\n'), ((1980, 2017), 'os.path.exists', 'os.path.exists', (['(mbdata_fp + mbdata_fn)'], {}), '(mbdata_fp + mbdata_fn)\n', (1994, 2017), False, 'import os\n'), ((2072, 2106), 'pandas.read_csv', 'pd.read_csv', (['(mbdata_fp + mbdata_fn)'], {}), '(mbdata_fp + mbdata_fn)\n', (2083, 2106), True, 'import pandas as pd\n'), ((4189, 4217), 'os.path.exists', 'os.path.exists', (['mb_binned_fp'], {}), '(mb_binned_fp)\n', (4203, 4217), False, 'import os\n'), ((4434, 4525), 'os.path.exists', 'os.path.exists', (["(mb_binned_fp + gdir.rgi_region + '/' + glac_str_nolead + '_mb_bins.csv')"], {}), "(mb_binned_fp + gdir.rgi_region + '/' + glac_str_nolead +\n '_mb_bins.csv')\n", (4448, 4525), False, 'import os\n'), ((2550, 2572), 'pandas.to_datetime', 'pd.to_datetime', (['t1_str'], {}), '(t1_str)\n', (2564, 2572), True, 'import pandas as pd\n'), ((2595, 2617), 'pandas.to_datetime', 'pd.to_datetime', (['t2_str'], {}), '(t2_str)\n', (2609, 2617), True, 'import pandas as pd\n'), ((4815, 4840), 'pandas.read_csv', 'pd.read_csv', (['mb_binned_fn'], {}), '(mb_binned_fn)\n', (4826, 4840), True, 'import pandas as pd\n'), ((6620, 6722), 'pygem.pygem_modelsetup.selectglaciersrgitable', 'modelsetup.selectglaciersrgitable', ([], {'rgi_regionsO1': '[reg]', 'rgi_regionsO2': '"""all"""', 
'rgi_glac_number': '"""all"""'}), "(rgi_regionsO1=[reg], rgi_regionsO2='all',\n rgi_glac_number='all')\n", (6653, 6722), True, 'import pygem.pygem_modelsetup as modelsetup\n'), ((6773, 6791), 'os.listdir', 'os.listdir', (['reg_fp'], {}), '(reg_fp)\n', (6783, 6791), False, 'import os\n'), ((11305, 11331), 'pandas.read_csv', 'pd.read_csv', (['mbdata_fullfn'], {}), '(mbdata_fullfn)\n', (11316, 11331), True, 'import pandas as pd\n'), ((3277, 3294), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3286, 3294), False, 'from datetime import datetime, timedelta\n'), ((3811, 3833), 'pickle.dump', 'pickle.dump', (['mbdata', 'f'], {}), '(mbdata, f)\n', (3822, 3833), False, 'import pickle\n'), ((5364, 5386), 'pickle.dump', 'pickle.dump', (['mbdata', 'f'], {}), '(mbdata, f)\n', (5375, 5386), False, 'import pickle\n'), ((7782, 7817), 'pandas.read_csv', 'pd.read_csv', (['(reg_fp + reg_binned_fn)'], {}), '(reg_fp + reg_binned_fn)\n', (7793, 7817), True, 'import pandas as pd\n'), ((9180, 9226), 'numpy.percentile', 'np.percentile', (["reg_mb_df_subset['mb_mwea']", '(25)'], {}), "(reg_mb_df_subset['mb_mwea'], 25)\n", (9193, 9226), True, 'import numpy as np\n'), ((9256, 9302), 'numpy.percentile', 'np.percentile', (["reg_mb_df_subset['mb_mwea']", '(50)'], {}), "(reg_mb_df_subset['mb_mwea'], 50)\n", (9269, 9302), True, 'import numpy as np\n'), ((9332, 9378), 'numpy.percentile', 'np.percentile', (["reg_mb_df_subset['mb_mwea']", '(75)'], {}), "(reg_mb_df_subset['mb_mwea'], 75)\n", (9345, 9378), True, 'import numpy as np\n'), ((2227, 2267), 'numpy.where', 'np.where', (['(gdir.rgi_id == mb_df[rgiid_cn])'], {}), '(gdir.rgi_id == mb_df[rgiid_cn])\n', (2235, 2267), True, 'import numpy as np\n'), ((9472, 9499), 'numpy.round', 'np.round', (['reg_mb_mwea_25', '(2)'], {}), '(reg_mb_mwea_25, 2)\n', (9480, 9499), True, 'import numpy as np\n'), ((9500, 9527), 'numpy.round', 'np.round', (['reg_mb_mwea_50', '(2)'], {}), '(reg_mb_mwea_50, 2)\n', (9508, 9527), True, 'import numpy 
as np\n'), ((9528, 9555), 'numpy.round', 'np.round', (['reg_mb_mwea_75', '(2)'], {}), '(reg_mb_mwea_75, 2)\n', (9536, 9555), True, 'import numpy as np\n'), ((9575, 9603), 'numpy.round', 'np.round', (['reg_mb_mwea_iqr', '(2)'], {}), '(reg_mb_mwea_iqr, 2)\n', (9583, 9603), True, 'import numpy as np\n'), ((7990, 8036), 'numpy.where', 'np.where', (["(main_glac_rgi['RGIId'] == glac_rgiid)"], {}), "(main_glac_rgi['RGIId'] == glac_rgiid)\n", (7998, 8036), True, 'import numpy as np\n'), ((3353, 3370), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3362, 3370), False, 'from datetime import datetime, timedelta\n')] |
import unittest
import numpy as np
from fastestimator.op.numpyop.univariate import Reshape
from fastestimator.test.unittest_util import is_equal
class TestReshape(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.single_input = [np.array([1, 2, 3, 4])]
cls.single_output = [np.array([[1, 2], [3, 4]])]
cls.multi_input = [np.array([2, 2]), np.array([1, 2])]
cls.multi_output = [np.array([[2, 2]]), np.array([[1, 2]])]
def test_single_input(self):
op = Reshape(inputs='x', outputs='x', shape=(2, 2))
data = op.forward(data=self.single_input, state={})
self.assertTrue(is_equal(data, self.single_output))
def test_multi_input(self):
op = Reshape(inputs='x', outputs='x', shape=(1, 2))
data = op.forward(data=self.multi_input, state={})
self.assertTrue(is_equal(data, self.multi_output))
| [
"fastestimator.test.unittest_util.is_equal",
"fastestimator.op.numpyop.univariate.Reshape",
"numpy.array"
] | [((516, 562), 'fastestimator.op.numpyop.univariate.Reshape', 'Reshape', ([], {'inputs': '"""x"""', 'outputs': '"""x"""', 'shape': '(2, 2)'}), "(inputs='x', outputs='x', shape=(2, 2))\n", (523, 562), False, 'from fastestimator.op.numpyop.univariate import Reshape\n'), ((729, 775), 'fastestimator.op.numpyop.univariate.Reshape', 'Reshape', ([], {'inputs': '"""x"""', 'outputs': '"""x"""', 'shape': '(1, 2)'}), "(inputs='x', outputs='x', shape=(1, 2))\n", (736, 775), False, 'from fastestimator.op.numpyop.univariate import Reshape\n'), ((257, 279), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (265, 279), True, 'import numpy as np\n'), ((310, 336), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (318, 336), True, 'import numpy as np\n'), ((365, 381), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (373, 381), True, 'import numpy as np\n'), ((383, 399), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (391, 399), True, 'import numpy as np\n'), ((429, 447), 'numpy.array', 'np.array', (['[[2, 2]]'], {}), '([[2, 2]])\n', (437, 447), True, 'import numpy as np\n'), ((449, 467), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (457, 467), True, 'import numpy as np\n'), ((647, 681), 'fastestimator.test.unittest_util.is_equal', 'is_equal', (['data', 'self.single_output'], {}), '(data, self.single_output)\n', (655, 681), False, 'from fastestimator.test.unittest_util import is_equal\n'), ((859, 892), 'fastestimator.test.unittest_util.is_equal', 'is_equal', (['data', 'self.multi_output'], {}), '(data, self.multi_output)\n', (867, 892), False, 'from fastestimator.test.unittest_util import is_equal\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common; # DNNC path setup
import deepC.dnnc as dc
import numpy as np
import unittest, random, math
def temp_softsign(x):
  """Reference softsign activation: x / (1 + |x|), element-wise."""
  denominator = 1 + np.abs(x)
  return x / denominator
def temp_erf(x):
  """Reference element-wise erf, returned as float32 (to match dc.erf)."""
  elementwise_erf = np.vectorize(math.erf)
  return elementwise_erf(x).astype(np.float32)
class nnScalarOperatorsTest(unittest.TestCase):
  """Compare deepC (dc) scalar math operators against NumPy/math references.

  Each test evaluates a dc operator and the matching NumPy (or local helper)
  reference on the same scalar inputs and asserts agreement to within
  rtol=1e-3 / atol=1e-3.
  """
  def setUp(self):
    # Fresh random scalar operands for every test; all are >= 10, keeping
    # them inside the domain of acosh, log, etc.
    self.random_number1 = random.randrange(20, 50, 3)
    self.random_number2 = random.randrange(200, 500, 1)
    self.random_number3 = random.randrange(10, 500, 2)
    # self.np_a = np.array(self.random_number1).astype(np.float32)
    # self.np_b = np.array(self.random_number2).astype(np.float32)
    # self.dc_a = dc.array([self.random_number1])
    # self.dc_b = dc.array([self.random_number2])
    # Plain Python scalars are used for both the NumPy and dc sides.
    self.np_a = self.random_number1
    self.np_b = self.random_number2
    self.dc_a = self.random_number1
    self.dc_b = self.random_number2
  def test_nnScalar_asin (self):
    # asin is only defined on [-1, 1], so fixed boundary values are used.
    np.testing.assert_allclose(np.arcsin(1), dc.asin(1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arcsin(0), dc.asin(0), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arcsin(-1), dc.asin(-1), rtol=1e-3, atol=1e-3)
  def test_nnScalar_acos (self):
    # acos is only defined on [-1, 1], so fixed boundary values are used.
    np.testing.assert_allclose(np.arccos(1), dc.acos(1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arccos(0), dc.acos(0), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arccos(-1), dc.acos(-1), rtol=1e-3, atol=1e-3)
  def test_nnScalar_atan (self):
    np.testing.assert_allclose(np.arctan(self.random_number1), dc.atan(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arctan(self.random_number2), dc.atan(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arctan(self.random_number3), dc.atan(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_asinh (self):
    np.testing.assert_allclose(np.arcsinh(self.random_number1), dc.asinh(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arcsinh(self.random_number2), dc.asinh(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arcsinh(self.random_number3), dc.asinh(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_acosh (self):
    np.testing.assert_allclose(np.arccosh(self.random_number1), dc.acosh(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arccosh(self.random_number2), dc.acosh(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.arccosh(self.random_number3), dc.acosh(self.random_number3), rtol=1e-3, atol=1e-3)
  # atanh is disabled: the random inputs are outside atanh's (-1, 1) domain.
  # def test_nnScalar_atanh (self):
  # np.testing.assert_allclose(np.arctanh(self.random_number1), dc.atanh(self.random_number1), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.arctanh(self.random_number2), dc.atanh(self.random_number2), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.arctanh(self.random_number3), dc.atanh(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_sin (self):
    np.testing.assert_allclose(np.sin(self.random_number1), dc.sin(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.sin(self.random_number2), dc.sin(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.sin(self.random_number3), dc.sin(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_cos (self):
    np.testing.assert_allclose(np.cos(self.random_number1), dc.cos(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.cos(self.random_number2), dc.cos(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.cos(self.random_number3), dc.cos(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_tan (self):
    np.testing.assert_allclose(np.tan(self.random_number1), dc.tan(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.tan(self.random_number2), dc.tan(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.tan(self.random_number3), dc.tan(self.random_number3), rtol=1e-3, atol=1e-3)
  # def test_nnScalar_sinh (self):
  # np.testing.assert_allclose(np.sinh(self.random_number1), dc.sinh(self.random_number1), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.sinh(self.random_number2), dc.sinh(self.random_number2), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.sinh(self.random_number3), dc.sinh(self.random_number3), rtol=1e-3, atol=1e-3)
  # def test_nnScalar_cosh (self):
  # np.testing.assert_allclose(np.cosh(self.random_number1), dc.cosh(self.random_number1), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.cosh(self.random_number2), dc.cosh(self.random_number2), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.cosh(self.random_number3), dc.cosh(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_tanh (self):
    np.testing.assert_allclose(np.tanh(self.random_number1), dc.tanh(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.tanh(self.random_number2), dc.tanh(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.tanh(self.random_number3), dc.tanh(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_erf (self):
    # Reference comes from the module-level temp_erf helper (math.erf).
    np.testing.assert_allclose(temp_erf(self.random_number1), dc.erf(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(temp_erf(self.random_number2), dc.erf(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(temp_erf(self.random_number3), dc.erf(self.random_number3), rtol=1e-3, atol=1e-3)
  # def test_nnScalar_exp (self):
  # np.testing.assert_allclose(np.exp(self.random_number1), dc.exp(self.random_number1), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.exp(self.random_number2), dc.exp(self.random_number2), rtol=1e-3, atol=1e-3)
  # np.testing.assert_allclose(np.exp(self.random_number3), dc.exp(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_log (self):
    np.testing.assert_allclose(np.log(self.random_number1), dc.log(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.log(self.random_number2), dc.log(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.log(self.random_number3), dc.log(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_logical_not (self):
    np.testing.assert_allclose(np.logical_not(self.random_number1), dc.logical_not(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.logical_not(self.random_number2), dc.logical_not(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.logical_not(self.random_number3), dc.logical_not(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_sign (self):
    np.testing.assert_allclose(np.sign(self.random_number1), dc.sign(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.sign(self.random_number2), dc.sign(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(np.sign(self.random_number3), dc.sign(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_softsign (self):
    # Reference comes from the module-level temp_softsign helper.
    np.testing.assert_allclose(temp_softsign(self.random_number1), dc.softsign(self.random_number1), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(temp_softsign(self.random_number2), dc.softsign(self.random_number2), rtol=1e-3, atol=1e-3)
    np.testing.assert_allclose(temp_softsign(self.random_number3), dc.softsign(self.random_number3), rtol=1e-3, atol=1e-3)
  def test_nnScalar_max (self):
    npr = np.maximum(self.np_a, self.np_b)
    dcr = dc.max([self.dc_a,self.dc_b])
    np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)
  def test_nnScalar_min (self):
    npr = np.minimum(self.np_a, self.np_b)
    dcr = dc.min([self.dc_a,self.dc_b])
    np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)
  def tearDown(self):
    # NOTE(review): unittest ignores tearDown's return value.
    return "test finished"
# Run the scalar-operator test suite when executed as a script.
if __name__ == '__main__':
  unittest.main()
| [
"deepC.dnnc.sign",
"numpy.maximum",
"numpy.abs",
"numpy.arccosh",
"numpy.sin",
"deepC.dnnc.erf",
"deepC.dnnc.cos",
"unittest.main",
"deepC.dnnc.tan",
"numpy.logical_not",
"deepC.dnnc.asinh",
"numpy.arcsin",
"deepC.dnnc.log",
"numpy.tan",
"numpy.arcsinh",
"numpy.arccos",
"deepC.dnnc.l... | [((9260, 9275), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9273, 9275), False, 'import unittest, random, math\n'), ((1283, 1310), 'random.randrange', 'random.randrange', (['(20)', '(50)', '(3)'], {}), '(20, 50, 3)\n', (1299, 1310), False, 'import unittest, random, math\n'), ((1341, 1370), 'random.randrange', 'random.randrange', (['(200)', '(500)', '(1)'], {}), '(200, 500, 1)\n', (1357, 1370), False, 'import unittest, random, math\n'), ((1401, 1429), 'random.randrange', 'random.randrange', (['(10)', '(500)', '(2)'], {}), '(10, 500, 2)\n', (1417, 1429), False, 'import unittest, random, math\n'), ((8779, 8811), 'numpy.maximum', 'np.maximum', (['self.np_a', 'self.np_b'], {}), '(self.np_a, self.np_b)\n', (8789, 8811), True, 'import numpy as np\n'), ((8826, 8856), 'deepC.dnnc.max', 'dc.max', (['[self.dc_a, self.dc_b]'], {}), '([self.dc_a, self.dc_b])\n', (8832, 8856), True, 'import deepC.dnnc as dc\n'), ((9000, 9032), 'numpy.minimum', 'np.minimum', (['self.np_a', 'self.np_b'], {}), '(self.np_a, self.np_b)\n', (9010, 9032), True, 'import numpy as np\n'), ((9047, 9077), 'deepC.dnnc.min', 'dc.min', (['[self.dc_a, self.dc_b]'], {}), '([self.dc_a, self.dc_b])\n', (9053, 9077), True, 'import deepC.dnnc as dc\n'), ((1086, 1095), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1092, 1095), True, 'import numpy as np\n'), ((1911, 1923), 'numpy.arcsin', 'np.arcsin', (['(1)'], {}), '(1)\n', (1920, 1923), True, 'import numpy as np\n'), ((1925, 1935), 'deepC.dnnc.asin', 'dc.asin', (['(1)'], {}), '(1)\n', (1932, 1935), True, 'import deepC.dnnc as dc\n'), ((1994, 2006), 'numpy.arcsin', 'np.arcsin', (['(0)'], {}), '(0)\n', (2003, 2006), True, 'import numpy as np\n'), ((2008, 2018), 'deepC.dnnc.asin', 'dc.asin', (['(0)'], {}), '(0)\n', (2015, 2018), True, 'import deepC.dnnc as dc\n'), ((2077, 2090), 'numpy.arcsin', 'np.arcsin', (['(-1)'], {}), '(-1)\n', (2086, 2090), True, 'import numpy as np\n'), ((2092, 2103), 'deepC.dnnc.asin', 'dc.asin', (['(-1)'], {}), 
'(-1)\n', (2099, 2103), True, 'import deepC.dnnc as dc\n'), ((2198, 2210), 'numpy.arccos', 'np.arccos', (['(1)'], {}), '(1)\n', (2207, 2210), True, 'import numpy as np\n'), ((2212, 2222), 'deepC.dnnc.acos', 'dc.acos', (['(1)'], {}), '(1)\n', (2219, 2222), True, 'import deepC.dnnc as dc\n'), ((2281, 2293), 'numpy.arccos', 'np.arccos', (['(0)'], {}), '(0)\n', (2290, 2293), True, 'import numpy as np\n'), ((2295, 2305), 'deepC.dnnc.acos', 'dc.acos', (['(0)'], {}), '(0)\n', (2302, 2305), True, 'import deepC.dnnc as dc\n'), ((2364, 2377), 'numpy.arccos', 'np.arccos', (['(-1)'], {}), '(-1)\n', (2373, 2377), True, 'import numpy as np\n'), ((2379, 2390), 'deepC.dnnc.acos', 'dc.acos', (['(-1)'], {}), '(-1)\n', (2386, 2390), True, 'import deepC.dnnc as dc\n'), ((2485, 2515), 'numpy.arctan', 'np.arctan', (['self.random_number1'], {}), '(self.random_number1)\n', (2494, 2515), True, 'import numpy as np\n'), ((2517, 2545), 'deepC.dnnc.atan', 'dc.atan', (['self.random_number1'], {}), '(self.random_number1)\n', (2524, 2545), True, 'import deepC.dnnc as dc\n'), ((2604, 2634), 'numpy.arctan', 'np.arctan', (['self.random_number2'], {}), '(self.random_number2)\n', (2613, 2634), True, 'import numpy as np\n'), ((2636, 2664), 'deepC.dnnc.atan', 'dc.atan', (['self.random_number2'], {}), '(self.random_number2)\n', (2643, 2664), True, 'import deepC.dnnc as dc\n'), ((2723, 2753), 'numpy.arctan', 'np.arctan', (['self.random_number3'], {}), '(self.random_number3)\n', (2732, 2753), True, 'import numpy as np\n'), ((2755, 2783), 'deepC.dnnc.atan', 'dc.atan', (['self.random_number3'], {}), '(self.random_number3)\n', (2762, 2783), True, 'import deepC.dnnc as dc\n'), ((2879, 2910), 'numpy.arcsinh', 'np.arcsinh', (['self.random_number1'], {}), '(self.random_number1)\n', (2889, 2910), True, 'import numpy as np\n'), ((2912, 2941), 'deepC.dnnc.asinh', 'dc.asinh', (['self.random_number1'], {}), '(self.random_number1)\n', (2920, 2941), True, 'import deepC.dnnc as dc\n'), ((3000, 3031), 'numpy.arcsinh', 
'np.arcsinh', (['self.random_number2'], {}), '(self.random_number2)\n', (3010, 3031), True, 'import numpy as np\n'), ((3033, 3062), 'deepC.dnnc.asinh', 'dc.asinh', (['self.random_number2'], {}), '(self.random_number2)\n', (3041, 3062), True, 'import deepC.dnnc as dc\n'), ((3121, 3152), 'numpy.arcsinh', 'np.arcsinh', (['self.random_number3'], {}), '(self.random_number3)\n', (3131, 3152), True, 'import numpy as np\n'), ((3154, 3183), 'deepC.dnnc.asinh', 'dc.asinh', (['self.random_number3'], {}), '(self.random_number3)\n', (3162, 3183), True, 'import deepC.dnnc as dc\n'), ((3279, 3310), 'numpy.arccosh', 'np.arccosh', (['self.random_number1'], {}), '(self.random_number1)\n', (3289, 3310), True, 'import numpy as np\n'), ((3312, 3341), 'deepC.dnnc.acosh', 'dc.acosh', (['self.random_number1'], {}), '(self.random_number1)\n', (3320, 3341), True, 'import deepC.dnnc as dc\n'), ((3400, 3431), 'numpy.arccosh', 'np.arccosh', (['self.random_number2'], {}), '(self.random_number2)\n', (3410, 3431), True, 'import numpy as np\n'), ((3433, 3462), 'deepC.dnnc.acosh', 'dc.acosh', (['self.random_number2'], {}), '(self.random_number2)\n', (3441, 3462), True, 'import deepC.dnnc as dc\n'), ((3521, 3552), 'numpy.arccosh', 'np.arccosh', (['self.random_number3'], {}), '(self.random_number3)\n', (3531, 3552), True, 'import numpy as np\n'), ((3554, 3583), 'deepC.dnnc.acosh', 'dc.acosh', (['self.random_number3'], {}), '(self.random_number3)\n', (3562, 3583), True, 'import deepC.dnnc as dc\n'), ((4085, 4112), 'numpy.sin', 'np.sin', (['self.random_number1'], {}), '(self.random_number1)\n', (4091, 4112), True, 'import numpy as np\n'), ((4114, 4141), 'deepC.dnnc.sin', 'dc.sin', (['self.random_number1'], {}), '(self.random_number1)\n', (4120, 4141), True, 'import deepC.dnnc as dc\n'), ((4200, 4227), 'numpy.sin', 'np.sin', (['self.random_number2'], {}), '(self.random_number2)\n', (4206, 4227), True, 'import numpy as np\n'), ((4229, 4256), 'deepC.dnnc.sin', 'dc.sin', (['self.random_number2'], {}), 
'(self.random_number2)\n', (4235, 4256), True, 'import deepC.dnnc as dc\n'), ((4315, 4342), 'numpy.sin', 'np.sin', (['self.random_number3'], {}), '(self.random_number3)\n', (4321, 4342), True, 'import numpy as np\n'), ((4344, 4371), 'deepC.dnnc.sin', 'dc.sin', (['self.random_number3'], {}), '(self.random_number3)\n', (4350, 4371), True, 'import deepC.dnnc as dc\n'), ((4465, 4492), 'numpy.cos', 'np.cos', (['self.random_number1'], {}), '(self.random_number1)\n', (4471, 4492), True, 'import numpy as np\n'), ((4494, 4521), 'deepC.dnnc.cos', 'dc.cos', (['self.random_number1'], {}), '(self.random_number1)\n', (4500, 4521), True, 'import deepC.dnnc as dc\n'), ((4580, 4607), 'numpy.cos', 'np.cos', (['self.random_number2'], {}), '(self.random_number2)\n', (4586, 4607), True, 'import numpy as np\n'), ((4609, 4636), 'deepC.dnnc.cos', 'dc.cos', (['self.random_number2'], {}), '(self.random_number2)\n', (4615, 4636), True, 'import deepC.dnnc as dc\n'), ((4695, 4722), 'numpy.cos', 'np.cos', (['self.random_number3'], {}), '(self.random_number3)\n', (4701, 4722), True, 'import numpy as np\n'), ((4724, 4751), 'deepC.dnnc.cos', 'dc.cos', (['self.random_number3'], {}), '(self.random_number3)\n', (4730, 4751), True, 'import deepC.dnnc as dc\n'), ((4845, 4872), 'numpy.tan', 'np.tan', (['self.random_number1'], {}), '(self.random_number1)\n', (4851, 4872), True, 'import numpy as np\n'), ((4874, 4901), 'deepC.dnnc.tan', 'dc.tan', (['self.random_number1'], {}), '(self.random_number1)\n', (4880, 4901), True, 'import deepC.dnnc as dc\n'), ((4960, 4987), 'numpy.tan', 'np.tan', (['self.random_number2'], {}), '(self.random_number2)\n', (4966, 4987), True, 'import numpy as np\n'), ((4989, 5016), 'deepC.dnnc.tan', 'dc.tan', (['self.random_number2'], {}), '(self.random_number2)\n', (4995, 5016), True, 'import deepC.dnnc as dc\n'), ((5075, 5102), 'numpy.tan', 'np.tan', (['self.random_number3'], {}), '(self.random_number3)\n', (5081, 5102), True, 'import numpy as np\n'), ((5104, 5131), 
'deepC.dnnc.tan', 'dc.tan', (['self.random_number3'], {}), '(self.random_number3)\n', (5110, 5131), True, 'import deepC.dnnc as dc\n'), ((6016, 6044), 'numpy.tanh', 'np.tanh', (['self.random_number1'], {}), '(self.random_number1)\n', (6023, 6044), True, 'import numpy as np\n'), ((6046, 6074), 'deepC.dnnc.tanh', 'dc.tanh', (['self.random_number1'], {}), '(self.random_number1)\n', (6053, 6074), True, 'import deepC.dnnc as dc\n'), ((6133, 6161), 'numpy.tanh', 'np.tanh', (['self.random_number2'], {}), '(self.random_number2)\n', (6140, 6161), True, 'import numpy as np\n'), ((6163, 6191), 'deepC.dnnc.tanh', 'dc.tanh', (['self.random_number2'], {}), '(self.random_number2)\n', (6170, 6191), True, 'import deepC.dnnc as dc\n'), ((6250, 6278), 'numpy.tanh', 'np.tanh', (['self.random_number3'], {}), '(self.random_number3)\n', (6257, 6278), True, 'import numpy as np\n'), ((6280, 6308), 'deepC.dnnc.tanh', 'dc.tanh', (['self.random_number3'], {}), '(self.random_number3)\n', (6287, 6308), True, 'import deepC.dnnc as dc\n'), ((6433, 6460), 'deepC.dnnc.erf', 'dc.erf', (['self.random_number1'], {}), '(self.random_number1)\n', (6439, 6460), True, 'import deepC.dnnc as dc\n'), ((6550, 6577), 'deepC.dnnc.erf', 'dc.erf', (['self.random_number2'], {}), '(self.random_number2)\n', (6556, 6577), True, 'import deepC.dnnc as dc\n'), ((6667, 6694), 'deepC.dnnc.erf', 'dc.erf', (['self.random_number3'], {}), '(self.random_number3)\n', (6673, 6694), True, 'import deepC.dnnc as dc\n'), ((7176, 7203), 'numpy.log', 'np.log', (['self.random_number1'], {}), '(self.random_number1)\n', (7182, 7203), True, 'import numpy as np\n'), ((7205, 7232), 'deepC.dnnc.log', 'dc.log', (['self.random_number1'], {}), '(self.random_number1)\n', (7211, 7232), True, 'import deepC.dnnc as dc\n'), ((7291, 7318), 'numpy.log', 'np.log', (['self.random_number2'], {}), '(self.random_number2)\n', (7297, 7318), True, 'import numpy as np\n'), ((7320, 7347), 'deepC.dnnc.log', 'dc.log', (['self.random_number2'], {}), 
'(self.random_number2)\n', (7326, 7347), True, 'import deepC.dnnc as dc\n'), ((7406, 7433), 'numpy.log', 'np.log', (['self.random_number3'], {}), '(self.random_number3)\n', (7412, 7433), True, 'import numpy as np\n'), ((7435, 7462), 'deepC.dnnc.log', 'dc.log', (['self.random_number3'], {}), '(self.random_number3)\n', (7441, 7462), True, 'import deepC.dnnc as dc\n'), ((7564, 7599), 'numpy.logical_not', 'np.logical_not', (['self.random_number1'], {}), '(self.random_number1)\n', (7578, 7599), True, 'import numpy as np\n'), ((7601, 7636), 'deepC.dnnc.logical_not', 'dc.logical_not', (['self.random_number1'], {}), '(self.random_number1)\n', (7615, 7636), True, 'import deepC.dnnc as dc\n'), ((7695, 7730), 'numpy.logical_not', 'np.logical_not', (['self.random_number2'], {}), '(self.random_number2)\n', (7709, 7730), True, 'import numpy as np\n'), ((7732, 7767), 'deepC.dnnc.logical_not', 'dc.logical_not', (['self.random_number2'], {}), '(self.random_number2)\n', (7746, 7767), True, 'import deepC.dnnc as dc\n'), ((7826, 7861), 'numpy.logical_not', 'np.logical_not', (['self.random_number3'], {}), '(self.random_number3)\n', (7840, 7861), True, 'import numpy as np\n'), ((7863, 7898), 'deepC.dnnc.logical_not', 'dc.logical_not', (['self.random_number3'], {}), '(self.random_number3)\n', (7877, 7898), True, 'import deepC.dnnc as dc\n'), ((7993, 8021), 'numpy.sign', 'np.sign', (['self.random_number1'], {}), '(self.random_number1)\n', (8000, 8021), True, 'import numpy as np\n'), ((8023, 8051), 'deepC.dnnc.sign', 'dc.sign', (['self.random_number1'], {}), '(self.random_number1)\n', (8030, 8051), True, 'import deepC.dnnc as dc\n'), ((8110, 8138), 'numpy.sign', 'np.sign', (['self.random_number2'], {}), '(self.random_number2)\n', (8117, 8138), True, 'import numpy as np\n'), ((8140, 8168), 'deepC.dnnc.sign', 'dc.sign', (['self.random_number2'], {}), '(self.random_number2)\n', (8147, 8168), True, 'import deepC.dnnc as dc\n'), ((8227, 8255), 'numpy.sign', 'np.sign', (['self.random_number3'], 
{}), '(self.random_number3)\n', (8234, 8255), True, 'import numpy as np\n'), ((8257, 8285), 'deepC.dnnc.sign', 'dc.sign', (['self.random_number3'], {}), '(self.random_number3)\n', (8264, 8285), True, 'import deepC.dnnc as dc\n'), ((8420, 8452), 'deepC.dnnc.softsign', 'dc.softsign', (['self.random_number1'], {}), '(self.random_number1)\n', (8431, 8452), True, 'import deepC.dnnc as dc\n'), ((8547, 8579), 'deepC.dnnc.softsign', 'dc.softsign', (['self.random_number2'], {}), '(self.random_number2)\n', (8558, 8579), True, 'import deepC.dnnc as dc\n'), ((8674, 8706), 'deepC.dnnc.softsign', 'dc.softsign', (['self.random_number3'], {}), '(self.random_number3)\n', (8685, 8706), True, 'import deepC.dnnc as dc\n'), ((1125, 1147), 'numpy.vectorize', 'np.vectorize', (['math.erf'], {}), '(math.erf)\n', (1137, 1147), True, 'import numpy as np\n'), ((8896, 8909), 'numpy.array', 'np.array', (['dcr'], {}), '(dcr)\n', (8904, 8909), True, 'import numpy as np\n'), ((9117, 9130), 'numpy.array', 'np.array', (['dcr'], {}), '(dcr)\n', (9125, 9130), True, 'import numpy as np\n')] |
import argparse
from collections import Counter
import json
import logging
import os
import operator
import random
import sys
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from sklearn.linear_model import LinearRegression
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
logger = logging.getLogger(__name__)
def main():
    """
    Load a trained LSTM language model, run it over synthetic data, and fit
    linear regressions from its cell states to the elapsed timestep in order
    to locate "counting" neurons.

    Reads all configuration from the module-global ``args`` namespace
    (populated in the ``__main__`` block) and prints R^2 scores to stdout.
    """
    # Normalize the symbolic target index ("first"/"middle"/"last" or a
    # number) to a non-positive integer offset from the end of the context.
    if args.target_index == "first":
        args.target_index = -(args.context_length - 1)
    elif args.target_index == "last":
        args.target_index = 0
    elif args.target_index == "middle":
        assert args.context_length % 2 == 0
        args.target_index = -(int(args.context_length / 2) - 1)
    else:
        args.target_index = -int(args.target_index)
    logger.info("Input Arguments:")
    print(json.dumps(vars(args), indent=4, sort_keys=True))
    # Set the random seed manually for reproducibility.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        if not args.cuda:
            logger.warning("GPU available but not running with "
                           "CUDA (use --cuda to turn on.)")
        else:
            torch.cuda.manual_seed(args.seed)
    # Build the vocabulary (word -> index) and token frequencies from the
    # training corpus; the frequencies drive the "adversarial" mode below.
    word2idx = {}
    train_data_list = []
    logger.info("Reading and indexing train data at {}".format(
        args.train_path))
    train_frequency_counter = Counter()
    with open(args.train_path) as train_file:
        for line in train_file:
            words = line.split() + ['<eos>']
            for word in words:
                # Get frequencies for each of the words.
                train_frequency_counter[word] += 1
                # Assign each word an index.
                if word not in word2idx:
                    word2idx[word] = len(word2idx)
            # Add the indexed words to the train data.
            train_data_list.extend([word2idx[word] for word in words])
    # Create the uniform data
    if args.mode == "uniform":
        logger.info("Creating uniformly sampled data."
                    "{} context length, {} vocab size".format(
                        args.context_length, args.vocab_size))
        # Set Vocabulary size
        vocab_size = args.vocab_size
        # Generate the data indices
        data = torch.LongTensor(
            args.num_examples, args.context_length).random_(
                0, vocab_size)
        # Generate train labels by slicing.
        labels = data[:, args.target_index - 1]
        # Wrap the data and targets in TensorDataset.
        dataset = TensorDataset(data, labels)
    # Create the adversarial data.
    else:
        # Sample uniformly from the 100 rarest tokens of the corpus.
        num_least_common = 100
        least_common_words = [
            word2idx[word] for word, count in
            train_frequency_counter.most_common()[:-num_least_common - 1:-1]]
        adversarial_np_data = np.random.choice(
            a=np.array(least_common_words),
            size=(args.num_examples, args.context_length),
            replace=True)
        # Turn test data into a LongTensor. shape: (num_examples, context_len).
        data = torch.LongTensor(adversarial_np_data)
        # Generate train labels by slicing.
        labels = data[:, args.target_index - 1]
        # Wrap the data and targets in TensorDataset.
        dataset = TensorDataset(data, labels)
    logger.info("Number of train examples: {}".format(len(dataset)))
    device = torch.device("cuda:0" if args.cuda else "cpu")
    # Load the best saved model.
    with open(args.load_model, "rb") as model_file:
        model = torch.load(model_file,
                           map_location=lambda storage, loc: storage)
    model = model.to(device)
    # Run the dataset through the RNN and collect
    # hidden states at each timestep.
    dataloader = DataLoader(dataset, batch_size=args.batch_size,
                            shuffle=False, num_workers=args.num_workers,
                            pin_memory=args.cuda)
    if type(model.rnn).__name__ != "LSTM":
        raise ValueError("This script is only compatible with LSTMs for now.")
    logger.info("Extracting cell states for test data.")
    # Rebuild the (single-layer) LSTM as an LSTMCell so the cell state can
    # be read out after every individual timestep.
    model_lstmcell = nn.LSTMCell(model.rnn.input_size, model.rnn.hidden_size)
    # Load weights into LSTMCell
    lstmcell_state_dict = {
        "weight_ih": model.rnn.state_dict()["weight_ih_l0"],
        "weight_hh": model.rnn.state_dict()["weight_hh_l0"],
        "bias_ih": model.rnn.state_dict()["bias_ih_l0"],
        "bias_hh": model.rnn.state_dict()["bias_hh_l0"]
    }
    model_lstmcell.load_state_dict(lstmcell_state_dict)
    model_lstmcell = model_lstmcell.to(device)
    all_cell_states = []
    all_labels = []
    with torch.no_grad():
        for batch_idx, data_tuple in tqdm(enumerate(dataloader)):
            data, targets = data_tuple
            data = data.to(device)
            # Embed the data. Shape: (batch_size, context_length,
            # embedding_dim)
            embedded_data = model.embedding(data)
            # Shape: (num_layers, batch_size, hidden_size)
            (hidden_state, cell_state) = init_hidden(
                model.rnn, embedded_data.size(0))
            # Run the data through RNN.
            for idx, embedded_word in enumerate(embedded_data.transpose(0, 1)):
                # Embedded word shape: (batch_size, embedding_dim)
                hidden_state, cell_state = model_lstmcell(
                    embedded_word, (hidden_state, cell_state))
                # Record every example's cell state at this timestep, with
                # label = number of timesteps elapsed (1-based).
                all_cell_states.extend([x.detach().tolist() for
                                        x in cell_state])
                all_labels.extend([idx + 1 for x in cell_state])
    assert len(all_cell_states) == len(all_labels)
    # Filter all cell states and all labels to only include cell
    # states corresponding to labels that are less than or equal to
    # args.context_length - abs(args.target_index)
    regression_cell_states = []
    regression_labels = []
    for cell_state, label in zip(all_cell_states, all_labels):
        if label <= args.context_length - abs(args.target_index):
            regression_cell_states.append(cell_state)
            regression_labels.append(label)
    assert len(regression_cell_states) == len(regression_labels)
    # Try to fit this initial linear regression
    logger.info("Fitting linear regression from entire cell "
                "state vector to timestep information. "
                "{} examples in total".format(len(regression_cell_states)))
    np_regression_cell_states = np.array(regression_cell_states)
    np_regression_labels = np.array(regression_labels)
    all_neurons_linear_regression = LinearRegression()
    all_neurons_linear_regression.fit(np_regression_cell_states,
                                      np_regression_labels)
    all_neurons_r2 = all_neurons_linear_regression.score(
        np_regression_cell_states, np_regression_labels)
    print("R^2 of model when using entire cell state vector "
          "to predict timestep information: {}".format(all_neurons_r2))
    logger.info("Fitting linear regression from each neuron to "
                "timestep information")
    # List of lists. Length of outer list: number of neurons.
    # Length of inner list: # of examples
    num_hidden_neurons = len(regression_cell_states[0])
    regression_neuron_activations = []
    for i in range(num_hidden_neurons):
        regression_neuron_activations.append(
            [cell_state[i] for cell_state in regression_cell_states])
    assert len(regression_neuron_activations) == num_hidden_neurons
    assert len(regression_neuron_activations[0]) == len(regression_labels)
    # For each neuron, fit a linear regression
    neuron_r2s = {}
    for idx, neuron_activations in enumerate(regression_neuron_activations):
        np_neuron_activations = np.array(neuron_activations).reshape(-1, 1)
        neuron_r2 = LinearRegression().fit(
            np_neuron_activations, np_regression_labels).score(
                np_neuron_activations, np_regression_labels)
        neuron_r2s[idx] = neuron_r2
    logger.info("Top 10 neurons indices and their R^2 to timestep information")
    for idx, r2 in sorted(neuron_r2s.items(), key=operator.itemgetter(1),
                          reverse=True)[:10]:
        print("Index: {}, R2: {}".format(idx, r2))
def init_hidden(rnn, batch_size):
    """Build a zeroed (h, c) start state for `rnn`.

    Returns a pair of (batch_size, hidden_size) tensors allocated from the
    RNN's own parameter storage, so they share its dtype and device.
    """
    param = next(rnn.parameters()).data

    def _fresh_zeros():
        # A new zero tensor per call -- h and c must be distinct objects.
        return param.new(batch_size, rnn.hidden_size).zero_()

    return _fresh_zeros(), _fresh_zeros()
def index_evaluation_data(evaluation_path, word2idx, context_length,
                          target_index, shuffle):
    """Read a text file, index its tokens, and slice into sliding windows.

    Each line is whitespace-tokenized and terminated with '<eos>', then every
    token is mapped through ``word2idx``. If ``shuffle`` is truthy the flat
    token stream is shuffled in place first. Returns a LongTensor of shape
    (num_tokens - context_length + 1, context_length) holding all overlapping
    windows. ``target_index`` is accepted for signature compatibility but is
    not used here.
    """
    token_ids = []
    with open(evaluation_path) as handle:
        for line in handle:
            for token in line.split() + ['<eos>']:
                token_ids.append(word2idx[token])
    if shuffle:
        random.shuffle(token_ids)
    flat = torch.LongTensor(token_ids)
    # Every contiguous window of `context_length` tokens becomes one example.
    num_windows = len(flat) - context_length + 1
    windows = [flat[start:start + context_length]
               for start in range(num_windows)]
    return torch.stack(windows)
if __name__ == "__main__":
    logging.basicConfig(format="%(asctime)s - %(levelname)s "
                        "- %(name)s - %(message)s",
                        level=logging.INFO)
    # Path to project root directory
    project_root = os.path.abspath(os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)), os.pardir)))
    parser = argparse.ArgumentParser(
        description=("Train a RNN to predict the past, but with random "
                     "embeddings. Can optionally choose to freeze the "
                     "embeddings or output layer."),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--load-model", type=str,
                        help=("A model to load and evaluate on test data."))
    parser.add_argument("--train-path", type=str,
                        default=os.path.join(project_root, "data",
                                             "valid.txt"),
                        help=("Path to the data to use to find "
                              "counting neurons."))
    parser.add_argument("--mode", type=str,
                        choices=["uniform", "adversarial"], default="adversarial",
                        help=("The dataset to train the RNN on. uniform "
                              "indicates that the data is sampled uniformly. "
                              "Adversarial indicates that the data is sampled "
                              "uniformly from the 100 rarest tokens in the "
                              "train-path."))
    parser.add_argument("--num-examples", type=int, default=10000,
                        help=("The number of examples to use."))
    parser.add_argument("--target-index", type=str, default="middle",
                        help=("The index of the input history to predict. "
                              "0 is the last token in the sequence (most "
                              "recently seen token), -1 is the penultimate, "
                              "-2 is the 2nd to last, etc. If a positive "
                              "number is provided, then it is negated."
                              "If \"last\", we predict the last token. "
                              "If \"middle\", we predict the middle token. "
                              "If \"first\", we predict the first token."))
    parser.add_argument("--context-length", type=int, required=True,
                        help=("The sequence length of the inputs"))
    parser.add_argument("--vocab-size", type=int, default=10000,
                        help=("The number of unique tokens in "
                              "the synthetic vocabulary."))
    parser.add_argument("--batch-size", type=int, default=128,
                        help="Batch size to use in the model.")
    parser.add_argument("--seed", type=int, default=0,
                        help="Random seed to use.")
    parser.add_argument("--num-workers", type=int, default=4,
                        help="The number of processes the use the load data.")
    parser.add_argument("--cuda", action="store_true",
                        help="Use the GPU.")
    # `args` is intentionally left module-global: main() (and the
    # target-index normalization inside it) reads it directly.
    args = parser.parse_args()
    main()
| [
"argparse.ArgumentParser",
"random.shuffle",
"torch.nn.LSTMCell",
"torch.utils.data.TensorDataset",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"os.path.dirname",
"torch.load",
"random.seed",
"collections.Counter",
"torch.manual_seed",
"os.path.realpath"... | [((382, 409), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (399, 409), False, 'import logging\n'), ((952, 974), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (963, 974), False, 'import random\n'), ((979, 1007), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (996, 1007), False, 'import torch\n'), ((1015, 1040), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1038, 1040), False, 'import torch\n'), ((1417, 1426), 'collections.Counter', 'Counter', ([], {}), '()\n', (1424, 1426), False, 'from collections import Counter\n'), ((3306, 3333), 'torch.utils.data.TensorDataset', 'TensorDataset', (['data', 'labels'], {}), '(data, labels)\n', (3319, 3333), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3417, 3463), 'torch.device', 'torch.device', (["('cuda:0' if args.cuda else 'cpu')"], {}), "('cuda:0' if args.cuda else 'cpu')\n", (3429, 3463), False, 'import torch\n'), ((3793, 3912), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': 'args.cuda'}), '(dataset, batch_size=args.batch_size, shuffle=False, num_workers=\n args.num_workers, pin_memory=args.cuda)\n', (3803, 3912), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4166, 4222), 'torch.nn.LSTMCell', 'nn.LSTMCell', (['model.rnn.input_size', 'model.rnn.hidden_size'], {}), '(model.rnn.input_size, model.rnn.hidden_size)\n', (4177, 4222), False, 'from torch import nn\n'), ((6534, 6566), 'numpy.array', 'np.array', (['regression_cell_states'], {}), '(regression_cell_states)\n', (6542, 6566), True, 'import numpy as np\n'), ((6594, 6621), 'numpy.array', 'np.array', (['regression_labels'], {}), '(regression_labels)\n', (6602, 6621), True, 'import numpy as np\n'), ((6658, 6676), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], 
{}), '()\n', (6674, 6676), False, 'from sklearn.linear_model import LinearRegression\n'), ((9115, 9148), 'torch.LongTensor', 'torch.LongTensor', (['evaluation_data'], {}), '(evaluation_data)\n', (9131, 9148), False, 'import torch\n'), ((9438, 9545), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)\n", (9457, 9545), False, 'import logging\n'), ((9776, 10005), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a RNN to predict the past, but with random embeddings. Can optionally choose to freeze the embeddings or output layer."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Train a RNN to predict the past, but with random embeddings. Can optionally choose to freeze the embeddings or output layer.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (9799, 10005), False, 'import argparse\n'), ((337, 362), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (352, 362), False, 'import os\n'), ((2588, 2615), 'torch.utils.data.TensorDataset', 'TensorDataset', (['data', 'labels'], {}), '(data, labels)\n', (2601, 2615), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3119, 3156), 'torch.LongTensor', 'torch.LongTensor', (['adversarial_np_data'], {}), '(adversarial_np_data)\n', (3135, 3156), False, 'import torch\n'), ((3565, 3630), 'torch.load', 'torch.load', (['model_file'], {'map_location': '(lambda storage, loc: storage)'}), '(model_file, map_location=lambda storage, loc: storage)\n', (3575, 3630), False, 'import torch\n'), ((4684, 4699), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4697, 4699), False, 'import torch\n'), ((9020, 9051), 'random.shuffle', 'random.shuffle', (['evaluation_data'], {}), '(evaluation_data)\n', (9034, 9051), False, 'import 
random\n'), ((1219, 1252), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1241, 1252), False, 'import torch\n'), ((10272, 10319), 'os.path.join', 'os.path.join', (['project_root', '"""data"""', '"""valid.txt"""'], {}), "(project_root, 'data', 'valid.txt')\n", (10284, 10319), False, 'import os\n'), ((2314, 2370), 'torch.LongTensor', 'torch.LongTensor', (['args.num_examples', 'args.context_length'], {}), '(args.num_examples, args.context_length)\n', (2330, 2370), False, 'import torch\n'), ((2909, 2937), 'numpy.array', 'np.array', (['least_common_words'], {}), '(least_common_words)\n', (2917, 2937), True, 'import numpy as np\n'), ((7832, 7860), 'numpy.array', 'np.array', (['neuron_activations'], {}), '(neuron_activations)\n', (7840, 7860), True, 'import numpy as np\n'), ((8212, 8234), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (8231, 8234), False, 'import operator\n'), ((9720, 9746), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9736, 9746), False, 'import os\n'), ((7896, 7914), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7912, 7914), False, 'from sklearn.linear_model import LinearRegression\n')] |
from sklearn.metrics import multilabel_confusion_matrix, accuracy_score
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np
import os
from matplotlib import pyplot as plt
from gestures import actions
# Names used for the saved Keras model directory and the TFLite export.
MODEL_NAME = 'action'
TFLITE_NAME = 'model.tflite'
words = actions
# Path for exported data, numpy arrays
DATA_PATH = os.path.join('MP_Data')
# Number of frames loaded per recorded sequence (frames 0..19 below; the
# original comment said 30, but only 20 are actually read).
sequence_length = 20
# Map each gesture label to an integer class index.
label_map = {label: num for num, label in enumerate(words)}
sequences, labels = [], []
for action in words:
    folder_path = os.path.join(DATA_PATH, action)
    file_names = os.listdir(folder_path)
    len_data = len(file_names)
    print(action)
    print(len_data)
    for file_name in file_names:
        window = []
        for frame_num in range(sequence_length):
            # Each frame is a saved .npy array of extracted keypoints.
            res = np.load(os.path.join(DATA_PATH, action, file_name, "{}.npy".format(frame_num)))
            window.append(res)
        sequences.append(window)
        labels.append(label_map[action])
X = np.array(sequences)
y = to_categorical(labels).astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
# NOTE(review): this early-stopping callback is created but never passed to
# model.fit below -- confirm whether it should be in `callbacks=[callback]`.
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model = Sequential()
# input_shape=(20, 258): 20 frames per sequence; 258 features per frame is
# assumed from the saved keypoint arrays -- TODO confirm against MP_Data.
model.add(LSTM(64, return_sequences=True,
          activation='relu', input_shape=(20, 258)))
model.add(LSTM(128, return_sequences=True, activation='relu'))
model.add(LSTM(64, return_sequences=False, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(words.shape[0], activation='softmax'))
model.compile(optimizer='Adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=300)
model.save(MODEL_NAME)
# Convert the saved model to TFLite for on-device inference.
converter = tf.lite.TFLiteConverter.from_saved_model(MODEL_NAME)
tflite_model = converter.convert()
with open(TFLITE_NAME, 'wb') as f:
f.write(tflite_model) | [
"os.listdir",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.LSTM",
"os.path.join",
"tensorflow.k... | [((563, 586), 'os.path.join', 'os.path.join', (['"""MP_Data"""'], {}), "('MP_Data')\n", (575, 586), False, 'import os\n'), ((1234, 1253), 'numpy.array', 'np.array', (['sequences'], {}), '(sequences)\n', (1242, 1253), True, 'import numpy as np\n'), ((1328, 1365), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (1344, 1365), False, 'from sklearn.model_selection import train_test_split\n'), ((1379, 1440), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(10)'}), "(monitor='loss', patience=10)\n", (1411, 1440), True, 'import tensorflow as tf\n'), ((1449, 1461), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1459, 1461), False, 'from tensorflow.keras.models import Sequential\n'), ((1996, 2048), 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (2036, 2048), True, 'import tensorflow as tf\n'), ((782, 813), 'os.path.join', 'os.path.join', (['DATA_PATH', 'action'], {}), '(DATA_PATH, action)\n', (794, 813), False, 'import os\n'), ((831, 854), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (841, 854), False, 'import os\n'), ((1472, 1545), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'activation': '"""relu"""', 'input_shape': '(20, 258)'}), "(64, return_sequences=True, activation='relu', input_shape=(20, 258))\n", (1476, 1545), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1567, 1618), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)', 'activation': '"""relu"""'}), "(128, return_sequences=True, activation='relu')\n", (1571, 1618), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1630, 1681), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(False)', 
'activation': '"""relu"""'}), "(64, return_sequences=False, activation='relu')\n", (1634, 1681), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1693, 1721), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1698, 1721), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1733, 1761), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1738, 1761), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1773, 1816), 'tensorflow.keras.layers.Dense', 'Dense', (['words.shape[0]'], {'activation': '"""softmax"""'}), "(words.shape[0], activation='softmax')\n", (1778, 1816), False, 'from tensorflow.keras.layers import LSTM, Dense\n'), ((1258, 1280), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['labels'], {}), '(labels)\n', (1272, 1280), False, 'from tensorflow.keras.utils import to_categorical\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import numpy as np
import scipy.interpolate as interp
__all__ = ['ideal_eos', 'FreeStreamer']
__version__ = '1.0.1'
"""
References:
[1] <NAME>, <NAME>, <NAME>
Pre-equilibrium evolution effects on heavy-ion collision observables
PRC 91 064906 (2015)
arXiv:1504.02160 [nucl-th]
http://inspirehep.net/record/1358669
[2] <NAME>, <NAME>, <NAME>, <NAME>
Free-streaming approximation in early dynamics
of relativistic heavy-ion collisions
PRC 80 034902 (2009)
arXiv:0812.3393 [nucl-th]
http://inspirehep.net/record/805616
"""
def ideal_eos(e):
    """
    Conformal (ideal-gas) equation of state: returns the pressure P = e/3
    for an energy density (scalar or array) ``e``.
    """
    pressure = e / 3
    return pressure
class FreeStreamer(object):
"""
Free streaming and Landau matching for boost-invariant hydrodynamic initial
conditions.
Parameters:
initial -- square (n, n) array containing the initial state
grid_max -- x and y max of the grid in fm (see online readme)
time -- time to free stream in fm
After creating a FreeStreamer object, extract the various hydro quantities
using its methods
Tuv, energy_density, flow_velocity, shear_tensor, bulk_pressure
See the online readme and the docstring of each method.
"""
    def __init__(self, initial, grid_max, time):
        """
        Free stream the initial state for `time` and compute the
        boost-invariant energy-momentum tensor T^μν on the grid.

        initial  -- square (n, n) array, the initial transverse profile
        grid_max -- x and y max of the grid in fm (outer cell edge)
        time     -- free-streaming time in fm

        Only self._Tuv is computed here; the other cached quantities are
        initialized to None and presumably filled on demand by the
        accessor methods.
        """
        initial = np.asarray(initial)
        if initial.ndim != 2 or initial.shape[0] != initial.shape[1]:
            raise ValueError('initial must be a square array')
        nsteps = initial.shape[0]
        # grid_max is the outer edge of the outermost grid cell;
        # xymax is the midpoint of the same cell.
        # They are different by half a cell width, i.e. grid_max/nsteps.
        xymax = grid_max*(1 - 1/nsteps)
        # Initialize the 2D interpolating splines.
        # Need both linear and cubic splines -- see below.
        # The scipy class has the x and y dimensions reversed,
        # so give it the transpose of the initial state.
        xy = np.linspace(-xymax, xymax, nsteps)
        spline1, spline3 = (
            interp.RectBivariateSpline(xy, xy, initial.T, kx=k, ky=k)
            for k in [1, 3]
        )
        # Prepare for evaluating the T^μν integrals, Eq. (7) in [1] and
        # Eq. (10) in [2]. For each grid cell, there are six integrals
        # (for the six independent components of T^μν), each of which is a
        # line integral around a circle of radius tau_0.
        # The only way to do this with reasonable speed in python is to
        # pre-determine the integration points and vectorize the calculation.
        # Among the usual fixed-point (non-adaptive) integration rules, the
        # trapezoid rule was found to converge faster than both the Simpson
        # rule and Gauss-Legendre quadrature.
        # Set the number of points so the arc length of each step is roughly
        # the size of a grid cell. Clip the number of points to a reasonable
        # range.
        npoints = min(max(int(np.ceil(np.pi*time*nsteps/grid_max)), 30), 100)
        phi = np.linspace(0, 2*np.pi, npoints, endpoint=False)
        cos_phi = np.cos(phi)
        sin_phi = np.sin(phi)
        # Cache the x and y evaluation points for the integrals.
        # X and Y are (nsteps, npoints) arrays.
        X = np.subtract.outer(xy, time*cos_phi)
        Y = np.subtract.outer(xy, time*sin_phi)
        # Create lists of the upper-triangle indices and corresponding weight
        # functions for the integrals.
        u, v, K = zip(*[
            (0, 0, np.ones_like(phi)),
            (0, 1, cos_phi),
            (0, 2, sin_phi),
            (1, 1, cos_phi*cos_phi),
            (1, 2, cos_phi*sin_phi),
            (2, 2, sin_phi*sin_phi),
        ])
        # K (6, npoints) contains the weights for each integral.
        K = np.array(K)
        K /= phi.size
        # Initialize T^μν array.
        Tuv = np.empty((nsteps, nsteps, 3, 3))
        # Compute the integrals one row at a time; this avoids significant
        # python function call overhead compared to computing one cell at a
        # time. In principle everything could be done in a single function
        # call, but this would require a very large temporary array and hence
        # may even be slower. Vectorizing each row sufficiently minimizes the
        # function call overhead with a manageable memory footprint.
        for row, y in zip(Tuv, Y):
            # Evaluate the splines on all the integration points for this row.
            # (These lines account for ~90% of the total computation time!)
            # Cubic interpolation (Z3) accurately captures the curvature of the
            # initial state, but can produce artifacts and negative values near
            # the edges; linear interpolation (Z1) cannot capture the
            # curvature, but behaves correctly at the edges. To combine the
            # advantages, use Z3 where both splines are positive, otherwise set
            # to zero.
            Z1 = spline1(X, y, grid=False)
            Z3 = spline3(X, y, grid=False)
            Z3 = np.where((Z1 > 0) & (Z3 > 0), Z3, 0)
            # Z3 (nsteps, npoints) contains the function evaluations along the
            # circles centered at each grid point along the row. Now compute
            # all six integrals in a single function call to the inner product
            # and write the result into the T^μν array. np.inner calculates
            # the sum over the last axes of Z3 (nsteps, npoints) and K (6,
            # npoints), returning an (nsteps, 6) array. In other words, it
            # sums over the integration points for each grid cell in the row.
            # np.inner is a highly-optimized linear algebra routine so this is
            # very efficient.
            row[:, u, v] = np.inner(Z3, K)
        # Copy the upper triangle to the lower triangle.
        u, v = zip(*[(0, 1), (0, 2), (1, 2)])
        Tuv[..., v, u] = Tuv[..., u, v]
        # Normalize the tensor for boost-invariant longitudinal expansion.
        Tuv /= time
        # Initialize class members.
        self._Tuv = Tuv
        self._energy_density = None
        self._flow_velocity = None
        self._shear_tensor = None
        self._total_pressure = None
def Tuv(self, u=None, v=None):
"""
Energy-momentum tensor T^μν.
With no arguments, returns an (n, n, 3, 3) array containing the full
tensor at each grid point.
With two integer arguments, returns an (n, n) array containing the
requested component of the tensor at each grid point. For example
FreeStreamer.Tuv(0, 0) returns T00.
"""
if u is None and v is None:
return self._Tuv
elif u is not None and v is not None:
return self._Tuv[..., u, v]
else:
raise ValueError('must provide both u and v')
    def _compute_energy_density_flow_velocity(self):
        """
        Compute energy density and flow velocity by solving the eigenvalue
        equation from the Landau matching condition.

        Populates ``self._energy_density`` (n, n) and
        ``self._flow_velocity`` (n, n, 3).  Cells with no matter, or whose
        eigensystem yields no physically timelike eigenvector, are left at
        vacuum values: e = 0 and u = (1, 0, 0).
        """
        # Ignore empty grid cells.
        T00 = self._Tuv[..., 0, 0]
        # Relative cutoff: anything below ~machine precision of the peak
        # density counts as vacuum.
        nonzero = T00 > 1e-16 * T00.max()
        # The Landau matching condition expressed as an eigenvalue equation is
        #
        #   T^μ_ν u^ν = e u^μ
        #
        # where the timelike eigenvector u^μ is the four-velocity required to
        # boost to the local rest frame of the fluid, and the eigenvalue e is
        # the energy density in the local rest frame.
        # Construct the mixed tensor T^μ_ν (n, 3, 3), where n is the number of
        # nonzero grid cells; lowering the spatial index flips its sign in
        # the (+, -, -) metric.
        Tu_v = np.copy(self._Tuv[nonzero])
        Tu_v[..., :, 1:] *= -1
        # The mixed tensor is NOT symmetric, so must use the general
        # eigensystem solver.  Recent versions of numpy solve all the
        # eigensystems in a single function call (there's still an outer loop
        # over the array, but it is executed in C).
        eigvals, eigvecs = np.linalg.eig(Tu_v)
        # Eigenvalues/vectors can sometimes be complex.  This is numerically
        # valid but clearly the physical energy density must be real.
        # Therefore take the real part and zero out any complex
        # eigenvalues/vectors.
        if np.iscomplexobj(eigvals):
            imag = eigvals.imag != 0
            eigvals = eigvals.real
            eigvals[imag] = 0
            eigvecs = eigvecs.real
            eigvecs.transpose(0, 2, 1)[imag] = 0
        # eigvals (n, 3) holds the 3 eigenvalues per nonzero cell; in each
        # (3, 3) block of eigvecs the columns are the eigenvectors and the
        # rows are the (t, x, y) components.
        # The physical flow velocity and energy density correspond to the
        # (unique) timelike eigenvector.  Given components (t, x, y) the
        # timelike condition t^2 > x^2 + y^2 simplifies, for Euclidean-unit
        # vectors, to t^2 > 1/2 — but exactly 1/2 would be lightlike, which
        # is numerically undesirable.  Realistic Lorentz factors are ~40;
        # cells implying gamma above the threshold below are discarded as
        # numerical noise.
        gamma_max = 100
        timelike = eigvecs[:, 0]**2 > 1/(2 - 1/gamma_max**2)
        # "timelike" is an (n, 3) boolean array flagging the timelike
        # eigenvector (if any) per cell.  Narrow the "nonzero" mask to cells
        # that actually have one (a logical AND applied in place).
        nonzero[nonzero] = timelike.any(axis=1)
        # Save the physical eigenvalues in the internal energy density array.
        self._energy_density = np.zeros(self._Tuv.shape[:2])
        self._energy_density[nonzero] = eigvals[timelike]
        # Select the timelike eigenvectors and correct the overall signs, if
        # necessary (the overall sign of numerical eigenvectors is arbitrary,
        # but u^0 should always be positive).
        u = eigvecs.transpose(0, 2, 1)[timelike]
        u0 = u[..., 0]
        u[u0 < 0] *= -1
        # Normalize the flow velocity in Minkowski space.  The numerical
        # solver returns vectors A*u normalized in Euclidean space as
        # A^2*(u0^2 + u1^2 + u2^2) = 1, which need to be renormalized as
        # u0^2 - u1^2 - u2^2 = 1.  The prefactor sqrt(2*u0^2 - 1) follows
        # from equating these two normalizations.
        u /= np.sqrt(2*u0*u0 - 1)[..., np.newaxis]
        # Save internal flow velocity array (vacuum cells keep u = (1, 0, 0)).
        self._flow_velocity = np.zeros(self._Tuv.shape[:3])
        self._flow_velocity[..., 0] = 1
        self._flow_velocity[nonzero] = u
def energy_density(self):
"""
Energy density in the local rest frame from Landau matching.
Returns an (n, n) array.
"""
if self._energy_density is None:
self._compute_energy_density_flow_velocity()
return self._energy_density
def flow_velocity(self, u=None):
"""
Fluid flow velocity u^μ from Landau matching.
With no arguments, returns an (n, n, 3) array containing the flow
vector at each grid point.
With a single integer argument, returns an (n, n) array containing the
requested component of the flow vector at each grid point.
"""
if self._flow_velocity is None:
self._compute_energy_density_flow_velocity()
if u is None:
return self._flow_velocity
else:
return self._flow_velocity[..., u]
def _compute_viscous_corrections(self):
"""
Use T^μν and the results of Landau matching to calculate the shear
pressure tensor π^μν and the total pressure (P + Π).
"""
T = self.Tuv()
# Flow velocity "outer product" u^μ u^ν.
u = self.flow_velocity()
uu = np.einsum('...i,...j', u, u)
# Metric tensor g^μν in Minkowski space.
g = np.diag([1., -1., -1.])
# Projection operator Δ^μν.
Delta = g - uu
# Compute and save the total pressure = ideal + bulk = P + Π.
# See Eq. (11) in [1].
self._total_pressure = np.einsum('au,bv,...ab,...uv', g, g, Delta, T)
self._total_pressure /= -3
# Add two trailing dimensions to the energy density and total pressure
# arrays (n, n) -> (n, n, 1, 1) so that they can broadcast onto the uu
# and Delta arrays (n, n, 3, 3).
e = self.energy_density()[..., np.newaxis, np.newaxis]
Ptotal = self._total_pressure[..., np.newaxis, np.newaxis]
# Compute and save the shear pressure tensor π^μν.
# See Eq. (13) in [1].
self._shear_tensor = T - e*uu + Ptotal*Delta
def shear_tensor(self, u=None, v=None):
"""
Shear pressure tensor π^μν.
With no arguments, returns an (n, n, 3, 3) array containing the full
tensor at each grid point.
With two integer arguments, returns an (n, n) array containing the
requested component of the tensor at each grid point. For example
FreeStreamer.shear_tensor(1, 2) returns pi12.
"""
if self._shear_tensor is None:
self._compute_viscous_corrections()
if u is None and v is None:
return self._shear_tensor
elif u is not None and v is not None:
return self._shear_tensor[..., u, v]
else:
raise ValueError('must provide both u and v')
def bulk_pressure(self, eos=ideal_eos):
"""
Bulk viscous pressure Π.
Optional parameter eos must be a callable object that evaluates the
equation of state P(e). The default is the ideal EoS, P(e) = e/3.
Returns an (n, n) array.
"""
if self._total_pressure is None:
self._compute_viscous_corrections()
# Compute Π = (P + Π) - P = (total pressure) - P, P = P(e) from eos.
self._bulk_pressure = self._total_pressure - eos(self.energy_density())
return self._bulk_pressure
| [
"numpy.empty",
"numpy.einsum",
"numpy.sin",
"numpy.inner",
"numpy.diag",
"numpy.copy",
"numpy.linalg.eig",
"numpy.linspace",
"numpy.ones_like",
"numpy.ceil",
"numpy.asarray",
"scipy.interpolate.RectBivariateSpline",
"numpy.cos",
"numpy.iscomplexobj",
"numpy.zeros",
"numpy.subtract.oute... | [((1379, 1398), 'numpy.asarray', 'np.asarray', (['initial'], {}), '(initial)\n', (1389, 1398), True, 'import numpy as np\n'), ((2041, 2075), 'numpy.linspace', 'np.linspace', (['(-xymax)', 'xymax', 'nsteps'], {}), '(-xymax, xymax, nsteps)\n', (2052, 2075), True, 'import numpy as np\n'), ((3104, 3154), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'npoints'], {'endpoint': '(False)'}), '(0, 2 * np.pi, npoints, endpoint=False)\n', (3115, 3154), True, 'import numpy as np\n'), ((3171, 3182), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3177, 3182), True, 'import numpy as np\n'), ((3201, 3212), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3207, 3212), True, 'import numpy as np\n'), ((3339, 3376), 'numpy.subtract.outer', 'np.subtract.outer', (['xy', '(time * cos_phi)'], {}), '(xy, time * cos_phi)\n', (3356, 3376), True, 'import numpy as np\n'), ((3387, 3424), 'numpy.subtract.outer', 'np.subtract.outer', (['xy', '(time * sin_phi)'], {}), '(xy, time * sin_phi)\n', (3404, 3424), True, 'import numpy as np\n'), ((3863, 3874), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (3871, 3874), True, 'import numpy as np\n'), ((3945, 3977), 'numpy.empty', 'np.empty', (['(nsteps, nsteps, 3, 3)'], {}), '((nsteps, nsteps, 3, 3))\n', (3953, 3977), True, 'import numpy as np\n'), ((7721, 7748), 'numpy.copy', 'np.copy', (['self._Tuv[nonzero]'], {}), '(self._Tuv[nonzero])\n', (7728, 7748), True, 'import numpy as np\n'), ((8081, 8100), 'numpy.linalg.eig', 'np.linalg.eig', (['Tu_v'], {}), '(Tu_v)\n', (8094, 8100), True, 'import numpy as np\n'), ((8353, 8377), 'numpy.iscomplexobj', 'np.iscomplexobj', (['eigvals'], {}), '(eigvals)\n', (8368, 8377), True, 'import numpy as np\n'), ((10081, 10110), 'numpy.zeros', 'np.zeros', (['self._Tuv.shape[:2]'], {}), '(self._Tuv.shape[:2])\n', (10089, 10110), True, 'import numpy as np\n'), ((10925, 10954), 'numpy.zeros', 'np.zeros', (['self._Tuv.shape[:3]'], {}), '(self._Tuv.shape[:3])\n', (10933, 10954), True, 
'import numpy as np\n'), ((12249, 12277), 'numpy.einsum', 'np.einsum', (['"""...i,...j"""', 'u', 'u'], {}), "('...i,...j', u, u)\n", (12258, 12277), True, 'import numpy as np\n'), ((12340, 12366), 'numpy.diag', 'np.diag', (['[1.0, -1.0, -1.0]'], {}), '([1.0, -1.0, -1.0])\n', (12347, 12366), True, 'import numpy as np\n'), ((12557, 12603), 'numpy.einsum', 'np.einsum', (['"""au,bv,...ab,...uv"""', 'g', 'g', 'Delta', 'T'], {}), "('au,bv,...ab,...uv', g, g, Delta, T)\n", (12566, 12603), True, 'import numpy as np\n'), ((2117, 2174), 'scipy.interpolate.RectBivariateSpline', 'interp.RectBivariateSpline', (['xy', 'xy', 'initial.T'], {'kx': 'k', 'ky': 'k'}), '(xy, xy, initial.T, kx=k, ky=k)\n', (2143, 2174), True, 'import scipy.interpolate as interp\n'), ((5135, 5171), 'numpy.where', 'np.where', (['((Z1 > 0) & (Z3 > 0))', 'Z3', '(0)'], {}), '((Z1 > 0) & (Z3 > 0), Z3, 0)\n', (5143, 5171), True, 'import numpy as np\n'), ((5851, 5866), 'numpy.inner', 'np.inner', (['Z3', 'K'], {}), '(Z3, K)\n', (5859, 5866), True, 'import numpy as np\n'), ((10811, 10835), 'numpy.sqrt', 'np.sqrt', (['(2 * u0 * u0 - 1)'], {}), '(2 * u0 * u0 - 1)\n', (10818, 10835), True, 'import numpy as np\n'), ((3042, 3083), 'numpy.ceil', 'np.ceil', (['(np.pi * time * nsteps / grid_max)'], {}), '(np.pi * time * nsteps / grid_max)\n', (3049, 3083), True, 'import numpy as np\n'), ((3585, 3602), 'numpy.ones_like', 'np.ones_like', (['phi'], {}), '(phi)\n', (3597, 3602), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 13:56:33 2018
@author: haoxiangyang
"""
from os import path
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.pyplot import plot
matplotlib.use('agg') #To plot on linux
import sys
import pickle
import numpy as np
import pandas as pd
from scipy.stats import norm
import webbrowser
def calL(z):
L = np.exp(-1/2*z**2)/np.sqrt(2*np.pi) - z*(1 - norm.cdf(z))
return L
def QRCal(fl_df,c,scenSet,h,K,p,epsilon):
demandScen = [fl_df.demand[c]*i for i in scenSet]
mu = np.mean(demandScen)
sigma = np.std(demandScen)
QCurr = mu
RCurr = 0
stopBool = True
while stopBool:
# record the previous one
QPrev = QCurr
RPrev = RCurr
# calculate the new R and Q
RCurr = norm.ppf(1 - QCurr*h/(p*mu),loc = mu,scale = sigma)
QCurr = np.sqrt(2*mu*(K + p*sigma*calL((RCurr - mu)/sigma))/h)
if (QCurr - QPrev <= epsilon*QCurr)and(RCurr - RPrev <= epsilon*RCurr):
stopBool = False
return QCurr,RCurr
#%%
# ---------------------------------------------------------------------------
# Model parameters (horizon, fleet, truck characteristics).
# ---------------------------------------------------------------------------
T=11
t_lenght = 6# length of one time period (name keeps the original typo)
N=4
supply_factor = 1.5
Totaltrucks = 1000*supply_factor
truck_cap = 230#Barrels/Truck
dem_pen = 1
truck_speed = 80 #km/h
# create port sections: each supply port (suffix "_S") serves a fixed set
# of Florida demand counties.
sectionDict = {}
sectionDict['Bay County_S'] = ['Walton County','Holmes County','Jackson County','Washington County','Bay County','Calhoun County','Gulf County']
sectionDict['Brevard County_S'] = ['Putnam County','Flagler County','Volusia County','Brevard County','Orange County','Seminole County','Osceola County','Indian River County']
sectionDict['Broward County_S'] = ['Lee County','DeSoto County','Hardee County','Highlands County','Glades County','Hendry County','Palm Beach County','St. Lucie County','Martin County','Collier County','Broward County','Monroe County','Miami-Dade County','Okeechobee County','Charlotte County']
sectionDict['Duval County_S'] = ['Bradford County','Clay County','Duval County','St. Johns County','Liberty County','Gadsden County','Franklin County','Wakulla County','Leon County','Jefferson County','Madison County','Taylor County','Lafayette County','Suwannee County','Hamilton County','Columbia County','Baker County','Nassau County','Union County']
sectionDict['Escambia County_S'] = ['Escambia County','Santa Rosa County','Okaloosa County']
sectionDict['Hillsborough County_S'] = ['Pinellas County','Dixie County','Gilchrist County','Alachua County','Levy County','Marion County','Citrus County','Hernando County','Sumter County','Lake County','Pasco County','Polk County','Hillsborough County','Manatee County','Sarasota County']
'''
===========================================================================
Data preparation
'''
# Load the Florida network (county dataframe + road edges) and convert
# population shares into relative demand.
netwrok_file='../data/floridaNetObj.p'
fl_df, fl_edges = pickle.load(open(netwrok_file, 'rb'))
fl_df = fl_df.set_index('County')
total_population = sum(fl_df.Population)
fl_df['demand'] = fl_df['Population']/total_population
fl_df['supply'] = 0
#===========================================================================
#Set ports supply (fraction of total fuel inflow handled by each port).
# Tampa = Hillsborough County
# 42.5%
fl_df.loc['Hillsborough County', 'supply'] = 0.425# 0.425
# Port Everglades = Broward County
# 40.5%
fl_df.loc['Broward County', 'supply'] = 0.405
# Jacksonville - Duval County
# 9.4%
fl_df.loc['Duval County', 'supply'] = 0.094
# entry point - Brevard County (port canaveral)
# 4.4%
fl_df.loc['Brevard County', 'supply'] = 0.044
# Pensacola = Escambia County
# 1.8%
fl_df.loc['Escambia County', 'supply'] = 0.018
# Panama City =Bay County
# 1.3% (1.4 so that they add up to 1)
fl_df.loc['Bay County', 'supply'] = 0.014
#===========================================================================
# Split the ports into their own "<county>_S" rows and give each port its
# population-weighted share of the truck fleet.
supply_nodes = fl_df.loc[fl_df['supply']>0].index.tolist()
supply_df = fl_df.loc[supply_nodes].copy()
supply_df['demand'] = 0
supply_df.index = [nn+'_S' for nn in supply_df.index]
supply_nodes = supply_df.index.tolist()
trucks = {sn:Totaltrucks*fl_df['supply'][sn[:-2]] for sn in supply_nodes}#Per Port
print(trucks)
fl_df['supply'] = 0 #Old dataframe only has demands
fl_df = fl_df.append(supply_df)
# Convert shares to barrels (x100k); supply is additionally scaled up.
fl_df['supply'] = fl_df['supply']*100000*supply_factor
fl_df['demand'] = fl_df['demand']*100000
# Demand scenarios: baseline plus 25/50/75% surge multipliers.
fractions_i = [1,1.25, 1.5, 1.75]
for i in range(N):
    demand_sce = 'demand_%i' %(i)
    fl_df[demand_sce] = fractions_i[i]*fl_df['demand']
demand_nodes = fl_df.loc[fl_df['demand']>0].index.tolist()
def build_irma_sample_path(filename):
    """
    Build a sample path of demand realizations from the recorded Hurricane
    Irma demand factors.

    Parameters
    ----------
    filename : str
        Path to a pickled dict mapping time stamps to
        {county: demand multiplier} dicts.

    Returns
    -------
    (sample_path, time_stamps)
        ``sample_path`` is a list of per-period dicts keyed by
        'demand[t,county]'; ``time_stamps`` are the matching stamps with a
        dummy ``None`` prepended for the zero-demand initial period.

    Relies on the module-level ``fl_df`` and ``demand_nodes``.
    """
    # BUG FIX: use a context manager so the file handle is closed promptly
    # (the original passed an open() call straight into pickle.load and
    # leaked the handle).
    with open(filename, 'rb') as fp:
        irma_peacks = pickle.load(fp)
    time_stamps = list(irma_peacks.keys())
    time_stamps.sort()
    sample_path = []
    for (i,t) in enumerate(time_stamps):
        data_t = irma_peacks[t]
        # Counties missing from a period keep their baseline demand (x1).
        realization_t = {'demand[%i,%s]' %(i+1,c):fl_df['demand'][c]*(data_t[c] if c in data_t else 1) for c in demand_nodes}
        sample_path.append(realization_t)
    # Period 0: no demand yet.
    realization_0 = {'demand[%i,%s]' %(0,c):0 for c in demand_nodes}
    sample_path.insert(0, realization_0)
    time_stamps.insert(0, None)
    return sample_path, time_stamps
# Replay the recorded Hurricane Irma demand path.
irma_sample_path, samplepath_time_stamps = build_irma_sample_path('../data/factor_data.p')
#%%
# for each county, calculate Q,R
# assume all shipment can be done within a period of time
Q = {}
R = {}
S = {}
h = 0.1
K = 0
p = 1
for c in demand_nodes:
    # needs the function to calculate Q and R!!!!!!
    #Q[c],R[c] = QRCal(fl_df,c,fractions_i,h,K,p,1e-3)
    demandScen = [fl_df.demand[c]*i for i in fractions_i]
    # Order-up-to level: mean + 1.64 sigma, i.e. roughly a 95% fill rate
    # under a normal approximation of scenario demand.
    S[c] = np.mean(demandScen)+np.std(demandScen)*1.64
# initialization of the supply chain
Inflow = {}
InitialTruckInv = {}
InitialInv = {}
for c in supply_nodes:
    Inflow[c] = fl_df.supply[c]
    InitialTruckInv[c] = trucks[c]
    # Ports start with 10 periods' worth of inflow on hand.
    InitialInv[c] = 10*Inflow[c]
for c in demand_nodes:
    InitialTruckInv[c] = 0
    # Counties start with 3 periods of baseline demand in stock.
    InitialInv[c] = fl_df.demand[c]*3
# simulate for each time period, the order, the inventory and the demand
InvList = {}
rawOrder = {}
orderList = {}
rawDemand = {}
unsatDemand = {}
TruckInv = {}
for tp in range(len(samplepath_time_stamps)):
    InvList[tp] = {}
    rawOrder[tp] = {}
    orderList[tp] = {}
    rawDemand[tp] = {}
    unsatDemand[tp] = {}
    TruckInv[tp] = {}
    if tp == 0:
        # to initialize the initial inventory
        for c in demand_nodes:
            InvList[tp][c] = InitialInv[c]
            TruckInv[tp][c] = InitialTruckInv[c]
            orderList[tp][c] = 0
            rawDemand[tp][c] = 0
        for c in supply_nodes:
            InvList[tp][c] = InitialInv[c]
            TruckInv[tp][c] = InitialTruckInv[c]
    else:
        # to calculate the inventory and actual order amount
        for c in demand_nodes:
            rawDemand[tp][c] = irma_sample_path[tp]['demand[{},{}]'.format(tp,c)]
            # Trucks currently at the county = last period's delivery size.
            TruckInv[tp][c] = orderList[tp - 1][c]/truck_cap
            # Inventory: leftovers after last period's demand plus arrivals.
            InvList[tp][c] = max(InvList[tp - 1][c] - rawDemand[tp - 1][c],0) + orderList[tp - 1][c]
            # Order-up-to-S replenishment request.
            if InvList[tp][c] < S[c]:
                rawOrder[tp][c] = S[c] - InvList[tp][c]
            else:
                rawOrder[tp][c] = 0
            # Unmet demand, with a small tolerance to drop round-off noise.
            if rawDemand[tp][c] >= InvList[tp][c]:
                tempusD = rawDemand[tp][c] - InvList[tp][c]
                if tempusD >= 0.001:
                    unsatDemand[tp][c] = tempusD
                else:
                    unsatDemand[tp][c] = 0
            else:
                unsatDemand[tp][c] = 0
        for c in supply_nodes:
            # Trucks returning to the port minus those dispatched last period.
            TruckInv[tp][c] = TruckInv[tp - 1][c] - sum(orderList[tp - 1][cd]/truck_cap - TruckInv[tp - 1][cd] for cd in sectionDict[c])
            InvList[tp][c] = InvList[tp - 1][c] - sum(orderList[tp - 1][cd] for cd in sectionDict[c]) + Inflow[c]
            # Shipments this period are capped by both on-hand fuel and
            # available truck capacity; ration proportionally if short.
            totalOrderCurrent = sum(rawOrder[tp][cd] for cd in sectionDict[c])
            totalCapacity = min(InvList[tp][c],TruckInv[tp][c]*truck_cap)
            if totalOrderCurrent > totalCapacity:
                for cd in sectionDict[c]:
                    orderList[tp][cd] = rawOrder[tp][cd]/totalOrderCurrent*totalCapacity
            else:
                for cd in sectionDict[c]:
                    orderList[tp][cd] = rawOrder[tp][cd]
# Collect per-period unmet demand and persist the evaluated policy.
uDList = []
for tp in range(len(samplepath_time_stamps)):
    uDList.append(unsatDemand[tp])
policy_on_irma = {'sample_path': irma_sample_path, 'unmet_demand':uDList}
with open('../data/irma_policy.p', 'wb') as fp:
pickle.dump(policy_on_irma, fp, protocol=pickle.HIGHEST_PROTOCOL) | [
"scipy.stats.norm.ppf",
"pickle.dump",
"numpy.std",
"scipy.stats.norm.cdf",
"numpy.mean",
"matplotlib.use",
"numpy.exp",
"numpy.sqrt"
] | [((273, 294), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (287, 294), False, 'import matplotlib\n'), ((622, 641), 'numpy.mean', 'np.mean', (['demandScen'], {}), '(demandScen)\n', (629, 641), True, 'import numpy as np\n'), ((654, 672), 'numpy.std', 'np.std', (['demandScen'], {}), '(demandScen)\n', (660, 672), True, 'import numpy as np\n'), ((8433, 8498), 'pickle.dump', 'pickle.dump', (['policy_on_irma', 'fp'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(policy_on_irma, fp, protocol=pickle.HIGHEST_PROTOCOL)\n', (8444, 8498), False, 'import pickle\n'), ((872, 927), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - QCurr * h / (p * mu))'], {'loc': 'mu', 'scale': 'sigma'}), '(1 - QCurr * h / (p * mu), loc=mu, scale=sigma)\n', (880, 927), False, 'from scipy.stats import norm\n'), ((5587, 5606), 'numpy.mean', 'np.mean', (['demandScen'], {}), '(demandScen)\n', (5594, 5606), True, 'import numpy as np\n'), ((446, 469), 'numpy.exp', 'np.exp', (['(-1 / 2 * z ** 2)'], {}), '(-1 / 2 * z ** 2)\n', (452, 469), True, 'import numpy as np\n'), ((464, 482), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (471, 482), True, 'import numpy as np\n'), ((5607, 5625), 'numpy.std', 'np.std', (['demandScen'], {}), '(demandScen)\n', (5613, 5625), True, 'import numpy as np\n'), ((490, 501), 'scipy.stats.norm.cdf', 'norm.cdf', (['z'], {}), '(z)\n', (498, 501), False, 'from scipy.stats import norm\n')] |
import numpy as np
import uuid
import os
import tables as t
import nose
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal
from cyclopts import cyclopts_io as cycio
class TestIO:
    """Tests for cyclopts_io.Table chunked writes and IOManager flushing."""

    def setUp(self):
        # A unique scratch HDF5 file per test.
        self.db = ".tmp_{0}".format(uuid.uuid4())
        if os.path.exists(self.db):
            os.remove(self.db)
        self.h5file = t.open_file(self.db, mode='w')
        self.pth = '/tbl'
        self.dt = np.dtype([('data', float)])

    def tearDown(self):
        self.h5file.close()
        os.remove(self.db)

    def test_create(self):
        """create() materializes the table node in the HDF5 file."""
        tbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=2, cachesize=2)
        tbl.create()
        assert_true(self.pth in self.h5file)

    def test_throw(self):
        """Flushing is expected to raise IOError in this configuration."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        data = np.empty(2, dtype=self.dt)
        data['data'] = range(2)
        cyctbl.append_data(data)
        # BUG FIX: pass the bound method itself, do not call it.  The
        # original ``assert_raises(IOError, cyctbl.flush())`` invoked
        # flush() eagerly, so assert_raises never wrapped the call.
        assert_raises(IOError, cyctbl.flush)

    def test_write_flush(self):
        """Data sits in the cache until an explicit flush writes it out."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        h5tbl = self.h5file.root.tbl
        data = np.empty(2, dtype=self.dt)
        data['data'] = range(2)
        cyctbl.append_data(data)
        assert_equal(0, h5tbl.nrows)
        cyctbl.flush()
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data, rows)

    def test_write_single(self):
        """Appending past one chunk boundary writes exactly one chunk."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        h5tbl = self.h5file.root.tbl
        data = np.empty(4, dtype=self.dt)
        data['data'] = range(4)
        cyctbl.append_data(data)
        assert_equal(3, h5tbl.nrows)
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data[:-1], rows)

    def test_write_double(self):
        """Two full chunks are written; the seventh row stays cached."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        h5tbl = self.h5file.root.tbl
        data = np.empty(7, dtype=self.dt)
        data['data'] = range(7)
        cyctbl.append_data(data)
        assert_equal(6, h5tbl.nrows)
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data[:-1], rows)

    def test_write_triple(self):
        """An exact multiple of the chunk size is written out completely."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        h5tbl = self.h5file.root.tbl
        data = np.empty(9, dtype=self.dt)
        data['data'] = range(9)
        cyctbl.append_data(data)
        assert_equal(9, h5tbl.nrows)
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data, rows)

    def test_write_triple_separate(self):
        """Two separate appends accumulate across the chunk boundary."""
        cyctbl = cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)
        cyctbl.create()
        h5tbl = self.h5file.root.tbl
        data = np.empty(7, dtype=self.dt)
        data['data'] = range(7)
        cyctbl.append_data(data)
        assert_equal(6, h5tbl.nrows)
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data[:-1], rows)
        data = np.empty(2, dtype=self.dt)
        data['data'] = range(2)
        cyctbl.append_data(data)
        assert_equal(9, h5tbl.nrows)
        exp = np.empty(9, dtype=self.dt)
        # BUG FIX: range objects cannot be concatenated with ``+`` on
        # Python 3; build explicit lists (also valid on Python 2).
        exp['data'] = list(range(7)) + list(range(2))
        rows = self.h5file.root.tbl[:]
        assert_array_equal(exp, rows)

    def test_manager(self):
        """IOManager flushes all managed tables when it is destroyed."""
        tbls = [cycio.Table(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)]
        manager = cycio.IOManager(self.h5file, tbls)
        h5tbl = self.h5file.root.tbl
        data = np.empty(2, dtype=self.dt)
        data['data'] = range(2)
        manager.tables['tbl'].append_data(data)
        assert_equal(0, h5tbl.nrows)
        del manager
        rows = self.h5file.root.tbl[:]
        assert_array_equal(data, rows)
| [
"os.remove",
"uuid.uuid4",
"cyclopts.cyclopts_io.Table",
"nose.tools.assert_true",
"numpy.empty",
"numpy.dtype",
"os.path.exists",
"numpy.testing.assert_array_equal",
"nose.tools.assert_equal",
"cyclopts.cyclopts_io.IOManager",
"tables.open_file"
] | [((322, 345), 'os.path.exists', 'os.path.exists', (['self.db'], {}), '(self.db)\n', (336, 345), False, 'import os\n'), ((400, 430), 'tables.open_file', 't.open_file', (['self.db'], {'mode': '"""w"""'}), "(self.db, mode='w')\n", (411, 430), True, 'import tables as t\n'), ((476, 503), 'numpy.dtype', 'np.dtype', (["[('data', float)]"], {}), "([('data', float)])\n", (484, 503), True, 'import numpy as np\n'), ((569, 587), 'os.remove', 'os.remove', (['self.db'], {}), '(self.db)\n', (578, 587), False, 'import os\n'), ((630, 699), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(2)', 'cachesize': '(2)'}), '(self.h5file, self.pth, self.dt, chunksize=2, cachesize=2)\n', (641, 699), True, 'from cyclopts import cyclopts_io as cycio\n'), ((729, 765), 'nose.tools.assert_true', 'assert_true', (['(self.pth in self.h5file)'], {}), '(self.pth in self.h5file)\n', (740, 765), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((811, 880), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (822, 880), True, 'from cyclopts import cyclopts_io as cycio\n'), ((957, 983), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': 'self.dt'}), '(2, dtype=self.dt)\n', (965, 983), True, 'import numpy as np\n'), ((1146, 1215), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (1157, 1215), True, 'from cyclopts import cyclopts_io as cycio\n'), ((1292, 1318), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': 'self.dt'}), '(2, dtype=self.dt)\n', (1300, 1318), True, 'import numpy as np\n'), ((1392, 1420), 'nose.tools.assert_equal', 'assert_equal', (['(0)', 'h5tbl.nrows'], {}), '(0, h5tbl.nrows)\n', (1404, 1420), False, 'from nose.tools import 
assert_true, assert_equal, assert_raises\n'), ((1491, 1521), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data', 'rows'], {}), '(data, rows)\n', (1509, 1521), False, 'from numpy.testing import assert_array_equal\n'), ((1573, 1642), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (1584, 1642), True, 'from cyclopts import cyclopts_io as cycio\n'), ((1719, 1745), 'numpy.empty', 'np.empty', (['(4)'], {'dtype': 'self.dt'}), '(4, dtype=self.dt)\n', (1727, 1745), True, 'import numpy as np\n'), ((1819, 1847), 'nose.tools.assert_equal', 'assert_equal', (['(3)', 'h5tbl.nrows'], {}), '(3, h5tbl.nrows)\n', (1831, 1847), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((1895, 1930), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data[:-1]', 'rows'], {}), '(data[:-1], rows)\n', (1913, 1930), False, 'from numpy.testing import assert_array_equal\n'), ((1982, 2051), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (1993, 2051), True, 'from cyclopts import cyclopts_io as cycio\n'), ((2128, 2154), 'numpy.empty', 'np.empty', (['(7)'], {'dtype': 'self.dt'}), '(7, dtype=self.dt)\n', (2136, 2154), True, 'import numpy as np\n'), ((2228, 2256), 'nose.tools.assert_equal', 'assert_equal', (['(6)', 'h5tbl.nrows'], {}), '(6, h5tbl.nrows)\n', (2240, 2256), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((2304, 2339), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data[:-1]', 'rows'], {}), '(data[:-1], rows)\n', (2322, 2339), False, 'from numpy.testing import assert_array_equal\n'), ((2391, 2460), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 
'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (2402, 2460), True, 'from cyclopts import cyclopts_io as cycio\n'), ((2538, 2564), 'numpy.empty', 'np.empty', (['(9)'], {'dtype': 'self.dt'}), '(9, dtype=self.dt)\n', (2546, 2564), True, 'import numpy as np\n'), ((2638, 2666), 'nose.tools.assert_equal', 'assert_equal', (['(9)', 'h5tbl.nrows'], {}), '(9, h5tbl.nrows)\n', (2650, 2666), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((2714, 2744), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data', 'rows'], {}), '(data, rows)\n', (2732, 2744), False, 'from numpy.testing import assert_array_equal\n'), ((2805, 2874), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (2816, 2874), True, 'from cyclopts import cyclopts_io as cycio\n'), ((2952, 2978), 'numpy.empty', 'np.empty', (['(7)'], {'dtype': 'self.dt'}), '(7, dtype=self.dt)\n', (2960, 2978), True, 'import numpy as np\n'), ((3052, 3080), 'nose.tools.assert_equal', 'assert_equal', (['(6)', 'h5tbl.nrows'], {}), '(6, h5tbl.nrows)\n', (3064, 3080), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((3128, 3163), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data[:-1]', 'rows'], {}), '(data[:-1], rows)\n', (3146, 3163), False, 'from numpy.testing import assert_array_equal\n'), ((3180, 3206), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': 'self.dt'}), '(2, dtype=self.dt)\n', (3188, 3206), True, 'import numpy as np\n'), ((3280, 3308), 'nose.tools.assert_equal', 'assert_equal', (['(9)', 'h5tbl.nrows'], {}), '(9, h5tbl.nrows)\n', (3292, 3308), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((3323, 3349), 'numpy.empty', 'np.empty', (['(9)'], {'dtype': 'self.dt'}), '(9, dtype=self.dt)\n', (3331, 3349), True, 'import numpy as np\n'), 
((3439, 3468), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['exp', 'rows'], {}), '(exp, rows)\n', (3457, 3468), False, 'from numpy.testing import assert_array_equal\n'), ((3603, 3637), 'cyclopts.cyclopts_io.IOManager', 'cycio.IOManager', (['self.h5file', 'tbls'], {}), '(self.h5file, tbls)\n', (3618, 3637), True, 'from cyclopts import cyclopts_io as cycio\n'), ((3690, 3716), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': 'self.dt'}), '(2, dtype=self.dt)\n', (3698, 3716), True, 'import numpy as np\n'), ((3805, 3833), 'nose.tools.assert_equal', 'assert_equal', (['(0)', 'h5tbl.nrows'], {}), '(0, h5tbl.nrows)\n', (3817, 3833), False, 'from nose.tools import assert_true, assert_equal, assert_raises\n'), ((3901, 3931), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data', 'rows'], {}), '(data, rows)\n', (3919, 3931), False, 'from numpy.testing import assert_array_equal\n'), ((297, 309), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (307, 309), False, 'import uuid\n'), ((359, 377), 'os.remove', 'os.remove', (['self.db'], {}), '(self.db)\n', (368, 377), False, 'import os\n'), ((3514, 3583), 'cyclopts.cyclopts_io.Table', 'cycio.Table', (['self.h5file', 'self.pth', 'self.dt'], {'chunksize': '(3)', 'cachesize': '(3)'}), '(self.h5file, self.pth, self.dt, chunksize=3, cachesize=3)\n', (3525, 3583), True, 'from cyclopts import cyclopts_io as cycio\n')] |
# When executed directly (not as part of a package), make the grandparent
# directory importable so sibling packages resolve.
if __name__ == '__main__' and __package__ is None:
    # NOTE: ``from os import sys, path`` works only because the os module
    # happens to re-export sys; an unusual idiom, kept byte-identical here.
    from os import sys, path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import os
import sys
import time
import random
import numpy as np
from PIL import Image
import torch.nn as nn
import math
import torch
# Class-name catalogues for the two supported label sets.
NYU14_name_list = [
    'Unknown', 'Bed', 'Books', 'Ceiling', 'Chair',
    'Floor', 'Furniture', 'Objects', 'Picture',
    'Sofa', 'Table', 'TV', 'Wall', 'Window',
]
Label11_name_list = [
    "None", "Ceiling", "Floor", "Wall", "Window",
    "Chair", "Bed", "Sofa", "Desk", "TV", "Furniture", "Objects",
]

def get_label_name_list(label_num):
    """Return the class-name list for a label set with ``label_num`` classes.

    Supports 14 (NYU-14) and 12 (Label-11 plus background); any other
    count raises NotImplementedError.
    """
    catalogues = {14: NYU14_name_list, 12: Label11_name_list}
    if label_num not in catalogues:
        raise NotImplementedError('')
    return catalogues[label_num]
def formatString(means, name: str):
    """Format ``means[name]`` as tab-separated '%5.3f' values followed by
    their overall mean.

    ``means[name]`` is expected to be a 1-D torch tensor (it must expose
    ``.numpy()``, ``.mean()`` and ``.item()``).
    """
    values = means[name]
    per_class = ''.join('%5.3f\t' % v for v in values.numpy())
    overall = '%5.3f' % values.mean().item()
    return per_class + overall
def cal_gan_from_op(x):
    """Return the ``torch.nn.init`` gain for an activation/convolution op.

    Accepts either the class itself (e.g. ``nn.ReLU``) or an instance
    (e.g. ``nn.ReLU()``).  Raises NotImplementedError for unsupported ops.
    """
    name = x.__class__.__name__
    # LeakyReLU forwards its negative slope into the gain computation
    # (only meaningful for instances, which carry ``negative_slope``).
    if x is torch.nn.LeakyReLU or name == 'LeakyReLU':
        return nn.init.calculate_gain('leaky_relu', x.negative_slope)
    if x is torch.nn.Sigmoid or name == 'Sigmoid':
        return nn.init.calculate_gain('sigmoid')
    # BUG FIX: compare against the class, not a fresh instance -- the
    # original tested ``x is torch.nn.ReLU()``, which is always False,
    # so passing the ReLU class raised NotImplementedError.
    if x is torch.nn.ReLU or name == 'ReLU':
        return nn.init.calculate_gain('relu')
    if x is torch.nn.Tanh or name == 'Tanh':
        return nn.init.calculate_gain('tanh')
    if x is torch.nn.Conv1d or name == 'Conv1d':
        return nn.init.calculate_gain('conv1d')
    if x is torch.nn.Conv2d or name == 'Conv2d':
        return nn.init.calculate_gain('conv2d')
    if x is torch.nn.Conv3d or name == 'Conv3d':
        return nn.init.calculate_gain('conv3d')
    if x is torch.nn.ConvTranspose1d or name == 'ConvTranspose1d':
        return nn.init.calculate_gain('conv_transpose1d')
    if x is torch.nn.ConvTranspose2d or name == 'ConvTranspose2d':
        return nn.init.calculate_gain('conv_transpose2d')
    if x is torch.nn.ConvTranspose3d or name == 'ConvTranspose3d':
        return nn.init.calculate_gain('conv_transpose3d')
    raise NotImplementedError('x.__class__.__name__:', name)
def print_params(named_parameters):
    """Print name, data sum and grad sum of every trainable parameter."""
    for name, param in named_parameters:
        if not param.requires_grad:
            continue
        print(name, param.data.sum(), param.grad.sum())
def mean_with_mask(x, mask):
    """Mean of ``x`` over the positions where ``mask`` is set.

    A small epsilon in the denominator keeps the result finite when the
    mask is all zeros.
    """
    masked_total = (x * mask).sum()
    weight = mask.sum() + 1e-6
    return masked_total / weight
def create_dir(dir):
    """Create ``dir`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so there is no race between the existence check
    and the creation (the original exists()/makedirs() pair could raise if
    another process created the directory in between).
    """
    os.makedirs(dir, exist_ok=True)
def create_mask(width, height, mask_width, mask_height, x=None, y=None):
    """Build a (height, width) array with a mask_height x mask_width block of
    ones; the block position is random unless ``x``/``y`` are given."""
    canvas = np.zeros((height, width))
    left = random.randint(0, width - mask_width) if x is None else x
    top = random.randint(0, height - mask_height) if y is None else y
    canvas[top:top + mask_height, left:left + mask_width] = 1
    return canvas
def maskImage(image, mask, merge=False, toFloat=False, inverse=True):
    """Zero out part of ``image`` according to ``mask``.

    With ``inverse=True`` (default) the mask is flipped first, so the region
    kept is where the *original* mask equals 1.  With ``merge=True`` the
    (possibly flipped) mask is added back onto the masked image; with
    ``toFloat=True`` the keep-mask is cast to float before multiplying.
    """
    if inverse:
        mask = 1 - mask
    keep = 1 - mask
    if toFloat:
        keep = keep.float()
    masked = image * keep
    if merge:
        return masked + mask
    return masked
def stitch_images(inputs, *outputs, img_per_row=2):
    """Tile inputs and their corresponding outputs into one RGB contact sheet.

    Each input occupies a cell of ``columns = len(outputs) + 1`` panels: the
    input followed by the matching entry of every outputs sequence.  Entries
    must be HxWxC-indexable tensors supporting ``.cpu()``.

    NOTE(review): ``width, height`` are read from shape[0], shape[1] of the
    first channel plane — this assumes square images or transposed naming;
    verify against the callers.
    """
    gap = 5  # horizontal spacing (pixels) between cells in a row
    columns = len(outputs) + 1
    width, height = inputs[0][:, :, 0].shape
    img = Image.new('RGB', (width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row)))
    images = [inputs, *outputs]
    for ix in range(len(inputs)):
        # Cell origin: column cell (ix % img_per_row), row (ix // img_per_row).
        xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap
        yoffset = int(ix / img_per_row) * height
        for cat in range(len(images)):
            # Paste input (cat=0) then each output panel side by side.
            im = np.array((images[cat][ix]).cpu()).astype(np.uint8).squeeze()
            im = Image.fromarray(im)
            img.paste(im, (xoffset + cat * width, yoffset))
    return img
def imshow(img, title=''):
    """Display ``img`` in the current matplotlib figure, titled ``title``."""
    figure = plt.gcf()
    figure.canvas.set_window_title(title)
    plt.axis('off')
    plt.imshow(img, interpolation='none')
    plt.show()
def imsave(img, path):
    """Save a tensor image (uint8-convertible) to ``path`` via PIL."""
    array = img.cpu().numpy().astype(np.uint8).squeeze()
    Image.fromarray(array).save(path)
def get_pad_same(dilation, kernel_size, stride=1, shape_in=1, shape_out=1):
    """Padding that yields a convolution output of ``shape_out`` ("same"-style).

    Computes floor((stride*(shape_out-1) + 1 - shape_in + dilation*(kernel_size-1)) / 2).
    With the defaults (stride=1, shape_in == shape_out) this reduces to
    floor(dilation * (kernel_size - 1) / 2).
    """
    total = stride * (shape_out - 1) + 1 - shape_in + dilation * (kernel_size - 1)
    # Floor-division matches int(math.floor(0.5 * total)) for int and float inputs.
    return int(total // 2)
class Progbar(object):
    """Displays a progress bar.
    Arguments:
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over time. Metrics in this list
            will be displayed as-is. All others will be averaged
            by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self, target, width=25, verbose=1, interval=0.05,
                 stateful_metrics=None):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # In-place line rewriting ('\b'/'\r') only works on real terminals
        # or notebook kernels; otherwise each update prints a new line.
        self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
                                  sys.stdout.isatty()) or
                                 'ipykernel' in sys.modules or
                                 'posix' in sys.modules)
        self._total_width = 0
        self._seen_so_far = 0
        # We use a dict + list to avoid garbage collection
        # issues found in OrderedDict
        # _values maps metric name -> [weighted sum, step count] (averaged)
        # or the raw value (stateful); _values_order keeps display order.
        self._values = {}
        self._values_order = []
        self._start = time.time()
        self._last_update = 0
    def update(self, current, values=None, silent=False):
        """Updates the progress bar.
        Arguments:
            current: Index of current step.
            values: List of tuples:
                `(name, value_for_last_step)`.
                If `name` is in `stateful_metrics`,
                `value_for_last_step` will be displayed as-is.
                Else, an average of the metric over time will be displayed.
        """
        values = values or []
        for k, v in values:
            if k not in self._values_order:
                self._values_order.append(k)
            if k not in self.stateful_metrics:
                # Averaged metric: accumulate [value * steps, steps] so the
                # displayed number is a running per-step mean.
                if k not in self._values:
                    self._values[k] = [v * (current - self._seen_so_far),
                                       current - self._seen_so_far]
                else:
                    self._values[k][0] += v * (current - self._seen_so_far)
                    self._values[k][1] += (current - self._seen_so_far)
            else:
                # Stateful metric: display the latest value verbatim.
                self._values[k] = v
        self._seen_so_far = current
        now = time.time()
        info = ' - %.0fs' % (now - self._start)
        if self.verbose == 1:
            # Throttle redraws to at most one per `interval` seconds,
            # except for the final step which always renders.
            if (now - self._last_update < self.interval and
                    self.target is not None and current < self.target):
                return
            prev_total_width = self._total_width
            if not silent:
                if self._dynamic_display:
                    sys.stdout.write('\b' * prev_total_width)
                    sys.stdout.write('\r')
                else:
                    sys.stdout.write('\n')
            if self.target is not None:
                # Render e.g. " 12/100 [==>......]"
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%d [' % (numdigits, self.target)
                bar = barstr % current
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = '%7d/Unknown' % current
            self._total_width = len(bar)
            if not silent:
                sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self._start) / current
            else:
                time_per_unit = 0
            if self.target is not None and current < self.target:
                # Remaining-time estimate, formatted h:mm:ss / m:ss / s.
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = '%d:%02d:%02d' % (eta // 3600,
                                                   (eta % 3600) // 60,
                                                   eta % 60)
                elif eta > 60:
                    eta_format = '%d:%02d' % (eta // 60, eta % 60)
                else:
                    eta_format = '%ds' % eta
                info = ' - ETA: %s' % eta_format
            else:
                if time_per_unit >= 1:
                    info += ' %.0fs/step' % time_per_unit
                elif time_per_unit >= 1e-3:
                    info += ' %.0fms/step' % (time_per_unit * 1e3)
                else:
                    info += ' %.0fus/step' % (time_per_unit * 1e6)
            for k in self._values_order:
                info += ' - %s:' % k
                if isinstance(self._values[k], list):
                    avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self._values[k]
            self._total_width += len(info)
            if prev_total_width > self._total_width:
                # Pad with spaces so a previous, longer line is fully erased.
                info += (' ' * (prev_total_width - self._total_width))
            if self.target is not None and current >= self.target:
                info += '\n'
            if not silent:
                sys.stdout.write(info)
                sys.stdout.flush()
        elif self.verbose == 2:
            # Semi-verbose: a single summary line, only once the run finishes.
            if self.target is None or current >= self.target:
                for k in self._values_order:
                    info += ' - %s:' % k
                    avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
                    if avg > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                info += '\n'
                if not silent:
                    sys.stdout.write(info)
                    sys.stdout.flush()
        self._last_update = now
    def add(self, n, values=None,silent=False):
        # Convenience wrapper: advance the bar by `n` steps.
        self.update(self._seen_so_far + n, values,silent=silent)
def volumeToPointCloud(volume:torch.Tensor):
    """Convert a (B, X, Y, Z) volume into per-voxel point coordinates.

    Every voxel contributes one point: its (x, y, z) index scaled by 100
    when the voxel value is >= 0, and the origin (0, 0, 0) otherwise.
    Points follow the flattened x-major voxel order.

    Returns:
        A (B, X*Y*Z, 3) float tensor, or None when ``volume`` is not 4-D.

    Bug fix: the original per-voxel Python loop never reset its running
    output index between batch elements, overflowing the output tensor for
    batch > 1.  This vectorised version indexes each batch independently
    (and is orders of magnitude faster than the quadruple loop).
    """
    if volume.dim() != 4:
        return None
    batch, X, Y, Z = volume.shape
    # Integer index grids for each axis, broadcast to the full (X, Y, Z) grid.
    xs = torch.arange(X).view(X, 1, 1).expand(X, Y, Z)
    ys = torch.arange(Y).view(1, Y, 1).expand(X, Y, Z)
    zs = torch.arange(Z).view(1, 1, Z).expand(X, Y, Z)
    coords = torch.stack((xs, ys, zs), dim=-1).reshape(1, X * Y * Z, 3).float() * 100
    # Mask of voxels that emit their coordinates (value >= 0); the rest stay 0.
    keep = (volume >= 0).reshape(batch, X * Y * Z, 1).float()
    return coords * keep
from torch.utils.tensorboard import SummaryWriter
if __name__ == '__main__':
    # Smoke test: random volume in [-1, 1) -> point cloud -> TensorBoard log.
    x = torch.rand(1,30,30,30)
    x *=2
    x -=1
    # print(x)
    vertices = volumeToPointCloud(x)
    # print(vertices)
    # Writes the mesh and a scalar into ./testlog for TensorBoard inspection.
    writter = SummaryWriter( 'testlog')
    writter.add_mesh('mesh', vertices)
    writter.add_scalar('scalar', 1.0)
    writter.close()
| [
"os.path.abspath",
"torch.nn.ReLU",
"os.makedirs",
"random.randint",
"os.sys.stdout.isatty",
"os.path.exists",
"numpy.zeros",
"math.floor",
"time.time",
"PIL.Image.fromarray",
"os.sys.stdout.write",
"torch.FloatTensor",
"os.sys.stdout.flush",
"torch.utils.tensorboard.SummaryWriter",
"tor... | [((2973, 2998), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (2981, 2998), True, 'import numpy as np\n'), ((12622, 12647), 'torch.rand', 'torch.rand', (['(1)', '(30)', '(30)', '(30)'], {}), '(1, 30, 30, 30)\n', (12632, 12647), False, 'import torch\n'), ((12755, 12779), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""testlog"""'], {}), "('testlog')\n", (12768, 12779), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1315, 1369), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""leaky_relu"""', 'x.negative_slope'], {}), "('leaky_relu', x.negative_slope)\n", (1337, 1369), True, 'import torch.nn as nn\n'), ((1451, 1484), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1473, 1484), True, 'import torch.nn as nn\n'), ((1564, 1594), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (1586, 1594), True, 'import torch.nn as nn\n'), ((1672, 1702), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""tanh"""'], {}), "('tanh')\n", (1694, 1702), True, 'import torch.nn as nn\n'), ((1784, 1816), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv1d"""'], {}), "('conv1d')\n", (1806, 1816), True, 'import torch.nn as nn\n'), ((1898, 1930), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv2d"""'], {}), "('conv2d')\n", (1920, 1930), True, 'import torch.nn as nn\n'), ((2012, 2044), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv3d"""'], {}), "('conv3d')\n", (2034, 2044), True, 'import torch.nn as nn\n'), ((2143, 2185), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv_transpose1d"""'], {}), "('conv_transpose1d')\n", (2165, 2185), True, 'import torch.nn as nn\n'), ((2284, 2326), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv_transpose2d"""'], {}), "('conv_transpose2d')\n", (2306, 2326), True, 'import 
torch.nn as nn\n'), ((2425, 2467), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""conv_transpose3d"""'], {}), "('conv_transpose3d')\n", (2447, 2467), True, 'import torch.nn as nn\n'), ((2837, 2856), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (2851, 2856), False, 'import os\n'), ((2866, 2882), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (2877, 2882), False, 'import os\n'), ((3036, 3073), 'random.randint', 'random.randint', (['(0)', '(width - mask_width)'], {}), '(0, width - mask_width)\n', (3050, 3073), False, 'import random\n'), ((3111, 3150), 'random.randint', 'random.randint', (['(0)', '(height - mask_height)'], {}), '(0, height - mask_height)\n', (3125, 3150), False, 'import random\n'), ((5003, 5098), 'math.floor', 'math.floor', (['(0.5 * (stride * (shape_out - 1) + 1 - shape_in + dilation * (kernel_size - 1))\n )'], {}), '(0.5 * (stride * (shape_out - 1) + 1 - shape_in + dilation * (\n kernel_size - 1)))\n', (5013, 5098), False, 'import math\n'), ((6527, 6538), 'time.time', 'time.time', ([], {}), '()\n', (6536, 6538), False, 'import time\n'), ((7668, 7679), 'time.time', 'time.time', ([], {}), '()\n', (7677, 7679), False, 'import time\n'), ((11766, 11800), 'torch.zeros', 'torch.zeros', (['[batch, X * Y * Z, 3]'], {}), '([batch, X * Y * Z, 3])\n', (11777, 11800), False, 'import torch\n'), ((1497, 1512), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1510, 1512), False, 'import torch\n'), ((4229, 4248), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (4244, 4248), False, 'from PIL import Image\n'), ((126, 148), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (138, 148), False, 'from os import sys, path\n'), ((6146, 6165), 'os.sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (6163, 6165), False, 'from os import sys, path\n'), ((8982, 9003), 'os.sys.stdout.write', 'sys.stdout.write', (['bar'], {}), '(bar)\n', (8998, 9003), False, 'from os import sys, path\n'), 
((10785, 10807), 'os.sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (10801, 10807), False, 'from os import sys, path\n'), ((10824, 10842), 'os.sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10840, 10842), False, 'from os import sys, path\n'), ((8052, 8095), 'os.sys.stdout.write', 'sys.stdout.write', (["('\\x08' * prev_total_width)"], {}), "('\\x08' * prev_total_width)\n", (8068, 8095), False, 'from os import sys, path\n'), ((8114, 8136), 'os.sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (8130, 8136), False, 'from os import sys, path\n'), ((8179, 8201), 'os.sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (8195, 8201), False, 'from os import sys, path\n'), ((11340, 11362), 'os.sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (11356, 11362), False, 'from os import sys, path\n'), ((11383, 11401), 'os.sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11399, 11401), False, 'from os import sys, path\n'), ((8284, 8305), 'numpy.log10', 'np.log10', (['self.target'], {}), '(self.target)\n', (8292, 8305), True, 'import numpy as np\n'), ((12053, 12099), 'torch.FloatTensor', 'torch.FloatTensor', (['[x * 100, y * 100, z * 100]'], {}), '([x * 100, y * 100, z * 100])\n', (12070, 12099), False, 'import torch\n'), ((12336, 12364), 'torch.FloatTensor', 'torch.FloatTensor', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (12353, 12364), False, 'import torch\n')] |
"""train.py
Developer: <NAME>
Date: 2-19-2022
Description: Tensorflow API
"""
################################## Imports ###################################
import os
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score
##############################################################################
################################## Globals ###################################
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF info/warning logs
tf.random.set_seed(42)  # reproducible initialisation/shuffling
##############################################################################
# Binary classifier: two hidden ReLU layers feeding one sigmoid output unit.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(13, activation='relu'),
    tf.keras.layers.Dense(15, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(
    loss=tf.keras.losses.poisson,
    # 'learning_rate' replaces the deprecated 'lr' keyword (removed in Keras 3).
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.05)
)
def train(x_train, y_train, x_test, y_test):
    """Fit the module-level model, then print a confusion matrix and
    accuracy/precision/recall on the held-out test split."""
    model.fit(x_train, y_train, epochs=50, verbose=0)
    probabilities = np.ravel(model.predict(x_test))
    prediction_classes = [1 if p > 0.5 else 0 for p in probabilities]
    print(confusion_matrix(y_test, prediction_classes))
    print(f'Accuracy: {accuracy_score(y_test, prediction_classes):.2f}')
    print(f'Precision: {precision_score(y_test, prediction_classes):.2f}')
    print(f'Recall: {recall_score(y_test, prediction_classes):.2f}')
| [
"tensorflow.random.set_seed",
"numpy.ravel",
"tensorflow.keras.layers.Dense",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.recall_score",
"tensorflow.keras.optimizers.Adam",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix"
] | [((544, 566), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (562, 566), True, 'import tensorflow as tf\n'), ((685, 729), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(13)'], {'activation': '"""relu"""'}), "(13, activation='relu')\n", (706, 729), True, 'import tensorflow as tf\n'), ((736, 780), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15)'], {'activation': '"""relu"""'}), "(15, activation='relu')\n", (757, 780), True, 'import tensorflow as tf\n'), ((787, 833), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (808, 833), True, 'import tensorflow as tf\n'), ((906, 939), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.05)'}), '(lr=0.05)\n', (930, 939), True, 'import tensorflow as tf\n'), ((1207, 1251), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'prediction_classes'], {}), '(y_test, prediction_classes)\n', (1223, 1251), False, 'from sklearn.metrics import confusion_matrix\n'), ((1165, 1186), 'numpy.ravel', 'np.ravel', (['predictions'], {}), '(predictions)\n', (1173, 1186), True, 'import numpy as np\n'), ((1279, 1321), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'prediction_classes'], {}), '(y_test, prediction_classes)\n', (1293, 1321), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((1354, 1397), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'prediction_classes'], {}), '(y_test, prediction_classes)\n', (1369, 1397), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((1427, 1467), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'prediction_classes'], {}), '(y_test, prediction_classes)\n', (1439, 1467), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n')] |
import math
import random
import os
import json
from time import time
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from scipy.ndimage import gaussian_filter
from torch.utils.data import DataLoader, Dataset
from transformers import GPT2TokenizerFast
import torch.nn.functional as F
import torch as th
def _load_data(
    index_dir=None,
    data_dir=None,
    batch_size=1,
    image_size=64,
    deterministic=False,
    random_crop=False,
    random_flip=True,
    text_length=None,
    small_size=0,
    text_loader=False,
    text_aug_factor=1,
    phase='train',
    gaussian_blur=False,
    num_workers=8,
):
    """Yield (images, kwargs) batches forever.

    Each pair combines an NCHW float image tensor with a kwargs dict of
    conditioning tensors produced by the dataset.  The dataset is sharded
    across MPI ranks.

    :param index_dir: JSON index of text-file paths (alternative to data_dir).
    :param data_dir: dataset directory scanned recursively for texts.
    :param batch_size: batch size of each returned pair.
    :param image_size: target square image size.
    :param deterministic: if True, iterate in a fixed order (no shuffling).
    :param random_crop: if True, randomly crop images for augmentation.
    :param random_flip: if True, randomly flip images for augmentation.
    """
    if not data_dir and not index_dir:
        raise ValueError("unspecified data directory")
    dataset_cls = TextDataset if text_loader else ImageTextDataset
    dataset = dataset_cls(
        image_size,
        index_dir=index_dir,
        data_dir=data_dir,
        shard=MPI.COMM_WORLD.Get_rank(),
        num_shards=MPI.COMM_WORLD.Get_size(),
        random_crop=random_crop,
        random_flip=random_flip,
        text_length=text_length,
        phase=phase,
        small_size=small_size,
        gaussian_blur=gaussian_blur,
    )
    # Shuffling was the only difference between the two original branches.
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=not deterministic,
        num_workers=num_workers,
        drop_last=True,
    )
    while True:  # infinite stream: restart the loader once exhausted
        yield from loader
def _list_text_files_recursively(data_dir):
    """Recursively collect the paths of all ``.txt`` files under ``data_dir``.

    Entries are visited in sorted order; directories are descended into.
    Uses blobfile so both local paths and blob-store URLs work.
    (Removed an unused ``t = time()`` left over in the original.)
    """
    results = []
    for entry in sorted(bf.listdir(data_dir)):
        full_path = bf.join(data_dir, entry)
        ext = entry.split(".")[-1]
        if "." in entry and ext.lower() in ["txt"]:
            results.append(full_path)
        elif bf.isdir(full_path):
            results.extend(_list_text_files_recursively(full_path))
    return results
class ImageTextDataset(Dataset):
    """Paired image/text dataset.

    ``__getitem__`` returns ``(image, out_dict)`` where ``image`` is a CHW
    float32 array scaled to [-1, 1] and ``out_dict["y"]`` holds GPT-2 token
    ids truncated/padded to ``text_length`` (pad id = vocab_size - 1).  When
    ``small_size > 0`` a copy of the image (optionally Gaussian-blurred as
    training augmentation) is stored under ``out_dict["low_res"]``.
    """
    def __init__(
        self,
        resolution,
        index_dir=None,
        data_dir=None,
        shard=0,
        num_shards=1,
        random_crop=False,
        random_flip=True,
        text_length=None,
        phase='train',
        small_size=0,
        gaussian_blur=False,
    ):
        super().__init__()
        self.resolution = resolution
        self.phase = phase
        self.gaussian_blur = gaussian_blur
        self.small_size = small_size
        if index_dir is not None:
            # Index file: JSON list of text-file paths; each shard takes
            # every num_shards-th entry starting at its own rank.
            with open(index_dir, 'r') as f:
                indices = json.load(f)
            self.local_texts = indices[shard:][::num_shards]
        else:
            txts = _list_text_files_recursively(data_dir)
            self.local_texts = txts[shard:][::num_shards]
        random.shuffle(self.local_texts)
        self.random_crop = random_crop
        self.random_flip = random_flip
        self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        self.text_length = text_length
    def __len__(self):
        return len(self.local_texts)
    def __getitem__(self, idx):
        path = self.local_texts[idx]
        out_dict = {}
        # load text
        with open(path) as f:
            text = f.read()
        # Tokenise, truncate to text_length-1 tokens, pad with the last vocab id.
        text = self.tokenizer(text)["input_ids"]
        text = text[:self.text_length-1]
        text = text + [self.tokenizer.vocab_size - 1] * (self.text_length - len(text))
        out_dict["y"] = np.array(text, dtype=np.int32)
        # load image
        # Mirror path: same stem under /images/ with whichever extension exists.
        path = os.path.splitext(path)[0]
        path = path.replace('/texts/', '/images/')
        for ext in [".jpg", ".jpeg", ".png", ".gif"]:
            cur_path = path + ext
            if os.path.exists(cur_path):
                path = cur_path
                break
        with bf.BlobFile(path, "rb") as f:
            pil_image = Image.open(f)
            pil_image.load()
        pil_image = pil_image.convert("RGB")
        if self.random_crop:
            arr = random_crop_arr(pil_image, self.resolution)
        else:
            arr = center_crop_arr(pil_image, self.resolution)
        if self.random_flip and random.random() < 0.5:
            arr = arr[:, ::-1]  # horizontal flip with probability 0.5
        arr = arr.astype(np.float32) / 127.5 - 1  # scale uint8 to [-1, 1]
        image = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
        if self.small_size > 0:
            # Conditioning copy for super-resolution; during training, blur
            # it with p=0.5 using sigma ~ U(0.4, 0.6) when gaussian_blur is on.
            if self.phase == 'train' and self.gaussian_blur and random.uniform(0, 1) < 0.5:
                sigma = random.uniform(0.4, 0.6)
                noised_image = image.copy()
                out_dict["low_res"] = gaussian_filter(noised_image, [0, sigma, sigma], truncate=1.0)
            else:
                out_dict["low_res"] = image.copy()
        return image, out_dict
class TextDataset(ImageTextDataset):
    """Text-only variant: __getitem__ returns just the padded token-id array."""
    def __getitem__(self, idx):
        with open(self.local_texts[idx]) as handle:
            raw = handle.read()
        tokens = self.tokenizer(raw)["input_ids"][:self.text_length - 1]
        pad_id = self.tokenizer.vocab_size - 1
        tokens = tokens + [pad_id] * (self.text_length - len(tokens))
        return np.array(tokens, dtype=np.int32)
def center_crop_arr(pil_image, image_size):
    """Center-crop ``pil_image`` to an image_size x image_size array.

    The shorter side is first resized to ``image_size``.  Downsampling is
    done by repeated BOX halving before one final BICUBIC resize — a quality
    workaround for PIL versions without the ``reducing_gap`` argument.
    """
    while min(*pil_image.size) >= 2 * image_size:
        half = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(half, resample=Image.BOX)
    scale = image_size / min(*pil_image.size)
    final = tuple(round(side * scale) for side in pil_image.size)
    pil_image = pil_image.resize(final, resample=Image.BICUBIC)
    arr = np.array(pil_image)
    top = (arr.shape[0] - image_size) // 2
    left = (arr.shape[1] - image_size) // 2
    return arr[top:top + image_size, left:left + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
    """Random-scale, random-position crop to image_size x image_size.

    The shorter side is resized to a random value in
    [image_size / max_crop_frac, image_size / min_crop_frac], then a random
    window is cut out.  Downsampling halves with BOX filtering before the
    final BICUBIC resize (quality workaround for PIL versions without
    ``reducing_gap``).  Random calls occur in the same order as the
    original implementation.
    """
    min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
    max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
    smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
    while min(*pil_image.size) >= 2 * smaller_dim_size:
        half = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(half, resample=Image.BOX)
    scale = smaller_dim_size / min(*pil_image.size)
    final = tuple(round(side * scale) for side in pil_image.size)
    pil_image = pil_image.resize(final, resample=Image.BICUBIC)
    arr = np.array(pil_image)
    crop_y = random.randrange(arr.shape[0] - image_size + 1)
    crop_x = random.randrange(arr.shape[1] - image_size + 1)
    return arr[crop_y:crop_y + image_size, crop_x:crop_x + image_size]
def load_data(**kwargs):
    """Wrap ``_load_data``: optionally downsample the conditioning image and
    augment each batch with extra text-only samples.

    Yields (image_batch, model_kwargs) pairs forever.  With
    ``text_aug_factor > 1`` an auxiliary text-only loader supplies
    (factor - 1) * batch_size additional captions that are concatenated onto
    ``model_kwargs["y"]`` each step.
    """
    data = _load_data(**kwargs)
    # Idiom fix: dict.get replaces the `"k" in kwargs.keys()` ternaries.
    small_size = kwargs.get("small_size", 0)
    text_aug_factor = kwargs.get("text_aug_factor", 1)
    if text_aug_factor > 1:
        batch_size = kwargs["batch_size"]
        aug_text_data = _load_data(**{**kwargs, **{"batch_size": batch_size * (text_aug_factor - 1), "text_loader": True, "num_workers": 64}})
    for large_batch, model_kwargs in data:
        if small_size > 0:
            model_kwargs["low_res"] = F.interpolate(model_kwargs["low_res"], small_size, mode="area")
        if text_aug_factor > 1:
            aug_text = next(aug_text_data)
            model_kwargs["y"] = th.cat([model_kwargs["y"], aug_text])
        yield large_batch, model_kwargs
"transformers.GPT2TokenizerFast.from_pretrained",
"random.shuffle",
"blobfile.BlobFile",
"blobfile.join",
"torch.cat",
"torch.utils.data.DataLoader",
"scipy.ndimage.gaussian_filter",
"numpy.transpose",
"os.path.exists",
"mpi4py.MPI.COMM_WORLD.Get_size",
"blobfile.isdir",
"math.ceil",
"mpi4py... | [((2383, 2389), 'time.time', 'time', ([], {}), '()\n', (2387, 2389), False, 'from time import time\n'), ((6530, 6549), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (6538, 6549), True, 'import numpy as np\n'), ((6828, 6865), 'math.ceil', 'math.ceil', (['(image_size / max_crop_frac)'], {}), '(image_size / max_crop_frac)\n', (6837, 6865), False, 'import math\n'), ((6893, 6930), 'math.ceil', 'math.ceil', (['(image_size / min_crop_frac)'], {}), '(image_size / min_crop_frac)\n', (6902, 6930), False, 'import math\n'), ((6954, 7018), 'random.randrange', 'random.randrange', (['min_smaller_dim_size', '(max_smaller_dim_size + 1)'], {}), '(min_smaller_dim_size, max_smaller_dim_size + 1)\n', (6970, 7018), False, 'import random\n'), ((7573, 7592), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (7581, 7592), True, 'import numpy as np\n'), ((7606, 7653), 'random.randrange', 'random.randrange', (['(arr.shape[0] - image_size + 1)'], {}), '(arr.shape[0] - image_size + 1)\n', (7622, 7653), False, 'import random\n'), ((7667, 7714), 'random.randrange', 'random.randrange', (['(arr.shape[1] - image_size + 1)'], {}), '(arr.shape[1] - image_size + 1)\n', (7683, 7714), False, 'import random\n'), ((2002, 2105), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'drop_last': '(True)'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=\n num_workers, drop_last=True)\n', (2012, 2105), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2150, 2252), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers', 'drop_last': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True, num_workers=\n num_workers, drop_last=True)\n', (2160, 2252), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2414, 2434), 'blobfile.listdir', 'bf.listdir', 
(['data_dir'], {}), '(data_dir)\n', (2424, 2434), True, 'import blobfile as bf\n'), ((2457, 2481), 'blobfile.join', 'bf.join', (['data_dir', 'entry'], {}), '(data_dir, entry)\n', (2464, 2481), True, 'import blobfile as bf\n'), ((3568, 3600), 'random.shuffle', 'random.shuffle', (['self.local_texts'], {}), '(self.local_texts)\n', (3582, 3600), False, 'import random\n'), ((3704, 3745), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (3737, 3745), False, 'from transformers import GPT2TokenizerFast\n'), ((4228, 4258), 'numpy.array', 'np.array', (['text'], {'dtype': 'np.int32'}), '(text, dtype=np.int32)\n', (4236, 4258), True, 'import numpy as np\n'), ((5065, 5093), 'numpy.transpose', 'np.transpose', (['arr', '[2, 0, 1]'], {}), '(arr, [2, 0, 1])\n', (5077, 5093), True, 'import numpy as np\n'), ((5902, 5932), 'numpy.array', 'np.array', (['text'], {'dtype': 'np.int32'}), '(text, dtype=np.int32)\n', (5910, 5932), True, 'import numpy as np\n'), ((1696, 1721), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (1719, 1721), False, 'from mpi4py import MPI\n'), ((1742, 1767), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (1765, 1767), False, 'from mpi4py import MPI\n'), ((2620, 2639), 'blobfile.isdir', 'bf.isdir', (['full_path'], {}), '(full_path)\n', (2628, 2639), True, 'import blobfile as bf\n'), ((4304, 4326), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (4320, 4326), False, 'import os\n'), ((4492, 4516), 'os.path.exists', 'os.path.exists', (['cur_path'], {}), '(cur_path)\n', (4506, 4516), False, 'import os\n'), ((4594, 4617), 'blobfile.BlobFile', 'bf.BlobFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (4605, 4617), True, 'import blobfile as bf\n'), ((4648, 4661), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (4658, 4661), False, 'from PIL import Image\n'), ((8341, 8404), 'torch.nn.functional.interpolate', 
'F.interpolate', (["model_kwargs['low_res']", 'small_size'], {'mode': '"""area"""'}), "(model_kwargs['low_res'], small_size, mode='area')\n", (8354, 8404), True, 'import torch.nn.functional as F\n'), ((8512, 8549), 'torch.cat', 'th.cat', (["[model_kwargs['y'], aug_text]"], {}), "([model_kwargs['y'], aug_text])\n", (8518, 8549), True, 'import torch as th\n'), ((3336, 3348), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3345, 3348), False, 'import json\n'), ((4937, 4952), 'random.random', 'random.random', ([], {}), '()\n', (4950, 4952), False, 'import random\n'), ((5251, 5275), 'random.uniform', 'random.uniform', (['(0.4)', '(0.6)'], {}), '(0.4, 0.6)\n', (5265, 5275), False, 'import random\n'), ((5358, 5420), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['noised_image', '[0, sigma, sigma]'], {'truncate': '(1.0)'}), '(noised_image, [0, sigma, sigma], truncate=1.0)\n', (5373, 5420), False, 'from scipy.ndimage import gaussian_filter\n'), ((5199, 5219), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (5213, 5219), False, 'import random\n')] |
import numpy as np
from stores import locations
import matplotlib.pyplot as plt
from itertools import combinations
from sklearn.cluster import KMeans
from credentials import API_KEY
import urllib.request
import json
import pandas as pd
import sys
from htmlparser import duration
COLOR = ('red', 'blue')
locations = np.array(locations)
#seeds = [(30, 33), (30, 6), (23, 28), (19, 31), (23, 28), ]
# Google Directions request with optimized waypoint order; origin == destination.
URL = 'https://maps.googleapis.com/maps/api/directions/json?key={0}&origin={1}&destination={1}&waypoints=optimize:true|{2}'
results = []
best = 1e9
# Try every pair of locations as the two initial KMeans centroids.
seeds = combinations(range(len(locations)), 2)
for seed in list(seeds):
    start = np.array([locations[seed[0]], locations[seed[1]]])
    clusters = KMeans(init=start, n_init=1, n_clusters=2, random_state=0).fit(locations).labels_
    s = clusters.sum()
    # Skip unbalanced splits: cluster 1 must hold between 13 and 23 stops.
    if s < 13 or s > 23:
        continue
    print(s, flush=True)
    both = 0
    html = ['','']
    t = [0, 0]
    for i in range(2):
        group = locations[clusters==i]
        waypoints = '|'.join([('%s,%s' % tuple(x)) for x in group[1:]])
        # NOTE(review): 'start' is reused here as an origin string, shadowing
        # the centroid array above (re-created on each outer iteration).
        start ='%s,%s' % tuple(group[0])
        url = URL.format(API_KEY, start, waypoints)
        html[i] = urllib.request.urlopen(url).read()
        t[i] = duration(html[i])
        both += t[i]
    results.append((seed[0], seed[1], both, t[0], t[1]))
    print(results[-1], flush=True)
    if both < best:
        # Keep the HTML responses of the best (shortest total duration) split.
        best = both
        open('temp0.html', 'wb').write(html[0])
        open('temp1.html', 'wb').write(html[1])
df = pd.DataFrame(results)
df.to_csv('results.csv')
| [
"pandas.DataFrame",
"sklearn.cluster.KMeans",
"numpy.array",
"htmlparser.duration"
] | [((316, 335), 'numpy.array', 'np.array', (['locations'], {}), '(locations)\n', (324, 335), True, 'import numpy as np\n'), ((1488, 1509), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (1500, 1509), True, 'import pandas as pd\n'), ((634, 684), 'numpy.array', 'np.array', (['[locations[seed[0]], locations[seed[1]]]'], {}), '([locations[seed[0]], locations[seed[1]]])\n', (642, 684), True, 'import numpy as np\n'), ((1215, 1232), 'htmlparser.duration', 'duration', (['html[i]'], {}), '(html[i])\n', (1223, 1232), False, 'from htmlparser import duration\n'), ((700, 758), 'sklearn.cluster.KMeans', 'KMeans', ([], {'init': 'start', 'n_init': '(1)', 'n_clusters': '(2)', 'random_state': '(0)'}), '(init=start, n_init=1, n_clusters=2, random_state=0)\n', (706, 758), False, 'from sklearn.cluster import KMeans\n')] |
import os
import os.path
import hashlib
import errno
import torch
from torchvision import transforms
import numpy as np
import random
import PIL
from PIL import Image, ImageEnhance, ImageOps
from torchvision import transforms as T
import cv2
# Per-dataset channel means/stds (for transforms.Normalize) and square image size.
dataset_stats = {
    'CIFAR10' : {'mean': (0.49139967861519607, 0.48215840839460783, 0.44653091444546567),
                 'std' : (0.2470322324632819, 0.24348512800005573, 0.26158784172796434),
                 'size' : 32},
    'CIFAR100': {'mean': (0.5070751592371323, 0.48654887331495095, 0.4409178433670343),
                 'std' : (0.2673342858792409, 0.25643846291708816, 0.2761504713256834),
                 'size' : 32},
    'TinyIMNET': {'mean': (0.4389, 0.4114, 0.3682),
                  'std' : (0.2402, 0.2350, 0.2268),
                  'size' : 64},
}
# k transformations
class TransformK:
    """Produce k views of one input: the primary transform once, then the
    secondary transform k-1 times, returned as a list."""

    def __init__(self, transform, transformb, k):
        self.transform = transform
        self.transformb = transformb
        self.k = k

    def __call__(self, inp):
        # Primary view first, then the k-1 secondary views, so any RNG inside
        # the transforms is consumed in the same order as before.
        views = [self.transform(inp)]
        views.extend(self.transformb(inp) for _ in range(self.k - 1))
        return views
# transformations
def get_transform(dataset='cifar100', phase='test', aug=True, hard_aug=False):
    """Build the torchvision preprocessing pipeline for *dataset*/*phase*.

    Training with augmentation enabled adds color jitter plus either the
    aggressive RandomAugment+Cutout combo (hard_aug) or the standard
    flip+crop recipe; every pipeline ends with ToTensor + Normalize.

    NOTE(review): the default 'cifar100' does not match the dataset_stats
    keys ('CIFAR100' etc.) — callers appear to always pass the exact key.
    """
    stats = dataset_stats[dataset]
    tail = [
        transforms.ToTensor(),
        transforms.Normalize(stats['mean'], stats['std']),
    ]
    # Plain normalization for eval, MNIST-style data, or aug disabled.
    if phase != 'train' or 'mnist' in dataset or not aug:
        return transforms.Compose(tail)
    if hard_aug:
        pipeline = [
            transforms.ColorJitter(brightness=63/255, contrast=0.8),
            RandomAugment(),
        ] + tail + [Cutout()]
    else:
        pipeline = [
            transforms.ColorJitter(brightness=63/255, contrast=0.8),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomCrop(stats['size'], padding=4),
        ] + tail
    return transforms.Compose(pipeline)
def check_integrity(fpath, md5):
    """Return True iff *fpath* is an existing file whose MD5 hex digest
    equals *md5*; False otherwise."""
    if not os.path.isfile(fpath):
        return False
    digest = hashlib.md5()
    # Hash in 1 MB chunks so large files are never fully loaded into memory.
    with open(fpath, 'rb') as f:
        while True:
            block = f.read(1024 * 1024)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest() == md5
def download_url(url, root, filename, md5):
    """Download *url* into *root*/*filename*, skipping the download when a
    previously fetched copy already passes the MD5 integrity check.

    If an https download fails, retries once over plain http; any other
    failure propagates to the caller.
    """
    from six.moves import urllib
    root = os.path.expanduser(root)
    fpath = os.path.join(root, filename)
    try:
        os.makedirs(root)
    except OSError as e:
        # An already-existing directory is fine; anything else is fatal.
        if e.errno != errno.EEXIST:
            raise
    # Reuse a previously downloaded file when its checksum still matches.
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(url, fpath)
        # Was a bare `except:` that silently swallowed every failure for
        # non-https URLs; catch only download errors and re-raise otherwise.
        except (urllib.error.URLError, IOError):
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(url, fpath)
            else:
                raise
def list_dir(root, prefix=False):
    """List all directories at a given root.

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result,
            otherwise only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root)
             if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        return [os.path.join(root, name) for name in names]
    return names
def list_files(root, suffix, prefix=False):
    """List all files ending with a suffix at a given root.

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or
            ('.jpg', '.png'). Passed directly to ``str.endswith``.
        prefix (bool, optional): If true, prepends the path to each result,
            otherwise only returns the name of the files found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root)
             if os.path.isfile(os.path.join(root, entry))
             and entry.endswith(suffix)]
    if prefix is True:
        return [os.path.join(root, name) for name in names]
    return names
"""
Code adapted from
https://github.com/4uiiurz1/pytorch-auto-augment/blob/master/auto_augment.py
"""
class RandomAugment:
    """
    Random aggressive data augmentation transformer.
    """
    def __init__(self, N=2, M=9):
        """
        :param N: int, [1, #ops]. max number of operations
        :param M: int, [0, 9]. max magnitude of operations
        """
        # Dispatch table: operation name -> callable(img, magnitude).
        # __call__ samples N entries from this table per image.
        self.operations = {
            'Identity': lambda img, magnitude: self.identity(img, magnitude),
            'ShearX': lambda img, magnitude: self.shear_x(img, magnitude),
            'ShearY': lambda img, magnitude: self.shear_y(img, magnitude),
            'TranslateX': lambda img, magnitude: self.translate_x(img, magnitude),
            'TranslateY': lambda img, magnitude: self.translate_y(img, magnitude),
            'Rotate': lambda img, magnitude: self.rotate(img, magnitude),
            'Mirror': lambda img, magnitude: self.mirror(img, magnitude),
            'AutoContrast': lambda img, magnitude: self.auto_contrast(img, magnitude),
            'Equalize': lambda img, magnitude: self.equalize(img, magnitude),
            'Solarize': lambda img, magnitude: self.solarize(img, magnitude),
            'Posterize': lambda img, magnitude: self.posterize(img, magnitude),
            'Invert': lambda img, magnitude: self.invert(img, magnitude),
            'Contrast': lambda img, magnitude: self.contrast(img, magnitude),
            'Color': lambda img, magnitude: self.color(img, magnitude),
            'Brightness': lambda img, magnitude: self.brightness(img, magnitude),
            'Sharpness': lambda img, magnitude: self.sharpness(img, magnitude)
        }
        # Clamp hyper-parameters to the documented ranges. M <= 9 guarantees
        # magnitudes[magnitude + 1] below never indexes past the 11-entry
        # (indices 0..10) magnitude ladders.
        self.N = np.clip(N, a_min=1, a_max=len(self.operations))
        self.M = np.clip(M, a_min=0, a_max=9)
    def identity(self, img, magnitude):
        # No-op member of the operation pool.
        return img
    def transform_matrix_offset_center(self, matrix, x, y):
        # Conjugate the affine `matrix` with a translation so the transform
        # is applied about the image center instead of the top-left corner.
        o_x = float(x) / 2 + 0.5
        o_y = float(y) / 2 + 0.5
        offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
        reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
        transform_matrix = offset_matrix @ matrix @ reset_matrix
        return transform_matrix
    def shear_x(self, img, magnitude):
        # Transpose before/after so the affine acts in the swapped-axis frame
        # (mirrors the reference auto-augment implementation this is adapted from).
        img = img.transpose(Image.TRANSPOSE)
        # Random sign flips the whole 11-step magnitude ladder at once; the
        # actual shear factor is drawn uniformly between adjacent steps.
        magnitudes = np.random.choice([-1.0, 1.0]) * np.linspace(0, 0.3, 11)
        transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
        transform_matrix = self.transform_matrix_offset_center(transform_matrix, img.size[0], img.size[1])
        img = img.transform(img.size, Image.AFFINE, transform_matrix.flatten()[:6], Image.BICUBIC)
        img = img.transpose(Image.TRANSPOSE)
        return img
    def shear_y(self, img, magnitude):
        img = img.transpose(Image.TRANSPOSE)
        magnitudes = np.random.choice([-1.0, 1.0]) * np.linspace(0, 0.3, 11)
        transform_matrix = np.array([[1, 0, 0],
                                     [random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 1, 0],
                                     [0, 0, 1]])
        transform_matrix = self.transform_matrix_offset_center(transform_matrix, img.size[0], img.size[1])
        img = img.transform(img.size, Image.AFFINE, transform_matrix.flatten()[:6], Image.BICUBIC)
        img = img.transpose(Image.TRANSPOSE)
        return img
    def translate_x(self, img, magnitude):
        img = img.transpose(Image.TRANSPOSE)
        magnitudes = np.random.choice([-1.0, 1.0]) * np.linspace(0, 0.3, 11)
        # Translation offset is a fraction (up to 30%) of the image extent.
        transform_matrix = np.array([[1, 0, 0],
                                     [0, 1, img.size[1]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
                                     [0, 0, 1]])
        transform_matrix = self.transform_matrix_offset_center(transform_matrix, img.size[0], img.size[1])
        img = img.transform(img.size, Image.AFFINE, transform_matrix.flatten()[:6], Image.BICUBIC)
        img = img.transpose(Image.TRANSPOSE)
        return img
    def translate_y(self, img, magnitude):
        img = img.transpose(Image.TRANSPOSE)
        magnitudes = np.random.choice([-1.0, 1.0]) * np.linspace(0, 0.3, 11)
        transform_matrix = np.array([[1, 0, img.size[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
                                     [0, 1, 0],
                                     [0, 0, 1]])
        transform_matrix = self.transform_matrix_offset_center(transform_matrix, img.size[0], img.size[1])
        img = img.transform(img.size, Image.AFFINE, transform_matrix.flatten()[:6], Image.BICUBIC)
        img = img.transpose(Image.TRANSPOSE)
        return img
    def rotate(self, img, magnitude):
        img = img.transpose(Image.TRANSPOSE)
        # Magnitude ladder in degrees (up to 30); converted to radians for
        # the rotation matrix below.
        magnitudes = np.random.choice([-1.0, 1.0]) * np.linspace(0, 30, 11)
        theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                     [np.sin(theta), np.cos(theta), 0],
                                     [0, 0, 1]])
        transform_matrix = self.transform_matrix_offset_center(transform_matrix, img.size[0], img.size[1])
        img = img.transform(img.size, Image.AFFINE, transform_matrix.flatten()[:6], Image.BICUBIC)
        img = img.transpose(Image.TRANSPOSE)
        return img
    def mirror(self, img, magnitude):
        # Horizontal flip; magnitude is ignored.
        img = ImageOps.mirror(img)
        return img
    def auto_contrast(self, img, magnitude):
        # Magnitude is ignored for this op.
        img = ImageOps.autocontrast(img)
        return img
    def equalize(self, img, magnitude):
        # Magnitude is ignored for this op.
        img = ImageOps.equalize(img)
        return img
    def solarize(self, img, magnitude):
        # Invert all pixel values above a threshold drawn from [0, 256].
        magnitudes = np.linspace(0, 256, 11)
        img = ImageOps.solarize(img, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        return img
    def posterize(self, img, magnitude):
        # Reduce to 4..8 bits per channel (higher magnitude = milder effect).
        magnitudes = np.linspace(4, 8, 11)
        img = ImageOps.posterize(img, int(round(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))))
        return img
    def invert(self, img, magnitude):
        # Negate the image; magnitude is ignored.
        img = ImageOps.invert(img)
        return img
    def contrast(self, img, magnitude):
        # Enhancement factor 1.0 +/- [0.1, 0.9]; the sign is chosen at random
        # so the op can either strengthen or weaken the attribute.
        magnitudes = 1.0 + np.random.choice([-1.0, 1.0])*np.linspace(0.1, 0.9, 11)
        img = ImageEnhance.Contrast(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        return img
    def color(self, img, magnitude):
        magnitudes = 1.0 + np.random.choice([-1.0, 1.0])*np.linspace(0.1, 0.9, 11)
        img = ImageEnhance.Color(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        return img
    def brightness(self, img, magnitude):
        magnitudes = 1.0 + np.random.choice([-1.0, 1.0])*np.linspace(0.1, 0.9, 11)
        img = ImageEnhance.Brightness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        return img
    def sharpness(self, img, magnitude):
        magnitudes = 1.0 + np.random.choice([-1.0, 1.0])*np.linspace(0.1, 0.9, 11)
        img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
        return img
    def __call__(self, img):
        # Sample N operation names (np.random.choice samples with replacement)
        # and apply them in sequence, each with an independently drawn
        # magnitude index in [0, M].
        ops = np.random.choice(list(self.operations.keys()), self.N)
        for op in ops:
            mag = random.randint(0, self.M)
            img = self.operations[op](img, mag)
        return img
class Cutout:
    """Zero out (or fill) one randomly-centered square patch of a CHW array,
    in place.

    Ref https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
    """

    def __init__(self, M=0.5, fill=0.0):
        # M: patch side as a fraction of the image side, clamped to [0, 1].
        self.M = np.clip(M, a_min=0.0, a_max=1.0)
        self.fill = fill

    def __call__(self, x):
        _, h, w = x.shape
        patch_h = int(round(self.M * h))
        patch_w = int(round(self.M * w))
        # Same two RNG draws, in the same order, as the reference version.
        center_r = np.random.randint(0, h)
        center_c = np.random.randint(0, w)
        top = np.clip(center_r - patch_h // 2, 0, h)
        bottom = np.clip(center_r + patch_h // 2, 0, h)
        left = np.clip(center_c - patch_w // 2, 0, w)
        right = np.clip(center_c + patch_w // 2, 0, w)
        # Patches touching the border are clipped, so the visible cut may be
        # smaller than patch_h x patch_w.
        x[:, top:bottom, left:right] = self.fill
        return x
"PIL.ImageEnhance.Brightness",
"numpy.clip",
"os.path.isfile",
"numpy.random.randint",
"numpy.sin",
"torchvision.transforms.Normalize",
"os.path.join",
"random.randint",
"PIL.ImageOps.invert",
"PIL.ImageEnhance.Sharpness",
"torchvision.transforms.Compose",
"six.moves.urllib.request.urlretrieve... | [((2362, 2396), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (2380, 2396), False, 'from torchvision import transforms\n'), ((2497, 2510), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (2508, 2510), False, 'import hashlib\n'), ((2841, 2865), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (2859, 2865), False, 'import os\n'), ((2878, 2906), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2890, 2906), False, 'import os\n'), ((3946, 3970), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (3964, 3970), False, 'import os\n'), ((4744, 4768), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (4762, 4768), False, 'import os\n'), ((2442, 2463), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (2456, 2463), False, 'import os\n'), ((2925, 2942), 'os.makedirs', 'os.makedirs', (['root'], {}), '(root)\n', (2936, 2942), False, 'import os\n'), ((3082, 3103), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (3096, 3103), False, 'import os\n'), ((6771, 6799), 'numpy.clip', 'np.clip', (['M'], {'a_min': '(0)', 'a_max': '(9)'}), '(M, a_min=0, a_max=9)\n', (6778, 6799), True, 'import numpy as np\n'), ((7011, 7058), 'numpy.array', 'np.array', (['[[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]'], {}), '([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n', (7019, 7058), True, 'import numpy as np\n'), ((7082, 7131), 'numpy.array', 'np.array', (['[[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]'], {}), '([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n', (7090, 7131), True, 'import numpy as np\n'), ((10581, 10601), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['img'], {}), '(img)\n', (10596, 10601), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((10681, 10707), 'PIL.ImageOps.autocontrast', 'ImageOps.autocontrast', (['img'], {}), '(img)\n', (10702, 10707), False, 'from PIL import 
Image, ImageEnhance, ImageOps\n'), ((10782, 10804), 'PIL.ImageOps.equalize', 'ImageOps.equalize', (['img'], {}), '(img)\n', (10799, 10804), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((10886, 10909), 'numpy.linspace', 'np.linspace', (['(0)', '(256)', '(11)'], {}), '(0, 256, 11)\n', (10897, 10909), True, 'import numpy as np\n'), ((11093, 11114), 'numpy.linspace', 'np.linspace', (['(4)', '(8)', '(11)'], {}), '(4, 8, 11)\n', (11104, 11114), True, 'import numpy as np\n'), ((11301, 11321), 'PIL.ImageOps.invert', 'ImageOps.invert', (['img'], {}), '(img)\n', (11316, 11321), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((12672, 12704), 'numpy.clip', 'np.clip', (['M'], {'a_min': '(0.0)', 'a_max': '(1.0)'}), '(M, a_min=0.0, a_max=1.0)\n', (12679, 12704), True, 'import numpy as np\n'), ((13031, 13058), 'numpy.clip', 'np.clip', (['(cx - lh // 2)', '(0)', 'h'], {}), '(cx - lh // 2, 0, h)\n', (13038, 13058), True, 'import numpy as np\n'), ((13072, 13099), 'numpy.clip', 'np.clip', (['(cx + lh // 2)', '(0)', 'h'], {}), '(cx + lh // 2, 0, h)\n', (13079, 13099), True, 'import numpy as np\n'), ((13113, 13140), 'numpy.clip', 'np.clip', (['(cy - lw // 2)', '(0)', 'w'], {}), '(cy - lw // 2, 0, w)\n', (13120, 13140), True, 'import numpy as np\n'), ((13154, 13181), 'numpy.clip', 'np.clip', (['(cy + lw // 2)', '(0)', 'w'], {}), '(cy + lw // 2, 0, w)\n', (13161, 13181), True, 'import numpy as np\n'), ((3291, 3329), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'fpath'], {}), '(url, fpath)\n', (3317, 3329), False, 'from six.moves import urllib\n'), ((4083, 4099), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (4093, 4099), False, 'import os\n'), ((4163, 4184), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (4175, 4184), False, 'import os\n'), ((4899, 4915), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (4909, 4915), False, 'import os\n'), ((4973, 4994), 'os.path.join', 'os.path.join', 
(['root', 'd'], {}), '(root, d)\n', (4985, 4994), False, 'import os\n'), ((7335, 7364), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (7351, 7364), True, 'import numpy as np\n'), ((7367, 7390), 'numpy.linspace', 'np.linspace', (['(0)', '(0.3)', '(11)'], {}), '(0, 0.3, 11)\n', (7378, 7390), True, 'import numpy as np\n'), ((7973, 8002), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (7989, 8002), True, 'import numpy as np\n'), ((8005, 8028), 'numpy.linspace', 'np.linspace', (['(0)', '(0.3)', '(11)'], {}), '(0, 0.3, 11)\n', (8016, 8028), True, 'import numpy as np\n'), ((8615, 8644), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (8631, 8644), True, 'import numpy as np\n'), ((8647, 8670), 'numpy.linspace', 'np.linspace', (['(0)', '(0.3)', '(11)'], {}), '(0, 0.3, 11)\n', (8658, 8670), True, 'import numpy as np\n'), ((9269, 9298), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (9285, 9298), True, 'import numpy as np\n'), ((9301, 9324), 'numpy.linspace', 'np.linspace', (['(0)', '(0.3)', '(11)'], {}), '(0, 0.3, 11)\n', (9312, 9324), True, 'import numpy as np\n'), ((9918, 9947), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (9934, 9947), True, 'import numpy as np\n'), ((9950, 9972), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', '(11)'], {}), '(0, 30, 11)\n', (9961, 9972), True, 'import numpy as np\n'), ((10000, 10064), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (10014, 10064), False, 'import random\n'), ((10947, 11011), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (10961, 11011), False, 'import random\n'), ((11514, 11578), 'random.uniform', 'random.uniform', 
(['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (11528, 11578), False, 'import random\n'), ((11764, 11828), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (11778, 11828), False, 'import random\n'), ((12024, 12088), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (12038, 12088), False, 'import random\n'), ((12282, 12346), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (12296, 12346), False, 'import random\n'), ((12505, 12530), 'random.randint', 'random.randint', (['(0)', 'self.M'], {}), '(0, self.M)\n', (12519, 12530), False, 'import random\n'), ((12969, 12992), 'numpy.random.randint', 'np.random.randint', (['(0)', 'h'], {}), '(0, h)\n', (12986, 12992), True, 'import numpy as np\n'), ((12994, 13017), 'numpy.random.randint', 'np.random.randint', (['(0)', 'w'], {}), '(0, w)\n', (13011, 13017), True, 'import numpy as np\n'), ((2187, 2208), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2206, 2208), False, 'from torchvision import transforms\n'), ((2226, 2314), 'torchvision.transforms.Normalize', 'transforms.Normalize', (["dataset_stats[dataset]['mean']", "dataset_stats[dataset]['std']"], {}), "(dataset_stats[dataset]['mean'], dataset_stats[dataset]\n ['std'])\n", (2246, 2314), False, 'from torchvision import transforms\n'), ((11409, 11438), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (11425, 11438), True, 'import numpy as np\n'), ((11439, 11464), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(11)'], {}), '(0.1, 0.9, 11)\n', (11450, 11464), True, 'import numpy as np\n'), ((11479, 11505), 
'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['img'], {}), '(img)\n', (11500, 11505), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((11662, 11691), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (11678, 11691), True, 'import numpy as np\n'), ((11692, 11717), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(11)'], {}), '(0.1, 0.9, 11)\n', (11703, 11717), True, 'import numpy as np\n'), ((11732, 11755), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['img'], {}), '(img)\n', (11750, 11755), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((11917, 11946), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (11933, 11946), True, 'import numpy as np\n'), ((11947, 11972), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(11)'], {}), '(0.1, 0.9, 11)\n', (11958, 11972), True, 'import numpy as np\n'), ((11987, 12015), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['img'], {}), '(img)\n', (12010, 12015), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((12176, 12205), 'numpy.random.choice', 'np.random.choice', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (12192, 12205), True, 'import numpy as np\n'), ((12206, 12231), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(11)'], {}), '(0.1, 0.9, 11)\n', (12217, 12231), True, 'import numpy as np\n'), ((12246, 12273), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['img'], {}), '(img)\n', (12268, 12273), False, 'from PIL import Image, ImageEnhance, ImageOps\n'), ((1401, 1458), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(63 / 255)', 'contrast': '(0.8)'}), '(brightness=63 / 255, contrast=0.8)\n', (1423, 1458), False, 'from torchvision import transforms\n'), ((1507, 1528), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1526, 1528), False, 'from torchvision import transforms\n'), ((1548, 1636), 
'torchvision.transforms.Normalize', 'transforms.Normalize', (["dataset_stats[dataset]['mean']", "dataset_stats[dataset]['std']"], {}), "(dataset_stats[dataset]['mean'], dataset_stats[dataset]\n ['std'])\n", (1568, 1636), False, 'from torchvision import transforms\n'), ((1759, 1816), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(63 / 255)', 'contrast': '(0.8)'}), '(brightness=63 / 255, contrast=0.8)\n', (1781, 1816), False, 'from torchvision import transforms\n'), ((1832, 1870), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1863, 1870), False, 'from torchvision import transforms\n'), ((1888, 1952), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (["dataset_stats[dataset]['size']"], {'padding': '(4)'}), "(dataset_stats[dataset]['size'], padding=4)\n", (1909, 1952), False, 'from torchvision import transforms\n'), ((1970, 1991), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1989, 1991), False, 'from torchvision import transforms\n'), ((2009, 2097), 'torchvision.transforms.Normalize', 'transforms.Normalize', (["dataset_stats[dataset]['mean']", "dataset_stats[dataset]['std']"], {}), "(dataset_stats[dataset]['mean'], dataset_stats[dataset]\n ['std'])\n", (2029, 2097), False, 'from torchvision import transforms\n'), ((3583, 3621), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'fpath'], {}), '(url, fpath)\n', (3609, 3621), False, 'from six.moves import urllib\n'), ((4047, 4068), 'os.path.join', 'os.path.join', (['root', 'p'], {}), '(root, p)\n', (4059, 4068), False, 'import os\n'), ((7432, 7496), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (7446, 7496), False, 'import random\n'), ((8115, 8179), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 
1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (8129, 8179), False, 'import random\n'), ((10102, 10115), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10108, 10115), True, 'import numpy as np\n'), ((10175, 10188), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10181, 10188), True, 'import numpy as np\n'), ((10190, 10203), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10196, 10203), True, 'import numpy as np\n'), ((11163, 11227), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (11177, 11227), False, 'import random\n'), ((4840, 4861), 'os.path.join', 'os.path.join', (['root', 'p'], {}), '(root, p)\n', (4852, 4861), False, 'import os\n'), ((8775, 8839), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (8789, 8839), False, 'import random\n'), ((9381, 9445), 'random.uniform', 'random.uniform', (['magnitudes[magnitude]', 'magnitudes[magnitude + 1]'], {}), '(magnitudes[magnitude], magnitudes[magnitude + 1])\n', (9395, 9445), False, 'import random\n'), ((10118, 10131), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10124, 10131), True, 'import numpy as np\n')] |
# Credit for setup : https://www.analyticsvidhya.com/blog/2018/11/introduction-text-summarization-textrank-python/
import math
import os
import nltk
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
#nltk.download('punkt') # one time execution
#nltk.download('stopwords')  # Uncomment for the first run, then comment out again!
def text_rank_summarize(upload_path, summary_path, word_embeddings, fraction_of_words=0.2):
    """
    Write a TextRank summary for every file in *upload_path*.

    Each document's sentences are embedded as the mean of their GloVe word
    vectors, scored with PageRank over the sentence cosine-similarity graph,
    and the top ``fraction_of_words`` of sentences (at most 15; all sentences
    when the fraction rounds to zero) are written to
    ``<summary_path>/<input filename>_summary.txt``.

    param: upload_path: String, folder with documents to summarize
    param: summary_path: String, folder that receives the summary files
    param: word_embeddings: dict mapping word -> 100-d numpy vector
    param: fraction_of_words: Float, fraction of sentences to keep
    >>> text_rank_summarize(upload_folder, summary_folder, embeddings, 0.2)
    """
    files = os.listdir(upload_path)
    stop_words = stopwords.words('english')

    # Hoisted out of the per-file loop: the cleaning is file-independent.
    def remove_stopwords(sen):
        return " ".join([i for i in sen if i not in stop_words])

    for file in files:
        lines = []
        with open(f'{upload_path}/{file}', encoding="utf8", errors="ignore") as f:
            for line in f:
                lines.append(bytes(line, "utf-8").decode("utf-8", "ignore"))
        text = "".join(lines)
        sentences = sent_tokenize(text)
        len_sentence = len(sentences)

        # Clean and remove stop words. regex=True keeps the intended
        # "letters only" behavior on pandas >= 2.0 (where the default flipped).
        clean_sentences = [remove_stopwords(s.lower().split())
                           for s in pd.Series(sentences).str.replace("[^a-zA-Z]", " ", regex=True)]

        sentence_vectors = []
        for i in clean_sentences:
            if len(i) != 0:
                # Mean word vector; +0.001 guards against division issues.
                v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()])/(len(i.split())+0.001)
            else:
                v = np.zeros((100,))
            sentence_vectors.append(v)

        # Pairwise cosine-similarity matrix between sentence embeddings.
        sim_mat = np.zeros([len_sentence, len_sentence])
        for i in range(len_sentence):
            for j in range(len_sentence):
                if i != j:
                    sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,100), sentence_vectors[j].reshape(1,100))[0,0]

        # Calculate PageRank scores from the similarity graph.
        scores = nx.pagerank(nx.from_numpy_array(sim_mat))
        ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)

        # Keep a fraction of the sentences, capped at 15; a zero count means
        # the document is short, so keep everything.
        top_x_sentences = int(len_sentence * fraction_of_words)
        if top_x_sentences > 15:
            top_x_sentences = 15
        if top_x_sentences < 1:
            top_x_sentences = len_sentence
        summary = " ".join([ranked_sentences[i][1] for i in range(top_x_sentences)])

        filename, _ = os.path.splitext(str(file))
        # Bug fix: use the per-input filename; the previous hard-coded output
        # name made every document overwrite the same summary file.
        with open(f'{summary_path}/{filename}_summary.txt', "w") as f:
            f.write(summary)
def word_embeddings(embedding_path='summarization/textrank/glove.6B.100d.txt'):
    """Load GloVe vectors from *embedding_path*.

    Each line is expected to be ``<word> <coeff> <coeff> ...``; returns a
    dict mapping word -> float32 numpy array of coefficients.
    """
    vectors = {}
    with open(embedding_path, encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            vectors[parts[0]] = np.asarray(parts[1:], dtype='float32')
    return vectors
| [
"numpy.asarray",
"numpy.zeros",
"networkx.from_numpy_array",
"nltk.tokenize.sent_tokenize",
"pandas.Series",
"nltk.corpus.stopwords.words",
"os.listdir"
] | [((793, 816), 'os.listdir', 'os.listdir', (['upload_path'], {}), '(upload_path)\n', (803, 816), False, 'import os\n'), ((834, 860), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (849, 860), False, 'from nltk.corpus import stopwords\n'), ((1146, 1165), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (1159, 1165), False, 'from nltk.tokenize import sent_tokenize\n'), ((1862, 1900), 'numpy.zeros', 'np.zeros', (['[len_sentence, len_sentence]'], {}), '([len_sentence, len_sentence])\n', (1870, 1900), True, 'import numpy as np\n'), ((2217, 2245), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['sim_mat'], {}), '(sim_mat)\n', (2236, 2245), True, 'import networkx as nx\n'), ((3131, 3170), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (3141, 3170), True, 'import numpy as np\n'), ((1759, 1775), 'numpy.zeros', 'np.zeros', (['(100,)'], {}), '((100,))\n', (1767, 1775), True, 'import numpy as np\n'), ((1466, 1486), 'pandas.Series', 'pd.Series', (['sentences'], {}), '(sentences)\n', (1475, 1486), True, 'import pandas as pd\n'), ((1659, 1675), 'numpy.zeros', 'np.zeros', (['(100,)'], {}), '((100,))\n', (1667, 1675), True, 'import numpy as np\n')] |
import h5py
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from utils import *
# Build a "mixed model": fit a linear regression that predicts each channel
# from all the other channels, then persist the trained regressors.
model = dict()
# draw_sample / Channel / norm_zero come from the local `utils` module.
# NOTE(review): assumes draw_sample returns (channel names, per-channel data
# keyed by name, per-channel norms) — confirm against utils.
channels,channel_dat,channel_norms = draw_sample(data_path="data",total_samples=10000)
channel_to_idx = {ch: i for i,ch in enumerate(channels)}
model['data'] = dict()
model['channels'] = channels
model['norms'] = channel_norms
# NOT USED
# # Load up previously computed correlation matrix (for what?)
# corr_M = np.load("corr_M.npy")
# # Construct 0-1 matrix from corr_M given threshold
# pos = np.where(corr_M > 0.1)
# corr_M = np.zeros(corr_M.shape)
# for p in pos:
#     corr_M[p] = 1
# corr_M = corr_M - np.identity(corr_M.shape[0])
# Initialize sensors
sensors = {ch: Channel(ch,len(channels)) for ch in channels}
redundant_channels = []
params_M = []
# Feed compound dataframe into Regressor to train for prediction
for ch in channels:
    print(f"Fitting with linear regression channel: {ch}")
    # Extract current channel as Y
    Y = np.asarray(channel_dat[ch])
    # Predictors: all other channels; the target's own column is zeroed so
    # the channel cannot trivially predict itself.
    X = np.asmatrix([channel_dat[c] if c != ch else np.zeros(len(channel_dat[c])) for c in channels])
    r2_fit, params = sensors[ch].train(X,Y,channels)
    # A near-perfect fit (R^2 > 0.995) marks the channel as redundant:
    # it is fully explained by the remaining channels.
    if (r2_fit > 0.995):
        redundant_channels.append(ch)
    params = norm_zero(params)
    # Store the fit quality in this channel's own (diagonal) slot.
    params[channel_to_idx[ch]] = r2_fit
    params_M.append(params)
print(redundant_channels)
params_M = np.asmatrix(params_M)
np.save("params_M",params_M)
# Square the coefficients so the heatmap emphasizes strong dependencies
# regardless of sign.
params_M = np.square(params_M)
my_dpi=96
plt.figure(figsize=(2048/my_dpi,2048/my_dpi),dpi=my_dpi,frameon=False)
plt.imshow(params_M, cmap='cool', interpolation='nearest')
# plt.show()
plt.savefig('params_matrix_mixed_model.png',dpi=my_dpi*2)
# Verify
for ch in channels:
    print(f"{ch}: {sensors[ch].covariates}")
    model['data'][ch] = sensors[ch].lin_reg
pickle.dump(model,open('normed_mixed_model3.mdl','wb'))
"numpy.save",
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.square",
"matplotlib.pyplot.figure",
"numpy.asmatrix",
"matplotlib.pyplot.savefig"
] | [((1320, 1341), 'numpy.asmatrix', 'np.asmatrix', (['params_M'], {}), '(params_M)\n', (1331, 1341), True, 'import numpy as np\n'), ((1342, 1371), 'numpy.save', 'np.save', (['"""params_M"""', 'params_M'], {}), "('params_M', params_M)\n", (1349, 1371), True, 'import numpy as np\n'), ((1382, 1401), 'numpy.square', 'np.square', (['params_M'], {}), '(params_M)\n', (1391, 1401), True, 'import numpy as np\n'), ((1412, 1489), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2048 / my_dpi, 2048 / my_dpi)', 'dpi': 'my_dpi', 'frameon': '(False)'}), '(figsize=(2048 / my_dpi, 2048 / my_dpi), dpi=my_dpi, frameon=False)\n', (1422, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1483, 1541), 'matplotlib.pyplot.imshow', 'plt.imshow', (['params_M'], {'cmap': '"""cool"""', 'interpolation': '"""nearest"""'}), "(params_M, cmap='cool', interpolation='nearest')\n", (1493, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1555, 1615), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""params_matrix_mixed_model.png"""'], {'dpi': '(my_dpi * 2)'}), "('params_matrix_mixed_model.png', dpi=my_dpi * 2)\n", (1566, 1615), True, 'import matplotlib.pyplot as plt\n'), ((960, 987), 'numpy.asarray', 'np.asarray', (['channel_dat[ch]'], {}), '(channel_dat[ch])\n', (970, 987), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.