code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from keras.layers import Input, LSTM, Dense, Masking, merge, Dropout
from keras.models import Model, load_model
from keras.optimizers import adam
from keras.utils import to_categorical
import numpy as np
from numpy import genfromtxt, savetxt
from matplotlib import pyplot as plt
# Evaluation script: loads a pre-trained two-input scene-classification model
# and evaluates/predicts on the test split, saving predictions next to labels.
data_dir = './split2/scene_activity_data_test_none'
model_path = './scenelstm/scenelstm-split1.h5'
batch_size = 128
# X1: per-sample feature vectors (the commented model below suggests 4-dim;
# TODO confirm against the CSV). X2: sequences of 10 context vectors of
# dimension 2048 each (presumably CNN features — verify upstream).
trainX1 = (genfromtxt(data_dir + '/' + 'trainX1.csv', delimiter=','))
trainX2 = np.reshape(genfromtxt(data_dir + '/' + 'trainX2.csv', delimiter=','), newshape=(-1, 10, 2048))
# Labels on disk are 1-based; shift to 0-based before one-hot encoding.
trainY = to_categorical(genfromtxt(data_dir + '/' + 'trainY.csv', delimiter=',') - 1)
testX1 = (genfromtxt(data_dir + '/' + 'testX1.csv', delimiter=','))
testX2 = np.reshape(genfromtxt(data_dir + '/' + 'testX2.csv', delimiter=','), newshape=(-1, 10, 2048))
testY = to_categorical(genfromtxt(data_dir + '/' + 'testY.csv', delimiter=',') - 1)
print(trainX1.shape, trainX2.shape, trainY.shape, testX1.shape, testX2.shape, testY.shape)
# The original model-building code is kept below for reference; the model is
# instead restored from `model_path`.
# freq_layer = Input(shape=(4,))
# context_layer = Input(shape=(10, 2048))
# masked = Masking()(context_layer)
# lstm1 = LSTM(256, activation='sigmoid', recurrent_activation='tanh')(masked)
# drop1 = Dropout(rate=0.95)(lstm1)
# fc_context = Dense(16, activation='tanh')(drop1)
# fc_freq = Dense(16, activation='tanh')(freq_layer)
# merged = merge(inputs=[fc_context, fc_freq], mode='concat', concat_axis=1)
# fc2 = Dense(5, activation='softmax')(merged)
# scene_net = Model(inputs=[freq_layer, context_layer], outputs=fc2)
# print(scene_net.summary())
#
# optm = adam(lr=0.001)
# scene_net.compile(optimizer=optm, loss='categorical_crossentropy', metrics=['accuracy'])
scene_net = load_model(model_path)
print(testY.shape, trainY.shape)
# Evaluate on the two-input test set, then save raw predictions alongside the
# one-hot ground truth for offline analysis.
scores = scene_net.evaluate(x=[testX1, testX2], y=testY, batch_size=batch_size)
y_fit = scene_net.predict(x=[testX1, testX2])
savetxt(data_dir + '/' + 'scene_res_none.txt', np.hstack((y_fit, testY)))
print(scores)
| [
"keras.models.load_model",
"numpy.genfromtxt",
"numpy.hstack"
] | [((409, 466), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'trainX1.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'trainX1.csv', delimiter=',')\n", (419, 466), False, 'from numpy import genfromtxt, savetxt\n'), ((669, 725), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'testX1.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'testX1.csv', delimiter=',')\n", (679, 725), False, 'from numpy import genfromtxt, savetxt\n'), ((1689, 1711), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1699, 1711), False, 'from keras.models import Model, load_model\n'), ((489, 546), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'trainX2.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'trainX2.csv', delimiter=',')\n", (499, 546), False, 'from numpy import genfromtxt, savetxt\n'), ((747, 803), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'testX2.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'testX2.csv', delimiter=',')\n", (757, 803), False, 'from numpy import genfromtxt, savetxt\n'), ((1919, 1944), 'numpy.hstack', 'np.hstack', (['(y_fit, testY)'], {}), '((y_fit, testY))\n', (1928, 1944), True, 'import numpy as np\n'), ((597, 653), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'trainY.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'trainY.csv', delimiter=',')\n", (607, 653), False, 'from numpy import genfromtxt, savetxt\n'), ((853, 908), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + '/' + 'testY.csv')"], {'delimiter': '""","""'}), "(data_dir + '/' + 'testY.csv', delimiter=',')\n", (863, 908), False, 'from numpy import genfromtxt, savetxt\n')] |
#
"""
Various RNN encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework import nest
from texar.modules.encoders.encoder_base import EncoderBase
from texar.modules.networks.conv_networks import _to_list
from texar.core import layers
from texar.utils.mode import is_train_mode
from texar.utils.shapes import mask_sequences
from texar.hyperparams import HParams
from texar.modules.decoders.rnn_decoder_base import compute_output_shape
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, no-member
# Public API of this module.
__all__ = [
    "_forward_single_output_layer",
    "RNNEncoderBase",
    "UnidirectionalRNNEncoder",
    "BidirectionalRNNEncoder"
]
def _default_output_layer_hparams():
return {
"num_layers": 0,
"layer_size": 128,
"activation": "identity",
"final_layer_activation": None,
"other_dense_kwargs": None,
"dropout_layer_ids": [],
"dropout_rate": 0.5,
"variational_dropout": False,
"@no_typecheck": ["activation", "final_layer_activation",
"layer_size", "dropout_layer_ids"]
}
def _build_dense_output_layer(hparams):
    """Builds Dense layers from the ``output_layer`` hyperparameters.

    Returns `None` when no layers are requested, a single layer when exactly
    one is built, otherwise a list of layers.

    Raises:
        ValueError: If `hparams.other_dense_kwargs` is neither a dict nor
            an :class:`HParams` instance.
    """
    num_layers = hparams.num_layers
    if num_layers <= 0:
        return None

    sizes = _to_list(hparams.layer_size, 'output_layer.layer_size', num_layers)

    extra_kwargs = hparams.other_dense_kwargs or {}
    if isinstance(extra_kwargs, HParams):
        extra_kwargs = extra_kwargs.todict()
    if not isinstance(extra_kwargs, dict):
        raise ValueError(
            "hparams 'output_layer.other_dense_kwargs' must be a dict.")

    built = []
    for idx in range(num_layers):
        # Only the last layer uses `final_layer_activation`.
        is_final = idx == num_layers - 1
        activation = (hparams.final_layer_activation if is_final
                      else hparams.activation)
        dense_kwargs = {"units": sizes[idx],
                        "activation": activation,
                        "name": "dense_%d" % (idx + 1)}
        dense_kwargs.update(extra_kwargs)
        built.append(
            layers.get_layer(hparams={"type": "Dense",
                                      "kwargs": dense_kwargs}))

    # Unwrap single-layer results for callers that expect a bare layer.
    return built if len(built) > 1 else built[0]
def _forward_single_output_layer(inputs, input_size, output_layer):
    """Forwards the input through a single output layer.

    Args:
        inputs: A Tensor of shape `[batch_size, max_time] + input_size` if
            :attr:`time_major=False`, or shape
            `[max_time, batch_size] + input_size` if :attr:`time_major=True`.
        input_size: An `int` or 1D `int` array.
        output_layer: The layer to apply.

    Returns:
        A pair `(output, output_size)` with the layer output reshaped back
        to the leading `[batch, time]` (or `[time, batch]`) dimensions.
    """
    flat_dim = np.prod(input_size)
    # Collapse the leading batch/time dimensions so the layer sees a matrix.
    flat_inputs = tf.reshape(inputs, [-1, flat_dim])
    flat_outputs = output_layer(flat_inputs)
    out_size = np.array(
        compute_output_shape(output_layer.units, [1, flat_dim]).as_list()[1:])
    # Restore the original leading two dimensions around the new feature size.
    restored_shape = tf.concat([tf.shape(inputs)[:2], out_size], axis=0)
    return tf.reshape(flat_outputs, restored_shape), out_size
def _apply_dropout(inputs, time_major, hparams, training):
    """Applies (optionally variational) dropout to the inputs.

    :attr:`inputs` is a Tensor of shape `[batch_size, max_time, dim]`
    if :attr:`time_major=False`, or shape `[max_time, batch_size, dim]`
    if :attr:`time_major=True`.
    """
    if hparams.variational_dropout:
        # A broadcast dimension of 1 on the time axis shares one dropout
        # mask across all time steps.
        noise_shape = [1, None, None] if time_major else [None, 1, None]
    else:
        noise_shape = None
    return tf.layers.dropout(
        inputs, rate=hparams.dropout_rate, noise_shape=noise_shape,
        training=training)
def _forward_output_layers(inputs, input_size, output_layer, time_major,
                           hparams, mode, sequence_length=None):
    """Forwards inputs through the output layers.

    Args:
        inputs: A Tensor of shape `[batch_size, max_time] + input_size` if
            :attr:`time_major=False`, or shape
            `[max_time, batch_size] + input_size` if :attr:`time_major=True`.
        input_size: An `int` or 1D `int` array, the feature size of `inputs`.
        output_layer: `None`, a single layer (constructor-provided), or a
            layer/list of layers built from :attr:`hparams`.
        time_major (bool): Layout of the first two dimensions of `inputs`.
        hparams: Output-layer hyperparameters, or `None` when
            :attr:`output_layer` was passed in from the constructor.
        mode: Estimator mode controlling dropout (train vs. inference).
        sequence_length (optional): If given, outputs past each sequence's
            length are zeroed out.

    Returns:
        A pair :attr:`(outputs, output_size)` where

        - :attr:`outputs`: A Tensor of shape \
        `[batch_size, max_time] + output_size`.
        - :attr:`output_size`: An `int` or 1D `int` array representing the \
        output size.
    """
    if output_layer is None:
        return inputs, input_size
    if hparams is None:
        # output_layer was passed in from the constructor
        if isinstance(output_layer, (list, tuple)):
            raise ValueError('output_layer must not be a list or tuple.')
        output, output_size = _forward_single_output_layer(
            inputs, input_size, output_layer)
    else:
        # output_layer was built based on hparams
        output_layer = _to_list(output_layer)
        dropout_layer_ids = _to_list(hparams.dropout_layer_ids)
        # NOTE: `training` is only bound when dropout is actually configured;
        # the uses below are all guarded by membership in dropout_layer_ids.
        if len(dropout_layer_ids) > 0:
            training = is_train_mode(mode)
        output = inputs
        output_size = input_size
        for i, layer in enumerate(output_layer):
            # Dropout id `i` applies to the *input* of layer `i`.
            if i in dropout_layer_ids:
                output = _apply_dropout(output, time_major, hparams, training)
            output, output_size = _forward_single_output_layer(
                output, output_size, layer)
        # An id equal to the number of layers applies dropout to the final
        # layer's output.
        if len(output_layer) in dropout_layer_ids:
            output = _apply_dropout(output, time_major, hparams, training)
    if sequence_length is not None:
        output = mask_sequences(
            output, sequence_length, time_major=time_major, tensor_rank=3)
    return output, output_size
def _apply_rnn_encoder_output_layer(output_layer, time_major, hparams, mode,
                                    cell_outputs, cell_output_size):
    """Applies the output layers to each element of the (possibly nested)
    cell outputs, preserving the nest structure.

    Returns:
        A pair `(outputs, output_size)` with the same nest structure as
        :attr:`cell_outputs`.
    """
    forward = functools.partial(
        _forward_output_layers,
        output_layer=output_layer,
        time_major=time_major,
        hparams=hparams,
        mode=mode)
    flat_outputs = nest.flatten(cell_outputs)
    flat_sizes = nest.flatten(cell_output_size)
    results = [forward(inputs=out, input_size=size)
               for out, size in zip(flat_outputs, flat_sizes)]
    outputs_flat, sizes_flat = zip(*results)
    # Re-pack flat results into the structure of the original cell outputs.
    outputs = nest.pack_sequence_as(cell_outputs, outputs_flat)
    output_size = nest.pack_sequence_as(cell_outputs, sizes_flat)
    return outputs, output_size
class RNNEncoderBase(EncoderBase):
    """Base class inherited by all RNN encoder classes.

    Args:
        hparams (dict, optional): Encoder hyperparameters. If not given,
            the default setting is used. See :attr:`default_hparams` for
            the structure and default values.
    """

    def __init__(self, hparams=None):
        EncoderBase.__init__(self, hparams)

    @staticmethod
    def default_hparams():
        """Returns a dictionary of hyperparameters with default values.

        Returns:
            .. code-block:: python

                {
                    "name": "rnn_encoder"
                }

        Here:

        "name" : str
            Name of the encoder.
        """
        hparams = {"name": "rnn_encoder"}
        return hparams

    def _build(self, inputs, *args, **kwargs):
        """Encodes the inputs. Must be overridden by subclasses.

        Args:
            inputs: Inputs to the encoder.
            *args: Other arguments.
            **kwargs: Keyword arguments.

        Returns:
            Encoding results.
        """
        raise NotImplementedError
class UnidirectionalRNNEncoder(RNNEncoderBase):
    """One directional RNN encoder.

    Args:
        cell: (RNNCell, optional) If it is not specified,
            a cell is created as specified in :attr:`hparams["rnn_cell"]`.
        cell_dropout_mode (optional): A Tensor taking value of
            :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, which
            toggles dropout in the RNN cell (e.g., activates dropout in the
            TRAIN mode). If `None`, :func:`~texar.context.global_mode` is used.
            Ignored if :attr:`cell` is given.
        output_layer (optional): An instance of
            :tf_main:`tf.layers.Layer <layers/Layer>`. Applied to the RNN cell
            output of each step. If `None` (default), the output layer is
            created as specified in :attr:`hparams["output_layer"]`.
        hparams (dict, optional): Encoder hyperparameters. If it is not
            specified, the default hyperparameter setting is used. See
            :attr:`default_hparams` for the structure and default values.
            Missing values will take default.
    """
    def __init__(self,
                 cell=None,
                 cell_dropout_mode=None,
                 output_layer=None,
                 hparams=None):
        RNNEncoderBase.__init__(self, hparams)
        # Make RNN cell
        with tf.variable_scope(self.variable_scope):
            if cell is not None:
                self._cell = cell
            else:
                self._cell = layers.get_rnn_cell(
                    self._hparams.rnn_cell, cell_dropout_mode)
        # Make output layer
        with tf.variable_scope(self.variable_scope):
            if output_layer is not None:
                # Constructor-provided layer: no hparams to drive dropout.
                self._output_layer = output_layer
                self._output_layer_hparams = None
            else:
                self._output_layer = _build_dense_output_layer(
                    self._hparams.output_layer)
                self._output_layer_hparams = self._hparams.output_layer
    @staticmethod
    def default_hparams():
        """Returns a dictionary of hyperparameters with default values.

        Returns:
            .. code-block:: python

                {
                    "rnn_cell": default_rnn_cell_hparams(),
                    "output_layer": {
                        "num_layers": 0,
                        "layer_size": 128,
                        "activation": "identity",
                        "final_layer_activation": None,
                        "other_dense_kwargs": None,
                        "dropout_layer_ids": [],
                        "dropout_rate": 0.5,
                        "variational_dropout": False
                    },
                    "name": "unidirectional_rnn_encoder"
                }

        Here:

        "rnn_cell" : dict
            A dictionary of RNN cell hyperparameters. Ignored if
            :attr:`cell` is given when constructing the encoder.
            The default value is defined in
            :func:`~texar.core.layers.default_rnn_cell_hparams`.

        "output_layer" : dict
            Output layer hyperparameters. Ignored if :attr:`output_layer`
            is given in the constructor. Includes:

            "num_layers" : int
                The number of output (dense) layers. Set to 0 to avoid any
                output layers applied to the cell outputs.

            "layer_size" : int or list
                The size of each of the output (dense) layers.
                If an `int`, each output layer will have the same size. If
                a list, the length must equal :attr:`num_layers`.

            "activation" : str or callable or None
                The activation function for each of the output (dense)
                layers except for the final layer. This can be
                the function itself, or its string name or full path,
                e.g., `"activation": tensorflow.nn.relu`
                or `"activation": "relu"`
                or `"activation": "tensorflow.nn.relu"`.
                Default is `None` which maintains a linear activation.

            "final_layer_activation" : str or callable or None
                The activation function for the final output layer.

            "other_dense_kwargs" : dict or None
                Other keyword arguments to construct each of the output
                dense layers, e.g., :attr:`use_bias`. See
                :tf_main:`Dense <layers/Dense>` for the arguments.
                E.g., `"other_dense_kwargs": { "use_bias": False }`.

            "dropout_layer_ids" : int or list
                The indexes of layers (starting from `0`) whose inputs
                are applied with dropout. The index = :attr:`num_layers`
                means dropout applies to the final layer output. E.g.,

                .. code-block:: python

                    {
                        "num_layers": 2,
                        "dropout_layer_ids": [0, 2]
                    }

                will lead to a series of layers as
                `-dropout-layer0-layer1-dropout-`.
                The dropout mode (training or not) is controlled
                by the :attr:`mode` argument when calling the encoder.

            "dropout_rate" : float
                The dropout rate, between 0 and 1. E.g.,
                `"dropout_rate": 0.1` would drop out 10% of elements.

            "variational_dropout" : bool
                Whether the dropout mask is the same across all time steps.

        "name" : str
            Name of the encoder.
        """
        hparams = RNNEncoderBase.default_hparams()
        hparams.update({
            "rnn_cell": layers.default_rnn_cell_hparams(),
            "output_layer": _default_output_layer_hparams(),
            "name": "unidirectional_rnn_encoder"
        })
        return hparams
    def _build(self,
               inputs,
               sequence_length=None,
               initial_state=None,
               time_major=False,
               mode=None,
               return_cell_output=False,
               return_output_size=False,
               **kwargs):
        """Encodes the inputs.

        Args:
            inputs: A 3D Tensor of shape `[batch_size, max_time, dim]`.
                The first two dimensions
                :attr:`batch_size` and :attr:`max_time` are exchanged if
                :attr:`time_major=True` is specified.
            sequence_length (int list or 1D Tensor, optional): Sequence lengths
                of the batch inputs. Used to copy-through state and zero-out
                outputs when past a batch element's sequence length.
            initial_state (optional): Initial state of the RNN.
            time_major (bool): The shape format of the :attr:`inputs` and
                :attr:`outputs` Tensors. If `True`, these tensors are of shape
                `[max_time, batch_size, depth]`. If `False` (default),
                these tensors are of shape `[batch_size, max_time, depth]`.
            mode (optional): A tensor taking value in
                :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, including
                `TRAIN`, `EVAL`, and `PREDICT`. Controls output layer dropout
                if the output layer is specified with :attr:`hparams`.
                If `None` (default), :func:`texar.context.global_mode()`
                is used.
            return_cell_output (bool): Whether to return the output of the RNN
                cell. This is the result prior to the output layer.
            return_output_size (bool): Whether to return the size of the
                output (i.e., the result after output layers).
            **kwargs: Optional keyword arguments of
                :tf_main:`tf.nn.dynamic_rnn <nn/dynamic_rnn>`,
                such as `swap_memory`, `dtype`, `parallel_iterations`, etc.

        Returns:
            By default (both :attr:`return_cell_output` and
            :attr:`return_output_size` are `False`), returns a pair
            :attr:`(outputs, final_state)` where

            - :attr:`outputs`: The RNN output tensor by the output layer \
            (if exists) or the RNN cell (otherwise). The tensor is of shape \
            `[batch_size, max_time, output_size]` (if \
            :attr:`time_major` == `False`) or \
            `[max_time, batch_size, output_size]` (if \
            :attr:`time_major` == `True`). \
            If RNN cell output is a (nested) tuple of Tensors, then the \
            :attr:`outputs` will be a (nested) tuple having the same \
            nest structure as the cell output.
            - :attr:`final_state`: The final state of the RNN, which is a \
            Tensor of shape `[batch_size] + cell.state_size` or \
            a (nested) tuple of Tensors (if `cell.state_size` is a (nested) \
            tuple).

            If :attr:`return_cell_output` is `True`, returns a triple
            :attr:`(outputs, final_state, cell_outputs)` where

            - :attr:`cell_outputs`: The outputs by the RNN cell prior to the \
            output layer, having the same structure as :attr:`outputs` \
            except for the `output_dim`.

            If :attr:`return_output_size` is also `True`, returns a tuple
            :attr:`(outputs, final_state, cell_outputs, output_size)` where

            - :attr:`output_size`: A (possibly nested tuple of) `int` \
            representing the size of :attr:`outputs`. If a single `int` or \
            an `int` array, then :attr:`outputs` has shape \
            `[batch/time, time/batch] + output_size`. If :attr:`output_size` \
            is a (nested) tuple, then :attr:`output_size` has the same \
            structure as :attr:`outputs`.
        """
        # dynamic_rnn requires a dtype only when no initial state is given;
        # default to float32 in that case, otherwise let the caller decide.
        if ('dtype' not in kwargs) and (initial_state is None):
            cell_outputs, state = tf.nn.dynamic_rnn(
                cell=self._cell,
                inputs=inputs,
                sequence_length=sequence_length,
                initial_state=initial_state,
                time_major=time_major,
                dtype=tf.float32,
                **kwargs)
        else:
            cell_outputs, state = tf.nn.dynamic_rnn(
                cell=self._cell,
                inputs=inputs,
                sequence_length=sequence_length,
                initial_state=initial_state,
                time_major=time_major,
                **kwargs)
        outputs, output_size = _apply_rnn_encoder_output_layer(
            self._output_layer, time_major, self._output_layer_hparams,
            mode, cell_outputs, self._cell.output_size)
        if not self._built:
            self._add_internal_trainable_variables()
            # Add trainable variables of `self._cell` and `self._output_layer`
            # which may be constructed externally.
            self._add_trainable_variable(
                layers.get_rnn_cell_trainable_variables(self._cell))
            if self._output_layer and \
                    not isinstance(self._output_layer, (list, tuple)):
                self._add_trainable_variable(
                    self._output_layer.trainable_variables)
            self._built = True
        rets = (outputs, state)
        if return_cell_output:
            rets += (cell_outputs, )
        if return_output_size:
            rets += (output_size, )
        return rets
    #def append_layer(self, layer):
    #    """Appends a layer to the end of the output layer. The layer must take
    #    as inputs a 2D Tensor and output another 2D Tensor (e.g., a
    #    :tf_main:`Dense <layers/Dense>` layer).
    #    The method is only feasible before :meth:`_build` is called.
    #    Args:
    #        layer: A :tf_main:`tf.layers.Layer <layers/Layer>` instance, or
    #            a `dict` of layer hyperparameters.
    #    """
    #    if self._built:
    #        raise TexarError("`UnidirectionalRNNEncoder.append_layer` can be "
    #                         "called only before `_build` is called.")
    #    with tf.variable_scope(self.variable_scope):
    #        layer_ = layer
    #        if not isinstance(layer_, tf.layers.Layer):
    #            layer_ = layers.get_layer(hparams=layer_)
    #        if self._output_layer is None:
    #            self._output_layer = layer_
    #        else:
    #            self._output_layer = _to_list(self._output_layer)
    #            self._output_layers.append(layer_)
    @property
    def cell(self):
        """The RNN cell.
        """
        return self._cell
    @property
    def state_size(self):
        """The state size of encoder cell.
        Same as :attr:`encoder.cell.state_size`.
        """
        return self.cell.state_size
    @property
    def output_layer(self):
        """The output layer.
        """
        return self._output_layer
class BidirectionalRNNEncoder(RNNEncoderBase):
    """Bidirectional forward-backward RNN encoder.

    Args:
        cell_fw (RNNCell, optional): The forward RNN cell. If not given,
            a cell is created as specified in :attr:`hparams["rnn_cell_fw"]`.
        cell_bw (RNNCell, optional): The backward RNN cell. If not given,
            a cell is created as specified in :attr:`hparams["rnn_cell_bw"]`.
        cell_dropout_mode (optional): A tensor taking value of
            :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, which
            toggles dropout in the RNN cells (e.g., activates dropout in the
            TRAIN mode). If `None`, :func:`~texar.context.global_mode()` is
            used. Ignored if the respective cell is given.
        output_layer_fw (optional): An instance of
            :tf_main:`tf.layers.Layer <layers/Layer>`. Applied to the forward
            RNN cell output of each step. If `None` (default), the output
            layer is created as specified in :attr:`hparams["output_layer_fw"]`.
        output_layer_bw (optional): An instance of
            :tf_main:`tf.layers.Layer <layers/Layer>`. Applied to the backward
            RNN cell output of each step. If `None` (default), the output
            layer is created as specified in :attr:`hparams["output_layer_bw"]`.
        hparams (dict, optional): Encoder hyperparameters. If it is not
            specified, the default hyperparameter setting is used. See
            :attr:`default_hparams` for the structure and default values.
            Missing values will take default.
    """
    def __init__(self,
                 cell_fw=None,
                 cell_bw=None,
                 cell_dropout_mode=None,
                 output_layer_fw=None,
                 output_layer_bw=None,
                 hparams=None):
        RNNEncoderBase.__init__(self, hparams)
        # Make RNN cells
        with tf.variable_scope(self.variable_scope):
            if cell_fw is not None:
                self._cell_fw = cell_fw
            else:
                self._cell_fw = layers.get_rnn_cell(
                    self._hparams.rnn_cell_fw, cell_dropout_mode)
            if cell_bw is not None:
                self._cell_bw = cell_bw
            elif self._hparams.rnn_cell_share_config:
                # Share hyperparameters (not parameters) with the fw cell.
                self._cell_bw = layers.get_rnn_cell(
                    self._hparams.rnn_cell_fw, cell_dropout_mode)
            else:
                self._cell_bw = layers.get_rnn_cell(
                    self._hparams.rnn_cell_bw, cell_dropout_mode)
        # Make output layers
        with tf.variable_scope(self.variable_scope):
            if output_layer_fw is not None:
                # Constructor-provided layer: no hparams to drive dropout.
                self._output_layer_fw = output_layer_fw
                self._output_layer_hparams_fw = None
            else:
                self._output_layer_fw = _build_dense_output_layer(
                    self._hparams.output_layer_fw)
                self._output_layer_hparams_fw = self._hparams.output_layer_fw
            if output_layer_bw is not None:
                self._output_layer_bw = output_layer_bw
                self._output_layer_hparams_bw = None
            elif self._hparams.output_layer_share_config:
                # Share hyperparameters (not parameters) with the fw layer.
                self._output_layer_bw = _build_dense_output_layer(
                    self._hparams.output_layer_fw)
                self._output_layer_hparams_bw = self._hparams.output_layer_fw
            else:
                self._output_layer_bw = _build_dense_output_layer(
                    self._hparams.output_layer_bw)
                self._output_layer_hparams_bw = self._hparams.output_layer_bw
    @staticmethod
    def default_hparams():
        """Returns a dictionary of hyperparameters with default values.

        Returns:
            .. code-block:: python

                {
                    "rnn_cell_fw": default_rnn_cell_hparams(),
                    "rnn_cell_bw": default_rnn_cell_hparams(),
                    "rnn_cell_share_config": True,
                    "output_layer_fw": {
                        "num_layers": 0,
                        "layer_size": 128,
                        "activation": "identity",
                        "final_layer_activation": None,
                        "other_dense_kwargs": None,
                        "dropout_layer_ids": [],
                        "dropout_rate": 0.5,
                        "variational_dropout": False
                    },
                    "output_layer_bw": {
                        # Same as "output_layer_fw"
                        # ...
                    },
                    "output_layer_share_config": True,
                    "name": "bidirectional_rnn_encoder"
                }

        Here:

        "rnn_cell_fw" : dict
            Hyperparameters of the forward RNN cell.
            Ignored if :attr:`cell_fw` is given when constructing
            the encoder.
            The default value is defined in
            :meth:`~texar.core.layers.default_rnn_cell_hparams`.

        "rnn_cell_bw" : dict
            Hyperparameters of the backward RNN cell.
            Ignored if :attr:`cell_bw` is given when constructing
            the encoder, or if :attr:`"rnn_cell_share_config"` is `True`.
            The default value is defined in
            :meth:`~texar.core.layers.default_rnn_cell_hparams`.

        "rnn_cell_share_config" : bool
            Whether to share hyperparameters of the backward cell with the
            forward cell. Note that the cell parameters are not shared.
            If `True` (default), :attr:`"rnn_cell_bw"` is ignored.

        "output_layer_fw" : dict
            Hyperparameters of the forward output layer. Ignored if
            :attr:`output_layer_fw` is given in the constructor. Includes:

            "num_layers" : int
                The number of output (dense) layers. Set to 0 to avoid any
                output layers applied to the cell outputs.

            "layer_size" : int or list
                The size of each of the output (dense) layers.
                If an `int`, each output layer will have the same size. If
                a list, the length must equal :attr:`num_layers`.

            "activation" : str or callable or None
                The activation function for each of the output (dense)
                layers except for the final layer. This can be
                the function itself, or its string name or full path,
                e.g., `"activation": tensorflow.nn.relu`
                or `"activation": "relu"`
                or `"activation": "tensorflow.nn.relu"`.
                Default is `None` which maintains a linear activation.

            "final_layer_activation" : str or callable or None
                The activation function for the final output layer.

            "other_dense_kwargs" : dict or None
                Other keyword arguments to construct each of the output
                dense layers, e.g., :attr:`use_bias`. See
                :tf_main:`Dense <layers/Dense>` for the arguments.
                E.g., `"other_dense_kwargs": { "use_bias": False }`.

            "dropout_layer_ids" : int or list
                The indexes of layers (starting from `0`) whose inputs
                are applied with dropout. The index = :attr:`num_layers`
                means dropout applies to the final layer output. E.g.,

                .. code-block:: python

                    {
                        "num_layers": 2,
                        "dropout_layer_ids": [0, 2]
                    }

                will lead to a series of layers as
                `-dropout-layer0-layer1-dropout-`.
                The dropout mode (training or not) is controlled
                by the :attr:`mode` argument when calling the encoder.

            "dropout_rate" : float
                The dropout rate, between 0 and 1. E.g.,
                `"dropout_rate": 0.1` would drop out 10% of elements.

            "variational_dropout" : bool
                Whether the dropout mask is the same across all time steps.

        "output_layer_bw" : dict
            Hyperparameters of the backward output layer. Ignored if
            :attr:`output_layer_bw` is given in the constructor. Has the
            same structure and defaults as :attr:`"output_layer_fw"`.

        "output_layer_share_config" : bool
            Whether to share hyperparameters of the backward output layer
            with the forward output layer. Note that the layer parameters
            are not shared.
            If `True` (default), :attr:`"output_layer_bw"` is ignored.

        "name" : str
            Name of the encoder.
        """
        hparams = RNNEncoderBase.default_hparams()
        hparams.update({
            "rnn_cell_fw": layers.default_rnn_cell_hparams(),
            "rnn_cell_bw": layers.default_rnn_cell_hparams(),
            "rnn_cell_share_config": True,
            "output_layer_fw": _default_output_layer_hparams(),
            "output_layer_bw": _default_output_layer_hparams(),
            "output_layer_share_config": True,
            "name": "bidirectional_rnn_encoder"
        })
        return hparams
    def _build(self,
               inputs,
               sequence_length=None,
               initial_state_fw=None,
               initial_state_bw=None,
               time_major=False,
               mode=None,
               return_cell_output=False,
               return_output_size=False,
               **kwargs):
        """Encodes the inputs.

        Args:
            inputs: A 3D Tensor of shape `[batch_size, max_time, dim]`.
                The first two dimensions
                `batch_size` and `max_time` may be exchanged if
                `time_major=True` is specified.
            sequence_length (int list or 1D Tensor, optional): Sequence lengths
                of the batch inputs. Used to copy-through state and zero-out
                outputs when past a batch element's sequence length.
            initial_state_fw (optional): Initial state of the forward RNN.
            initial_state_bw (optional): Initial state of the backward RNN.
            time_major (bool): The shape format of the :attr:`inputs` and
                :attr:`outputs` Tensors. If `True`, these tensors are of shape
                `[max_time, batch_size, depth]`. If `False` (default),
                these tensors are of shape `[batch_size, max_time, depth]`.
            mode (optional): A tensor taking value in
                :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, including
                `TRAIN`, `EVAL`, and `PREDICT`. Controls output layer dropout
                if the output layer is specified with :attr:`hparams`.
                If `None` (default), :func:`texar.context.global_mode()`
                is used.
            return_cell_output (bool): Whether to return the output of the RNN
                cell. This is the result prior to the output layer.
            return_output_size (bool): Whether to return the size of the
                output (i.e., the result after output layers).
            **kwargs: Optional keyword arguments of
                :tf_main:`tf.nn.dynamic_rnn <nn/dynamic_rnn>`,
                such as `swap_memory`, `dtype`, `parallel_iterations`, etc.

        Returns:
            If :attr:`return_cell_output` is `False` (default), returns a
            pair :attr:`(outputs, final_state)` where

            - :attr:`outputs`: A tuple `(outputs_fw, outputs_bw)` containing \
            the forward and the backward RNN outputs, each of which is of \
            shape `[batch_size, max_time, output_dim]` (if \
            :attr:`time_major` == `False`) or \
            `[max_time, batch_size, output_dim]` (if \
            :attr:`time_major` == `True`). \
            If RNN cell output is a (nested) tuple of Tensors, then the \
            `outputs_fw` and `outputs_bw` will be a (nested) tuple having \
            the same structure as the cell output.
            - :attr:`final_state`: A tuple `(final_state_fw, final_state_bw)` \
            containing the final states of the forward and the backward \
            RNNs, each of which is a \
            Tensor of shape `[batch_size] + cell.state_size` or \
            a (nested) tuple of Tensors (if `cell.state_size` is a (nested) \
            tuple).

            If :attr:`return_cell_output` is `True`, returns a triple
            :attr:`(outputs, final_state, cell_outputs)` where

            - :attr:`cell_outputs`: A tuple \
            `(cell_outputs_fw, cell_outputs_bw)` containing the outputs \
            by the forward and backward RNN cells prior to the \
            output layers, having the same structure as :attr:`outputs` \
            except for the `output_dim`.
        """
        # bidirectional_dynamic_rnn requires a dtype only when neither
        # initial state is given; default to float32 in that case.
        no_initial_state = initial_state_fw is None and initial_state_bw is None
        if ('dtype' not in kwargs) and no_initial_state:
            cell_outputs, states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=self._cell_fw,
                cell_bw=self._cell_bw,
                inputs=inputs,
                sequence_length=sequence_length,
                initial_state_fw=initial_state_fw,
                initial_state_bw=initial_state_bw,
                time_major=time_major,
                dtype=tf.float32,
                **kwargs)
        else:
            cell_outputs, states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=self._cell_fw,
                cell_bw=self._cell_bw,
                inputs=inputs,
                sequence_length=sequence_length,
                initial_state_fw=initial_state_fw,
                initial_state_bw=initial_state_bw,
                time_major=time_major,
                **kwargs)
        outputs_fw, output_size_fw = _apply_rnn_encoder_output_layer(
            self._output_layer_fw, time_major, self._output_layer_hparams_fw,
            mode, cell_outputs[0], self._cell_fw.output_size)
        outputs_bw, output_size_bw = _apply_rnn_encoder_output_layer(
            self._output_layer_bw, time_major, self._output_layer_hparams_bw,
            mode, cell_outputs[1], self._cell_bw.output_size)
        outputs = (outputs_fw, outputs_bw)
        output_size = (output_size_fw, output_size_bw)
        if not self._built:
            self._add_internal_trainable_variables()
            # Add trainable variables of cells and output layers
            # which may be constructed externally.
            self._add_trainable_variable(
                layers.get_rnn_cell_trainable_variables(self._cell_fw))
            self._add_trainable_variable(
                layers.get_rnn_cell_trainable_variables(self._cell_bw))
            if self._output_layer_fw and \
                    not isinstance(self._output_layer_fw, (list, tuple)):
                self._add_trainable_variable(
                    self._output_layer_fw.trainable_variables)
            if self._output_layer_bw and \
                    not isinstance(self._output_layer_bw, (list, tuple)):
                self._add_trainable_variable(
                    self._output_layer_bw.trainable_variables)
            self._built = True
        returns = (outputs, states)
        if return_cell_output:
            returns += (cell_outputs, )
        if return_output_size:
            returns += (output_size, )
        return returns
    @staticmethod
    def concat_outputs(outputs):
        """Concats the outputs of the bidirectional encoder into a single
        tensor along the last (feature) dimension.
        """
        return tf.concat(outputs, 2)
    @property
    def cell_fw(self):
        """The forward RNN cell.
        """
        return self._cell_fw
    @property
    def cell_bw(self):
        """The backward RNN cell.
        """
        return self._cell_bw
    @property
    def state_size_fw(self):
        """The state size of the forward encoder cell.
        Same as :attr:`encoder.cell_fw.state_size`.
        """
        return self.cell_fw.state_size
    @property
    def state_size_bw(self):
        """The state size of the backward encoder cell.
        Same as :attr:`encoder.cell_bw.state_size`.
        """
        return self.cell_bw.state_size
    @property
    def output_layer_fw(self):
        """The output layer of the forward RNN.
        """
        return self._output_layer_fw
    @property
    def output_layer_bw(self):
        """The output layer of the backward RNN.
        """
        return self._output_layer_bw
| [
"numpy.prod",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.shape",
"texar.modules.decoders.rnn_decoder_base.compute_output_shape",
"numpy.array",
"texar.core.layers.get_layer",
"texar.core.layers.get_rnn_cell",
"texar.modules.networks.conv_networks._to_list",
"tensorflow.contrib.framework.... | [((1394, 1458), 'texar.modules.networks.conv_networks._to_list', '_to_list', (['hparams.layer_size', '"""output_layer.layer_size"""', 'nlayers'], {}), "(hparams.layer_size, 'output_layer.layer_size', nlayers)\n", (1402, 1458), False, 'from texar.modules.networks.conv_networks import _to_list\n'), ((2753, 2772), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (2760, 2772), True, 'import numpy as np\n'), ((2816, 2850), 'tensorflow.reshape', 'tf.reshape', (['inputs_flat', '[-1, dim]'], {}), '(inputs_flat, [-1, dim])\n', (2826, 2850), True, 'import tensorflow as tf\n'), ((3098, 3119), 'numpy.array', 'np.array', (['output_size'], {}), '(output_size)\n', (3106, 3119), True, 'import numpy as np\n'), ((3288, 3325), 'tensorflow.reshape', 'tf.reshape', (['output_flat', 'output_shape'], {}), '(output_flat, output_shape)\n', (3298, 3325), True, 'import tensorflow as tf\n'), ((3829, 3930), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['inputs'], {'rate': 'hparams.dropout_rate', 'noise_shape': 'noise_shape', 'training': 'training'}), '(inputs, rate=hparams.dropout_rate, noise_shape=\n noise_shape, training=training)\n', (3846, 3930), True, 'import tensorflow as tf\n'), ((6061, 6184), 'functools.partial', 'functools.partial', (['_forward_output_layers'], {'output_layer': 'output_layer', 'time_major': 'time_major', 'hparams': 'hparams', 'mode': 'mode'}), '(_forward_output_layers, output_layer=output_layer,\n time_major=time_major, hparams=hparams, mode=mode)\n', (6078, 6184), False, 'import functools\n'), ((6246, 6272), 'tensorflow.contrib.framework.nest.flatten', 'nest.flatten', (['cell_outputs'], {}), '(cell_outputs)\n', (6258, 6272), False, 'from tensorflow.contrib.framework import nest\n'), ((6301, 6331), 'tensorflow.contrib.framework.nest.flatten', 'nest.flatten', (['cell_output_size'], {}), '(cell_output_size)\n', (6313, 6331), False, 'from tensorflow.contrib.framework import nest\n'), ((6503, 6552), 
'tensorflow.contrib.framework.nest.pack_sequence_as', 'nest.pack_sequence_as', (['cell_outputs', 'outputs_flat'], {}), '(cell_outputs, outputs_flat)\n', (6524, 6552), False, 'from tensorflow.contrib.framework import nest\n'), ((6571, 6624), 'tensorflow.contrib.framework.nest.pack_sequence_as', 'nest.pack_sequence_as', (['cell_outputs', 'output_size_flat'], {}), '(cell_outputs, output_size_flat)\n', (6592, 6624), False, 'from tensorflow.contrib.framework import nest\n'), ((5092, 5114), 'texar.modules.networks.conv_networks._to_list', '_to_list', (['output_layer'], {}), '(output_layer)\n', (5100, 5114), False, 'from texar.modules.networks.conv_networks import _to_list\n'), ((5144, 5179), 'texar.modules.networks.conv_networks._to_list', '_to_list', (['hparams.dropout_layer_ids'], {}), '(hparams.dropout_layer_ids)\n', (5152, 5179), False, 'from texar.modules.networks.conv_networks import _to_list\n'), ((5776, 5853), 'texar.utils.shapes.mask_sequences', 'mask_sequences', (['output', 'sequence_length'], {'time_major': 'time_major', 'tensor_rank': '(3)'}), '(output, sequence_length, time_major=time_major, tensor_rank=3)\n', (5790, 5853), False, 'from texar.utils.shapes import mask_sequences\n'), ((7024, 7059), 'texar.modules.encoders.encoder_base.EncoderBase.__init__', 'EncoderBase.__init__', (['self', 'hparams'], {}), '(self, hparams)\n', (7044, 7059), False, 'from texar.modules.encoders.encoder_base import EncoderBase\n'), ((36591, 36612), 'tensorflow.concat', 'tf.concat', (['outputs', '(2)'], {}), '(outputs, 2)\n', (36600, 36612), True, 'import tensorflow as tf\n'), ((2213, 2252), 'texar.core.layers.get_layer', 'layers.get_layer', ([], {'hparams': 'layer_hparams'}), '(hparams=layer_hparams)\n', (2229, 2252), False, 'from texar.core import layers\n'), ((5242, 5261), 'texar.utils.mode.is_train_mode', 'is_train_mode', (['mode'], {}), '(mode)\n', (5255, 5261), False, 'from texar.utils.mode import is_train_mode\n'), ((9118, 9156), 'tensorflow.variable_scope', 
'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (9135, 9156), True, 'import tensorflow as tf\n'), ((9398, 9436), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (9415, 9436), True, 'import tensorflow as tf\n'), ((17893, 18064), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'self._cell', 'inputs': 'inputs', 'sequence_length': 'sequence_length', 'initial_state': 'initial_state', 'time_major': 'time_major', 'dtype': 'tf.float32'}), '(cell=self._cell, inputs=inputs, sequence_length=\n sequence_length, initial_state=initial_state, time_major=time_major,\n dtype=tf.float32, **kwargs)\n', (17910, 18064), True, 'import tensorflow as tf\n'), ((18217, 18371), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'self._cell', 'inputs': 'inputs', 'sequence_length': 'sequence_length', 'initial_state': 'initial_state', 'time_major': 'time_major'}), '(cell=self._cell, inputs=inputs, sequence_length=\n sequence_length, initial_state=initial_state, time_major=time_major, **\n kwargs)\n', (18234, 18371), True, 'import tensorflow as tf\n'), ((22792, 22830), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (22809, 22830), True, 'import tensorflow as tf\n'), ((23475, 23513), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (23492, 23513), True, 'import tensorflow as tf\n'), ((33992, 34251), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'self._cell_fw', 'cell_bw': 'self._cell_bw', 'inputs': 'inputs', 'sequence_length': 'sequence_length', 'initial_state_fw': 'initial_state_fw', 'initial_state_bw': 'initial_state_bw', 'time_major': 'time_major', 'dtype': 'tf.float32'}), '(cell_fw=self._cell_fw, cell_bw=self.\n _cell_bw, inputs=inputs, sequence_length=sequence_length,\n initial_state_fw=initial_state_fw, 
initial_state_bw=initial_state_bw,\n time_major=time_major, dtype=tf.float32, **kwargs)\n', (34023, 34251), True, 'import tensorflow as tf\n'), ((34433, 34674), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'self._cell_fw', 'cell_bw': 'self._cell_bw', 'inputs': 'inputs', 'sequence_length': 'sequence_length', 'initial_state_fw': 'initial_state_fw', 'initial_state_bw': 'initial_state_bw', 'time_major': 'time_major'}), '(cell_fw=self._cell_fw, cell_bw=self.\n _cell_bw, inputs=inputs, sequence_length=sequence_length,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,\n time_major=time_major, **kwargs)\n', (34464, 34674), True, 'import tensorflow as tf\n'), ((3015, 3065), 'texar.modules.decoders.rnn_decoder_base.compute_output_shape', 'compute_output_shape', (['output_layer.units', '[1, dim]'], {}), '(output_layer.units, [1, dim])\n', (3035, 3065), False, 'from texar.modules.decoders.rnn_decoder_base import compute_output_shape\n'), ((3231, 3247), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (3239, 3247), True, 'import tensorflow as tf\n'), ((9272, 9334), 'texar.core.layers.get_rnn_cell', 'layers.get_rnn_cell', (['self._hparams.rnn_cell', 'cell_dropout_mode'], {}), '(self._hparams.rnn_cell, cell_dropout_mode)\n', (9291, 9334), False, 'from texar.core import layers\n'), ((13671, 13704), 'texar.core.layers.default_rnn_cell_hparams', 'layers.default_rnn_cell_hparams', ([], {}), '()\n', (13702, 13704), False, 'from texar.core import layers\n'), ((18922, 18973), 'texar.core.layers.get_rnn_cell_trainable_variables', 'layers.get_rnn_cell_trainable_variables', (['self._cell'], {}), '(self._cell)\n', (18961, 18973), False, 'from texar.core import layers\n'), ((22958, 23023), 'texar.core.layers.get_rnn_cell', 'layers.get_rnn_cell', (['self._hparams.rnn_cell_fw', 'cell_dropout_mode'], {}), '(self._hparams.rnn_cell_fw, cell_dropout_mode)\n', (22977, 23023), False, 'from texar.core import 
layers\n'), ((29960, 29993), 'texar.core.layers.default_rnn_cell_hparams', 'layers.default_rnn_cell_hparams', ([], {}), '()\n', (29991, 29993), False, 'from texar.core import layers\n'), ((30022, 30055), 'texar.core.layers.default_rnn_cell_hparams', 'layers.default_rnn_cell_hparams', ([], {}), '()\n', (30053, 30055), False, 'from texar.core import layers\n'), ((35568, 35622), 'texar.core.layers.get_rnn_cell_trainable_variables', 'layers.get_rnn_cell_trainable_variables', (['self._cell_fw'], {}), '(self._cell_fw)\n', (35607, 35622), False, 'from texar.core import layers\n'), ((35682, 35736), 'texar.core.layers.get_rnn_cell_trainable_variables', 'layers.get_rnn_cell_trainable_variables', (['self._cell_bw'], {}), '(self._cell_bw)\n', (35721, 35736), False, 'from texar.core import layers\n'), ((23208, 23273), 'texar.core.layers.get_rnn_cell', 'layers.get_rnn_cell', (['self._hparams.rnn_cell_fw', 'cell_dropout_mode'], {}), '(self._hparams.rnn_cell_fw, cell_dropout_mode)\n', (23227, 23273), False, 'from texar.core import layers\n'), ((23345, 23410), 'texar.core.layers.get_rnn_cell', 'layers.get_rnn_cell', (['self._hparams.rnn_cell_bw', 'cell_dropout_mode'], {}), '(self._hparams.rnn_cell_bw, cell_dropout_mode)\n', (23364, 23410), False, 'from texar.core import layers\n')] |
# -*- coding: utf-8 -*-
"""Copyright 2014 <NAME>.
filterpy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy import array
from filterpy.hinfinity import HInfinityFilter
def test_Hinfinity():
    """Smoke-test the H-infinity filter on a constant-velocity model fed a
    constant measurement of 5, printing the state after every update.
    """
    dt = 0.1
    hinf = HInfinityFilter(2, 1, 0, gamma=0.4)
    # Constant-velocity transition; measurement observes velocity only.
    hinf.F = array([[1.0, dt],
                     [0.0, 1.0]])
    hinf.H = array([[0.0, 1.0]])
    hinf.x = array([[0.0, 0.0]]).T
    hinf.P = 0.01
    hinf.W = array([[0.0003, 0.005],
                     [0.0050, 0.100]]) / 1000
    hinf.V = 0.01
    hinf.Q = 0.01
    positions = []
    velocities = []
    for _ in range(39):
        hinf.update(5)
        print(hinf.x.T)
        positions.append(hinf.x[0, 0])
        velocities.append(hinf.x[1, 0])
        hinf.predict()
| [
"numpy.array",
"filterpy.hinfinity.HInfinityFilter"
] | [((555, 590), 'filterpy.hinfinity.HInfinityFilter', 'HInfinityFilter', (['(2)', '(1)', '(0)'], {'gamma': '(0.4)'}), '(2, 1, 0, gamma=0.4)\n', (570, 590), False, 'from filterpy.hinfinity import HInfinityFilter\n'), ((601, 631), 'numpy.array', 'array', (['[[1.0, dt], [0.0, 1.0]]'], {}), '([[1.0, dt], [0.0, 1.0]])\n', (606, 631), False, 'from numpy import array\n'), ((657, 676), 'numpy.array', 'array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (662, 676), False, 'from numpy import array\n'), ((685, 704), 'numpy.array', 'array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (690, 704), False, 'from numpy import array\n'), ((769, 807), 'numpy.array', 'array', (['[[0.0003, 0.005], [0.005, 0.1]]'], {}), '([[0.0003, 0.005], [0.005, 0.1]])\n', (774, 807), False, 'from numpy import array\n')] |
# Author: <NAME>
# Public Domain
# Ce script n'est PAS un modèle de bonnes pratiques pythoniennes.
# Mieux vaut se focaliser sur les autres scripts qui sont bien mieux écrits.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
tfin = 15e-3 # final instant of the simulation (s)
dt = 0.01e-3 # time step (s)
Vsat = 15 # saturation voltage of the op-amp (V)
T0 = tfin / 10
f0 = 1 / T0
omega0 = 2 * np.pi * f0
H0 = 1 / 3
Q = 1 / 3
G = 1.09 / H0
# Initial conditions
t = [0.0]
e = [0.01] # Input of the amplifier stage / output of the filter; 0.01 emulates noise
s = [0.0] # Output of the amplifier stage / input of the filter
dsdt = [0.0] # Derivative of s
dedt = [0.01] # Derivative of e; 0.01 emulates input noise
mode = [0] # 0 = linear, +1 = positive saturation, -1 = negative saturation
CIs = [[t[0], e[0], dedt[0], s[0], dsdt[0]]]
types_ED = [mode[0]]
# Solve the differential equation with the explicit Euler method
while t[-1] < tfin:
    dedt.append((omega0 * dsdt[-1] - omega0 / Q * dedt[-1] - e[-1] * omega0**2) * dt + dedt[-1])
    e.append(dedt[-1] * dt + e[-1])
    # Amplifier stage: linear gain G, clipped at +/-Vsat
    if G * e[-1] < -Vsat:
        s.append(-Vsat)
        mode.append(-1)
    elif G * e[-1] > Vsat:
        s.append(Vsat)
        mode.append(1)
    else:
        s.append(G * e[-1])
        mode.append(0)
    dsdt.append((s[-1] - s[-2]) / dt)
    # Record fresh "initial conditions" at each regime change; they seed
    # the piecewise odeint integrations further below.
    if mode[-1] != mode[-2]:
        CIs.append([t[-1], e[-1], dedt[-1], s[-1], dsdt[-1]])
        types_ED.append(mode[-1])
    t.append(t[-1] + dt)
e = np.asarray(e)
dedt = np.asarray(dedt)
s = np.asarray(s)
t = np.asarray(t)
mode = np.asarray(mode)
# Piecewise odeint solutions, filled by the loop further below
e_ED = []
dedt_ED = []
t_ED = []
def ED_mode0(X, t, Q, G, H0):
    """Right-hand side of the linear-regime ODE: state X = (e, de/dt)."""
    e, dedt = X
    damping = omega0 * (1 - G * H0) * dedt / float(Q)
    restoring = omega0**2 * e
    return [dedt, -damping - restoring]
def ED_mode_not0(X, t, Q, G, H0):
    """Right-hand side of the saturated-regime ODE (gain term absent);
    G and H0 are accepted only so the signature matches ED_mode0.
    """
    e, dedt = X
    damping = omega0 * dedt / float(Q)
    restoring = omega0**2 * e
    return [dedt, -damping - restoring]
# Integrate each regime's linear ODE with odeint over a 2*T0 window
# starting at the regime-change instant, for comparison with Euler.
for i, CI in enumerate(CIs):
    t_ED.append(np.linspace(CI[0], CI[0] + 2 * T0, 100))
    if types_ED[i] == 0:
        sol = odeint(ED_mode0, [CI[1], CI[2]], t_ED[-1], args = (Q, G, H0))
    else:
        sol = odeint(ED_mode_not0, [CI[1], CI[2]], t_ED[-1], args = (Q, G, H0))
    e_ED.append(sol[:,0])
    dedt_ED.append(sol[:,1])
plt.subplot(3,1,1) # Subplot 1: time evolution of e
plt.plot(t, e)
plt.plot(t[mode == 1], e[mode == 1], 'r.')
plt.plot(t[mode == -1], e[mode == -1], 'g.')
plt.xlim([0, np.max(t_ED)])
plt.ylim([-10, 10])
# Overlay the piecewise odeint solutions (dashed), colored by regime
for i, CI in enumerate(CIs):
    if types_ED[i] == 0:
        style = 'C0--'
    elif types_ED[i] == 1:
        style = 'r--'
    else:
        style = 'g--'
    plt.plot(t_ED[i], e_ED[i], style, linewidth = .5)
plt.yticks([-Vsat / G, Vsat / G])
plt.grid(which = 'both')
plt.title('f0 = {0:.3f} Hz, Q = {1:.3f}, H0 = {2:.2f}, G = {3:.3f}, Vsat = {4:.2f} V, Vsat / G = {5:.3f} V'.format(f0, Q, H0, G, Vsat, Vsat / G))
plt.xlabel("t (s)")
plt.ylabel("e (V)")
plt.subplot(3,1,2) # Subplot 2: time evolution of s
plt.plot(t, s)
plt.plot(t[mode == 1], s[mode == 1], 'r.')
plt.plot(t[mode == -1], s[mode == -1], 'g.')
plt.xlim([0, np.max(t_ED)])
plt.ylim([-20, 20])
plt.yticks([-Vsat, Vsat])
plt.grid(which = 'both')
plt.xlabel("t (s)")
plt.ylabel("s (V)")
plt.subplot(3,1,3) # Subplot 3: phase portrait
plt.plot(e, dedt)
plt.plot(e[mode == 1], dedt[mode ==1], 'r.')
plt.plot(e[mode == -1], dedt[mode ==-1], 'g.')
plt.xticks([-Vsat / G, 0, Vsat / G])
plt.grid(which = 'both')
plt.xlabel("e (V)")
plt.ylabel("de/dt (V/s)")
plt.title("Plan de phase")
plt.tight_layout() # Adjust the spacing around the subplots
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"scipy.integrate.odeint",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layo... | [((1614, 1627), 'numpy.asarray', 'np.asarray', (['e'], {}), '(e)\n', (1624, 1627), True, 'import numpy as np\n'), ((1635, 1651), 'numpy.asarray', 'np.asarray', (['dedt'], {}), '(dedt)\n', (1645, 1651), True, 'import numpy as np\n'), ((1656, 1669), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (1666, 1669), True, 'import numpy as np\n'), ((1674, 1687), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (1684, 1687), True, 'import numpy as np\n'), ((1695, 1711), 'numpy.asarray', 'np.asarray', (['mode'], {}), '(mode)\n', (1705, 1711), True, 'import numpy as np\n'), ((2363, 2383), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2374, 2383), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2441), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'e'], {}), '(t, e)\n', (2435, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2442, 2484), 'matplotlib.pyplot.plot', 'plt.plot', (['t[mode == 1]', 'e[mode == 1]', '"""r."""'], {}), "(t[mode == 1], e[mode == 1], 'r.')\n", (2450, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2529), 'matplotlib.pyplot.plot', 'plt.plot', (['t[mode == -1]', 'e[mode == -1]', '"""g."""'], {}), "(t[mode == -1], e[mode == -1], 'g.')\n", (2493, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2578), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-10, 10]'], {}), '([-10, 10])\n', (2567, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2802, 2835), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-Vsat / G, Vsat / G]'], {}), '([-Vsat / G, Vsat / G])\n', (2812, 2835), True, 'import matplotlib.pyplot as plt\n'), ((2836, 2858), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (2844, 2858), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (s)"""'], {}), "('t (s)')\n", (3019, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3048), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""e (V)"""'], {}), "('e (V)')\n", (3039, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3070), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3061, 3070), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3129), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 's'], {}), '(t, s)\n', (3123, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3172), 'matplotlib.pyplot.plot', 'plt.plot', (['t[mode == 1]', 's[mode == 1]', '"""r."""'], {}), "(t[mode == 1], s[mode == 1], 'r.')\n", (3138, 3172), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3217), 'matplotlib.pyplot.plot', 'plt.plot', (['t[mode == -1]', 's[mode == -1]', '"""g."""'], {}), "(t[mode == -1], s[mode == -1], 'g.')\n", (3181, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3265), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-20, 20]'], {}), '([-20, 20])\n', (3254, 3265), True, 'import matplotlib.pyplot as plt\n'), ((3267, 3292), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-Vsat, Vsat]'], {}), '([-Vsat, Vsat])\n', (3277, 3292), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3315), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (3301, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3338), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (s)"""'], {}), "('t (s)')\n", (3329, 3338), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3358), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""s (V)"""'], {}), "('s (V)')\n", (3349, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3380), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3371, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3434), 'matplotlib.pyplot.plot', 'plt.plot', (['e', 'dedt'], {}), '(e, dedt)\n', (3425, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3480), 'matplotlib.pyplot.plot', 'plt.plot', (['e[mode 
== 1]', 'dedt[mode == 1]', '"""r."""'], {}), "(e[mode == 1], dedt[mode == 1], 'r.')\n", (3443, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3527), 'matplotlib.pyplot.plot', 'plt.plot', (['e[mode == -1]', 'dedt[mode == -1]', '"""g."""'], {}), "(e[mode == -1], dedt[mode == -1], 'g.')\n", (3488, 3527), True, 'import matplotlib.pyplot as plt\n'), ((3528, 3564), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-Vsat / G, 0, Vsat / G]'], {}), '([-Vsat / G, 0, Vsat / G])\n', (3538, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3587), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (3573, 3587), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""e (V)"""'], {}), "('e (V)')\n", (3601, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3636), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""de/dt (V/s)"""'], {}), "('de/dt (V/s)')\n", (3621, 3636), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3663), 'matplotlib.pyplot.title', 'plt.title', (['"""Plan de phase"""'], {}), "('Plan de phase')\n", (3646, 3663), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3683), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3681, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3745, 3747), True, 'import matplotlib.pyplot as plt\n'), ((2751, 2799), 'matplotlib.pyplot.plot', 'plt.plot', (['t_ED[i]', 'e_ED[i]', 'style'], {'linewidth': '(0.5)'}), '(t_ED[i], e_ED[i], style, linewidth=0.5)\n', (2759, 2799), True, 'import matplotlib.pyplot as plt\n'), ((2065, 2104), 'numpy.linspace', 'np.linspace', (['CI[0]', '(CI[0] + 2 * T0)', '(100)'], {}), '(CI[0], CI[0] + 2 * T0, 100)\n', (2076, 2104), True, 'import numpy as np\n'), ((2146, 2205), 'scipy.integrate.odeint', 'odeint', (['ED_mode0', '[CI[1], CI[2]]', 't_ED[-1]'], {'args': '(Q, G, H0)'}), '(ED_mode0, [CI[1], 
CI[2]], t_ED[-1], args=(Q, G, H0))\n', (2152, 2205), False, 'from scipy.integrate import odeint\n'), ((2232, 2295), 'scipy.integrate.odeint', 'odeint', (['ED_mode_not0', '[CI[1], CI[2]]', 't_ED[-1]'], {'args': '(Q, G, H0)'}), '(ED_mode_not0, [CI[1], CI[2]], t_ED[-1], args=(Q, G, H0))\n', (2238, 2295), False, 'from scipy.integrate import odeint\n'), ((2544, 2556), 'numpy.max', 'np.max', (['t_ED'], {}), '(t_ED)\n', (2550, 2556), True, 'import numpy as np\n'), ((3231, 3243), 'numpy.max', 'np.max', (['t_ED'], {}), '(t_ED)\n', (3237, 3243), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from multiprocessing import Pool, Process
#import cudf
from scipy.spatial.distance import cosine
print(__name__)
def write_cos_dist(t):
    """Return the cosine distance of every row of t[0] (a DataFrame) to
    the reference vector t[1], as a Series indexed like t[0].
    """
    frame, reference = t
    print(frame.shape)
    print(reference.shape)
    return frame.apply(cosine, axis=1, args=(reference,))
if __name__ == 'predict':
from collections import Counter
import math
import re
import model as model_py
import threading
import multiprocessing
user = {}
def initializer():
multiprocessing.current_process().name = 'helper'
threading.current_thread().name = 'helper'
class MusicRecommender():
"""
Takes as input some Yandex Music track ids and gives
as output id of tracks you should probably listen to!
Requirements:
numpy
pandas
regex
scipy.spatial.distance.cosine
collections.Counter
math
"""
def __init__(self, df_songs, df_song_names, df_users, df_users_preferences):
self.df_songs = df_songs#pd.read_csv('songs_dataset_transformed.csv', ',', index_col='track_id', encoding='cp1251')
self.df_song_names = df_song_names#pd.read_csv('songs_dataset.csv',';', encoding='cp1251', index_col='track_id').drop_duplicates()
self.df_users = df_users#pd.read_csv('users_dataset.csv', ';', encoding='cp1251')
self.df_users_preferences = df_users_preferences#pd.read_csv('users_preferences_dataset.csv', index_col='user_id')
self.users_track_id_valid = [id_ for id_ in self.df_users['track_id'] if id_ in self.df_song_names.index]
self.songs_model_weights_coldstart = self.coldstart_model()
self.neighbors_users_count = 25
return None
def take_average_footprint_by_songs_ids(self, ids, consider_relevance=False):
ids = [int(id) for id in ids if int(id) in df_songs.index]
print(ids)
how_many_songs = len(ids)
if how_many_songs > 0:
feature_list = self.df_songs.columns
user_cumulative_info = pd.Series({feature:0 for feature in feature_list})
if consider_relevance:
ordinal_coefficients = {i:self.song_time_relevance_sigmoid(i) for i in range(1, how_many_songs+1)}
norma_coef = sum(ordinal_coefficients.values())
for key,value in ordinal_coefficients.items():
ordinal_coefficients[key] = value/norma_coef
curr_order = 1
for track_id in ids:
try:
if consider_relevance == False:
print(self.df_songs.loc[track_id])
user_cumulative_info += self.df_songs.loc[track_id]
else:
print(self.df_songs.loc[track_id])
user_cumulative_info += self.df_songs.loc[track_id]*ordinal_coefficients[curr_order]
except Exception as e:
print(e)
how_many_songs -= 1
curr_order += 1
if not consider_relevance:
user_cumulative_info /= how_many_songs
else:
user_cumulative_info *= len(ids)/how_many_songs
genre_filter = re.compile('genre_[a-z]*')
genre_cols = [col for col in feature_list if genre_filter.match(col)]
user_cumulative_info[genre_cols] /= max(user_cumulative_info[genre_cols])
user_cumulative_info[genre_cols] *=2
return user_cumulative_info
else:
return self.take_average_footprint_by_songs_ids(df_users_preferences.dropna().index, False)
def coldstart_coeff_sigmoid(self, n_songs):
if n_songs < 15:
hard_factor = -1/2
offset = 6.5
return 1 - (1/(1+math.exp(hard_factor*(n_songs-offset))) - 1/(1+math.exp(hard_factor*(-offset))))
else:
return 0
def song_time_relevance_sigmoid(self, x):
if x > 15:
hard_factor = 1/10
offset = 40
return 1/(1+math.exp(hard_factor*(x-offset))) + 0.075
else:
return 1.
def str_int_list_to_list(self, str_):
no_brackets = str_.replace(']', '').replace('[', '').replace(' ', '')
if len(no_brackets) > 0:
return([int(t) for t in no_brackets.split(',')])
else:
return []
def coldstart_model(self):
most_popular_artists = self.df_song_names.loc[self.users_track_id_valid]['artist'].value_counts()[2:30]
most_popular_artists_songs = self.df_song_names[self.df_song_names['artist'].isin(dict(most_popular_artists).keys())]
bests_from_popular_artists = []
for index,row in most_popular_artists_songs.iterrows():
bests_from_popular_artists.extend(self.str_int_list_to_list(row['bests_from_album']))
bests_from_popular_artists = [track_id for track_id in list(set(bests_from_popular_artists))
if track_id in self.df_song_names.index]
bests_from_popular_artists_df_sorted = self.df_song_names.loc[bests_from_popular_artists]
bests_from_popular_artists_df_sorted = bests_from_popular_artists_df_sorted[
(bests_from_popular_artists_df_sorted['likes_count'] != 'None') &
(bests_from_popular_artists_df_sorted['duration_ms'] > 120000)]
bests_from_popular_artists_df_sorted = bests_from_popular_artists_df_sorted.drop_duplicates()
bests_from_popular_artists_df_sorted['likes_count'] = bests_from_popular_artists_df_sorted['likes_count'].astype(int)
bests_all_time = open('../data/bests.csv', 'r')
bests_all_time_l = [int(s) for s in bests_all_time.readline().replace(' ', '').split(',')]
bests_all_time.close()
bests_all_time_df = self.df_song_names.loc[bests_all_time_l]
#print(bests_all_time_df)
songs_to_boost = pd.concat([bests_from_popular_artists_df_sorted.sort_values('likes_count', ascending=False)[:350],
bests_all_time_df]).sample(25, random_state=100)
#print(songs_to_boost)
model_track_weights = {track_id:0 for track_id in self.df_song_names.index}
for best_track_id in songs_to_boost.index:
model_track_weights[best_track_id] = 0.85
return model_track_weights
def add_neighbor_tracks(self, neighbor_id):
track_indecies = [id_
for id_ in self.df_users[self.df_users['owner_id'] == neighbor_id]['track_id']
if id_ in self.df_songs.index]
self.songs_neighbors.extend(track_indecies)
def user_based_distanse_decreasing_sigmoid(self, x):
"""
Takes percent of users that have certain song and returns
coefficient for cosine distance to decrease.
"""
hard_factor = -7
offset = 0.4
return 1/(1+math.exp(hard_factor*(x-offset))) - 1/(1+math.exp(hard_factor*(-offset)))
def user_based_model(self, list_user_tracks_id):
global user
user = self.take_average_footprint_by_songs_ids(list_user_tracks_id, consider_relevance=True)
print(self.df_users_preferences.shape, len(user))
cosine_dists = self.df_users_preferences.apply(cosine, axis=1, args=(user,))
#for index,row in self.df_users_preferences.iterrows():
# cosine_dists[index] = cosine(user, row)
cosine_dists_sorted = sorted(list(cosine_dists.items()), key=lambda x:x[1])
users_neighbors_to_consider = [(user_dist[0], 1-user_dist[1])
for user_dist in cosine_dists_sorted[:self.neighbors_users_count]]
users_neighbors = [t[0] for t in users_neighbors_to_consider]
self.songs_neighbors = []
add_neighbor_tracks_v = np.vectorize(self.add_neighbor_tracks)
add_neighbor_tracks_v(users_neighbors)
song_id_vs_neighbor_repeats = sorted(list(dict(Counter(self.songs_neighbors)).items()), key=lambda x:1/x[1])
relevant_artists = self.df_song_names.loc[self.songs_neighbors]['artist'].value_counts(normalize=True).head(50)
relevant_artists_normalized = relevant_artists/(relevant_artists[0]/self.artists_importance)
relevant_genres = self.df_song_names.loc[self.songs_neighbors]['genre'].value_counts(normalize=True).head(5)
relevant_genres = relevant_genres/(relevant_genres[0]/self.genres_importance)
relevant_genres.append(pd.Series({genre_chosen:self.genres_importance for genre_chosen in self.genres_chosen}))
all_sim_songs = []
for track_id_lst in self.df_song_names.loc[[track_id for track_id in list_user_tracks_id
if track_id in self.df_songs.index]]['bests_from_album']:
no_brackets = track_id_lst.replace(']', '').replace('[', '').replace(' ', '')
if len(no_brackets) > 0:
all_sim_songs.extend([int(t) for t in no_brackets.split(',')])
all_sim_songs = set(all_sim_songs)
self.songs_model_weights_user_based = {sim_song:self.sim_songs_importance for sim_song in all_sim_songs}
self.genres_model_weights = relevant_genres
self.artists_model_weights = relevant_artists_normalized
def artists_vs_weight_into_songs_vs_weights(self, dict_artists_vs_weights):
result = {}
for artist, weight in dict_artists_vs_weights.items():
for track_id in self.df_song_names[self.df_song_names['artist'] == artist].index:
result[track_id] = weight
return result
def genres_vs_weight_into_songs_vs_weights(self, dict_genres_vs_weights):
result = {}
for genre, weight in dict_genres_vs_weights.items():
for track_id in self.df_song_names[self.df_song_names['genre'] == genre].index:
result[track_id] = weight
return result
#def compute_
def apply_song_weights(self, dict_song_ids_vs_weights, coeff):
#song_ids_vs_weights = pd.Series(dict_song_ids_vs_weights)
#result_weights.apply()
for track_id, weight in dict_song_ids_vs_weights.items():
self.result_weights[track_id] = 1. - (1. - self.result_weights[track_id])*(1.-1.*weight*coeff)
pass
def fit(self, genres_chosen, artists_chosen, tracks_chosen,
learning_rate=0.05,
artists_importance=0.15,
genres_importance=0.25,
sim_songs_importance=0.10,
sim_artists_importance=0.15):
self.artists_importance = artists_importance
self.genres_importance = genres_importance
self.sim_songs_importance = sim_songs_importance
self.sim_artists_importance = sim_artists_importance
self.tracks_chosen = tracks_chosen
self.genres_chosen = tracks_chosen
if len(tracks_chosen) > 0:
print(tracks_chosen)
self.user_based_model(tracks_chosen)
coldstart_coeff = self.coldstart_coeff_sigmoid(len(tracks_chosen))
self.result_weights = {track_id:0 for track_id in self.df_song_names.index}
all_sim_artists = []
for artist_name in artists_chosen:
similar_artists = [x[0] for x in sorted(
list(
dict(
Counter(
df_users[
df_users['owner_id'].isin(df_users[df_users['artist_name'] == artist_name]['owner_id'].unique())]
['artist_name'])).items()), key=lambda x: 1/x[1])]
try:
similar_artists.remove(artist_name)
except Exception:
pass
similar_artists = similar_artists[:5]
all_sim_artists.extend(similar_artists)
#print(artist_name)
#all_sim_artists.extend(artists_chosen)
self.artists_model_weights_user_based = {artist_name:self.sim_artists_importance for artist_name in all_sim_artists}
#print(self.df_song_names.loc[self.songs_model_weights_user_based.keys()])
#print(self.genres_model_weights)
#print(self.artists_model_weights)
#print('begin')
self.apply_song_weights(self.songs_model_weights_coldstart, coldstart_coeff)
self.apply_song_weights(self.songs_model_weights_user_based, 1. - coldstart_coeff)
self.apply_song_weights(self.genres_vs_weight_into_songs_vs_weights(self.genres_model_weights),
1. - coldstart_coeff)
self.apply_song_weights(self.artists_vs_weight_into_songs_vs_weights(self.artists_model_weights),
1. - coldstart_coeff)
#print(self.artists_model_weights_user_based)
self.apply_song_weights(self.artists_vs_weight_into_songs_vs_weights(self.artists_model_weights_user_based),
1. - coldstart_coeff)
pass
def update_dists(self, track_id_vs_coeff):
try:
self.all_cos_distances[track_id_vs_coeff[0]] = self.all_cos_distances[track_id_vs_coeff[0]]*(1.-track_id_vs_coeff[1])
except Exception:
pass
def predict(self, predict_count=20):
print('cos_dist_start')
global user
#num_workers = 8
#pool = Pool(num_workers, initargs={'name': 'helper'})
#len_df = recommender.df_songs.shape[0]
#self.all_cos_distances = pd.concat(pool.map(write_cos_dist, [(recommender.df_songs[int(len_df*i/num_workers):int(len_df*(i+1)/num_workers)], user) for i in range(num_workers)]))
self.all_cos_distances = write_cos_dist((self.df_songs, user))
#pool.close()
#pool.join()
print('cos_dist_end')
#print('a')
update_dists_v = np.vectorize(self.update_dists)
update_dists_v(self.result_weights.items())
#for track_id, coeff in self.result_weights.items():
# try:
# self.all_cos_distances[track_id] = self.all_cos_distances[track_id]*(1.-coeff)
# except Exception:
# pass
#print('b')
track_ids_sorted = [t[0] for t in sorted(list(self.all_cos_distances.items()), key=lambda x: x[1])]
for track_id_already_exist in self.tracks_chosen:
try:
track_ids_sorted.remove(track_id_already_exist)
except Exception:
pass
return self.df_song_names.loc[track_ids_sorted][:predict_count*5].sample(predict_count)
df_songs = model_py.df_songs #pd.read_csv('songs_dataset_transformed.csv', ',', index_col='track_id', encoding='cp1251')
df_song_names = model_py.df_song_names#pd.read_csv('songs_dataset.csv',';', encoding='cp1251', index_col='track_id').drop_duplicates()
df_users = model_py.df_users#pd.read_csv('users_dataset.csv', ';', encoding='cp1251')
df_users_preferences = model_py.df_users_preferences#pd.read_csv('users_preferences_dataset.csv', index_col='user_id')
recommender = MusicRecommender(df_songs, df_song_names, df_users, df_users_preferences)
def get_recommends(artists_like, genres_like, tracks_like):
print('fit')
recommender.fit(genres_like, artists_like, tracks_like)
print('predict')
pred = recommender.predict()
print('predicted')
pred_json = {'tracks': [{'track_id': index,
'album_id': row['album_id'],
'artist_id': row['artist_id'],
'artist': row['artist'],
'title': row['title'],
'album': row['album'],
'song_version': row['song_version'],
'duration': row['duration_ms']
} for index, row in pred[['album_id', 'artist_id', 'artist', 'title', 'album', 'song_version', 'duration_ms']].iterrows()]}
return pred_json
| [
"pandas.Series",
"threading.current_thread",
"re.compile",
"collections.Counter",
"math.exp",
"numpy.vectorize",
"multiprocessing.current_process"
] | [((506, 539), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (537, 539), False, 'import multiprocessing\n'), ((563, 589), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (587, 589), False, 'import threading\n'), ((8423, 8461), 'numpy.vectorize', 'np.vectorize', (['self.add_neighbor_tracks'], {}), '(self.add_neighbor_tracks)\n', (8435, 8461), True, 'import numpy as np\n'), ((14956, 14987), 'numpy.vectorize', 'np.vectorize', (['self.update_dists'], {}), '(self.update_dists)\n', (14968, 14987), True, 'import numpy as np\n'), ((2156, 2209), 'pandas.Series', 'pd.Series', (['{feature: (0) for feature in feature_list}'], {}), '({feature: (0) for feature in feature_list})\n', (2165, 2209), True, 'import pandas as pd\n'), ((3451, 3477), 're.compile', 're.compile', (['"""genre_[a-z]*"""'], {}), "('genre_[a-z]*')\n", (3461, 3477), False, 'import re\n'), ((9112, 9205), 'pandas.Series', 'pd.Series', (['{genre_chosen: self.genres_importance for genre_chosen in self.genres_chosen}'], {}), '({genre_chosen: self.genres_importance for genre_chosen in self.\n genres_chosen})\n', (9121, 9205), True, 'import pandas as pd\n'), ((7445, 7481), 'math.exp', 'math.exp', (['(hard_factor * (x - offset))'], {}), '(hard_factor * (x - offset))\n', (7453, 7481), False, 'import math\n'), ((7486, 7517), 'math.exp', 'math.exp', (['(hard_factor * -offset)'], {}), '(hard_factor * -offset)\n', (7494, 7517), False, 'import math\n'), ((4362, 4398), 'math.exp', 'math.exp', (['(hard_factor * (x - offset))'], {}), '(hard_factor * (x - offset))\n', (4370, 4398), False, 'import math\n'), ((4065, 4107), 'math.exp', 'math.exp', (['(hard_factor * (n_songs - offset))'], {}), '(hard_factor * (n_songs - offset))\n', (4073, 4107), False, 'import math\n'), ((4112, 4143), 'math.exp', 'math.exp', (['(hard_factor * -offset)'], {}), '(hard_factor * -offset)\n', (4120, 4143), False, 'import math\n'), ((8572, 8601), 'collections.Counter', 'Counter', 
(['self.songs_neighbors'], {}), '(self.songs_neighbors)\n', (8579, 8601), False, 'from collections import Counter\n')] |
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
import keras
from hyperas.distributions import uniform
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
np.random.seed(123)
from keras.models import load_model
from keras.models import Sequential
from keras.optimizers import SGD;
from keras.layers import Convolution1D, Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.utils import np_utils
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
import pandas as pd
import os
import sys
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# Custom activation function
from keras.utils.generic_utils import get_custom_objects
from keras.layers import Activation
from keras import backend as K
class Logus(Activation):
def __init__(self, activation, **kwargs):
super(Logus, self).__init__(activation, **kwargs)
self.__name__ = 'Logus'
def logus(x):
return (K.log(x*x + 0.1))
get_custom_objects().update({'Logus': Logus(logus)})
#np.random.seed(123)
TRAIN = './data/train.tsv'
TEST = './data/test.tsv'
def read_tsv(filename):
df = pd.read_csv(filename, header=None, delimiter='\t', names=['id','name','label','spectrum'])
df = df.sample(frac=1,random_state=12345)
X = []
Y = []
for index, row in df.iterrows():
Y.append( row['label'] == 'cancer' )
X.append( np.array( row['spectrum'].split(',') ).astype(np.float) )
X = np.array(X)
Y = np_utils.to_categorical(np.array(Y), 2).astype(np.int) ## BINARY!
return X,Y
X_train, Y_train = read_tsv(TRAIN)
X_test, Y_test = read_tsv(TEST)
print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)
# define a dense input model
model = Sequential()
model.add(Dense(32, input_dim = X_train.shape[1]))
model.add(Activation('sigmoid'))
model.add(Dense(32))
model.add(Activation('tanh'))
model.add(Dropout(0.87))
model.add(Dense(64))
model.add(Activation('tanh'))
model.add(Dropout(0.4))
model.add(Dense(2))
model.add(Activation('softmax'))
# simple early stopping
optimizer = keras.optimizers.Adam(lr=1e-5)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
model.summary()
np.random.seed(123)
with tf.device('/cpu:0'):
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5000)
mc = ModelCheckpoint('model-valacc-{val_acc:03f}.h5', verbose=0, monitor='val_loss',save_best_only=True, mode='auto')
# fit model
history = model.fit(
X_train,
Y_train,
validation_data=(X_test, Y_test),
nb_epoch=15000,
batch_size=32,
verbose=1,
callbacks=[es, mc]
)
np.save('./history.npy',history.history)
# history=np.load('my_history.npy',allow_pickle='TRUE').item()
if False:
plt.grid()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# load the saved model
#evaluate the model
#_, train_acc = saved_model.evaluate(X_train, Y_train, verbose=0)
#_, test_acc = saved_model.evaluate(X_test, Y_test, verbose=0)
#print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
sys.exit()
| [
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"keras.layers.Activation",
"sys.exit",
"keras.layers.Dense",
"numpy.save",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"keras.callbacks.EarlyStopping",
"keras.optimizers.Ada... | [((20, 39), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (34, 39), True, 'import numpy as np\n'), ((223, 242), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (237, 242), True, 'import numpy as np\n'), ((1905, 1917), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1915, 1917), False, 'from keras.models import Sequential\n'), ((2247, 2278), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (2268, 2278), False, 'import keras\n'), ((2414, 2433), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2428, 2433), True, 'import numpy as np\n'), ((3577, 3587), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3585, 3587), False, 'import sys\n'), ((1092, 1110), 'keras.backend.log', 'K.log', (['(x * x + 0.1)'], {}), '(x * x + 0.1)\n', (1097, 1110), True, 'from keras import backend as K\n'), ((1285, 1382), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None', 'delimiter': '"""\t"""', 'names': "['id', 'name', 'label', 'spectrum']"}), "(filename, header=None, delimiter='\\t', names=['id', 'name',\n 'label', 'spectrum'])\n", (1296, 1382), True, 'import pandas as pd\n'), ((1612, 1623), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1620, 1623), True, 'import numpy as np\n'), ((1928, 1965), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_dim': 'X_train.shape[1]'}), '(32, input_dim=X_train.shape[1])\n', (1933, 1965), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1979, 2000), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1989, 2000), False, 'from keras.layers import Activation\n'), ((2013, 2022), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (2018, 2022), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2034, 2052), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2044, 2052), False, 'from keras.layers 
import Activation\n'), ((2065, 2078), 'keras.layers.Dropout', 'Dropout', (['(0.87)'], {}), '(0.87)\n', (2072, 2078), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2091, 2100), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (2096, 2100), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2112, 2130), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2122, 2130), False, 'from keras.layers import Activation\n'), ((2143, 2155), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2150, 2155), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2168, 2176), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (2173, 2176), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((2188, 2209), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2198, 2209), False, 'from keras.layers import Activation\n'), ((2439, 2458), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (2448, 2458), True, 'import tensorflow as tf\n'), ((2474, 2545), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(5000)'}), "(monitor='val_loss', mode='min', verbose=1, patience=5000)\n", (2487, 2545), False, 'from keras.callbacks import EarlyStopping\n'), ((2555, 2673), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model-valacc-{val_acc:03f}.h5"""'], {'verbose': '(0)', 'monitor': '"""val_loss"""', 'save_best_only': '(True)', 'mode': '"""auto"""'}), "('model-valacc-{val_acc:03f}.h5', verbose=0, monitor=\n 'val_loss', save_best_only=True, mode='auto')\n", (2570, 2673), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2927, 2968), 'numpy.save', 'np.save', (['"""./history.npy"""', 'history.history'], {}), "('./history.npy', history.history)\n", (2934, 2968), True, 'import numpy as np\n'), ((1111, 
1131), 'keras.utils.generic_utils.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (1129, 1131), False, 'from keras.utils.generic_utils import get_custom_objects\n'), ((3059, 3069), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3067, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3111), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (3086, 3111), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3157), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (3128, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3189), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (3175, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3216), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3208, 3216), True, 'import matplotlib.pyplot as plt\n'), ((3225, 3244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3235, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3253, 3306), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (3263, 3306), True, 'import matplotlib.pyplot as plt\n'), ((3315, 3325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3323, 3325), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1667), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1664, 1667), True, 'import numpy as np\n')] |
# A command line type example for using BIFS example
import numpy as np
import imageio
import random
from pylab import *
import matplotlib.pyplot as plt
import bifs
import bifs_util.util as bu
# 2D image
# Load image - standard Lena for now
im = imageio.imread('../../images/lena512.bmp')
im = np.asarray(im)
# 2D imag
noise_level = 1.5
noise = noise_level*(np.max(im) - np.min(im))*np.random.rand(im.shape[0],im.shape[1])
# Add noise:
noisy_im = im + noise
# Create mybifs BIFS object:
mybifs = bifs.bifs()
# Can take a look at what functions and variables are available with, e.g.:
# dir(mybifs)
# Set a few things:
# Prior
mybifs.prior = "Gaussian" # Choices are currently: "Gaussian","Uniform"
# "Gaussian" is actually the default but for illustration...
# Lilelihood
mybifs.likelihood = "Gaussian" # Choices are currently: "Gaussian","Rician"
# "Gaussian" is actually the default but again for illustration...
# Parameter Space Function
# mybifs.param_func_type = "Linear Decay"
# mybifs.bvec = np.array([0.1,500.])
mybifs.param_func_type = "Inverse Power Decay"
# mybifs.decay = 0.5
# Current choices are: "Inverse Power Decay","Banded Inverse Power Decay",
# "Linear Decay" with default "Inverse Power Decay",
# but again for illustration...
# Try adding a bump
# mybifs.bumps = {mybifs.bump_default_type:[0.5,0.05,0.1]}
# Can check comments in bifs.py for description of other parametere to set
# Load the image - note, typically just start here re. loading a noisy image
mybifs.load_image(noisy_im)
# Run BIFS MAP making sure that the initial image is loaded
if np.isscalar(mybifs.init_image):
print("Error: Need to load an image into mybifs before running MAP")
else:
print("Running BIFS_MAP() on image")
mybifs.BIFS_MAP()
# Take a look at the parameter function
# NOTE: For a reason I don't yet understand
# running this and/or the following utility
# functions before running mybifs.BIFS_MAP()
# causes mybifs.BIFS_MAP() to crash horribly
# with a string of errors like:
#
# Break on __THE_PROCESS_HAS_FORKED_AND_YOU_CANNOT_USE_THIS_
# COREFOUNDATION_FUNCTIONALITY___YOU_MUST_EXEC__() to debug.
# The process has forked and you cannot use this CoreFoundation
# functionality safely. You MUST exec().
#
# bu.plot_param_func(mybifs)
# Look at the prior, liklelihood, and posterior at a voxel
bu.voxel_dist(mybifs,[mybifs.mod_image.shape[0]//2,mybifs.mod_image.shape[1]//2],do_plots=True)
# Plot the resulting images, checking again that nothing went wrong...
if np.isscalar(mybifs.init_image):
print("Error: Need to load an image into mybifs before running MAP")
else:
# Initial noisy image
plt.subplot(221)
plt.axis('off')
plt.title("Initial Image")
plt.imshow(mybifs.init_image, cmap = cm.Greys_r)
# Initial K-Space Image
plt.subplot(222)
plt.axis('off')
plt.title("Initial K-Space Image")
showim1k = np.roll(np.roll(mybifs.mod_image,mybifs.mod_image.shape[0]//2,0),mybifs.mod_image.shape[1]//2,1)
plt.imshow(np.log(showim1k), cmap = cm.Greys_r)
# Final K-Space Image after running BIFS
plt.subplot(224)
plt.axis('off')
plt.title("Initial K-Space Image")
showim2k = np.roll(np.roll(mybifs.bifsk_image,mybifs.bifsk_image.shape[0]//2,0),mybifs.bifsk_image.shape[1]//2,1)
plt.imshow(np.log(showim2k), cmap = cm.Greys_r)
# Final Image after running BIFS
plt.subplot(223)
plt.axis('off')
plt.title("Reconstructed Image")
plt.imshow(mybifs.final_image,cmap = cm.Greys_r)
plt.show()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.isscalar",
"bifs.bifs",
"bifs_util.util.voxel_dist",
"numpy.random.rand",
"numpy.roll",
"numpy.log",
"numpy.asarray",
"numpy.max",
"numpy.min",
"imageio.imread",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotl... | [((248, 290), 'imageio.imread', 'imageio.imread', (['"""../../images/lena512.bmp"""'], {}), "('../../images/lena512.bmp')\n", (262, 290), False, 'import imageio\n'), ((296, 310), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (306, 310), True, 'import numpy as np\n'), ((500, 511), 'bifs.bifs', 'bifs.bifs', ([], {}), '()\n', (509, 511), False, 'import bifs\n'), ((1584, 1614), 'numpy.isscalar', 'np.isscalar', (['mybifs.init_image'], {}), '(mybifs.init_image)\n', (1595, 1614), True, 'import numpy as np\n'), ((2325, 2432), 'bifs_util.util.voxel_dist', 'bu.voxel_dist', (['mybifs', '[mybifs.mod_image.shape[0] // 2, mybifs.mod_image.shape[1] // 2]'], {'do_plots': '(True)'}), '(mybifs, [mybifs.mod_image.shape[0] // 2, mybifs.mod_image.\n shape[1] // 2], do_plots=True)\n', (2338, 2432), True, 'import bifs_util.util as bu\n'), ((2496, 2526), 'numpy.isscalar', 'np.isscalar', (['mybifs.init_image'], {}), '(mybifs.init_image)\n', (2507, 2526), True, 'import numpy as np\n'), ((385, 425), 'numpy.random.rand', 'np.random.rand', (['im.shape[0]', 'im.shape[1]'], {}), '(im.shape[0], im.shape[1])\n', (399, 425), True, 'import numpy as np\n'), ((2631, 2647), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (2642, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2665), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2658, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2668, 2694), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial Image"""'], {}), "('Initial Image')\n", (2677, 2694), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2743), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mybifs.init_image'], {'cmap': 'cm.Greys_r'}), '(mybifs.init_image, cmap=cm.Greys_r)\n', (2707, 2743), True, 'import matplotlib.pyplot as plt\n'), ((2777, 2793), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (2788, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2811), 
'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2804, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2848), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial K-Space Image"""'], {}), "('Initial K-Space Image')\n", (2823, 2848), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3084), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (3079, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3087, 3102), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3095, 3102), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3139), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial K-Space Image"""'], {}), "('Initial K-Space Image')\n", (3114, 3139), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (3368, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3391), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3384, 3391), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3426), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstructed Image"""'], {}), "('Reconstructed Image')\n", (3403, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3429, 3476), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mybifs.final_image'], {'cmap': 'cm.Greys_r'}), '(mybifs.final_image, cmap=cm.Greys_r)\n', (3439, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3489, 3491), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2930), 'numpy.roll', 'np.roll', (['mybifs.mod_image', '(mybifs.mod_image.shape[0] // 2)', '(0)'], {}), '(mybifs.mod_image, mybifs.mod_image.shape[0] // 2, 0)\n', (2877, 2930), True, 'import numpy as np\n'), ((2972, 2988), 'numpy.log', 'np.log', (['showim1k'], {}), '(showim1k)\n', (2978, 2988), True, 'import numpy as np\n'), ((3161, 3225), 'numpy.roll', 'np.roll', (['mybifs.bifsk_image', 
'(mybifs.bifsk_image.shape[0] // 2)', '(0)'], {}), '(mybifs.bifsk_image, mybifs.bifsk_image.shape[0] // 2, 0)\n', (3168, 3225), True, 'import numpy as np\n'), ((3269, 3285), 'numpy.log', 'np.log', (['showim2k'], {}), '(showim2k)\n', (3275, 3285), True, 'import numpy as np\n'), ((360, 370), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (366, 370), True, 'import numpy as np\n'), ((373, 383), 'numpy.min', 'np.min', (['im'], {}), '(im)\n', (379, 383), True, 'import numpy as np\n')] |
import read_bvh
import numpy as np
from os import listdir
import os
def generate_traindata_from_bvh(src_bvh_folder, tar_traindata_folder):
print ("Generating training data for "+ src_bvh_folder)
if (os.path.exists(tar_traindata_folder)==False):
os.makedirs(tar_traindata_folder)
bvh_dances_names=listdir(src_bvh_folder)
for bvh_dance_name in bvh_dances_names:
name_len=len(bvh_dance_name)
if(name_len>4):
if(bvh_dance_name[name_len-4: name_len]==".bvh"):
print ("Processing "+bvh_dance_name)
dance=read_bvh.get_train_data(src_bvh_folder+bvh_dance_name)
np.save(tar_traindata_folder+bvh_dance_name+".npy", dance)
def generate_bvh_from_traindata(src_train_folder, tar_bvh_folder):
print ("Generating bvh data for "+ src_train_folder)
if (os.path.exists(tar_bvh_folder)==False):
os.makedirs(tar_bvh_folder)
dances_names=listdir(src_train_folder)
for dance_name in dances_names:
name_len=len(dance_name)
if(name_len>4):
if(dance_name[name_len-4: name_len]==".npy"):
print ("Processing"+dance_name)
dance=np.load(src_train_folder+dance_name)
dance2=[]
for i in range(dance.shape[0]/8):
dance2=dance2+[dance[i*8]]
print (len(dance2))
read_bvh.write_traindata_to_bvh(tar_bvh_folder+dance_name+".bvh",np.array(dance2))
generate_traindata_from_bvh("../train_data_bvh/indian/","../train_data_xyz/indian/")
#generate_traindata_from_bvh("../train_data_bvh/salsa/","../train_data_xyz/salsa/")
#generate_traindata_from_bvh("../train_data_bvh/martial/","../train_data_xyz/martial/") | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"read_bvh.get_train_data",
"numpy.array",
"numpy.load",
"numpy.save"
] | [((328, 351), 'os.listdir', 'listdir', (['src_bvh_folder'], {}), '(src_bvh_folder)\n', (335, 351), False, 'from os import listdir\n'), ((969, 994), 'os.listdir', 'listdir', (['src_train_folder'], {}), '(src_train_folder)\n', (976, 994), False, 'from os import listdir\n'), ((217, 253), 'os.path.exists', 'os.path.exists', (['tar_traindata_folder'], {}), '(tar_traindata_folder)\n', (231, 253), False, 'import os\n'), ((272, 305), 'os.makedirs', 'os.makedirs', (['tar_traindata_folder'], {}), '(tar_traindata_folder)\n', (283, 305), False, 'import os\n'), ((874, 904), 'os.path.exists', 'os.path.exists', (['tar_bvh_folder'], {}), '(tar_bvh_folder)\n', (888, 904), False, 'import os\n'), ((923, 950), 'os.makedirs', 'os.makedirs', (['tar_bvh_folder'], {}), '(tar_bvh_folder)\n', (934, 950), False, 'import os\n'), ((600, 656), 'read_bvh.get_train_data', 'read_bvh.get_train_data', (['(src_bvh_folder + bvh_dance_name)'], {}), '(src_bvh_folder + bvh_dance_name)\n', (623, 656), False, 'import read_bvh\n'), ((672, 734), 'numpy.save', 'np.save', (["(tar_traindata_folder + bvh_dance_name + '.npy')", 'dance'], {}), "(tar_traindata_folder + bvh_dance_name + '.npy', dance)\n", (679, 734), True, 'import numpy as np\n'), ((1222, 1260), 'numpy.load', 'np.load', (['(src_train_folder + dance_name)'], {}), '(src_train_folder + dance_name)\n', (1229, 1260), True, 'import numpy as np\n'), ((1504, 1520), 'numpy.array', 'np.array', (['dance2'], {}), '(dance2)\n', (1512, 1520), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
from functools import reduce
import numbers
import operator
import numpy as np
import scipy.sparse
# Python 2 only: rebind `int` to the arbitrary-precision `long` type. On
# Python 3 the name `long` does not exist, so the NameError is swallowed and
# the builtin `int` (already arbitrary precision) is left untouched.
# NOTE(review): the original comment says "Windows compatibility" — on Windows
# a C long is 32-bit, so this presumably avoids overflow in index arithmetic
# under Python 2; confirm before removing.
try:  # Windows compatibility
    int = long
except NameError:
    pass
class COO(object):
    """ A Sparse Multidimensional Array
    This is stored in COO format.  It depends on NumPy and Scipy.sparse for
    computation, but supports arrays of arbitrary dimension.
    Parameters
    ----------
    coords: np.ndarray (ndim, nnz)
        An array holding the index locations of every value
        Should have shape (number of dimensions, number of non-zeros)
    data: np.array (nnz,)
        An array of Values
    shape: tuple (ndim,), optional
        The shape of the array
    Examples
    --------
    >>> x = np.eye(4)
    >>> x[2, 3] = 5
    >>> s = COO(x)
    >>> s
    <COO: shape=(4, 4), dtype=float64, nnz=5, sorted=True, duplicates=False>
    >>> s.data
    array([ 1.,  1.,  1.,  5.,  1.])
    >>> s.coords
    array([[0, 1, 2, 2, 3],
           [0, 1, 2, 3, 3]], dtype=uint8)
    >>> s.dot(s.T).sum(axis=0).todense()
    array([  1.,   1.,  31.,   6.])
    Make a sparse array by passing in an array of coordinates and an array of
    values.
    >>> coords = [[0, 0, 0, 1, 1],
    ...           [0, 1, 2, 0, 3],
    ...           [0, 3, 2, 0, 1]]
    >>> data = [1, 2, 3, 4, 5]
    >>> y = COO(coords, data, shape=(3, 4, 5))
    >>> y
    <COO: shape=(3, 4, 5), dtype=int64, nnz=5, sorted=False, duplicates=True>
    >>> tensordot(s, y, axes=(0, 1))
    <COO: shape=(4, 3, 5), dtype=float64, nnz=6, sorted=False, duplicates=False>
    Following scipy.sparse conventions you can also pass these as a tuple with
    rows and columns
    >>> rows = [0, 1, 2, 3, 4]
    >>> cols = [0, 0, 0, 1, 1]
    >>> data = [10, 20, 30, 40, 50]
    >>> z = COO((data, (rows, cols)))
    >>> z.todense()
    array([[10,  0],
           [20,  0],
           [30,  0],
           [ 0, 40],
           [ 0, 50]])
    You can also pass a dictionary or iterable of index/value pairs.  Repeated
    indices imply summation:
    >>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3}
    >>> COO(d)
    <COO: shape=(2, 3, 4), dtype=int64, nnz=3, sorted=False, duplicates=False>
    >>> L = [((0, 0), 1),
    ...      ((1, 1), 2),
    ...      ((0, 0), 3)]
    >>> COO(L).todense()
    array([[4, 0],
           [0, 2]])
    See Also
    --------
    COO.from_numpy
    COO.from_scipy_sparse
    """
    # Tell numpy to defer to this class's arithmetic methods when a COO is
    # combined with an ndarray in a binary operation (higher priority wins).
    __array_priority__ = 12
    def __init__(self, coords, data=None, shape=None, has_duplicates=True,
                 sorted=False, cache=False):
        """Build a COO array from one of several input layouts.

        When ``data`` is None, ``coords`` may be a dense ndarray, a dict of
        index-tuple -> value, a list of ``((i, j, k), value)`` pairs, or a
        scipy-style ``(data, (row, col, ...))`` tuple; each branch below
        normalizes to separate ``coords``/``data`` arrays.
        """
        self._cache = None
        if cache:
            self.enable_caching()
        if data is None:
            # {(i, j, k): x, (i, j, k): y, ...}
            if isinstance(coords, dict):
                coords = list(coords.items())
                has_duplicates = False
            # Dense array: delegate entirely to from_numpy and copy its fields.
            if isinstance(coords, np.ndarray):
                result = COO.from_numpy(coords)
                self.coords = result.coords
                self.data = result.data
                self.has_duplicates = result.has_duplicates
                self.sorted = result.sorted
                self.shape = result.shape
                return
            # []
            if not coords:
                data = []
                coords = []
            # [((i, j, k), value), ((i, j, k), value), ...]
            elif isinstance(coords[0][0], Iterable):
                if coords:
                    assert len(coords[0]) == 2
                data = [x[1] for x in coords]
                coords = [x[0] for x in coords]
                # Transpose so rows are dimensions and columns are entries.
                coords = np.asarray(coords).T
            # (data, (row, col, slab, ...))
            else:
                data = coords[0]
                coords = np.stack(coords[1], axis=0)
        self.data = np.asarray(data)
        self.coords = np.asarray(coords)
        # A 1-d coords array means a 1-d sparse array; promote to (1, nnz).
        if self.coords.ndim == 1:
            self.coords = self.coords[None, :]
        # Empty array with an explicit shape: give coords the right row count.
        if shape and not np.prod(self.coords.shape):
            self.coords = np.zeros((len(shape), 0), dtype=np.uint64)
        if shape is None:
            # Infer the shape as one past the largest index along each axis.
            if self.coords.nbytes:
                shape = tuple((self.coords.max(axis=1) + 1).tolist())
            else:
                shape = ()
        self.shape = tuple(shape)
        # Store coordinates in the smallest integer dtype that can hold them.
        if self.shape:
            dtype = np.min_scalar_type(max(self.shape))
        else:
            dtype = np.int_
        self.coords = self.coords.astype(dtype)
        assert not self.shape or len(data) == self.coords.shape[1]
        self.has_duplicates = has_duplicates
        self.sorted = sorted
def enable_caching(self):
""" Enable caching of reshape, transpose, and tocsr/csc operations
This enables efficient iterative workflows that make heavy use of
csr/csc operations, such as tensordot. This maintains a cache of
recent results of reshape and transpose so that operations like
tensordot (which uses both internally) store efficiently stored
representations for repeated use. This can significantly cut down on
computational costs in common numeric algorithms.
However, this also assumes that neither this object, nor the downstream
objects will have their data mutated.
Examples
--------
>>> x.enable_caching() # doctest: +SKIP
>>> csr1 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr2 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr1 is csr2 # doctest: +SKIP
True
"""
self._cache = defaultdict(lambda: deque(maxlen=3))
return self
@classmethod
def from_numpy(cls, x):
if x.shape:
coords = np.where(x)
data = x[coords]
coords = np.vstack(coords)
else:
coords = []
data = x
return cls(coords, data, shape=x.shape, has_duplicates=False,
sorted=True)
def todense(self):
self = self.sum_duplicates()
x = np.zeros(shape=self.shape, dtype=self.dtype)
coords = tuple([self.coords[i, :] for i in range(self.ndim)])
x[coords] = self.data
return x
@classmethod
def from_scipy_sparse(cls, x):
x = scipy.sparse.coo_matrix(x)
coords = np.empty((2, x.nnz), dtype=x.row.dtype)
coords[0, :] = x.row
coords[1, :] = x.col
return COO(coords, x.data, shape=x.shape,
has_duplicates=not x.has_canonical_format,
sorted=x.has_canonical_format)
@property
def dtype(self):
return self.data.dtype
@property
def ndim(self):
return len(self.shape)
@property
def nnz(self):
return self.coords.shape[1]
@property
def nbytes(self):
return self.data.nbytes + self.coords.nbytes
def __sizeof__(self):
return self.nbytes
    def __getitem__(self, index):
        """Basic indexing with ints, slices, lists and None (np.newaxis).

        Returns a new COO (or ``self`` for a no-op full slice). Entries are
        selected by building a boolean mask over the stored coordinates, then
        the surviving coordinates are shifted/remapped per index component.
        """
        if not isinstance(index, tuple):
            index = (index,)
        # Normalize negative integer indices to their positive equivalents.
        index = tuple(ind + self.shape[i] if isinstance(ind, numbers.Integral) and ind < 0 else ind
                      for i, ind in enumerate(index))
        # Fast path: every component is a full slice, nothing to do.
        if (all(ind == slice(None) or ind == slice(0, d)
                for ind, d in zip(index, self.shape))):
            return self
        mask = np.ones(self.nnz, dtype=bool)
        # None entries add a new axis and consume no existing dimension, so
        # they are skipped when masking. `_mask` is a helper defined elsewhere
        # in this module that selects matching coordinates per component.
        for i, ind in enumerate([i for i in index if i is not None]):
            if ind == slice(None, None):
                continue
            mask &= _mask(self.coords[i], ind)
        n = mask.sum()
        coords = []
        shape = []
        i = 0
        for ind in index:
            if isinstance(ind, numbers.Integral):
                # Integer index drops this dimension entirely.
                i += 1
                continue
            elif isinstance(ind, slice):
                # Shift surviving coordinates so the slice start maps to 0.
                start = ind.start or 0
                stop = ind.stop if ind.stop is not None else self.shape[i]
                shape.append(min(stop, self.shape[i]) - start)
                coords.append(self.coords[i][mask] - start)
                i += 1
            elif isinstance(ind, list):
                # Fancy indexing: remap each listed value to its position.
                old = self.coords[i][mask]
                new = np.empty(shape=old.shape, dtype=old.dtype)
                for j, item in enumerate(ind):
                    new[old == item] = j
                coords.append(new)
                shape.append(len(ind))
                i += 1
            elif ind is None:
                # np.newaxis: length-1 dimension, all entries at coordinate 0.
                # NOTE(review): np.zeros(n) defaults to float64 coordinates;
                # the constructor later casts to an integer dtype — confirm.
                coords.append(np.zeros(n))
                shape.append(1)
        # Dimensions beyond the given index are kept unchanged.
        for j in range(i, self.ndim):
            coords.append(self.coords[j][mask])
            shape.append(self.shape[j])
        coords = np.stack(coords, axis=0)
        shape = tuple(shape)
        data = self.data[mask]
        return COO(coords, data, shape=shape,
                   has_duplicates=self.has_duplicates,
                   sorted=self.sorted)
def __str__(self):
return "<COO: shape=%s, dtype=%s, nnz=%d, sorted=%s, duplicates=%s>" % (
self.shape, self.dtype, self.nnz, self.sorted,
self.has_duplicates)
__repr__ = __str__
def reduction(self, method, axis=None, keepdims=False, dtype=None):
    """Apply a named reduction (e.g. ``'sum'``) along ``axis``.

    A full reduction operates on the flat data array directly; a partial
    reduction reshapes to 2-D (reduced axes x kept axes), converts to a
    scipy.sparse matrix and reduces along its first axis.
    """
    if axis is None:
        axis = tuple(range(self.ndim))
    kwargs = {}
    if dtype:
        kwargs['dtype'] = dtype
    if isinstance(axis, numbers.Integral):
        axis = (axis,)
    if set(axis) == set(range(self.ndim)):
        # Reducing over every axis: delegate straight to the data array.
        result = getattr(self.data, method)(**kwargs)
    else:
        axis = tuple(axis)
        # Axes NOT being reduced, in their original order.
        neg_axis = list(range(self.ndim))
        for ax in axis:
            neg_axis.remove(ax)
        neg_axis = tuple(neg_axis)
        # Move reduced axes to the front, collapse to (reduced, kept).
        a = self.transpose(axis + neg_axis)
        a = a.reshape((np.prod([self.shape[d] for d in axis]),
                       np.prod([self.shape[d] for d in neg_axis])))
        a = a.to_scipy_sparse()
        a = getattr(a, method)(axis=0, **kwargs)
        if isinstance(a, scipy.sparse.spmatrix):
            a = COO.from_scipy_sparse(a)
            a.sorted = self.sorted
            a.has_duplicates = False
        elif isinstance(a, np.matrix):
            # Dense matrix result: take its single row, go back to COO.
            a = np.asarray(a)[0]
            a = COO.from_numpy(a)
        # Restore the kept axes to their original extents.
        a = a.reshape([self.shape[d] for d in neg_axis])
        result = a
    if keepdims:
        result = _keepdims(self, result, axis)
    return result
def sum(self, axis=None, keepdims=False, dtype=None, out=None):
    """Sum along ``axis``; ``out`` exists only for NumPy API compatibility."""
    return self.reduction('sum', axis=axis, keepdims=keepdims, dtype=dtype)
def max(self, axis=None, keepdims=False, out=None):
    """Maximum along ``axis``; negative partial maxima are clamped to 0."""
    x = self.reduction('max', axis=axis, keepdims=keepdims)
    # TODO: verify that there are some missing elements in each entry
    # Negative results are forced to 0 -- presumably because unstored
    # elements are implicit zeros that would win the comparison.
    if isinstance(x, COO):
        x.data[x.data < 0] = 0
        return x
    elif isinstance(x, np.ndarray):
        x[x < 0] = 0
        return x
    else:
        # Scalar result of a full reduction: same clamp via np.max.
        return np.max(x, 0)
def transpose(self, axes=None):
    """Return a COO with axes permuted according to ``axes`` (default: reverse)."""
    if axes is None:
        axes = reversed(range(self.ndim))
    axes = tuple(axes)
    # Identity permutation: nothing to do.
    if axes == tuple(range(self.ndim)):
        return self
    # Serve previously computed transposes from the per-array cache.
    if self._cache is not None:
        for ax, value in self._cache['transpose']:
            if ax == axes:
                return value
    shape = tuple(self.shape[ax] for ax in axes)
    # Reordering the coordinate rows permutes the axes; data is shared.
    result = COO(self.coords[axes, :], self.data, shape,
                 has_duplicates=self.has_duplicates,
                 cache=self._cache is not None)
    if self._cache is not None:
        self._cache['transpose'].append((axes, result))
    return result
@property
def T(self):
    # Full axis reversal, matching ``numpy.ndarray.T``.
    return self.transpose(list(range(self.ndim))[::-1])
def dot(self, other):
    # Delegates to the module-level ``dot`` helper.
    return dot(self, other)
def __matmul__(self, other):
    try:
        return dot(self, other)
    except NotImplementedError:
        # Let Python try the reflected operation on ``other``.
        return NotImplemented
def __rmatmul__(self, other):
    try:
        return dot(other, self)
    except NotImplementedError:
        return NotImplemented
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
    # Refuse NumPy's (legacy) ufunc override protocol outright.
    return NotImplemented
def linear_loc(self, signed=False):
    """ Index location of every piece of data in a flattened array
    This is used internally to check for duplicates, re-order, reshape,
    etc..
    """
    n = reduce(operator.mul, self.shape)
    if signed:
        # Negating n makes min_scalar_type pick a signed dtype.
        n = -n
    # Smallest integer dtype able to hold the largest linear index.
    dtype = np.min_scalar_type(n)
    out = np.zeros(self.nnz, dtype=dtype)
    tmp = np.zeros(self.nnz, dtype=dtype)
    strides = 1
    # Row-major (C-order) linearisation, last axis varying fastest,
    # accumulated in-place to avoid per-iteration temporaries.
    for i, d in enumerate(self.shape[::-1]):
        # out += self.coords[-(i + 1), :].astype(dtype) * strides
        np.multiply(self.coords[-(i + 1), :], strides, out=tmp, dtype=dtype)
        np.add(tmp, out, out=out)
        strides *= d
    return out
def reshape(self, shape):
    """Return a COO holding the same data laid out in ``shape``.

    One dimension may be -1, in which case it is inferred from the
    total number of elements.
    """
    if self.shape == shape:
        return self
    if any(d == -1 for d in shape):
        # Infer the -1 extent from the remaining dimensions.
        extra = int(np.prod(self.shape) /
                    np.prod([d for d in shape if d != -1]))
        shape = tuple([d if d != -1 else extra for d in shape])
    if self.shape == shape:
        return self
    # Serve previously computed reshapes from the per-array cache.
    if self._cache is not None:
        for sh, value in self._cache['reshape']:
            if sh == shape:
                return value
    # TODO: this np.prod(self.shape) enforces a 2**64 limit to array size
    linear_loc = self.linear_loc()
    coords = np.empty((len(shape), self.nnz), dtype=np.min_scalar_type(max(shape)))
    strides = 1
    # Unravel the linear index into the new shape, last axis fastest.
    for i, d in enumerate(shape[::-1]):
        coords[-(i + 1), :] = (linear_loc // strides) % d
        strides *= d
    result = COO(coords, self.data, shape,
                 has_duplicates=self.has_duplicates,
                 sorted=self.sorted, cache=self._cache is not None)
    if self._cache is not None:
        self._cache['reshape'].append((shape, result))
    return result
def to_scipy_sparse(self):
    """Convert this 2-D array to a ``scipy.sparse.coo_matrix``."""
    assert self.ndim == 2
    result = scipy.sparse.coo_matrix((self.data,
                                     (self.coords[0],
                                      self.coords[1])),
                                    shape=self.shape)
    # Canonical format in scipy means sorted indices and no duplicates.
    result.has_canonical_format = (not self.has_duplicates and self.sorted)
    return result
def _tocsr(self):
    """Build a fresh ``scipy.sparse.csr_matrix`` from this 2-D array."""
    assert self.ndim == 2
    # Pass 1: sum duplicates
    self.sum_duplicates()
    # Pass 2: sort indices
    self.sort_indices()
    row, col = self.coords
    # Pass 3: count nonzeros in each row
    # cumsum of the per-row counts yields the CSR row-pointer array.
    indptr = np.zeros(self.shape[0] + 1, dtype=np.int64)
    np.cumsum(np.bincount(row, minlength=self.shape[0]), out=indptr[1:])
    return scipy.sparse.csr_matrix((self.data, col, indptr), shape=self.shape)
def tocsr(self):
    """CSR conversion, memoised on ``self._csr`` when caching is enabled."""
    if self._cache is not None:
        try:
            return self._csr
        except AttributeError:
            pass
        # Reuse a cached CSC matrix if one exists: converting between
        # the two scipy formats is cheaper than rebuilding from COO.
        try:
            self._csr = self._csc.tocsr()
            return self._csr
        except AttributeError:
            pass
        self._csr = csr = self._tocsr()
    else:
        csr = self._tocsr()
    return csr
def tocsc(self):
    """CSC conversion, memoised on ``self._csc`` when caching is enabled."""
    if self._cache is not None:
        try:
            return self._csc
        except AttributeError:
            pass
        # A cached CSR matrix converts to CSC more cheaply than a rebuild.
        try:
            self._csc = self._csr.tocsc()
            return self._csc
        except AttributeError:
            pass
        self._csc = csc = self.tocsr().tocsc()
    else:
        csc = self.tocsr().tocsc()
    return csc
def sort_indices(self):
    """Sort stored entries into C-order linear-index order, in place."""
    if self.sorted:
        # NOTE(review): this early exit returns None while the other
        # paths return self -- callers here never use the return value.
        return
    # Signed linear indices so the np.diff below can go negative.
    linear = self.linear_loc(signed=True)
    if (np.diff(linear) > 0).all():  # already sorted
        self.sorted = True
        return self
    order = np.argsort(linear)
    self.coords = self.coords[:, order]
    self.data = self.data[order]
    self.sorted = True
    return self
def sum_duplicates(self):
    """Merge entries sharing coordinates by summing their values, in place."""
    # Inspired by scipy/sparse/coo.py::sum_duplicates
    # See https://github.com/scipy/scipy/blob/master/LICENSE.txt
    if not self.has_duplicates:
        return self
    if not np.prod(self.coords.shape):
        # Empty coordinate array: nothing to merge.
        return self
    self.sort_indices()
    linear = self.linear_loc()
    # After sorting, duplicates are adjacent: a zero first-difference
    # marks a repeat of the previous coordinate.
    unique_mask = np.diff(linear) != 0
    if unique_mask.sum() == len(unique_mask):  # already unique
        self.has_duplicates = False
        return self
    # The first entry is always the start of a run.
    unique_mask = np.append(True, unique_mask)
    coords = self.coords[:, unique_mask]
    (unique_inds,) = np.nonzero(unique_mask)
    # reduceat sums each run of duplicates down to one value.
    data = np.add.reduceat(self.data, unique_inds, dtype=self.data.dtype)
    self.data = data
    self.coords = coords
    self.has_duplicates = False
    return self
def __add__(self, other):
    # Adding zero is the identity, so the result stays sparse.
    if isinstance(other, numbers.Number) and other == 0:
        return self
    if not isinstance(other, COO):
        # Dense/other operand: densify (guarded by maybe_densify) and add.
        return self.maybe_densify() + other
    if self.shape == other.shape:
        return self.elemwise_binary(operator.add, other)
    else:
        raise NotImplementedError("Broadcasting not yet supported")
def __radd__(self, other):
    return self + other
def __neg__(self):
    # Negation only touches stored values; sparsity pattern is unchanged.
    return COO(self.coords, -self.data, self.shape, self.has_duplicates,
               self.sorted)
def __sub__(self, other):
    return self + (-other)
def __rsub__(self, other):
    return -self + other
def __mul__(self, other):
    if isinstance(other, COO):
        return self.elemwise_binary(operator.mul, other)
    else:
        # Scalar multiplication acts on stored data only (0 * c == 0).
        return self.elemwise(operator.mul, other)
__rmul__ = __mul__
def __truediv__(self, other):
    return self.elemwise(operator.truediv, other)
def __floordiv__(self, other):
    return self.elemwise(operator.floordiv, other)
__div__ = __truediv__  # Python 2 compatibility alias
def __pow__(self, other):
    return self.elemwise(operator.pow, other)
def elemwise(self, func, *args, **kwargs):
    """Apply ``func`` to the stored values only.

    ``func(0, ...)`` must be 0, otherwise every unstored element would
    become nonzero and the result could not stay sparse; pass
    ``check=False`` to skip that guard.
    """
    if kwargs.pop('check', True) and func(0, *args, **kwargs) != 0:
        raise ValueError("Performing this operation would produce "
                         "a dense result: %s" % str(func))
    return COO(self.coords, func(self.data, *args, **kwargs),
               shape=self.shape,
               has_duplicates=self.has_duplicates,
               sorted=self.sorted)
def elemwise_binary(self, func, other, *args, **kwargs):
    """Apply binary ``func`` elementwise to two same-shape COO arrays.

    ``func(0, 0)`` must be 0 (pass ``check=False`` to skip the guard),
    otherwise the result could not remain sparse.
    """
    assert isinstance(other, COO)
    if kwargs.pop('check', True) and func(0, 0, *args, **kwargs) != 0:
        raise ValueError("Performing this operation would produce "
                         "a dense result: %s" % str(func))
    if self.shape != other.shape:
        raise NotImplementedError("Broadcasting is not supported")
    self.sum_duplicates()  # TODO: document side-effect or make copy
    other.sum_duplicates()  # TODO: document side-effect or make copy
    # Sort self.coords in lexographical order using record arrays
    self_coords = np.rec.fromarrays(self.coords)
    i = np.argsort(self_coords)
    self_coords = self_coords[i]
    self_data = self.data[i]
    # Convert other.coords to a record array
    other_coords = np.rec.fromarrays(other.coords)
    other_data = other.data
    # Find matches between self.coords and other.coords
    j = np.searchsorted(self_coords, other_coords)
    if len(self_coords):
        # ``% len`` keeps searchsorted's one-past-the-end result inside
        # bounds; such entries compare unequal and so cannot match.
        matched_other = (other_coords == self_coords[j % len(self_coords)])
    else:
        matched_other = np.zeros(shape=(0,), dtype=bool)
    matched_self = j[matched_other]
    # Locate coordinates without a match
    unmatched_other = ~matched_other
    unmatched_self = np.ones(len(self_coords), dtype=bool)
    unmatched_self[matched_self] = 0
    # Concatenate matches and mismatches
    # Three groups: both stored; only self stored (other side is 0);
    # only other stored (self side is 0).
    data = np.concatenate([func(self_data[matched_self],
                                other_data[matched_other],
                                *args, **kwargs),
                           func(self_data[unmatched_self], 0,
                                *args, **kwargs),
                           func(0, other_data[unmatched_other],
                                *args, **kwargs)])
    coords = np.concatenate([self_coords[matched_self],
                             self_coords[unmatched_self],
                             other_coords[unmatched_other]])
    # Drop explicit zeros produced by ``func``.
    nonzero = data != 0
    data = data[nonzero]
    coords = coords[nonzero]
    # record array to ND array
    coords = np.asarray(coords.view(coords.dtype[0]).reshape(len(coords), self.ndim)).T
    return COO(coords, data, shape=self.shape, has_duplicates=False)
# Zero-preserving elementwise wrappers.  Each forwards to ``elemwise``;
# the ``out`` parameter exists only for NumPy API compatibility and
# must be None.
def __abs__(self):
    return self.elemwise(abs)
def exp(self, out=None):
    # exp(0) == 1, so the result is inherently dense: densify first.
    assert out is None
    return np.exp(self.maybe_densify())
def expm1(self, out=None):
    assert out is None
    return self.elemwise(np.expm1)
def log1p(self, out=None):
    assert out is None
    return self.elemwise(np.log1p)
def sin(self, out=None):
    assert out is None
    return self.elemwise(np.sin)
def sinh(self, out=None):
    assert out is None
    return self.elemwise(np.sinh)
def tan(self, out=None):
    assert out is None
    return self.elemwise(np.tan)
def tanh(self, out=None):
    assert out is None
    return self.elemwise(np.tanh)
def sqrt(self, out=None):
    assert out is None
    return self.elemwise(np.sqrt)
def ceil(self, out=None):
    assert out is None
    return self.elemwise(np.ceil)
def floor(self, out=None):
    assert out is None
    return self.elemwise(np.floor)
def round(self, decimals=0, out=None):
    assert out is None
    return self.elemwise(np.round, decimals)
def rint(self, out=None):
    assert out is None
    return self.elemwise(np.rint)
def conj(self, out=None):
    assert out is None
    return self.elemwise(np.conj)
def conjugate(self, out=None):
    assert out is None
    return self.elemwise(np.conjugate)
def astype(self, dtype, out=None):
    # check=False: casting 0 yields 0, no need for the density guard.
    assert out is None
    return self.elemwise(np.ndarray.astype, dtype, check=False)
def __gt__(self, other):
    """Elementwise ``>`` against a non-negative scalar."""
    if not isinstance(other, numbers.Number):
        raise NotImplementedError("Only scalars supported")
    if other < 0:
        # Every unstored zero would compare True -> dense result.
        raise ValueError("Comparison with negative number would produce "
                         "dense result")
    return self.elemwise(operator.gt, other)
def __ge__(self, other):
    """Elementwise ``>=`` against a strictly positive scalar."""
    if not isinstance(other, numbers.Number):
        raise NotImplementedError("Only scalars supported")
    if other <= 0:
        # 0 >= 0 is True, so even zero itself would densify the result.
        raise ValueError("Comparison with negative number would produce "
                         "dense result")
    return self.elemwise(operator.ge, other)
def maybe_densify(self, allowed_nnz=1e3, allowed_fraction=0.25):
    """ Convert to a dense numpy array if not too costly. Error otherwise. """
    # Densify only small arrays, or arrays that are already mostly full.
    if reduce(operator.mul, self.shape) <= allowed_nnz or self.nnz >= np.prod(self.shape) * allowed_fraction:
        return self.todense()
    else:
        raise NotImplementedError("Operation would require converting "
                                  "large sparse array to dense")
def tensordot(a, b, axes=2):
    """Sparse-aware tensor contraction of ``a`` and ``b``.

    ``axes`` is either an integer N (contract the last N axes of ``a``
    with the first N axes of ``b``) or a pair of axis sequences, exactly
    as in ``numpy.tensordot``.  Both operands are transposed/reshaped to
    2-D and multiplied via ``_dot``, then the result is reshaped back.

    Fix: the original used a bare ``except:`` around ``iter(axes)``,
    which also swallowed KeyboardInterrupt/SystemExit; only the
    TypeError raised for a non-iterable ``axes`` should take that branch.
    """
    # Much of this is stolen from numpy/core/numeric.py::tensordot
    # Please see license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    try:
        iter(axes)
    except TypeError:
        # ``axes`` is a plain integer N.
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    # Normalise each side to a list of axes plus its length.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    # a, b = asarray(a), asarray(b)  # <--- modified
    as_ = a.shape
    nda = a.ndim
    bs = b.shape
    ndb = b.ndim
    equal = True
    if na != nb:
        equal = False
    else:
        # Check contracted extents match, normalising negative axes.
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")
    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = _dot(at, bt)
    if isinstance(res, scipy.sparse.spmatrix):
        if res.nnz > reduce(operator.mul, res.shape) / 2:
            # More than half full: dense storage is cheaper from here on.
            res = res.todense()
        else:
            res = COO.from_scipy_sparse(res)  # <--- modified
            res.has_duplicates = False
    if isinstance(res, np.matrix):
        res = np.asarray(res)
    # Uncontracted axes of ``a`` followed by uncontracted axes of ``b``.
    return res.reshape(olda + oldb)
def dot(a, b):
    """Matrix-product style contraction: a's last axis with b's second-to-last.

    Raises NotImplementedError for operands that do not expose ``ndim``.
    """
    for operand in (a, b):
        if not hasattr(operand, 'ndim'):
            raise NotImplementedError(
                "Cannot perform dot product on types %s, %s" %
                (type(a), type(b)))
    contraction = ((a.ndim - 1,), (b.ndim - 2,))
    return tensordot(a, b, axes=contraction)
def _dot(a, b):
    """2-D product of COO / scipy.sparse operands via scipy's CSR x CSC kernel."""
    if isinstance(a, COO):
        a.sum_duplicates()
    if isinstance(b, COO):
        b.sum_duplicates()
    # Only ``a`` is converted to CSR below, so when just ``b`` is a COO
    # compute the transposed product instead: (b.T @ a.T).T == a @ b.
    if isinstance(b, COO) and not isinstance(a, COO):
        return _dot(b.T, a.T).T
    aa = a.tocsr()
    if isinstance(b, (COO, scipy.sparse.spmatrix)):
        b = b.tocsc()
    return aa.dot(b)
def _keepdims(original, new, axis):
shape = list(original.shape)
for ax in axis:
shape[ax] = 1
return new.reshape(shape)
def _mask(coords, idx):
if isinstance(idx, numbers.Integral):
return coords == idx
elif isinstance(idx, slice):
if idx.step not in (1, None):
raise NotImplementedError("Steped slices not implemented")
start = idx.start if idx.start is not None else 0
stop = idx.stop if idx.stop is not None else np.inf
return (coords >= start) & (coords < stop)
elif isinstance(idx, list):
mask = np.zeros(len(coords), dtype=bool)
for item in idx:
mask |= coords == item
return mask
def concatenate(arrays, axis=0):
    """Concatenate COO (or COO-convertible) arrays along an existing axis."""
    arrays = [x if type(x) is COO else COO(x) for x in arrays]
    if axis < 0:
        axis = axis + arrays[0].ndim
    # Every axis except the concatenation axis must match.
    assert all(x.shape[ax] == arrays[0].shape[ax]
               for x in arrays
               for ax in set(range(arrays[0].ndim)) - {axis})
    data = np.concatenate([x.data for x in arrays])
    coords = np.concatenate([x.coords for x in arrays], axis=1)
    nnz = 0
    dim = 0
    # Shift each array's coordinates along ``axis`` by the combined
    # extent of the arrays placed before it.
    for x in arrays:
        if dim:
            coords[axis, nnz:x.nnz + nnz] += dim
        dim += x.shape[axis]
        nnz += x.nnz
    shape = list(arrays[0].shape)
    shape[axis] = dim
    has_duplicates = any(x.has_duplicates for x in arrays)
    # Axis-0 concatenation of individually sorted arrays stays sorted.
    return COO(coords, data, shape=shape, has_duplicates=has_duplicates,
               sorted=(axis == 0) and all(a.sorted for a in arrays))
def stack(arrays, axis=0):
    """Stack same-shape COO arrays along a new axis (like ``np.stack``)."""
    assert len(set(x.shape for x in arrays)) == 1
    arrays = [x if type(x) is COO else COO(x) for x in arrays]
    if axis < 0:
        # +1 because the result gains one dimension.
        axis = axis + arrays[0].ndim + 1
    data = np.concatenate([x.data for x in arrays])
    coords = np.concatenate([x.coords for x in arrays], axis=1)
    nnz = 0
    dim = 0
    # Coordinate row for the new axis: entries from input i get value i.
    new = np.empty(shape=(coords.shape[1],), dtype=coords.dtype)
    for x in arrays:
        new[nnz:x.nnz + nnz] = dim
        dim += 1
        nnz += x.nnz
    shape = list(arrays[0].shape)
    shape.insert(axis, len(arrays))
    has_duplicates = any(x.has_duplicates for x in arrays)
    # Splice the new coordinate row in at position ``axis``.
    coords = [coords[i] for i in range(coords.shape[0])]
    coords.insert(axis, new)
    coords = np.stack(coords, axis=0)
    return COO(coords, data, shape=shape, has_duplicates=has_duplicates,
               sorted=(axis == 0) and all(a.sorted for a in arrays))
| [
"numpy.prod",
"numpy.argsort",
"numpy.rec.fromarrays",
"numpy.multiply",
"collections.deque",
"numpy.searchsorted",
"numpy.where",
"numpy.asarray",
"numpy.diff",
"numpy.max",
"numpy.stack",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.ones",
"numpy.add",
"functools.redu... | [((27737, 27777), 'numpy.concatenate', 'np.concatenate', (['[x.data for x in arrays]'], {}), '([x.data for x in arrays])\n', (27751, 27777), True, 'import numpy as np\n'), ((27791, 27841), 'numpy.concatenate', 'np.concatenate', (['[x.coords for x in arrays]'], {'axis': '(1)'}), '([x.coords for x in arrays], axis=1)\n', (27805, 27841), True, 'import numpy as np\n'), ((28473, 28513), 'numpy.concatenate', 'np.concatenate', (['[x.data for x in arrays]'], {}), '([x.data for x in arrays])\n', (28487, 28513), True, 'import numpy as np\n'), ((28527, 28577), 'numpy.concatenate', 'np.concatenate', (['[x.coords for x in arrays]'], {'axis': '(1)'}), '([x.coords for x in arrays], axis=1)\n', (28541, 28577), True, 'import numpy as np\n'), ((28613, 28667), 'numpy.empty', 'np.empty', ([], {'shape': '(coords.shape[1],)', 'dtype': 'coords.dtype'}), '(shape=(coords.shape[1],), dtype=coords.dtype)\n', (28621, 28667), True, 'import numpy as np\n'), ((28991, 29015), 'numpy.stack', 'np.stack', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (28999, 29015), True, 'import numpy as np\n'), ((3897, 3913), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3907, 3913), True, 'import numpy as np\n'), ((3936, 3954), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (3946, 3954), True, 'import numpy as np\n'), ((6151, 6195), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.shape', 'dtype': 'self.dtype'}), '(shape=self.shape, dtype=self.dtype)\n', (6159, 6195), True, 'import numpy as np\n'), ((6423, 6462), 'numpy.empty', 'np.empty', (['(2, x.nnz)'], {'dtype': 'x.row.dtype'}), '((2, x.nnz), dtype=x.row.dtype)\n', (6431, 6462), True, 'import numpy as np\n'), ((7441, 7470), 'numpy.ones', 'np.ones', (['self.nnz'], {'dtype': 'bool'}), '(self.nnz, dtype=bool)\n', (7448, 7470), True, 'import numpy as np\n'), ((8739, 8763), 'numpy.stack', 'np.stack', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (8747, 8763), True, 'import numpy as np\n'), ((12589, 
12621), 'functools.reduce', 'reduce', (['operator.mul', 'self.shape'], {}), '(operator.mul, self.shape)\n', (12595, 12621), False, 'from functools import reduce\n'), ((12676, 12697), 'numpy.min_scalar_type', 'np.min_scalar_type', (['n'], {}), '(n)\n', (12694, 12697), True, 'import numpy as np\n'), ((12712, 12743), 'numpy.zeros', 'np.zeros', (['self.nnz'], {'dtype': 'dtype'}), '(self.nnz, dtype=dtype)\n', (12720, 12743), True, 'import numpy as np\n'), ((12758, 12789), 'numpy.zeros', 'np.zeros', (['self.nnz'], {'dtype': 'dtype'}), '(self.nnz, dtype=dtype)\n', (12766, 12789), True, 'import numpy as np\n'), ((14930, 14973), 'numpy.zeros', 'np.zeros', (['(self.shape[0] + 1)'], {'dtype': 'np.int64'}), '(self.shape[0] + 1, dtype=np.int64)\n', (14938, 14973), True, 'import numpy as np\n'), ((16252, 16270), 'numpy.argsort', 'np.argsort', (['linear'], {}), '(linear)\n', (16262, 16270), True, 'import numpy as np\n'), ((16948, 16976), 'numpy.append', 'np.append', (['(True)', 'unique_mask'], {}), '(True, unique_mask)\n', (16957, 16976), True, 'import numpy as np\n'), ((17048, 17071), 'numpy.nonzero', 'np.nonzero', (['unique_mask'], {}), '(unique_mask)\n', (17058, 17071), True, 'import numpy as np\n'), ((17087, 17149), 'numpy.add.reduceat', 'np.add.reduceat', (['self.data', 'unique_inds'], {'dtype': 'self.data.dtype'}), '(self.data, unique_inds, dtype=self.data.dtype)\n', (17102, 17149), True, 'import numpy as np\n'), ((19566, 19596), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['self.coords'], {}), '(self.coords)\n', (19583, 19596), True, 'import numpy as np\n'), ((19609, 19632), 'numpy.argsort', 'np.argsort', (['self_coords'], {}), '(self_coords)\n', (19619, 19632), True, 'import numpy as np\n'), ((19776, 19807), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['other.coords'], {}), '(other.coords)\n', (19793, 19807), True, 'import numpy as np\n'), ((19913, 19955), 'numpy.searchsorted', 'np.searchsorted', (['self_coords', 'other_coords'], {}), '(self_coords, other_coords)\n', 
(19928, 19955), True, 'import numpy as np\n'), ((20855, 20962), 'numpy.concatenate', 'np.concatenate', (['[self_coords[matched_self], self_coords[unmatched_self], other_coords[\n unmatched_other]]'], {}), '([self_coords[matched_self], self_coords[unmatched_self],\n other_coords[unmatched_other]])\n', (20869, 20962), True, 'import numpy as np\n'), ((26068, 26083), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (26078, 26083), True, 'import numpy as np\n'), ((5837, 5848), 'numpy.where', 'np.where', (['x'], {}), '(x)\n', (5845, 5848), True, 'import numpy as np\n'), ((5899, 5916), 'numpy.vstack', 'np.vstack', (['coords'], {}), '(coords)\n', (5908, 5916), True, 'import numpy as np\n'), ((12941, 13009), 'numpy.multiply', 'np.multiply', (['self.coords[-(i + 1), :]', 'strides'], {'out': 'tmp', 'dtype': 'dtype'}), '(self.coords[-(i + 1), :], strides, out=tmp, dtype=dtype)\n', (12952, 13009), True, 'import numpy as np\n'), ((13022, 13047), 'numpy.add', 'np.add', (['tmp', 'out'], {'out': 'out'}), '(tmp, out, out=out)\n', (13028, 13047), True, 'import numpy as np\n'), ((14992, 15033), 'numpy.bincount', 'np.bincount', (['row'], {'minlength': 'self.shape[0]'}), '(row, minlength=self.shape[0])\n', (15003, 15033), True, 'import numpy as np\n'), ((16632, 16658), 'numpy.prod', 'np.prod', (['self.coords.shape'], {}), '(self.coords.shape)\n', (16639, 16658), True, 'import numpy as np\n'), ((16771, 16786), 'numpy.diff', 'np.diff', (['linear'], {}), '(linear)\n', (16778, 16786), True, 'import numpy as np\n'), ((20107, 20139), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0,)', 'dtype': 'bool'}), '(shape=(0,), dtype=bool)\n', (20115, 20139), True, 'import numpy as np\n'), ((4062, 4088), 'numpy.prod', 'np.prod', (['self.coords.shape'], {}), '(self.coords.shape)\n', (4069, 4088), True, 'import numpy as np\n'), ((5713, 5728), 'collections.deque', 'deque', ([], {'maxlen': '(3)'}), '(maxlen=3)\n', (5718, 5728), False, 'from collections import Iterable, defaultdict, deque\n'), ((11089, 
11101), 'numpy.max', 'np.max', (['x', '(0)'], {}), '(x, 0)\n', (11095, 11101), True, 'import numpy as np\n'), ((23735, 23767), 'functools.reduce', 'reduce', (['operator.mul', 'self.shape'], {}), '(operator.mul, self.shape)\n', (23741, 23767), False, 'from functools import reduce\n'), ((25835, 25866), 'functools.reduce', 'reduce', (['operator.mul', 'res.shape'], {}), '(operator.mul, res.shape)\n', (25841, 25866), False, 'from functools import reduce\n'), ((3848, 3875), 'numpy.stack', 'np.stack', (['coords[1]'], {'axis': '(0)'}), '(coords[1], axis=0)\n', (3856, 3875), True, 'import numpy as np\n'), ((9862, 9900), 'numpy.prod', 'np.prod', (['[self.shape[d] for d in axis]'], {}), '([self.shape[d] for d in axis])\n', (9869, 9900), True, 'import numpy as np\n'), ((9929, 9971), 'numpy.prod', 'np.prod', (['[self.shape[d] for d in neg_axis]'], {}), '([self.shape[d] for d in neg_axis])\n', (9936, 9971), True, 'import numpy as np\n'), ((13243, 13262), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (13250, 13262), True, 'import numpy as np\n'), ((13289, 13327), 'numpy.prod', 'np.prod', (['[d for d in shape if d != -1]'], {}), '([d for d in shape if d != -1])\n', (13296, 13327), True, 'import numpy as np\n'), ((16134, 16149), 'numpy.diff', 'np.diff', (['linear'], {}), '(linear)\n', (16141, 16149), True, 'import numpy as np\n'), ((23798, 23817), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (23805, 23817), True, 'import numpy as np\n'), ((3706, 3724), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (3716, 3724), True, 'import numpy as np\n'), ((8261, 8303), 'numpy.empty', 'np.empty', ([], {'shape': 'old.shape', 'dtype': 'old.dtype'}), '(shape=old.shape, dtype=old.dtype)\n', (8269, 8303), True, 'import numpy as np\n'), ((10305, 10318), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (10315, 10318), True, 'import numpy as np\n'), ((8549, 8560), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8557, 8560), True, 'import 
numpy as np\n')] |
from mltoolkit.mldp.steps.transformers import BaseTransformer
import numpy as np
class InvalidTransformer(BaseTransformer):
    """Transformer double whose output ignores its input entirely.

    ``_transform`` discards ``data_chunk`` and returns a fresh random
    (10, 5) array -- presumably used to trigger validation failures in
    pipeline tests.
    """

    def _transform(self, data_chunk):
        output_shape = (10, 5)
        return np.random.random(output_shape)
| [
"numpy.random.random"
] | [((180, 205), 'numpy.random.random', 'np.random.random', (['(10, 5)'], {}), '((10, 5))\n', (196, 205), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from scipy import interpolate
import os, sys
def pseudo_wells_model(zmin, zmax, sr, no_wells, zones=None, zones_ss=None, depth='Depth', zone_idx='Zone_idx', zone_col='Zone'):
    """Build depth-sampled pseudo wells with an increasing net-to-gross ramp.

    Parameters
    ----------
    zmin, zmax, sr : depth range and sample rate of the model.
    no_wells : number of pseudo-well columns (labelled 0 .. no_wells-1).
    zones : dict mapping zone name -> zone top depth (default empty).
    zones_ss : dict mapping zone name -> number of sand bodies to place;
        0 leaves the zone as 100% background (default empty).
    depth, zone_idx, zone_col : column names used in the output frame.

    Returns a DataFrame with one 0/1 lithology column per well plus the
    depth and zone bookkeeping columns.  Well ``w`` accumulates sand
    flags until its net-to-gross reaches ``100*w/(no_wells-1)`` percent.

    Fix: the mutable default arguments (``zones={}``, ``zones_ss={}``)
    were shared across calls; replaced with the ``None`` sentinel idiom.
    """
    if zones is None:
        zones = {}
    if zones_ss is None:
        zones_ss = {}
    depth_log = np.arange(zmin, zmax, sr)
    pseudo_wells = pd.DataFrame(np.zeros((len(depth_log), no_wells)))
    pseudo_wells[depth] = depth_log
    zones_df = pd.DataFrame()
    zones_df[depth] = [float(i) for i in zones.values()]
    zones_df[zone_col] = zones.keys()
    # Tag every depth sample with the zone whose top lies at or above it.
    pseudo_wells = pd.merge_asof(pseudo_wells, zones_df, on=depth)
    zone_dict = dict(zip(pseudo_wells[zone_col].unique(), [int(i) for i in range(len(pseudo_wells[zone_col].unique()))]))
    pseudo_wells[zone_idx] = pseudo_wells[zone_col].map(zone_dict)
    for zone in zones_ss.keys():
        if zones_ss[zone] != 0:
            for well in range(no_wells):
                # Target net-to-gross (percent) ramps 0 -> 100 across wells.
                ntg = 100* (well) / (no_wells - 1)
                zone_list = pseudo_wells[pseudo_wells[zone_col] == zone][well].values
                # Anchor positions of the sand bodies within the zone.
                locs = []
                for i in range(zones_ss[zone]):
                    if zones_ss[zone] > 1:
                        locs.append(int((len(zone_list)-1) * i/(zones_ss[zone]-1)))
                    else:
                        locs.append(0)
                ones = 1
                # Grow each sand body one sample at a time until the
                # zone's net-to-gross reaches the target fraction.
                while (sum(zone_list)/len(zone_list)) < ntg/100:
                    zone_list = 0 * zone_list
                    disp = np.ones(ones)
                    if zones_ss[zone] == 1:
                        zone_list[0:ones] = disp
                    else:
                        for i in range(len(locs)):
                            if i == 0:
                                zone_list[0:ones] = disp
                            elif i == len(locs)-1:
                                zone_list[-ones:] = disp
                                break
                            else:
                                # Centre the body on its anchor position.
                                insert = int(locs[i]-(len(disp)/2))
                                zone_list[insert:insert+len(disp):1] = disp
                    ones += 1
                # Write the finished flags back into the well column.
                ind = 0
                for idx, row in pseudo_wells[pseudo_wells[zone_col] == zone].iterrows():
                    pseudo_wells.loc[row.name, well] = zone_list[ind]
                    ind += 1
    return pseudo_wells
def dict_mapper(row, sand, shale, no_wells, zone_col):
    """Replace binary lithology flags in *row* with rock-property values.

    For each well column 0..no_wells-1, a flag of 0 is replaced by the
    sand property and any other value by the shale property, both looked
    up by the row's zone name.  The row is modified in place and returned.
    """
    zone_name = row[zone_col]
    for well in range(no_wells):
        lookup = sand if row[well] == 0 else shale
        row[well] = lookup[zone_name]
    return row
def property_mapper(pseudo_wells, sand_density, shale_density, sand_vp, shale_vp, sand_vs, shale_vs, zone_col='Zone'):
    """Translate the binary lithology panel into property logs.

    Returns ``(density, vp, vs)`` DataFrames, each produced by running
    every row of ``pseudo_wells`` through ``dict_mapper`` with the
    matching per-zone sand/shale property dictionaries.
    """
    no_wells = len(pseudo_wells.columns) - 3

    def _mapped(sand_props, shale_props):
        # Row-wise mapping of flags -> property values for one log type.
        return pseudo_wells.apply(
            dict_mapper, args=(sand_props, shale_props, no_wells, zone_col),
            axis=1)

    density = _mapped(sand_density, shale_density)
    vp = _mapped(sand_vp, shale_vp)
    vs = _mapped(sand_vs, shale_vs)
    return density, vp, vs
def time_model(pseudo_wells, density, vp, vs, wcs_file, skip=1, zones={}, time='Time', depth='Depth', zone_idx='Zone_idx', zone='Zone'):
    """Resample the depth-domain property models to two-way time.

    ``wcs_file`` is a whitespace text checkshot; column 0 appears to be
    depth and column 1 two-way time (assumed -- confirm file format).
    Properties are block-averaged between consecutive 2 ms time samples.
    Returns ``(df_sampled, dens_twt, vp_twt, vs_twt)``.

    NOTE(review): ``zones={}`` is a shared mutable default (read-only here).
    """
    wcs = np.loadtxt(wcs_file, skiprows=skip)
    # Checkshot rows nearest the model's depth range endpoints.
    idx1 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].min())).argmin()
    idx2 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].max())).argmin()
    # 2 ms time axis spanning the model window.
    time_frame = np.arange(np.around(wcs[idx1,1], decimals=0), np.around(wcs[idx2,1], decimals=0), 2)
    depth_frame = time_frame * 0
    # Nearest-neighbour depth for every time sample.
    for i in range(len(depth_frame)):
        idx = (np.abs(np.asarray(wcs[:,1]) - time_frame[i])).argmin()
        depth_frame[i] = np.around(wcs[idx,0], decimals=0)
    df_sampled = pd.DataFrame()
    df_sampled[depth] = depth_frame
    df_sampled[time] = time_frame
    dens_twt = pd.DataFrame()
    vp_twt = pd.DataFrame()
    vs_twt = pd.DataFrame()
    dens_twt[[time,depth]] = df_sampled[[time,depth]]
    vp_twt[[time,depth]] = df_sampled[[time,depth]]
    vs_twt[[time,depth]] = df_sampled[[time,depth]]
    # Average each property over the depth interval of each time sample;
    # the zone columns take the shallowest (min) zone index of the slice.
    for i, row in dens_twt.iterrows():
        if i > 0:
            # NOTE(review): dens_ uses an exclusive upper bound (<) while
            # vp_/vs_ use inclusive (<=) -- confirm this is intentional.
            dens_ = density[(density[depth] >= dens_twt.loc[i-1, depth]) & (density[depth] < dens_twt.loc[i, depth])]
            vp_ = vp[(vp[depth] >= vp_twt.loc[i-1, depth]) & (vp[depth] <= vp_twt.loc[i, depth])]
            vs_ = vs[(vs[depth] >= vs_twt.loc[i-1, depth]) & (vs[depth] <= vs_twt.loc[i, depth])]
            # NOTE(review): the zone writes below are repeated on every
            # pass of the well loop (and mix .at/.loc); the result is the
            # same each time, so only the per-well mean assignment varies.
            for j in range(len(pseudo_wells.columns)-3):
                dens_twt.at[i, j] = dens_.mean()[j]
                dens_twt.at[i, zone_idx] = dens_.min()[zone_idx]
                dens_twt.at[i, zone] = list(zones.keys())[int(dens_.min()[zone_idx])]
                vp_twt.loc[i, j] = vp_.mean()[j]
                vp_twt.loc[i, zone_idx] = vp_.min()[zone_idx]
                vp_twt.loc[i, zone] = list(zones.keys())[int(vp_.min()[zone_idx])]
                vs_twt.loc[i, j] = vs_.mean()[j]
                vs_twt.loc[i, zone_idx] = vs_.min()[zone_idx]
                vs_twt.at[i, zone] = list(zones.keys())[int(vs_.min()[zone_idx])]
    # The first sample has no preceding interval; copy the second row.
    dens_twt.loc[0,:] = dens_twt.loc[1,:]
    vp_twt.loc[0,:] = vp_twt.loc[1,:]
    vs_twt.loc[0,:] = vs_twt.loc[1,:]
    return df_sampled, dens_twt, vp_twt, vs_twt
def shuey(df_sampled, vp_twt, vs_twt, dens_twt, no_wells, angles, time='Time', depth='Depth'):
    """Three-term Shuey AVO: per-interface R0/G/F plus angle reflectivity.

    Interface properties are averaged between consecutive time samples;
    contrasts are the sample-to-sample differences.  Returns
    ``(r0_twt, G_twt, F_twt, reflectivity)`` where ``reflectivity`` is
    (samples, angles, wells).

    NOTE(review): Shuey's (1985) gradient uses (Vs/Vp)**2; this code uses
    (Vp/Vs)**2 -- confirm before trusting the G values.
    """
    r0_twt = pd.DataFrame()
    G_twt = pd.DataFrame()
    F_twt = pd.DataFrame()
    r0_twt[[time,depth]] = df_sampled[[time,depth]]
    G_twt[[time,depth]] = df_sampled[[time,depth]]
    F_twt[[time,depth]] = df_sampled[[time,depth]]
    for i, row in df_sampled.iterrows():
        if i > 0:
            for j in range(no_wells):
                # Interface (average) properties and relative contrasts.
                dens_ = (dens_twt.loc[i,j] + dens_twt.loc[i-1,j]) / 2
                vp_ = (vp_twt.loc[i,j] + vp_twt.loc[i-1,j]) / 2
                vs_ = (vs_twt.loc[i,j] + vs_twt.loc[i-1,j]) / 2
                dens_term = (dens_twt.loc[i,j] - dens_twt.loc[i-1,j]) / dens_
                vp_term = (vp_twt.loc[i,j] - vp_twt.loc[i-1,j]) / vp_
                vs_term = (vs_twt.loc[i,j] - vs_twt.loc[i-1,j]) / vs_
                r0_twt.loc[i, j] = 0.5 * (vp_term + dens_term)
                G_twt.loc[i,j] = 0.5 * vp_term - 2 * (vp_twt.loc[i,j]/vs_twt.loc[i,j])**2 * (dens_term + 2 * vs_term)
                F_twt.loc[i,j] = 0.5 * vp_term
    # The first sample has no preceding interface; copy the second row.
    r0_twt.loc[0,:] = r0_twt.loc[1,:]
    G_twt.loc[0,:] = G_twt.loc[1,:]
    F_twt.loc[0,:] = F_twt.loc[1,:]
    # R(theta) = R0 + G*sin^2(theta) + F*(tan^2(theta) - sin^2(theta)).
    # Note the offset: the interface at sample i is stored at row i-1.
    reflectivity = np.zeros((len(r0_twt), len(angles), no_wells))
    for i in range(1, len(r0_twt)-1):
        for j in range(len(angles)):
            for k in range(no_wells):
                reflectivity[i-1,j,k] = r0_twt.loc[i,k] + G_twt.loc[i,k] * np.sin(np.radians(angles[j]))**2
                reflectivity[i-1,j,k] += F_twt.loc[i,k]*(np.tan(np.radians(angles[j]))**2 - np.sin(np.radians(angles[j]))**2)
    return r0_twt, G_twt, F_twt, reflectivity
def ricker(f, length=128, dt=2):
    """Ricker (Mexican-hat) wavelet of peak frequency ``f`` Hz.

    ``length`` and ``dt`` are given in milliseconds.  Returns ``(t, y)``
    where ``t`` is the time axis in milliseconds and ``y`` the
    unit-peak wavelet amplitudes.
    """
    length_s = length / 1000
    dt_s = dt / 1000
    t = np.arange(-length_s / 2, (length_s - dt_s) / 2, dt_s)
    # (pi * f * t)^2 appears in both the polynomial and the exponential.
    arg = (np.pi ** 2) * (f ** 2) * (t ** 2)
    y = (1.0 - 2.0 * arg) * np.exp(-arg)
    return 1000 * t, y
def seis_convolve(reflectivity, wavelet, model, angles=None, time='Time', depth='Depth', zone_idx='Zone_idx', zone='Zone'):
    """Convolve angle reflectivities with a wavelet and build stacks.

    ``reflectivity`` is (samples, angles, wells).  Returns
    ``(amplitude, stack, nears, fars)``: per-angle synthetics plus a full
    stack and near (<=10 deg) / far (>=20 deg) partial stacks.  Without
    ``angles`` the stack frames remain zero-filled.
    """
    amplitude = 0 * reflectivity
    # Same-length convolution per angle/well trace.
    for j in range(reflectivity.shape[1]):
        for k in range(reflectivity.shape[2]):
            amplitude[:, j, k] = np.convolve(reflectivity[:,j,k], wavelet, mode='same')
    stack = pd.DataFrame()
    stack[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]]
    nears = pd.DataFrame()
    nears[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]]
    fars = pd.DataFrame()
    fars[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]]
    # NOTE(review): ``model`` carries four metadata columns here
    # (Depth/Time/Zone/Zone_idx) while ``len(columns) - 3`` assumes
    # three, so this likely seeds one zero column too many -- harmless
    # for the sums below (they loop over reflectivity.shape[2]) but
    # worth confirming.
    for k in range(len(model.columns)-3):
        stack[k] = 0
        nears[k] = 0
        fars[k] = 0
    if angles is not None:
        for k in range(reflectivity.shape[2]):
            for j in range(reflectivity.shape[1]):
                stack[k] = stack[k] + amplitude[:,j,k]
                if angles[j] <= 10:
                    nears[k] = nears[k] + amplitude[:,j,k]
                elif angles[j] >= 20:
                    fars[k] = fars[k] + amplitude[:,j,k]
    return amplitude, stack, nears, fars
def create_wedge(tmin, tmax, sr, no_traces, top, pad, thick, wedge_dens, wedge_vp, wedge_vs, outer_dens, outer_vp, outer_vs, start, stop, wavelet):
    """Build a time-domain wedge model and its Shuey synthetics.

    A wedge of (wedge_dens/vp/vs) thickens linearly from 0 to ``thick``
    across the traces after a leading pad fraction; everything else has
    the outer properties.  Returns ``(no_traces, wedge flags, per-angle
    amplitudes, full stack)``.

    NOTE(review): like ``shuey``, the gradient term uses (Vp/Vs)**2
    where Shuey (1985) has (Vs/Vp)**2 -- confirm.
    """
    time_log = np.arange(tmin, tmax, sr)
    wedge = pd.DataFrame(np.zeros((len(time_log), no_traces)))
    wedge['TWT'] = time_log
    # Flag samples inside the wedge; trace position controls thickness.
    for idx, row in wedge.iterrows():
        if row.TWT > top and row.TWT < top+thick:
            for well in range(no_traces):
                if well > pad*no_traces:
                    if row.TWT <= top + thick * (well - pad*no_traces) / ((1-pad)*no_traces):
                        wedge.loc[idx, well] = 1
    # Shuey R0/G/F for the top interface (outer -> wedge) ...
    avg_dens = 1/2 * (wedge_dens + outer_dens)
    avg_vp = 1/2 * (wedge_vp + outer_vp)
    avg_vs = 1/2 * (wedge_vs + outer_vs)
    dens_top = (wedge_dens - outer_dens) / avg_dens
    vp_top = (wedge_vp - outer_vp) / avg_vp
    vs_top = (wedge_vs - outer_vs) / avg_vs
    r0_top = 0.5 * (vp_top + dens_top)
    g_top = 0.5 * vp_top - 2 * (wedge_vp/wedge_vs)**2 * (dens_top + 2 * vs_top)
    f_top = 0.5 * vp_top
    # ... and the base interface, which carries the opposite contrasts.
    dens_base = -dens_top
    vp_base = -vp_top
    vs_base = -vs_top
    r0_base = 0.5 * (vp_base + dens_base)
    g_base = 0.5 * vp_base - 2 * (outer_vp/outer_vs)**2 * (dens_base + 2 * vs_base)
    f_base = 0.5 * vp_base
    angles = np.arange(start, stop)
    reflectivity = np.zeros((len(wedge), len(angles), no_traces))
    amplitude = 0 * reflectivity
    wedge_stk = np.zeros((len(wedge), no_traces))
    # Spike reflectivities at the wedge top/base, then convolve per angle.
    for k in range(no_traces):
        if k> pad*no_traces:
            top_idx = wedge.loc[wedge[k]==1,k].index[0]
            base_idx = wedge.loc[wedge[k]==1,k].index[-1]
            for j in range(len(angles)):
                theta = np.radians(angles[j])
                reflectivity[top_idx,j,k] = r0_top + g_top * np.sin(theta)**2 + f_top*(np.tan(theta)**2 - np.sin(theta)**2)
                reflectivity[base_idx,j,k] = r0_base + g_base * np.sin(theta)**2 + f_base*(np.tan(theta)**2 - np.sin(theta)**2)
                amplitude[:, j, k] = np.convolve(reflectivity[:,j,k], wavelet, mode='same')
    # Full stack: sum of all angle contributions.
    for j in range(len(angles)):
        wedge_stk += amplitude[:, j, :]
    return no_traces, wedge, amplitude, wedge_stk
def syn_well(zmin, zmax, sr, wcs_path, logs, wcs_names=['MD', 'TWT'], skiprows=0, skipna=-999.25):
depth_well = logs.copy(deep=True)
wcs = pd.read_csv(wcs_path, delim_whitespace=True, names=wcs_names, skiprows=skiprows, na_values=skipna)
idx1 = (np.abs(wcs['MD'] - zmin)).idxmin()
idx2 = (np.abs(wcs['MD'] - zmax)).idxmin()
time_frame = np.arange(np.around(wcs.loc[idx1,'TWT'], decimals=0), np.around(wcs.loc[idx2,'TWT'], decimals=0), sr)
time_well = pd.DataFrame()
time_well['TWT'] = time_frame
interp_func = interpolate.interp1d(wcs['TWT'].values, wcs['MD'].values)
time_well['MD'] = time_well.apply(lambda row: np.around(interp_func(row['TWT']), decimals=3), axis=1)
for col in ['Vp', 'Vs', 'RHOB']: time_well[col] = 0.0
for i, row in time_well.iterrows():
if i > 0:
log_slice = depth_well[(depth_well.MD >= time_well.loc[i-1, 'MD']) & (depth_well.MD < time_well.loc[i, 'MD'])]
time_well.loc[i, ['Vp', 'Vs', 'RHOB']] = log_slice[['Vp', 'Vs', 'RHOB']].mean().values
time_well.loc[0, ['Vp', 'Vs', 'RHOB']] = time_well.loc[1, ['Vp', 'Vs', 'RHOB']]
time_well['VpVs'] = time_well['Vp'] / time_well['Vs']
time_well['PImp'] = time_well['Vp'] * time_well['RHOB']
for col in ['R0', 'G', 'F']: time_well[col] = 0.0
for i, row in time_well.iterrows():
if i > 0:
dens_ = (time_well.loc[i,'RHOB'] + time_well.loc[i-1,'RHOB']) / 2
vp_ = (time_well.loc[i,'Vp'] + time_well.loc[i-1,'Vp']) / 2
vs_ = (time_well.loc[i,'Vs'] + time_well.loc[i-1,'Vs']) / 2
dens_term = (time_well.loc[i,'RHOB'] - time_well.loc[i-1,'RHOB']) / dens_
vp_term = (time_well.loc[i,'Vp'] - time_well.loc[i-1,'Vp']) / vp_
vs_term = (time_well.loc[i,'Vs'] - time_well.loc[i-1,'Vs']) / vs_
time_well.loc[i, 'R0'] = 0.5 * (vp_term + dens_term)
time_well.loc[i,'G'] = 0.5 * vp_term - 2 * (time_well.loc[i,'Vp']/time_well.loc[i,'Vs'])**2 * (dens_term + 2 * vs_term)
time_well.loc[i,'F'] = 0.5 * vp_term
time_well.loc[0, ['R0', 'G', 'F']] = time_well.loc[1, ['R0', 'G', 'F']]
| [
"numpy.radians",
"numpy.abs",
"numpy.convolve",
"numpy.ones",
"pandas.read_csv",
"numpy.tan",
"numpy.sin",
"numpy.asarray",
"scipy.interpolate.interp1d",
"numpy.exp",
"pandas.merge_asof",
"numpy.around",
"pandas.DataFrame",
"numpy.loadtxt",
"numpy.arange"
] | [((230, 255), 'numpy.arange', 'np.arange', (['zmin', 'zmax', 'sr'], {}), '(zmin, zmax, sr)\n', (239, 255), True, 'import numpy as np\n'), ((378, 392), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (390, 392), True, 'import pandas as pd\n'), ((507, 554), 'pandas.merge_asof', 'pd.merge_asof', (['pseudo_wells', 'zones_df'], {'on': 'depth'}), '(pseudo_wells, zones_df, on=depth)\n', (520, 554), True, 'import pandas as pd\n'), ((3279, 3314), 'numpy.loadtxt', 'np.loadtxt', (['wcs_file'], {'skiprows': 'skip'}), '(wcs_file, skiprows=skip)\n', (3289, 3314), True, 'import numpy as np\n'), ((3810, 3824), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3822, 3824), True, 'import pandas as pd\n'), ((3911, 3925), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3923, 3925), True, 'import pandas as pd\n'), ((3940, 3954), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3952, 3954), True, 'import pandas as pd\n'), ((3968, 3982), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3980, 3982), True, 'import pandas as pd\n'), ((5447, 5461), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5459, 5461), True, 'import pandas as pd\n'), ((5474, 5488), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5486, 5488), True, 'import pandas as pd\n'), ((5501, 5515), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5513, 5515), True, 'import pandas as pd\n'), ((7102, 7147), 'numpy.arange', 'np.arange', (['(-length / 2)', '((length - dt) / 2)', 'dt'], {}), '(-length / 2, (length - dt) / 2, dt)\n', (7111, 7147), True, 'import numpy as np\n'), ((7611, 7625), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7623, 7625), True, 'import pandas as pd\n'), ((7718, 7732), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7730, 7732), True, 'import pandas as pd\n'), ((7824, 7838), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7836, 7838), True, 'import pandas as pd\n'), ((8607, 8632), 'numpy.arange', 'np.arange', (['tmin', 'tmax', 
'sr'], {}), '(tmin, tmax, sr)\n', (8616, 8632), True, 'import numpy as np\n'), ((9698, 9720), 'numpy.arange', 'np.arange', (['start', 'stop'], {}), '(start, stop)\n', (9707, 9720), True, 'import numpy as np\n'), ((10769, 10872), 'pandas.read_csv', 'pd.read_csv', (['wcs_path'], {'delim_whitespace': '(True)', 'names': 'wcs_names', 'skiprows': 'skiprows', 'na_values': 'skipna'}), '(wcs_path, delim_whitespace=True, names=wcs_names, skiprows=\n skiprows, na_values=skipna)\n', (10780, 10872), True, 'import pandas as pd\n'), ((11100, 11114), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11112, 11114), True, 'import pandas as pd\n'), ((11168, 11225), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["wcs['TWT'].values", "wcs['MD'].values"], {}), "(wcs['TWT'].values, wcs['MD'].values)\n", (11188, 11225), False, 'from scipy import interpolate\n'), ((3507, 3542), 'numpy.around', 'np.around', (['wcs[idx1, 1]'], {'decimals': '(0)'}), '(wcs[idx1, 1], decimals=0)\n', (3516, 3542), True, 'import numpy as np\n'), ((3543, 3578), 'numpy.around', 'np.around', (['wcs[idx2, 1]'], {'decimals': '(0)'}), '(wcs[idx2, 1], decimals=0)\n', (3552, 3578), True, 'import numpy as np\n'), ((3754, 3788), 'numpy.around', 'np.around', (['wcs[idx, 0]'], {'decimals': '(0)'}), '(wcs[idx, 0], decimals=0)\n', (3763, 3788), True, 'import numpy as np\n'), ((7191, 7228), 'numpy.exp', 'np.exp', (['(-np.pi ** 2 * f ** 2 * t ** 2)'], {}), '(-np.pi ** 2 * f ** 2 * t ** 2)\n', (7197, 7228), True, 'import numpy as np\n'), ((10992, 11035), 'numpy.around', 'np.around', (["wcs.loc[idx1, 'TWT']"], {'decimals': '(0)'}), "(wcs.loc[idx1, 'TWT'], decimals=0)\n", (11001, 11035), True, 'import numpy as np\n'), ((11036, 11079), 'numpy.around', 'np.around', (["wcs.loc[idx2, 'TWT']"], {'decimals': '(0)'}), "(wcs.loc[idx2, 'TWT'], decimals=0)\n", (11045, 11079), True, 'import numpy as np\n'), ((7539, 7595), 'numpy.convolve', 'np.convolve', (['reflectivity[:, j, k]', 'wavelet'], {'mode': '"""same"""'}), 
"(reflectivity[:, j, k], wavelet, mode='same')\n", (7550, 7595), True, 'import numpy as np\n'), ((10880, 10904), 'numpy.abs', 'np.abs', (["(wcs['MD'] - zmin)"], {}), "(wcs['MD'] - zmin)\n", (10886, 10904), True, 'import numpy as np\n'), ((10929, 10953), 'numpy.abs', 'np.abs', (["(wcs['MD'] - zmax)"], {}), "(wcs['MD'] - zmax)\n", (10935, 10953), True, 'import numpy as np\n'), ((10120, 10141), 'numpy.radians', 'np.radians', (['angles[j]'], {}), '(angles[j])\n', (10130, 10141), True, 'import numpy as np\n'), ((10431, 10487), 'numpy.convolve', 'np.convolve', (['reflectivity[:, j, k]', 'wavelet'], {'mode': '"""same"""'}), "(reflectivity[:, j, k], wavelet, mode='same')\n", (10442, 10487), True, 'import numpy as np\n'), ((1458, 1471), 'numpy.ones', 'np.ones', (['ones'], {}), '(ones)\n', (1465, 1471), True, 'import numpy as np\n'), ((3334, 3355), 'numpy.asarray', 'np.asarray', (['wcs[:, 0]'], {}), '(wcs[:, 0])\n', (3344, 3355), True, 'import numpy as np\n'), ((3415, 3436), 'numpy.asarray', 'np.asarray', (['wcs[:, 0]'], {}), '(wcs[:, 0])\n', (3425, 3436), True, 'import numpy as np\n'), ((3680, 3701), 'numpy.asarray', 'np.asarray', (['wcs[:, 1]'], {}), '(wcs[:, 1])\n', (3690, 3701), True, 'import numpy as np\n'), ((6797, 6818), 'numpy.radians', 'np.radians', (['angles[j]'], {}), '(angles[j])\n', (6807, 6818), True, 'import numpy as np\n'), ((6888, 6909), 'numpy.radians', 'np.radians', (['angles[j]'], {}), '(angles[j])\n', (6898, 6909), True, 'import numpy as np\n'), ((6923, 6944), 'numpy.radians', 'np.radians', (['angles[j]'], {}), '(angles[j])\n', (6933, 6944), True, 'import numpy as np\n'), ((10203, 10216), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10209, 10216), True, 'import numpy as np\n'), ((10229, 10242), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (10235, 10242), True, 'import numpy as np\n'), ((10248, 10261), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10254, 10261), True, 'import numpy as np\n'), ((10330, 10343), 'numpy.sin', 
'np.sin', (['theta'], {}), '(theta)\n', (10336, 10343), True, 'import numpy as np\n'), ((10357, 10370), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (10363, 10370), True, 'import numpy as np\n'), ((10376, 10389), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10382, 10389), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright 2016-2017 TrungNT
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import sys
import inspect
import marshal
import warnings
from array import array
from six.moves import builtins
from collections import OrderedDict, defaultdict
from collections import MutableMapping, Mapping
from functools import wraps, partial
from six import string_types
from six.moves import zip, zip_longest, cPickle
import types
import numpy as np
# Public names exported by `from <module> import *` (PEP 8 `__all__` convention).
__all__ = [
    'typecheck',
    'autoattr',
    'abstractstatic',
    'functionable',
    'singleton'
]
# ===========================================================================
# Type enforcement
# ===========================================================================
def _info(fname, expected, actual, flag):
  '''Build a readable type-mismatch message.

  `flag` selects the wording: 0 for input checking, 1 for output checking.
  '''
  def fmt(type_seq):
    # repr of a type is "<class 'int'>"; splitting on quotes extracts "int".
    parts = []
    for entry in type_seq:
      if isinstance(entry, (tuple, list)):
        inner = ', '.join(str(t).split("'")[1] for t in entry)
        parts.append('(' + inner + ')')
      else:
        parts.append(str(entry).split("'")[1])
    return ', '.join(parts)
  direction = ("inputs", "outputs")[flag]
  phrase = ("was given", "result is")[flag]
  return ("'{}' {} ".format(fname, 'method') + direction
          + " ({}), but ".format(fmt(expected))
          + phrase + " ({})".format(fmt(actual)))
def _compares_types(argtype, force_types):
  '''Return True when every observed type satisfies its constraint.

  A constraint is either a single type or a tuple/list of acceptable
  types; pairs beyond the shorter sequence are ignored (zip semantics).
  '''
  for observed, allowed in zip(argtype, force_types):
    if isinstance(allowed, (tuple, list)):
      if observed not in allowed:
        return False
    elif observed != allowed:
      return False
  return True
def typecheck(inputs=None, outputs=None, debug=2):
  '''Function/Method decorator. Checks decorated function's arguments are
  of the expected types.

  Parameters
  ----------
  inputs : types
      The expected types of the inputs to the decorated function.
      Must specify type for each parameter.
  outputs : types
      The expected type of the decorated function's return value.
      Must specify type for each parameter.
  debug : int, str
      Optional specification of 'debug' level:
      0:'ignore', 1:'warn', 2:'raise'

  Examples
  --------
  >>> # Function typecheck
  >>> @typecheck(inputs=(int, str, float), outputs=(str))
  >>> def function(a, b, c):
  ...     return b
  >>> function(1, '1', 1.) # no error
  >>> function(1, '1', 1) # error, final argument must be float
  ...
  >>> # method typecheck
  >>> class ClassName(object):
  ...     @typecheck(inputs=(str, int), outputs=int)
  ...     def method(self, a, b):
  ...         return b
  >>> x = ClassName()
  >>> x.method('1', 1) # no error
  >>> x.method(1, '1') # error
  '''
  # Guard: using @typecheck without parentheses would pass the decorated
  # function itself in as `inputs`.
  if inspect.ismethod(inputs) or inspect.isfunction(inputs):
    raise ValueError('You must specify either [inputs] types or [outputs]'
                     ' types arguments.')
  # ====== parse debug ====== #
  if isinstance(debug, str):
    debug_str = debug.lower()
    if 'raise' in debug_str:
      debug = 2
    elif 'warn' in debug_str:
      debug = 1
    else:
      debug = 0
  elif debug not in (0, 1, 2):
    debug = 2
  # ====== normalize type specs to tuples ====== #
  if inputs is not None and not isinstance(inputs, (tuple, list)):
    inputs = (inputs,)
  if outputs is not None and not isinstance(outputs, (tuple, list)):
    outputs = (outputs,)

  def wrap_function(func):
    # ====== fetch arguments order ====== #
    sign = inspect.signature(func)
    args_name = []
    args_defaults = OrderedDict()
    for n, p in sign.parameters.items():
      if p.kind in (inspect.Parameter.VAR_POSITIONAL,
                    inspect.Parameter.VAR_KEYWORD):
        continue
      args_name.append(n)
      if p.default != inspect.Parameter.empty:
        args_defaults[n] = p.default

    @wraps(func)
    def wrapper(*args, **kwargs):
      input_args = list(args)
      excluded = {i: j for i, j in zip(args_name, input_args)}
      # Fill in values for defaulted parameters that were not given
      # positionally, so `input_args` lines up with `args_name`.
      for i, j in args_defaults.items():
        if i in excluded: # already input as positional argument
          continue
        if i in kwargs: # specified value
          input_args.append(kwargs[i])
        else: # default value
          input_args.append(j)
      ### main logic
      # BUGFIX: the original compared `debug is 0/1/2`; identity tests on
      # ints only work by CPython's small-int caching and raise
      # SyntaxWarning on Python 3.8+.  Use equality instead.
      if debug == 0: # ignore
        return func(*args, **kwargs)
      ### Check inputs
      if inputs is not None:
        length = int(min(len(input_args), len(inputs)))
        argtypes = tuple(map(type, input_args))
        # TODO: smarter way to check argtypes for methods
        # The second comparison skips argument 0 (`self`) so the same
        # spec works for bound methods.
        if not _compares_types(argtypes[:length], inputs[:length]) and \
           not _compares_types(argtypes[1:length + 1], inputs[:length]): # wrong types
          msg = _info(func.__name__, inputs, argtypes, 0)
          if debug == 1:
            print('TypeWarning:', msg)
          elif debug == 2:
            raise TypeError(msg)
      ### get results
      results = func(*args, **kwargs)
      ### Check outputs
      if outputs is not None:
        res_types = ((type(results),)
                     if not isinstance(results, (tuple, list))
                     else tuple(map(type, results)))
        length = min(len(res_types), len(outputs))
        if len(outputs) > len(res_types) or \
           not _compares_types(res_types[:length], outputs[:length]):
          msg = _info(func.__name__, outputs, res_types, 1)
          if debug == 1:
            print('TypeWarning: ', msg)
          elif debug == 2:
            raise TypeError(msg)
      ### finally everything ok
      return results
    return wrapper
  return wrap_function
# ===========================================================================
# Auto set attributes
# ===========================================================================
def autoattr(*args, **kwargs):
  '''Decorator that updates attributes on ``args[0]`` (``self``) after
  each call.

  Attribute names given in ``*args`` are set to ``True``; attributes given
  in ``**kwargs`` are set to the provided value, or — when the value is
  callable — to ``value(current_attribute)``.

  Example
  -------
  >>> class ClassName(object):
  ..... def __init__(self):
  ......... super(ClassName, self).__init__()
  ......... self.arg1 = 1
  ......... self.arg2 = False
  ...... @autoattr('arg1', arg1=lambda x: x + 1)
  ...... def test1(self):
  ......... print(self.arg1)
  ...... @autoattr('arg2')
  ...... def test2(self):
  ......... print(self.arg2)
  >>> c = ClassName()
  >>> c.test1() # arg1 = 2
  >>> c.test2() # arg2 = True
  '''
  # Guard: using @autoattr without parentheses would pass the decorated
  # function itself as the first positional argument.
  if len(args) > 0 and (inspect.ismethod(args[0]) or inspect.isfunction(args[0])):
    raise ValueError('You must specify at least 1 *args or **kwargs, all '
                     'attributes in *args will be setted to True, likewise, '
                     'all attributes in **kwargs will be setted to given '
                     'value.')
  attrs = {i: True for i in args}
  attrs.update(kwargs)

  def wrap_function(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
      results = func(*args, **kwargs)
      # args[0] is `self` when the wrapped callable is a method.
      if len(args) > 0:
        obj = args[0]
        for name, value in attrs.items():
          if hasattr(obj, name):
            # IDIOM: use callable() rather than hasattr(x, '__call__').
            if callable(value):
              # Transform the current attribute value.
              setattr(obj, str(name), value(getattr(obj, name)))
            else:
              setattr(obj, str(name), value)
      return results
    return wrapper
  return wrap_function
# ===========================================================================
# Abstract static
# ===========================================================================
class abstractstatic(staticmethod):
  """A ``staticmethod`` that also registers as abstract with the ``abc``
  machinery, so subclasses must override it before instantiation."""
  __slots__ = ()
  __isabstractmethod__ = True

  def __init__(self, function):
    # Mark the underlying callable as abstract too, mirroring
    # abc.abstractmethod's behaviour.
    function.__isabstractmethod__ = True
    super(abstractstatic, self).__init__(function)
# ===========================================================================
# Python utilities
# ===========================================================================
# Value types that are captured verbatim when serializing a function's
# global environment; anything else needs the special-case handling in
# _serialize_function_sandbox.
_primitives = (bool, int, float, str,
               tuple, list, dict, type, types.ModuleType, types.FunctionType,
               type(None), type(type), np.ndarray)
def func_to_str(func):
  '''Serialize *func* into a ``(code, closure, defaults)`` triple.

  The code object is marshalled, wrapped in a byte array and pickled.
  Closure cell contents (if any) are captured by value — with a warning,
  since they cannot be faithfully restored later.
  '''
  marshalled = marshal.dumps(func.__code__)
  code = cPickle.dumps(array("B", marshalled),
                       protocol=cPickle.HIGHEST_PROTOCOL)
  cells = func.__closure__
  closure = None
  if cells is not None:
    print("[WARNING] function: %s contains closure, which cannot be "
          "serialized." % str(func))
    closure = tuple(c.cell_contents for c in cells)
  return (code, closure, func.__defaults__)
def str_to_func(s, sandbox=None):
  '''Rebuild a function from the output of ``func_to_str``.

  ``s`` may be the raw ``(code, closure, defaults)`` triple, a path to a
  pickle file holding that triple, or the pickled triple itself.  When
  ``sandbox`` is a mapping it becomes the function's globals; otherwise
  this module's globals are used.
  '''
  if isinstance(s, (tuple, list)):
    packed = s
  elif isinstance(s, string_types):
    # Either a path on disk or an in-memory pickled payload.
    if os.path.isfile(s):
      with open(s, 'rb') as f:
        packed = cPickle.load(f)
    else: # pickled string
      packed = cPickle.loads(s)
  else:
    raise ValueError("Unsupport str_to_func for type:%s" % type(s))
  code, closure, defaults = packed
  code_obj = marshal.loads(cPickle.loads(code).tobytes())
  env = sandbox if isinstance(sandbox, Mapping) else globals()
  return types.FunctionType(code=code_obj, name=code_obj.co_name,
                            globals=env, closure=closure, argdefs=defaults)
def _serialize_function_sandbox(function, source):
  '''Capture the globals of *function* into a pickle-friendly mapping.

  Parameters
  ----------
  function : FunctionType
      the function whose global environment is captured
  source : str
      source code of the function

  Returns
  -------
  OrderedDict : name -> (type_tag, serialized_value); cPickle dumps-able
      dictionary to store as text
  '''
  import re
  # Dunder names (__builtins__, __name__, ...) are runtime-provided: skip.
  sys_module = re.compile(r"__\w+__")
  environment = function.__globals__
  func_module = function.__module__
  sandbox = OrderedDict()
  # ====== serialize primitive type ====== #
  seen_main_function = False
  for name, val in environment.items():
    typ = None
    # ignore system modules
    if sys_module.match(name) is not None:
      continue
    # support primitive type
    if builtins.any(isinstance(val, i) for i in _primitives):
      typ = type(val)
      if isinstance(val, np.ndarray):
        # arrays are stored as (raw_bytes, dtype) pairs
        val = (val.tostring(), val.dtype)
        typ = 'ndarray'
      # special case: import module — stored by name, re-imported on load
      elif isinstance(val, types.ModuleType):
        val = val.__name__
        typ = 'module'
      # edward distribution
      elif isinstance(val, type) and str(val.__module__) == 'abc' and \
      str(type(val).__module__) == "tensorflow.contrib.distributions.python.ops.distribution":
        val = val.__name__
        typ = 'edward_distribution'
      # the FunctionType itself cannot be pickled (weird!)
      elif val is types.FunctionType:
        val = None
        typ = 'function_type'
      # for some reason, pickle cannot serialize None type
      elif val is None:
        val = None
        typ = 'None'
      elif isinstance(val, Mapping):
        # generic mappings are pickled wholesale
        val = cPickle.dumps(val, protocol=cPickle.HIGHEST_PROTOCOL)
        typ = 'Mapping'
    elif inspect.isfunction(val): # special case: function
      # function might nested, so cannot find it in globals()
      if val == function:
        seen_main_function = True
      # a '_main' suffix marks the entry-point function itself
      _ = '_main' if function == val else ''
      if val.__module__ != func_module:
        # imported function: store (name, module), re-imported on load
        typ = 'imported_function'
        val = (val.__name__, val.__module__)
      # defined function in the same script file
      else:
        typ = 'defined_function'
        val = func_to_str(val)
      typ += _
    # finally add to sandbox valid type
    if typ is not None:
      sandbox[name] = (typ, val)
  # ====== not seen the main function ====== #
  # e.g. lambdas and nested functions never appear in their own globals,
  # so serialize the function itself under a reserved key.
  if not seen_main_function: # mark the main function with "_main"
    sandbox['random_name_1234'] = ('defined_function_main',
                                   func_to_str(function))
  return sandbox
def _deserialize_function_sandbox(sandbox):
  '''Rebuild the environment captured by ``_serialize_function_sandbox``.

  Parameters
  ----------
  sandbox : dictionary
      create by `serialize_sandbox`

  Returns
  -------
  (main_func, environment) : the entry-point function (or None when no
      entry was tagged '_main') and the reconstructed name -> value map
  '''
  import marshal
  # importlib can emit ImportWarning on some interpreters; silence it.
  with warnings.catch_warnings():
    warnings.filterwarnings(action='ignore', category=ImportWarning)
    import importlib
  environment = {}
  defined_function = []
  main_func = None
  # first pass we deserialize all type except function type
  for name, (typ, val) in sandbox.items():
    if isinstance(typ, string_types):
      if typ == 'None':
        val = None
      elif typ == 'edward_distribution':
        try:
          import edward
          val = getattr(edward.models, val)
        except ImportError:
          raise ImportError("Cannot import 'edward' library to deserialize "
                            "the function.")
        # exec("from edward.models import %s as %s" % (val, name))
      elif typ == 'function_type':
        val = types.FunctionType
      elif typ == 'Mapping':
        val = cPickle.loads(val)
      elif typ == 'ndarray':
        val = np.fromstring(val[0], dtype=val[1])
      elif typ == 'module':
        val = importlib.import_module(val)
      elif 'imported_function' == typ:
        val = getattr(importlib.import_module(val[1]), val[0])
        # NOTE(review): because the branch condition is an exact `==`
        # comparison, '_main' can never be in typ here — confirm whether
        # matching 'imported_function_main' was intended.
        if '_main' in typ: main_func = val
      elif 'defined_function' in typ:
        # rebuild the code object now; globals are patched in second pass
        val = str_to_func(val, globals())
        if '_main' in typ: main_func = val
        defined_function.append(name)
    elif builtins.any(isinstance(typ, i) for i in _primitives):
      # primitive values were stored verbatim; keep them as-is
      pass
    else:
      raise ValueError('Unsupport deserializing type: {}, '
                       'value: {}'.format(typ, val))
    environment[name] = val
  # ====== create all defined function ====== #
  # second pass: point every rebuilt function's globals at the new environment
  for name in defined_function:
    func = environment[name]
    func.__globals__.update(environment)
  return main_func, environment
class _ArgPlaceHolder_(object):
  """Sentinel type marking a parameter with neither a given value nor a
  declared default; such parameters are dropped at call time."""
  pass


class functionable(object):
  """ Class handles save and load a function with its arguments

  This function does perfectly for following cases:
  - Pickling `lambda` function without external dependencies.
  - Pickling top-level function.
  - Pickling imported function.

  Parameters
  ----------
  func: function
      lambda or function
  arg: list
      stored arguments list for given function
  kwargs: dict
      stored keyword arguments for given function

  Note
  ----
  - Please use this function with care, only primitive variables
  are stored in pickling the function.
  - Avoid involving closure in creating function (because closure cannot
  be serialized with any mean), for example:

  Example
  -------
  >>> # Wrong way:
  >>> lambda: obj.y
  >>> # Good way (explicitly store the obj in default arguments):
  >>> lambda x=obj: x.y
  """

  def __init__(self, func, *args, **kwargs):
    super(functionable, self).__init__()
    self._function = func
    self.__name__ = self._function.__name__
    try: # sometime cannot get the source
      self._source = inspect.getsource(self._function)
    except Exception as e:
      print("[WARNING] Cannot get source code of function:", func,
            "(error:%s)" % str(e))
      self._source = None
    # Try to pickle the function directly; fall back to serializing its
    # global sandbox when pickle cannot handle the function.
    try:
      self._sandbox = cPickle.dumps(self._function,
                                    protocol=cPickle.HIGHEST_PROTOCOL)
    except Exception:
      self._sandbox = _serialize_function_sandbox(func, self._source)
    # ====== store argsmap: parameter name -> bound value ====== #
    # Positional args fill parameters in declaration order, then explicit
    # kwargs, then declared defaults; anything still unbound gets a
    # placeholder that is filtered out at call time.
    sign = inspect.signature(func)
    argsmap = OrderedDict()
    for i, (n, p) in enumerate(sign.parameters.items()):
      if p.kind in (inspect.Parameter.VAR_POSITIONAL,
                    inspect.Parameter.VAR_KEYWORD):
        continue
      if i < len(args):
        argsmap[n] = args[i]
      elif n in kwargs:
        argsmap[n] = kwargs[n]
      elif p.default != inspect.Parameter.empty:
        argsmap[n] = p.default
      else:
        argsmap[n] = _ArgPlaceHolder_()
    self._argsmap = argsmap

  # ==================== Pickling methods ==================== #
  def __getstate__(self):
    # State is the serialized sandbox, the source text and the arg map.
    return (self._sandbox,
            self._source,
            self._argsmap)

  def __setstate__(self, states):
    (self._sandbox,
     self._source,
     self._argsmap) = states
    # ====== deserialize the function ====== #
    if isinstance(self._sandbox, string_types):
      self._function = cPickle.loads(self._sandbox)
    else:
      self._function, sandbox = _deserialize_function_sandbox(self._sandbox)
      if self._function is None:
        raise RuntimeError('[funtionable] Cannot find function in sandbox.')

  # ==================== properties ==================== #
  @property
  def function(self):
    return self._function

  @property
  def name(self):
    return self._function.__name__

  @property
  def source(self):
    return self._source

  @property
  def sandbox(self):
    return self._sandbox

  # ==================== methods ==================== #
  def __call__(self, *args, **kwargs):
    # Positional args override stored values in declaration order, then
    # kwargs; placeholders (still-unbound params) are dropped.
    final_args = self._argsmap.copy()
    for i, j in zip(final_args.keys(), args):
      final_args[i] = j
    final_args.update(kwargs)
    final_args = {i: j for i, j in final_args.items()
                  if not isinstance(j, _ArgPlaceHolder_)}
    return self._function(**final_args)

  def __str__(self):
    s = 'Name: %s\n' % self._function.__name__
    s += 'kwargs: %s\n' % str(self._argsmap)
    if isinstance(self._sandbox, string_types):
      s += 'Sandbox: pickle-able\n'
    else:
      s += 'Sandbox:%s\n' % str(len(self._sandbox))
    s += str(self._source)
    return s[:-1]

  def __eq__(self, other):
    if self._function == other._function and \
    self._argsmap == other._argsmap:
      return True
    return False

  # ==================== update kwargs ==================== #
  def __setitem__(self, key, value):
    """Set an argument by keyword name (str) or positional index (int)."""
    if not isinstance(key, (str, int, float)):
      raise ValueError('Only accept string for kwargs key or int for '
                       'index of args, but type(key)={}'.format(type(key)))
    if isinstance(key, str):
      if key in self._argsmap:
        self._argsmap[key] = value
    else:
      key = int(key)
      if key < len(self._argsmap):
        # BUGFIX: OrderedDict.keys() is not subscriptable on Python 3
        # (the original `self._argsmap.keys()[key]` raised TypeError);
        # materialize the key order before positional lookup.
        key = list(self._argsmap)[key]
      self._argsmap[key] = value

  def __getitem__(self, key):
    """Get an argument by keyword name (str) or positional index (int)."""
    if not isinstance(key, (str, int, float)):
      raise ValueError('Only accept string for kwargs key or int for '
                       'index of args, but type(key)={}'.format(type(key)))
    if isinstance(key, str):
      return self._argsmap[key]
    # BUGFIX: the original called `self._argsmap(int(key))` — calling a
    # dict raises TypeError; positional access means "the value of the
    # key at that index".
    return self._argsmap[list(self._argsmap)[int(key)]]
# ===========================================================================
# Singleton metaclass
# ===========================================================================
def singleton(cls):
  '''Class decorator rebuilding *cls* with the :class:`Singleton`
  metaclass, so constructing it with equal identity arguments always
  returns the same instance.

  @NOTE: this is copy from six.add_metaclass
  '''
  if not isinstance(cls, type):
    raise Exception('singleton decorator only accept class (type).')
  body = dict(cls.__dict__)
  slots = body.get('__slots__')
  if slots is not None:
    slot_names = [slots] if isinstance(slots, str) else slots
    for slot_name in slot_names:
      body.pop(slot_name)
  # These are recreated automatically by the new class object.
  body.pop('__dict__', None)
  body.pop('__weakref__', None)
  return Singleton(cls.__name__, cls.__bases__, body)
class Singleton(type):
  """Metaclass caching one instance per identity returned by ``_get_id``."""
  # class_type -> [(identity, instance), ...]
  _INSTANCES = defaultdict(list)

  @staticmethod
  def _dispose(self):
    # Drop every cached entry that points at this instance.
    clz = self.__class__
    Singleton._INSTANCES[clz] = [
        pair for pair in Singleton._INSTANCES[clz] if pair[1] != self]

  def __new__(mcs, name, bases, class_dict):
    # Enforce the `_get_id` contract at class-creation time.
    if '_get_id' not in class_dict:
      raise ValueError("Instance of Singleton must define classmethod "
          "'_get_id', this method takes the same arguments as __init__ "
          "and return the unique identity for an instance.")
    return super().__new__(mcs, name, bases, class_dict)

  def __call__(cls, *args, **kwargs):
    identity = cls._get_id(*args, **kwargs)
    # Reuse the cached instance for this identity, when one exists.
    cached = Singleton._INSTANCES[cls]
    for known_id, instance in cached:
      if known_id == identity:
        return instance
    # Otherwise create, remember and tag the new instance.
    instance = super(Singleton, cls).__call__(*args, **kwargs)
    cached.append((identity, instance))
    setattr(instance, '__del__',
            types.MethodType(Singleton._dispose, instance))
    return instance
| [
"re.compile",
"inspect.signature",
"types.MethodType",
"inspect.ismethod",
"six.moves.cPickle.load",
"functools.wraps",
"numpy.fromstring",
"six.moves.zip",
"collections.OrderedDict",
"importlib.import_module",
"os.path.isfile",
"inspect.isfunction",
"warnings.filterwarnings",
"inspect.get... | [((1650, 1675), 'six.moves.zip', 'zip', (['argtype', 'force_types'], {}), '(argtype, force_types)\n', (1653, 1675), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((9425, 9447), 're.compile', 're.compile', (['"""__\\\\w+__"""'], {}), "('__\\\\w+__')\n", (9435, 9447), False, 'import re\n'), ((9534, 9547), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9545, 9547), False, 'from collections import OrderedDict, defaultdict\n'), ((19187, 19204), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (19198, 19204), False, 'from collections import OrderedDict, defaultdict\n'), ((2873, 2897), 'inspect.ismethod', 'inspect.ismethod', (['inputs'], {}), '(inputs)\n', (2889, 2897), False, 'import inspect\n'), ((2901, 2927), 'inspect.isfunction', 'inspect.isfunction', (['inputs'], {}), '(inputs)\n', (2919, 2927), False, 'import inspect\n'), ((3598, 3621), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (3615, 3621), False, 'import inspect\n'), ((3661, 3674), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3672, 3674), False, 'from collections import OrderedDict, defaultdict\n'), ((3955, 3966), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3960, 3966), False, 'from functools import wraps, partial\n'), ((6873, 6884), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (6878, 6884), False, 'from functools import wraps, partial\n'), ((11798, 11823), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (11821, 11823), False, 'import warnings\n'), ((11829, 11893), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ImportWarning'}), "(action='ignore', category=ImportWarning)\n", (11852, 11893), False, 'import warnings\n'), ((15187, 15210), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (15204, 15210), False, 'import inspect\n'), ((15225, 15238), 'collections.OrderedDict', 'OrderedDict', 
([], {}), '()\n', (15236, 15238), False, 'from collections import OrderedDict, defaultdict\n'), ((6465, 6490), 'inspect.ismethod', 'inspect.ismethod', (['args[0]'], {}), '(args[0])\n', (6481, 6490), False, 'import inspect\n'), ((6494, 6521), 'inspect.isfunction', 'inspect.isfunction', (['args[0]'], {}), '(args[0])\n', (6512, 6521), False, 'import inspect\n'), ((8080, 8108), 'marshal.dumps', 'marshal.dumps', (['func.__code__'], {}), '(func.__code__)\n', (8093, 8108), False, 'import marshal\n'), ((8620, 8637), 'os.path.isfile', 'os.path.isfile', (['s'], {}), '(s)\n', (8634, 8637), False, 'import os\n'), ((14711, 14744), 'inspect.getsource', 'inspect.getsource', (['self._function'], {}), '(self._function)\n', (14728, 14744), False, 'import inspect\n'), ((14973, 15037), 'six.moves.cPickle.dumps', 'cPickle.dumps', (['self._function'], {'protocol': 'cPickle.HIGHEST_PROTOCOL'}), '(self._function, protocol=cPickle.HIGHEST_PROTOCOL)\n', (14986, 15037), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((16101, 16129), 'six.moves.cPickle.loads', 'cPickle.loads', (['self._sandbox'], {}), '(self._sandbox)\n', (16114, 16129), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((20197, 20238), 'types.MethodType', 'types.MethodType', (['Singleton._dispose', 'obj'], {}), '(Singleton._dispose, obj)\n', (20213, 20238), False, 'import types\n'), ((8779, 8795), 'six.moves.cPickle.loads', 'cPickle.loads', (['s'], {}), '(s)\n', (8792, 8795), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((8895, 8914), 'six.moves.cPickle.loads', 'cPickle.loads', (['code'], {}), '(code)\n', (8908, 8914), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((4066, 4092), 'six.moves.zip', 'zip', (['args_name', 'input_args'], {}), '(args_name, input_args)\n', (4069, 4092), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((8704, 8719), 'six.moves.cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (8716, 8719), False, 'from six.moves import zip, 
zip_longest, cPickle\n'), ((12612, 12630), 'six.moves.cPickle.loads', 'cPickle.loads', (['val'], {}), '(val)\n', (12625, 12630), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((12674, 12709), 'numpy.fromstring', 'np.fromstring', (['val[0]'], {'dtype': 'val[1]'}), '(val[0], dtype=val[1])\n', (12687, 12709), True, 'import numpy as np\n'), ((10690, 10743), 'six.moves.cPickle.dumps', 'cPickle.dumps', (['val'], {'protocol': 'cPickle.HIGHEST_PROTOCOL'}), '(val, protocol=cPickle.HIGHEST_PROTOCOL)\n', (10703, 10743), False, 'from six.moves import zip, zip_longest, cPickle\n'), ((10779, 10802), 'inspect.isfunction', 'inspect.isfunction', (['val'], {}), '(val)\n', (10797, 10802), False, 'import inspect\n'), ((12752, 12780), 'importlib.import_module', 'importlib.import_module', (['val'], {}), '(val)\n', (12775, 12780), False, 'import importlib\n'), ((12842, 12873), 'importlib.import_module', 'importlib.import_module', (['val[1]'], {}), '(val[1])\n', (12865, 12873), False, 'import importlib\n')] |
import math
import keras
import matplotlib.pyplot as plt
import numpy as np
import hmm
class HMMDataGenerator(keras.utils.Sequence):
    """Keras sequence yielding discriminator batches: half the sequences are
    sampled from a reference HMM (label 1) and half from a freshly randomized
    HMM (label 0), one-hot encoded at character level."""

    def __init__(self, real_hmm, epoch_size, batch_size, seq_len):
        super().__init__()
        self._real_hmm = real_hmm
        self._epoch_size = epoch_size
        self._batch_size = batch_size
        self._seq_len = seq_len
        # Emission alphabet of the reference HMM, as one string.
        self._symbols = "".join(self._real_hmm.y)
        self._init_tokenizer()

    def _init_tokenizer(self):
        # Character-level tokenizer fitted on the emission alphabet.
        self._tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)
        self._tokenizer.fit_on_texts(self._symbols)

    def __len__(self):
        # Number of batches per epoch.
        return self._epoch_size

    def __getitem__(self, index):
        # A new random HMM is drawn for every batch; `index` is unused.
        rand_hmm = hmm.random_hmm(
            x=self._real_hmm.x, y=self._symbols, s=self._real_hmm.s
        )
        return self.create_batch(rand_hmm)

    def create_batch(self, other_hmm):
        """Assemble one shuffled batch: ~50% from *other_hmm* (label 0),
        ~50% from the reference HMM (label 1)."""
        n_fake = math.ceil(0.5 * self._batch_size)
        n_real = math.floor(0.5 * self._batch_size)
        # Sample and join the emitted symbol sequences from each model.
        fake_seqs = [
            "".join(other_hmm.simulate(self._seq_len, reset_before=True)[1])
            for _ in range(n_fake)
        ]
        fake_labels = np.zeros(n_fake)
        real_seqs = [
            "".join(self._real_hmm.simulate(self._seq_len, reset_before=True)[1])
            for _ in range(n_real)
        ]
        real_labels = np.ones(n_real)
        # One-hot encode, concatenate and shuffle jointly.
        X = np.concatenate((self._encode_hmm_outputs(fake_seqs),
                            self._encode_hmm_outputs(real_seqs)))
        y = np.concatenate((fake_labels, real_labels))
        order = np.random.permutation(self._batch_size)
        return X[order], y[order]

    def _encode_hmm_outputs(self, hmm_outputs):
        # Tokenizer ids start at 1; shift to 0-based before one-hot encoding.
        ids = np.array(self._tokenizer.texts_to_sequences(hmm_outputs)) - 1
        return keras.utils.to_categorical(ids, num_classes=len(self._symbols))

    def input_shape(self):
        # Shape of one encoded sample: (timesteps, alphabet size).
        return (self._seq_len, len(self._symbols))
def plot_model(model, to_file):
    """Render the model architecture (with layer shapes) to an image file."""
    diagram = keras.utils.plot_model(model, to_file=to_file, show_shapes=True)
    return diagram
def plot_acc(history, to_file=None, val=False):
    """Plot the training (and optionally validation) accuracy curves
    stored in a Keras History object; optionally save the figure."""
    series = ["Train"]
    plt.plot(history.history["accuracy"])
    if val:
        plt.plot(history.history["val_accuracy"])
        series.append("Val")
    plt.title("Model Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.legend(series, loc="center right")
    if to_file:
        plt.savefig(to_file)
    plt.show()
def plot_loss(history, to_file=None, val=False):
    """Plot the training (and optionally validation) loss curves
    stored in a Keras History object; optionally save the figure."""
    series = ["Train"]
    plt.plot(history.history["loss"])
    if val:
        plt.plot(history.history["val_loss"])
        series.append("Val")
    plt.title("Model Loss")
    plt.ylabel("Loss")
    plt.xlabel("Epoch")
    plt.legend(series, loc="center right")
    if to_file:
        plt.savefig(to_file)
    plt.show()
def callbacks(model_name):
    """Return the training callback list: checkpoint weights on best loss."""
    checkpoint = keras.callbacks.ModelCheckpoint(
        f"models/weights-{model_name}.h5",
        monitor="loss",
        save_best_only=True,
        save_weights_only=True,
    )
    return [checkpoint]
| [
"keras.preprocessing.text.Tokenizer",
"math.ceil",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.ylabel",
"math.floor",
"numpy.ones",
"keras.utils.plot_model",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.random.permutation",
"numpy.array",
"numpy.zeros",
"numpy.conca... | [((2840, 2904), 'keras.utils.plot_model', 'keras.utils.plot_model', (['model'], {'to_file': 'to_file', 'show_shapes': '(True)'}), '(model, to_file=to_file, show_shapes=True)\n', (2862, 2904), False, 'import keras\n'), ((2959, 2996), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (2967, 2996), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3091), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Accuracy"""'], {}), "('Model Accuracy')\n", (3073, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3118), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (3106, 3118), True, 'import matplotlib.pyplot as plt\n'), ((3123, 3142), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3133, 3142), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3332, 3334), True, 'import matplotlib.pyplot as plt\n'), ((3390, 3423), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (3398, 3423), True, 'import matplotlib.pyplot as plt\n'), ((3487, 3510), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Loss"""'], {}), "('Model Loss')\n", (3496, 3510), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3525, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3538, 3557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3548, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3747, 3749), True, 'import matplotlib.pyplot as plt\n'), ((3805, 3937), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['f"""models/weights-{model_name}.h5"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)', 
'save_weights_only': '(True)'}), "(f'models/weights-{model_name}.h5', monitor=\n 'loss', save_best_only=True, save_weights_only=True)\n", (3836, 3937), False, 'import keras\n'), ((513, 564), 'keras.preprocessing.text.Tokenizer', 'keras.preprocessing.text.Tokenizer', ([], {'char_level': '(True)'}), '(char_level=True)\n', (547, 564), False, 'import keras\n'), ((796, 867), 'hmm.random_hmm', 'hmm.random_hmm', ([], {'x': 'self._real_hmm.x', 'y': 'self._symbols', 's': 'self._real_hmm.s'}), '(x=self._real_hmm.x, y=self._symbols, s=self._real_hmm.s)\n', (810, 867), False, 'import hmm\n'), ((1225, 1258), 'math.ceil', 'math.ceil', (['(0.5 * self._batch_size)'], {}), '(0.5 * self._batch_size)\n', (1234, 1258), False, 'import math\n'), ((1286, 1320), 'math.floor', 'math.floor', (['(0.5 * self._batch_size)'], {}), '(0.5 * self._batch_size)\n', (1296, 1320), False, 'import math\n'), ((1614, 1641), 'numpy.zeros', 'np.zeros', (['num_other_samples'], {}), '(num_other_samples)\n', (1622, 1641), True, 'import numpy as np\n'), ((1934, 1959), 'numpy.ones', 'np.ones', (['num_real_samples'], {}), '(num_real_samples)\n', (1941, 1959), True, 'import numpy as np\n'), ((2210, 2263), 'numpy.concatenate', 'np.concatenate', (['(other_samples_enc, real_samples_enc)'], {}), '((other_samples_enc, real_samples_enc))\n', (2224, 2263), True, 'import numpy as np\n'), ((2276, 2319), 'numpy.concatenate', 'np.concatenate', (['(other_labels, real_labels)'], {}), '((other_labels, real_labels))\n', (2290, 2319), True, 'import numpy as np\n'), ((2363, 2402), 'numpy.random.permutation', 'np.random.permutation', (['self._batch_size'], {}), '(self._batch_size)\n', (2384, 2402), True, 'import numpy as np\n'), ((3017, 3058), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (3025, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3164, 3212), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Val']"], {'loc': '"""center right"""'}), 
"(['Train', 'Val'], loc='center right')\n", (3174, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3272), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train']"], {'loc': '"""center right"""'}), "(['Train'], loc='center right')\n", (3241, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3318), 'matplotlib.pyplot.savefig', 'plt.savefig', (['to_file'], {}), '(to_file)\n', (3309, 3318), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3481), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (3452, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3627), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Val']"], {'loc': '"""center right"""'}), "(['Train', 'Val'], loc='center right')\n", (3589, 3627), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3687), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train']"], {'loc': '"""center right"""'}), "(['Train'], loc='center right')\n", (3656, 3687), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3733), 'matplotlib.pyplot.savefig', 'plt.savefig', (['to_file'], {}), '(to_file)\n', (3724, 3733), True, 'import matplotlib.pyplot as plt\n'), ((2589, 2605), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (2597, 2605), True, 'import numpy as np\n')] |
#coding=utf-8
import os
import cv2
import math
import lmdb
import random
import numpy as np
import mxnet as mx
from mxnet.gluon.data import Dataset
from . import normalize_fn
__all__ = ['FixSizeDataset', 'BucketDataset']
class FixSizeDataset(Dataset):
    """OCR text-line dataset that pads every image to a fixed
    (short_side x fix_width) canvas and pads labels to max_len.

    Each item is (image, image_mask, [decoder_input,] label, label_mask, idx).
    When both start_sym and end_sym are given, an attention-style decoder
    input (label shifted right, prefixed with start_sym) is also returned.
    """
    def __init__(self, line_path, voc_path, augment_fn=None, short_side=32, min_divisor=8,
                 fix_width=256, max_len=60, start_sym=None, end_sym=None):
        self.short_side = short_side
        self.fix_width = fix_width
        self.max_len = max_len
        self.min_divisor = min_divisor
        # Start/end symbols are only used when both are provided.
        self.add_symbol = start_sym is not None and end_sym is not None
        self.start_sym = start_sym
        self.end_sym = end_sym
        # Padding id: end symbol if available, otherwise -1.
        self.pad_sym = -1 if end_sym is None else end_sym
        self.augment_fn = augment_fn
        self.word2id = self._load_voc_dict(voc_path)
        self.word_list = list(self.word2id.keys())
        self.imgs_list, self.labs_list = self._get_items(line_path)

    def _load_voc_dict(self, dict_path):
        """Read the vocabulary file (one entry per line; only the first
        character of each line is used) into a {char: id} dict, reserving
        ids for <s>/</s> when attention symbols are enabled."""
        word2id_dict = {}
        if self.add_symbol:
            word2id_dict = {'<s>': self.start_sym, '</s>': self.end_sym}
        with open(dict_path, 'r', encoding='utf-8') as fi:
            line_list = fi.readlines()
        idx = len(word2id_dict)
        # NOTE(review): a duplicated character overwrites its previous id but
        # idx still advances, leaving a gap -- assumes the vocab has no
        # duplicates; verify the vocab file.
        for line in line_list:
            word = line.strip('\n')[0]
            word2id_dict[word] = idx
            idx = idx + 1
        return word2id_dict

    @property
    def voc_size(self):
        # Number of vocabulary entries (including <s>/</s> when enabled).
        return len(self.word_list)

    def _get_items(self, line_path):
        """Parse annotation file(s) of 'img_path<TAB>label' lines, skipping
        missing images, empty/'###' labels, and over-length labels."""
        imgs_list = []
        labs_list = []
        if not isinstance(line_path, list):
            line_path = [line_path]
        for path in line_path:
            with open(path, 'r', encoding='utf-8') as fi:
                for i, line in enumerate(fi):
                    lst = line.strip().split('\t')
                    if len(lst) < 2:
                        continue
                    img_path = lst[0]
                    label = lst[1]
                    if not os.path.exists(img_path):
                        continue
                    if label == '':
                        continue
                    # '###' marks unreadable text; when attention symbols are
                    # on, one label slot is reserved for </s>.
                    if label == '###' or len(label) > self.max_len - self.add_symbol:
                        continue
                    imgs_list.append(img_path)
                    labs_list.append(label)
        return imgs_list, labs_list

    def __len__(self):
        return len(self.imgs_list)

    def text2ids(self, text, text_len):
        """Encode `text` into a fixed-length id vector plus a 0/1 mask.
        Characters missing from the vocabulary are skipped (mask stays 0)."""
        ids = mx.nd.ones(shape=(text_len), dtype='float32')*self.pad_sym
        ids_mask = mx.nd.zeros(shape=(text_len), dtype='float32')
        char_list = list(text)
        if self.add_symbol:
            char_list.append('</s>')
        for i, ch in enumerate(char_list):
            if ch in self.word_list:
                ids[i] = self.word2id[ch]
            else:
                continue
            ids_mask[i] = 1.0
        return ids, ids_mask

    def ctc_ids2text(self, ids, blank):
        """Greedy CTC decode: collapse consecutive repeats, drop blanks,
        then map each remaining id to its character."""
        if isinstance(ids, np.ndarray):
            ids = ids.tolist()
        n = len(ids)
        words = []
        for i in range(n):
            if ids[i] != blank and (not (i > 0 and ids[i-1] == ids[i])):
                # BUGFIX: look up the predicted id, not the position index
                # (was `self.word_list[i]`).
                words.append(self.word_list[int(ids[i])])
        text = ''.join(words)
        return text

    def att_ids2text(self, ids):
        """Decode attention-model output ids to characters, stopping at </s>."""
        if isinstance(ids, np.ndarray):
            ids = ids.tolist()
        text_list = []
        for i in ids:
            int_i = int(i)
            if int_i == self.end_sym:
                break
            text_list.append(self.word_list[int_i])
        return text_list

    def image_resize(self, img_np, max_width=512):
        """Rotate portrait images to landscape, then resize so height equals
        short_side and width is a multiple of min_divisor, capped at max_width."""
        h, w = img_np.shape[:2]
        if h > w:
            img_np = np.rot90(img_np)
            h, w = w, h
        w = int(math.ceil(w*self.short_side/h/self.min_divisor))*self.min_divisor
        if w > max_width:
            w = max_width
        img_np = cv2.resize(img_np, (w, self.short_side))
        return img_np

    def __getitem__(self, idx):
        img_path = self.imgs_list[idx]
        text = self.labs_list[idx]
        img_np = cv2.imread(img_path)
        if img_np is None:
            # Unreadable image: fall back to the previous sample.
            return self.__getitem__(idx-1)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
        img_np = self.image_resize(img_np, max_width=self.fix_width)
        # Replicate the gray channel into a 3-channel image.
        img_np = np.stack([img_np, img_np, img_np], axis=2)
        if self.augment_fn is not None:
            img_np = self.augment_fn(img_np)
        h, w = img_np.shape[:2]
        img_nd = mx.nd.array(img_np)
        img_nd = normalize_fn(img_nd)
        # Zero-pad onto the fixed canvas; the mask marks valid pixels.
        img_data = mx.nd.zeros((3, self.short_side, self.fix_width), dtype='float32')
        img_data[:, :h, :w] = img_nd
        img_mask = mx.nd.zeros((1, self.short_side, self.fix_width), dtype='float32')
        img_mask[:, :h, :w] = 1.0
        lab, lab_mask = self.text2ids(text, self.max_len)
        if not self.add_symbol:
            return img_data, img_mask, lab, lab_mask, idx
        # Decoder input: label shifted right, start symbol prepended.
        targ_data = mx.nd.ones(shape=(self.max_len), dtype='float32')*self.end_sym
        targ_data[0] = self.start_sym
        targ_data[1:] = lab[:-1]
        return img_data, img_mask, targ_data, lab, lab_mask, idx
class BucketDataset(FixSizeDataset):
    # Variant of FixSizeDataset that groups samples into buckets keyed by
    # (height, padded width, padded label length) so that batches can be
    # formed from same-shape samples instead of padding all to fix_width.
    def __init__(self, line_path, voc_path, augment_fn=None, short_side=32,
                 fix_width=None, max_len=60, start_sym=None, end_sym=None,
                 split_width_len=128, split_text_len=10, min_divisor=8):
        # fix_width is forced to None: widths come from the bucket keys.
        super(BucketDataset, self).__init__(line_path, voc_path, augment_fn=augment_fn, min_divisor=min_divisor,
                                            short_side=short_side, fix_width=None, max_len=max_len,
                                            start_sym=start_sym, end_sym=end_sym)
        self.split_width_len = split_width_len  # width bucket granularity (px)
        self.split_text_len = split_text_len    # label-length bucket granularity
        self.max_width = split_width_len*8      # hard cap on padded width
        self.gen_bucket()

    def _get_bucket_key(self, img_shape, text_len):
        """Return (short_side, padded_width, padded_label_len) for a sample.

        Widths are rounded up to a multiple of split_width_len and capped at
        max_width; label lengths are rounded up to a multiple of
        split_text_len (+1 reserves a slot, presumably for </s>).
        """
        h, w = img_shape[:2]
        text_ratio = math.ceil((text_len+1)/self.split_text_len)
        text_len = self.split_text_len*text_ratio
        # Portrait images are rotated by image_resize, so swap dims here too.
        if h > w:
            w, h = img_shape[:2]
        if w/h > self.max_width/self.short_side:
            # Aspect ratio too wide: clamp to the maximum bucket width.
            return (self.short_side, self.max_width, text_len)
        ratio = math.ceil(self.short_side * w / h / self.split_width_len)
        return (self.short_side, self.split_width_len * ratio, text_len)

    def gen_bucket(self):
        """Scan all images once and assign each sample index to its bucket.

        Populates self.bucket_dict ({str(key): [indices]}) and
        self.bucket_keys (insertion-ordered list of keys).
        """
        bucket_keys, bucket_dict = [], {}
        for idx in range(len(self.imgs_list)):
            # Images are read here only to obtain their shapes.
            img_np = cv2.imread(self.imgs_list[idx])
            text = self.labs_list[idx]
            if img_np is None:
                continue
            if len(text) > self.max_len:
                continue
            bucket_key = self._get_bucket_key(img_np.shape, len(text))
            bucket_key = str(bucket_key)
            if bucket_key not in bucket_keys:
                bucket_keys.append(bucket_key)
                bucket_dict[bucket_key] = []
            bucket_dict[bucket_key].append(idx)
        for key in bucket_keys:
            print('bucket key:', key, 'the number of image:', len(bucket_dict[key]))
        self.bucket_dict = bucket_dict
        self.bucket_keys = bucket_keys

    def __getitem__(self, idx):
        # Same pipeline as FixSizeDataset.__getitem__, but the canvas size
        # and label length come from this sample's bucket key.
        img_path = self.imgs_list[idx]
        text = self.labs_list[idx]
        img_np = cv2.imread(img_path)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
        img_np = np.stack([img_np, img_np, img_np], axis=2)
        inp_h, inp_w, lab_len = self._get_bucket_key(img_np.shape, len(text))
        img_data = mx.nd.zeros(shape=(3, inp_h, inp_w), dtype='float32')
        img_mask = mx.nd.zeros(shape=(1, inp_h, inp_w), dtype='float32')
        img_np = self.image_resize(img_np, max_width=self.max_width)
        # NOTE(review): h, w are captured BEFORE augment_fn runs (unlike
        # FixSizeDataset) -- assumes augment_fn preserves shape; confirm.
        h, w = img_np.shape[:2]
        if self.augment_fn is not None:
            img_np = self.augment_fn(img_np)
        img_nd = mx.nd.array(img_np)
        img_nd = normalize_fn(img_nd)
        img_data[:, :h, :w] = img_nd
        img_mask[:, :h, :w] = 1.0
        lab, lab_mask = self.text2ids(text, lab_len)
        if not self.add_symbol:
            return img_data, img_mask, lab, lab_mask, idx
        # Decoder input: label shifted right, start symbol prepended.
        targ_data = mx.nd.ones(shape=(lab_len), dtype='float32')*self.end_sym
        targ_data[0] = self.start_sym
        targ_data[1:] = lab[:-1]
        return img_data, img_mask, targ_data, lab, lab_mask, idx
class LMDBFixSizeDataset(FixSizeDataset):
    """FixSizeDataset variant that reads images and labels from an LMDB
    database instead of per-file annotations."""
    def __init__(self, line_path, voc_path, augment_fn=None, short_side=32, min_divisor=8,
                 fix_width=256, max_len=60, start_sym=None, end_sym=None):
        # BUGFIX: the constructor arguments were previously discarded -- the
        # parent was always initialized with hard-coded defaults
        # (augment_fn=None, short_side=32, ...). Forward them instead.
        super(LMDBFixSizeDataset, self).__init__(line_path, voc_path, augment_fn=augment_fn,
                                                 short_side=short_side, min_divisor=min_divisor,
                                                 fix_width=fix_width, max_len=max_len,
                                                 start_sym=start_sym, end_sym=end_sym)
        # _get_items returns (lmdb_set, txn); the parent stored them as
        # imgs_list/labs_list, so recover the LMDB handle from imgs_list.
        self.lmdb_set = self.imgs_list

    def __len__(self):
        return self.lmdb_set['num_samples']

    def _get_items(self, line_path):
        """Open the LMDB environment and return its handle (and txn) in
        place of the usual (imgs_list, labs_list) pair."""
        env = lmdb.open(line_path, readonly=True, lock=False, meminit=False)
        txn = env.begin(write=False)
        num_samples = int(txn.get('num-samples'.encode()))
        lmdb_set = {"env": env, "txn": txn, "num_samples": num_samples}
        return lmdb_set, txn

    @staticmethod
    def get_img_data(value):
        """Decode a raw LMDB image buffer into a color image, or None."""
        if not value:
            return None
        imgdata = np.frombuffer(value, dtype='uint8')
        if imgdata is None:
            return None
        imgori = cv2.imdecode(imgdata, 1)
        if imgori is None:
            return None
        return imgori

    def __getitem__(self, idx):
        label_key = 'label-%09d'.encode() % idx
        image_key = 'image-%09d'.encode() % idx
        text = self.lmdb_set['txn'].get(label_key).decode('utf-8')
        img_buf = self.lmdb_set['txn'].get(image_key)
        img_np = self.get_img_data(img_buf)
        if img_np is None:
            # Undecodable image: fall back to the previous sample.
            return self.__getitem__(idx-1)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        img_np = self.image_resize(img_np, max_width=self.fix_width)
        if self.augment_fn is not None:
            img_np = self.augment_fn(img_np)
        h, w = img_np.shape[:2]
        img_nd = mx.nd.array(img_np)
        # NOTE(review): unlike FixSizeDataset, this pads in HWC layout and
        # normalizes after padding -- confirm downstream expects this layout.
        img_data = mx.nd.zeros((self.short_side, self.fix_width, 3), dtype='float32')
        img_data[:h, :w, :] = img_nd
        img_data = normalize_fn(img_data)
        img_mask = mx.nd.zeros((1, self.short_side, self.fix_width), dtype='float32')
        img_mask[:, :h, :w] = 1.0
        lab, lab_mask = self.text2ids(text, self.max_len)
        if not self.add_symbol:
            return img_data, img_mask, lab, lab_mask, idx
        # Decoder input: label shifted right, start symbol prepended.
        targ_data = mx.nd.ones(shape=(self.max_len), dtype='float32')*self.end_sym
        targ_data[0] = self.start_sym
        targ_data[1:] = lab[:-1]
        return img_data, img_mask, targ_data, lab, lab_mask, idx
class Sampler(object):
    """Thin sequence-like wrapper around a fixed list of dataset indices."""

    def __init__(self, idx_list):
        self.idx_list = idx_list

    def __iter__(self):
        # Yield indices in stored order.
        yield from self.idx_list

    def __len__(self):
        return len(self.idx_list)

    def __getitem__(self, item):
        return self.idx_list[item]
class BucketSampler(object):
    '''
    Batch sampler that draws each batch from a single bucket, so every
    sample in a batch shares the same (padded) shape.

    last_batch : {'keep', 'discard'}
        Specifies how the last batch is handled if batch_size does not evenly
        divide sequence length.
        If 'keep', the last batch will be returned directly, but will contain
        less element than `batch_size` requires.
        If 'discard', the last batch will be discarded.
    '''
    def __init__(self, batch_size, bucket_dict, shuffle=True, last_batch='discard'):
        bucket_keys = list(bucket_dict.keys())
        self._batch_size = batch_size
        self._last_batch = last_batch
        self.shuffle = shuffle
        # One Sampler per bucket, preserving bucket_dict's key order.
        self.sampler_list = []
        for key in bucket_keys:
            self.sampler_list.append(Sampler(bucket_dict[key]))

    def __iter__(self):
        """Yield batches (lists of indices), each drawn from one bucket."""
        if self.shuffle:
            # Shuffle within each bucket and the bucket order itself.
            for sampler in self.sampler_list:
                random.shuffle(sampler.idx_list)
            random.shuffle(self.sampler_list)
        num_sampler = len(self.sampler_list)
        sampler_idx_list = list(range(num_sampler))
        start_idx_list = [0] * num_sampler
        while True:
            if sampler_idx_list == []:
                break
            # Pick a random bucket that still has unconsumed indices.
            samp_idx = random.sample(sampler_idx_list, 1)[0]
            _sampler = self.sampler_list[samp_idx]
            start_idx = start_idx_list[samp_idx]
            batch = []
            while True:
                if len(batch) == self._batch_size:
                    start_idx_list[samp_idx] = start_idx
                    break
                if start_idx < len(_sampler):
                    batch.append(_sampler[start_idx])
                    start_idx = start_idx + 1
                else:
                    # Bucket exhausted: retire it and honor `last_batch`.
                    sampler_idx_list.remove(samp_idx)
                    if self._last_batch == 'discard':
                        batch = []
                    break
            if batch:
                yield batch

    def __len__(self):
        # Total number of batches per epoch across all buckets.
        num = 0
        for _sampler in self.sampler_list:
            if self._last_batch == 'keep':
                # BUGFIX: was math.ceil(len(_sampler/self._batch_size)),
                # which divides the Sampler object itself and raises
                # TypeError; divide the length instead.
                num += math.ceil(len(_sampler) / self._batch_size)
            elif self._last_batch == 'discard':
                num += len(_sampler) // self._batch_size
            else:
                raise ValueError(
                    "last_batch must be one of 'keep', 'discard', or 'rollover', " \
                    "but got %s" % self._last_batch)
return num | [
"os.path.exists",
"random.sample",
"math.ceil",
"random.shuffle",
"mxnet.nd.zeros",
"mxnet.nd.ones",
"numpy.stack",
"lmdb.open",
"cv2.imdecode",
"cv2.cvtColor",
"numpy.rot90",
"mxnet.nd.array",
"numpy.frombuffer",
"cv2.resize",
"cv2.imread"
] | [((2632, 2676), 'mxnet.nd.zeros', 'mx.nd.zeros', ([], {'shape': 'text_len', 'dtype': '"""float32"""'}), "(shape=text_len, dtype='float32')\n", (2643, 2676), True, 'import mxnet as mx\n'), ((3974, 4014), 'cv2.resize', 'cv2.resize', (['img_np', '(w, self.short_side)'], {}), '(img_np, (w, self.short_side))\n', (3984, 4014), False, 'import cv2\n'), ((4167, 4187), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (4177, 4187), False, 'import cv2\n'), ((4286, 4326), 'cv2.cvtColor', 'cv2.cvtColor', (['img_np', 'cv2.COLOR_BGR2GRAY'], {}), '(img_np, cv2.COLOR_BGR2GRAY)\n', (4298, 4326), False, 'import cv2\n'), ((4417, 4459), 'numpy.stack', 'np.stack', (['[img_np, img_np, img_np]'], {'axis': '(2)'}), '([img_np, img_np, img_np], axis=2)\n', (4425, 4459), True, 'import numpy as np\n'), ((4596, 4615), 'mxnet.nd.array', 'mx.nd.array', (['img_np'], {}), '(img_np)\n', (4607, 4615), True, 'import mxnet as mx\n'), ((4675, 4741), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(3, self.short_side, self.fix_width)'], {'dtype': '"""float32"""'}), "((3, self.short_side, self.fix_width), dtype='float32')\n", (4686, 4741), True, 'import mxnet as mx\n'), ((4798, 4864), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(1, self.short_side, self.fix_width)'], {'dtype': '"""float32"""'}), "((1, self.short_side, self.fix_width), dtype='float32')\n", (4809, 4864), True, 'import mxnet as mx\n'), ((6108, 6155), 'math.ceil', 'math.ceil', (['((text_len + 1) / self.split_text_len)'], {}), '((text_len + 1) / self.split_text_len)\n', (6117, 6155), False, 'import math\n'), ((6381, 6438), 'math.ceil', 'math.ceil', (['(self.short_side * w / h / self.split_width_len)'], {}), '(self.short_side * w / h / self.split_width_len)\n', (6390, 6438), False, 'import math\n'), ((7464, 7484), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7474, 7484), False, 'import cv2\n'), ((7502, 7542), 'cv2.cvtColor', 'cv2.cvtColor', (['img_np', 'cv2.COLOR_BGR2GRAY'], {}), '(img_np, cv2.COLOR_BGR2GRAY)\n', (7514, 7542), 
False, 'import cv2\n'), ((7560, 7602), 'numpy.stack', 'np.stack', (['[img_np, img_np, img_np]'], {'axis': '(2)'}), '([img_np, img_np, img_np], axis=2)\n', (7568, 7602), True, 'import numpy as np\n'), ((7700, 7753), 'mxnet.nd.zeros', 'mx.nd.zeros', ([], {'shape': '(3, inp_h, inp_w)', 'dtype': '"""float32"""'}), "(shape=(3, inp_h, inp_w), dtype='float32')\n", (7711, 7753), True, 'import mxnet as mx\n'), ((7773, 7826), 'mxnet.nd.zeros', 'mx.nd.zeros', ([], {'shape': '(1, inp_h, inp_w)', 'dtype': '"""float32"""'}), "(shape=(1, inp_h, inp_w), dtype='float32')\n", (7784, 7826), True, 'import mxnet as mx\n'), ((8039, 8058), 'mxnet.nd.array', 'mx.nd.array', (['img_np'], {}), '(img_np)\n', (8050, 8058), True, 'import mxnet as mx\n'), ((9217, 9279), 'lmdb.open', 'lmdb.open', (['line_path'], {'readonly': '(True)', 'lock': '(False)', 'meminit': '(False)'}), '(line_path, readonly=True, lock=False, meminit=False)\n', (9226, 9279), False, 'import lmdb\n'), ((9613, 9648), 'numpy.frombuffer', 'np.frombuffer', (['value'], {'dtype': '"""uint8"""'}), "(value, dtype='uint8')\n", (9626, 9648), True, 'import numpy as np\n'), ((9718, 9742), 'cv2.imdecode', 'cv2.imdecode', (['imgdata', '(1)'], {}), '(imgdata, 1)\n', (9730, 9742), False, 'import cv2\n'), ((10200, 10239), 'cv2.cvtColor', 'cv2.cvtColor', (['img_np', 'cv2.COLOR_BGR2RGB'], {}), '(img_np, cv2.COLOR_BGR2RGB)\n', (10212, 10239), False, 'import cv2\n'), ((10447, 10466), 'mxnet.nd.array', 'mx.nd.array', (['img_np'], {}), '(img_np)\n', (10458, 10466), True, 'import mxnet as mx\n'), ((10486, 10552), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(self.short_side, self.fix_width, 3)'], {'dtype': '"""float32"""'}), "((self.short_side, self.fix_width, 3), dtype='float32')\n", (10497, 10552), True, 'import mxnet as mx\n'), ((10651, 10717), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(1, self.short_side, self.fix_width)'], {'dtype': '"""float32"""'}), "((1, self.short_side, self.fix_width), dtype='float32')\n", (10662, 10717), True, 'import mxnet as mx\n'), 
((2553, 2596), 'mxnet.nd.ones', 'mx.nd.ones', ([], {'shape': 'text_len', 'dtype': '"""float32"""'}), "(shape=text_len, dtype='float32')\n", (2563, 2596), True, 'import mxnet as mx\n'), ((3783, 3799), 'numpy.rot90', 'np.rot90', (['img_np'], {}), '(img_np)\n', (3791, 3799), True, 'import numpy as np\n'), ((5068, 5115), 'mxnet.nd.ones', 'mx.nd.ones', ([], {'shape': 'self.max_len', 'dtype': '"""float32"""'}), "(shape=self.max_len, dtype='float32')\n", (5078, 5115), True, 'import mxnet as mx\n'), ((6653, 6684), 'cv2.imread', 'cv2.imread', (['self.imgs_list[idx]'], {}), '(self.imgs_list[idx])\n', (6663, 6684), False, 'import cv2\n'), ((8332, 8374), 'mxnet.nd.ones', 'mx.nd.ones', ([], {'shape': 'lab_len', 'dtype': '"""float32"""'}), "(shape=lab_len, dtype='float32')\n", (8342, 8374), True, 'import mxnet as mx\n'), ((10921, 10968), 'mxnet.nd.ones', 'mx.nd.ones', ([], {'shape': 'self.max_len', 'dtype': '"""float32"""'}), "(shape=self.max_len, dtype='float32')\n", (10931, 10968), True, 'import mxnet as mx\n'), ((12305, 12338), 'random.shuffle', 'random.shuffle', (['self.sampler_list'], {}), '(self.sampler_list)\n', (12319, 12338), False, 'import random\n'), ((3840, 3893), 'math.ceil', 'math.ceil', (['(w * self.short_side / h / self.min_divisor)'], {}), '(w * self.short_side / h / self.min_divisor)\n', (3849, 3893), False, 'import math\n'), ((12260, 12292), 'random.shuffle', 'random.shuffle', (['sampler.idx_list'], {}), '(sampler.idx_list)\n', (12274, 12292), False, 'import random\n'), ((12584, 12618), 'random.sample', 'random.sample', (['sampler_idx_list', '(1)'], {}), '(sampler_idx_list, 1)\n', (12597, 12618), False, 'import random\n'), ((2052, 2076), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (2066, 2076), False, 'import os\n')] |
# Instructions for developers:
#
# This is the main object of OceanSpy.
# All attributes are stored as global attributes (strings!) of the xr.Dataset.
# When users request an attribute, it is decoded from the global attributes.
# Thus, there are custom attribute setters (class setters are inhibited).
#
# There are private and public objects.
# Private objects use OceanSpy's reference aliases (_ds, _grid),
# while public objects are mirrors of the private objects using custom aliases.
#
# All functions in other modules that operate on od,
# must be added here in the shortcuts section.
#
# Add new attributes/methods in docs/api.rst
##############################################################################
# TODO: create list of OceanSpy name and add link under aliases.
# TODO: create a dictionary with parameters description and add under aliases.
# TODO: add more xgcm options. E.g., default boundary method.
# TODO: implement xgcm autogenerate in _set_coords,
# set_grid_coords, set_coords when released
# TODO: Use the coords parameter to create xgcm grid instead of
# _crate_grid.
# We will pass dictionary in xgcm.Grid,
# and we can have the option of using comodo attributes
# (currently cleaned up so switched off)
##############################################################################
# Required dependencies (private)
import xarray as _xr
import copy as _copy
import numpy as _np
import warnings as _warnings
import sys as _sys
from collections import OrderedDict as _OrderedDict
# From OceanSpy (private)
from . import utils as _utils
from ._ospy_utils import (_check_instance, _check_oceanspy_axes,
_setter_error_message, _check_list_of_string,
_create_grid, _rename_coord_attrs)
from . subsample import _subsampleMethods
from . compute import _computeMethods
from . plot import _plotMethods
from . animate import _animateMethods
# Recommended dependencies (private)
try:
import cartopy.crs as _ccrs
except ImportError: # pragma: no cover
pass
try:
from scipy import spatial as _spatial
except ImportError: # pragma: no cover
pass
try:
from dask.diagnostics import ProgressBar as _ProgressBar
except ImportError: # pragma: no cover
pass
class OceanDataset:
"""
OceanDataset combines a :py:obj:`xarray.Dataset`
with other objects used by OceanSpy (e.g., xgcm.Grid).
Additional objects are attached to the
:py:obj:`xarray.Dataset` as global attributes.
"""
def __init__(self, dataset):
"""
Parameters
----------
dataset: xarray.Dataset
The multi-dimensional, in memory, array database.
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html
"""
# Check parameters
_check_instance({'dataset': dataset}, 'xarray.Dataset')
# Initialize dataset
self._ds = dataset.copy()
# Apply aliases
self = self._apply_aliases()
def __copy__(self):
"""
Shallow copy
"""
return OceanDataset(dataset=self.dataset.copy())
def __repr__(self):
main_info = ['<oceanspy.OceanDataset>']
main_info.append('\nMain attributes:')
main_info.append(" .dataset: %s" %
self.dataset.__repr__()
[self.dataset.__repr__().find('<'):
self.dataset.__repr__().find('>')+1])
if self.grid is not None:
main_info.append(" .grid: %s" %
self.grid.__repr__()
[self.grid.__repr__().find('<'):
self.grid.__repr__().find('>')+1])
if self.projection is not None:
main_info.append(" .projection: %s" %
self.projection.__repr__()
[self.projection.__repr__().find('<'):
self.projection.__repr__().find('>')+1])
more_info = ['\n\nMore attributes:']
if self.name:
more_info.append(" .name: %s" %
self.name)
if self.description:
more_info.append(" .description: %s" %
self.description)
more_info.append(" .parameters: %s" %
type(self.parameters))
if self.aliases:
more_info.append(" .aliases: %s" %
type(self.aliases))
if self.grid_coords:
more_info.append(" .grid_coords: %s" %
type(self.grid_coords))
if self.grid_periodic:
more_info.append(" .grid_periodic: %s" %
type(self.grid_periodic))
info = '\n'.join(main_info)
info = info + '\n'.join(more_info)
return info
# ===========
# ATTRIBUTES
# ===========
# -------------------
# name
# -------------------
@property
def name(self):
"""
Name of the OceanDataset.
"""
name = self._read_from_global_attr('name')
return name
@name.setter
def name(self, name):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('name'))
def set_name(self, name, overwrite=None):
"""
Set name of the OceanDataset.
Parameters
----------
name: str
Name of the OceanDataset.
overwrite: bool or None
If None, raises error if name has been previously set.
If True, overwrite previous name.
If False, combine with previous name.
"""
# Check parameters
_check_instance({'name': name}, 'str')
# Set name
self = self._store_as_global_attr(name='name',
attr=name,
overwrite=overwrite)
return self
# -------------------
# description
# -------------------
@property
def description(self):
"""
Description of the OceanDataset.
"""
description = self._read_from_global_attr('description')
return description
@description.setter
def description(self, description):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('description'))
def set_description(self, description, overwrite=None):
"""
Set description of the OceanDataset.
Parameters
----------
description: str
Desription of the OceanDataset
overwrite: bool or None
If None, raises error if description has been previously set.
If True, overwrite previous description.
If False, combine with previous description.
"""
# Check parameters
_check_instance({'description': description}, 'str')
# Set description
self = self._store_as_global_attr(name='description',
attr=description,
overwrite=overwrite)
return self
# -------------------
# aliases
# -------------------
@property
def aliases(self):
"""
A dictionary to connect custom variable names
to OceanSpy reference names.
Keys are OceanSpy reference names, values are custom names:
{'ospy_name': 'custom_name'}
"""
aliases = self._read_from_global_attr('aliases')
return aliases
@property
def _aliases_flipped(self):
"""
Flip aliases:
From {'ospy_name': 'custom_name'}
to {'custom_name': 'ospy_name'}
"""
if self.aliases:
aliases_flipped = {custom: ospy
for ospy, custom in self.aliases.items()}
else:
return self.aliases
return aliases_flipped
@aliases.setter
def aliases(self, aliases):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('aliases'))
def set_aliases(self, aliases, overwrite=None):
"""
Set aliases to connect custom variables names
to OceanSpy reference names.
Parameters
----------
aliases: dict
Keys are OceanSpy names, values are custom names:
{'ospy_name': 'custom_name'}
overwrite: bool or None
If None, raises error if aliases has been previously set.
If True, overwrite previous aliases.
If False, combine with previous aliases.
"""
# Check parameters
_check_instance({'aliases': aliases}, 'dict')
# Set aliases
self = self._store_as_global_attr(name='aliases',
attr=aliases,
overwrite=overwrite)
# Apply aliases
self = self._apply_aliases()
return self
def _apply_aliases(self):
"""
Check if there are variables with custom name in _ds,
and rename to OceanSpy reference name
"""
if self._aliases_flipped:
aliases = {custom: ospy
for custom, ospy in self._aliases_flipped.items()
if custom in self._ds.variables
or custom in self._ds.dims}
self._ds = self._ds.rename(aliases)
return self
# -------------------
# dataset
# -------------------
@property
def dataset(self):
"""
xarray.Dataset: A multi-dimensional, in memory, array database.
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html
"""
# Show _ds with renamed variables.
dataset = self._ds.copy()
if self.aliases:
aliases = {ospy: custom for ospy, custom in self.aliases.items()
if ospy in self._ds
or ospy in self._ds.dims}
dataset = dataset.rename(aliases)
return dataset
@dataset.setter
def dataset(self, dataset):
"""
Inhibit setter.
"""
raise AttributeError("Set a new dataset using "
"`oceanspy.OceanDataset(dataset)`")
# -------------------
# parameters
# -------------------
@property
def parameters(self):
"""
A dictionary defining model parameters that are used by OceanSpy.
Default values are used for parameters that have not been set
(see :py:const:`oceanspy.DEFAULT_PARAMETERS`).
"""
from oceanspy import DEFAULT_PARAMETERS
parameters = self._read_from_global_attr('parameters')
if parameters is None:
parameters = DEFAULT_PARAMETERS
else:
parameters = {**DEFAULT_PARAMETERS, **parameters}
return parameters
@parameters.setter
def parameters(self, parameters):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('parameters'))
def set_parameters(self, parameters):
"""
Set model parameters used by OceanSpy.
See :py:const:`oceanspy.DEFAULT_PARAMETERS` for a list of parameters,
and :py:const:`oceanspy.PARAMETERS_PARAMETERS_DESCRIPTION`
for their description.
See :py:const:`oceanspy.AVAILABLE_PARAMETERS` for a list of parameters
with predefined options.
Parameters
----------
parameters: dict
{'name': value}
"""
from oceanspy import (DEFAULT_PARAMETERS,
AVAILABLE_PARAMETERS,
TYPE_PARAMETERS)
# Check parameters
_check_instance({'parameters': parameters}, 'dict')
# Check parameters
warn_params = []
for key, value in parameters.items():
if key not in DEFAULT_PARAMETERS.keys():
warn_params = warn_params + [key]
else:
if not isinstance(value, TYPE_PARAMETERS[key]):
raise TypeError("Invalid [{}]. "
"Check oceanspy.TYPE_PARAMETERS"
"".format(key))
check1 = (key in AVAILABLE_PARAMETERS.keys())
if check1 and (value not in AVAILABLE_PARAMETERS[key]):
raise ValueError("Requested [{}] not available. "
"Check oceanspy.AVAILABLE_PARAMETERS"
"".format(key))
if len(warn_params) != 0:
_warnings.warn("{} are not OceanSpy parameters"
"".format(warn_params), stacklevel=2)
# Set parameters
self = self._store_as_global_attr(name='parameters',
attr=parameters,
overwrite=True)
return self
# -------------------
# grid_coords
# -------------------
@property
def grid_coords(self):
"""
Grid coordinates used by :py:obj:`xgcm.Grid`.
References
----------
https://xgcm.readthedocs.io/en/stable/grids.html#Grid-Metadata
"""
grid_coords = self._read_from_global_attr('grid_coords')
return grid_coords
@grid_coords.setter
def grid_coords(self, grid_coords):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('grid_coords'))
    def set_grid_coords(self, grid_coords, add_midp=False, overwrite=None):
        """
        Set grid coordinates used by :py:obj:`xgcm.Grid`.
        Parameters
        ----------
        grid_coords: dict
            Grid coordinates used by :py:obj:`xgcm.Grid`.
            Keys are axes, and values are dict with
            key=dim and value=c_grid_axis_shift.
            Available c_grid_axis_shift are {0.5, None, -0.5}.
            E.g., {'Y': {'Y': None, 'Yp1': 0.5}}
            See :py:const:`oceanspy.OCEANSPY_AXES` for a list of axes
        add_midp: bool
            If true, add inner dimension (mid points)
            to axes with outer dimension only.
            The new dimension will be named
            as the outer dimension + '_midp'
        overwrite: bool or None
            If None, raises error if grid_coords has been previously set.
            If True, overwrite previous grid_coors.
            If False, combine with previous grid_coors.
        References
        ----------
        https://xgcm.readthedocs.io/en/stable/grids.html#Grid-Metadata
        """
        # Check parameters
        _check_instance({'grid_coords': grid_coords,
                         'add_midp': add_midp},
                        {'grid_coords': 'dict',
                         'add_midp': 'bool'})
        # Check axes
        _check_oceanspy_axes(list(grid_coords.keys()))
        # Set grid_coords
        self = self._store_as_global_attr(name='grid_coords',
                                          attr=grid_coords,
                                          overwrite=overwrite)
        if add_midp:
            # Collect new {axis: {..., dim_midp: None}} entries, then
            # merge them into the stored grid_coords at the end.
            grid_coords = {}
            for axis in self.grid_coords:
                # Only axes with a single, shifted (outer) dimension
                # need a mid-point dimension added.
                check1 = (len(self.grid_coords[axis]) == 1)
                check2 = (list(self.grid_coords[axis].values())[0] is not None)
                if check1 and check2:
                    # Deal with aliases
                    dim = list(self.grid_coords[axis].keys())[0]
                    if self._aliases_flipped and dim in self._aliases_flipped:
                        # The outer dim is aliased: register an alias for
                        # the new mid-point dim as well.
                        _dim = self._aliases_flipped[dim]
                        self = self.set_aliases({_dim+'_midp':
                                                 dim+'_midp'}, overwrite=False)
                    else:
                        _dim = dim
                    # Midpoints are averages of outpoints
                    midp = (self._ds[_dim].values[:-1] +
                            self._ds[_dim].diff(_dim)/2).rename({_dim:
                                                                 _dim+'_midp'})
                    self._ds[_dim+'_midp'] = _xr.DataArray(midp,
                                                           dims=(_dim+'_midp'))
                    # Carry over units/long_name/description metadata,
                    # marking derived fields as mid-points.
                    if 'units' in self._ds[_dim].attrs:
                        units = self._ds[_dim].attrs['units']
                        self._ds[_dim+'_midp'].attrs['units'] = units
                    if 'long_name' in self._ds[_dim].attrs:
                        long_name = self._ds[_dim].attrs['long_name']
                        long_name = 'Mid-points of {}'.format(long_name)
                        self._ds[_dim+'_midp'].attrs['long_name'] = long_name
                    if 'description' in self._ds[_dim].attrs:
                        desc = self._ds[_dim].attrs['description']
                        desc = 'Mid-points of {}'.format(desc)
                        self._ds[_dim+'_midp'].attrs['description'] = desc
                    grid_coords[axis] = {**self.grid_coords[axis],
                                         dim+'_midp': None}
            # Combine (overwrite=False) with the grid_coords stored above.
            self = self._store_as_global_attr(name='grid_coords',
                                              attr=grid_coords,
                                              overwrite=False)
        return self
# -------------------
# grid_periodic
# -------------------
@property
def grid_periodic(self):
"""
List of :py:obj:`xgcm.Grid` axes that are periodic.
"""
grid_periodic = self._read_from_global_attr('grid_periodic')
if not grid_periodic:
grid_periodic = []
return grid_periodic
@grid_periodic.setter
def grid_periodic(self, grid_periodic):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('grid_periodic'))
def set_grid_periodic(self, grid_periodic):
"""
Set grid axes that will be treated as periodic by :py:obj:`xgcm.Grid`.
Axes that are not set periodic are non-periodic by default.
Parameters
----------
grid_periodic: list
List of periodic axes.
See :py:const:`oceanspy.OCEANSPY_AXES` for a list of axes
"""
# Check parameters
_check_instance({'grid_periodic': grid_periodic}, 'list')
# Check axes
_check_oceanspy_axes(grid_periodic)
# Set grid_periodic
# Use overwrite True by default because
# xgcm default is all grid_priodic True.
self = self._store_as_global_attr(name='grid_periodic',
attr=grid_periodic,
overwrite=True)
return self
# -------------------
# grid
# -------------------
@property
def grid(self):
"""
:py:obj:`xgcm.Grid`: A collection of axes,
which is a group of coordinates that all lie
along the same physical dimension
but describe different positions relative to a grid cell.
References
----------
https://xgcm.readthedocs.io/en/stable/api.html#Grid
"""
dataset = self.dataset.copy()
coords = self.grid_coords
periodic = self.grid_periodic
grid = _create_grid(dataset, coords, periodic)
return grid
@property
def _grid(self):
"""
:py:obj:`xgcm.Grid` with OceanSpy reference names.
"""
aliases = self.aliases
coords = self.grid_coords
if aliases and coords:
# Flip aliases
aliases = {custom: ospy for ospy, custom in aliases.items()}
# Rename coords
for axis in coords:
for dim in coords[axis].copy():
if dim in aliases:
coords[axis][aliases[dim]] = coords[axis].pop(dim)
dataset = self._ds.copy()
periodic = self.grid_periodic
grid = _create_grid(dataset, coords, periodic)
return grid
@grid.setter
def grid(self, grid):
"""
Inhibit setter.
"""
raise AttributeError("Set a new grid using "
".set_grid_coords and .set_periodic")
@_grid.setter
def _grid(self, grid):
"""
Inhibit setter.
"""
raise AttributeError("Set a new _grid using "
".set_grid_coords and .set_periodic")
# -------------------
# projection
# -------------------
@property
def projection(self):
"""
Cartopy projection of the OceanDataset.
"""
projection = self._read_from_global_attr('projection')
if projection:
if projection == 'None':
projection = eval(projection)
else:
if 'cartopy' not in _sys.modules: # pragma: no cover
_warnings.warn("cartopy is not available,"
" so projection is None", stacklevel=2)
projection = None
else:
projection = eval('_ccrs.{}'.format(projection))
return projection
@projection.setter
def projection(self, projection):
"""
Inhibit setter.
"""
raise AttributeError(_setter_error_message('projection'))
def set_projection(self, projection, **kwargs):
"""
Set Cartopy projection of the OceanDataset.
Parameters
----------
projection: str or None
Cartopy projection of the OceanDataset.
Use None to remove projection.
**kwargs:
Keyword arguments for the projection.
E.g., central_longitude=0.0 for PlateCarree
References
----------
https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
"""
# Check parameters
if projection is not None:
# Check
_check_instance({'projection': projection}, 'str')
if not hasattr(_ccrs, projection):
raise ValueError("{} is not a cartopy projection"
"".format(projection))
projection = '{}(**{})'.format(projection, kwargs)
else:
projection = str(projection)
# Set projection
self = self._store_as_global_attr(name='projection', attr=projection,
overwrite=True)
return self
# ===========
# METHODS
# ===========
def create_tree(self, grid_pos='C'):
"""
Create a scipy.spatial.cKDTree for quick nearest-neighbor lookup.
Parameters
----------
grid_pos: str
Grid position. Options: {'C', 'G', 'U', 'V'}
Returns
-------
tree: scipy.spatial.cKDTree
Return a xKDTree object that can be used to query a point.
References
----------
| cKDTree:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html
| Grid:
https://mitgcm.readthedocs.io/en/latest/algorithm/horiz-grid.html
"""
# Check parameters
_check_instance({'grid_pos': grid_pos}, 'str')
grid_pos_list = ['C', 'G', 'U', 'V']
if grid_pos not in grid_pos_list:
raise ValueError("`grid_pos` must be one of {}:"
"\nhttps://mitgcm.readthedocs.io"
"/en/latest/algorithm/horiz-grid.html"
"".format(grid_pos_list))
# Convert if it's not cartesian
Y = self._ds['Y'+grid_pos]
X = self._ds['X'+grid_pos]
R = self.parameters['rSphere']
if R:
x, y, z = _utils.spherical2cartesian(Y=Y, X=X, R=R)
else:
x = X
y = Y
z = _xr.zeros_like(Y)
# Stack
x_stack = x.stack(points=x.dims).values
y_stack = y.stack(points=y.dims).values
z_stack = z.stack(points=z.dims).values
# Construct KD-tree
tree = _spatial.cKDTree(_np.column_stack((x_stack, y_stack, z_stack)))
return tree
    def merge_into_oceandataset(self, obj, overwrite=False):
        """
        Merge a Dataset or DataArray into the OceanDataset.
        Parameters
        ----------
        obj: xarray.DataArray or xarray.Dataset
            object to merge.
        overwrite: bool or None
            If True, overwrite existing DataArrays with same name.
            If False, use xarray.merge.
        """
        # Check and make dataset
        if not isinstance(obj, (_xr.DataArray, _xr.Dataset)):
            raise TypeError('`obj` must be xarray.DataArray or xarray.Dataset')
        # NOTE(review): despite the docstring, the check below requires a
        # strict bool — passing None raises. Confirm intended contract.
        _check_instance({'overwrite': overwrite}, 'bool')
        # Check name
        # Drop coordinates so only data variables are merged.
        obj = obj.drop(obj.coords)
        if isinstance(obj, _xr.DataArray):
            if obj.name is None:
                raise ValueError("xarray.DataArray doesn't have a name."
                                 "Set it using da.rename()")
        else:
            obj = obj.to_dataset()
        # Merge
        # Variables in obj that already exist in the (aliased) dataset.
        var2drop = [var for var in obj.variables if var in dataset]
        if overwrite is False:
            # Keep existing variables: drop clashing ones from obj and warn.
            obj = obj.drop(var2drop)
            if len(var2drop) != 0:
                _warnings.warn('{} will not be merged.'
                               '\nSet `overwrite=True` if you wish otherwise.'
                               ''.format(var2drop), stacklevel=2)
        else:
            if len(var2drop) != 0:
                _warnings.warn('{} will be overwritten.'
                               ''.format(var2drop), stacklevel=2)
        for var in obj.data_vars:
            # Store dimension attributes that get lost
            attrs = {}
            for dim in obj[var].dims:
                if dim not in dataset.dims:
                    pass
                # NOTE(review): zip truncates to the shorter attrs dict, so
                # two empty attrs also compare equal here — presumably the
                # intent is to restore dim attrs dropped by assignment.
                # TODO confirm against callers.
                elif all([i == j
                          for i, j in zip(obj[dim].attrs.items(),
                                          dataset[dim].attrs.items())]):
                    attrs[dim] = dataset[dim].attrs
            # Merge
            dataset[var] = obj[var]
            # Add attributes
            for dim, attr in attrs.items():
                dataset[dim].attrs = attr
        return OceanDataset(dataset)
def to_netcdf(self, path, **kwargs):
"""
Write contents to a netCDF file.
Parameters
----------
path: str
Path to which to save.
**kwargs:
Keyword arguments for :py:func:`xarray.Dataset.to_netcdf()`
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html
"""
# Check parameters
_check_instance({'path': path}, 'str')
# to_netcdf doesn't like coordinates attribute
dataset = _rename_coord_attrs(self.dataset)
# Compute
compute = kwargs.pop('compute', None)
print('Writing dataset to [{}].'.format(path))
if compute is None or compute is False:
delayed_obj = dataset.to_netcdf(path, compute=False, **kwargs)
with _ProgressBar():
delayed_obj.compute()
else:
dataset.to_netcdf(path, compute=compute, **kwargs)
def to_zarr(self, path, **kwargs):
"""
Write contents to a zarr group.
Parameters
----------
path: str
Path to which to save.
**kwargs:
Keyword arguments for :py:func:`xarray.Dataset.to_zarr()`
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_zarr.html
"""
# Check parameters
_check_instance({'path': path}, 'str')
# to_zarr doesn't like coordinates attribute
dataset = _rename_coord_attrs(self.dataset)
# Compute
compute = kwargs.pop('compute', None)
print('Writing dataset to [{}].'.format(path))
if compute is None or compute is False:
delayed_obj = dataset.to_zarr(path, compute=False, **kwargs)
with _ProgressBar():
delayed_obj.compute()
else:
dataset.to_zarr(path, compute=compute, **kwargs)
# ==================================
# IMPORT (used by open_oceandataset)
# ==================================
def shift_averages(self, averageList=None):
"""
Shift average variables to time_midp.
Average variables are defined as
variables with attribute [original_output='average'],
or variables in averageList.
Parameters
----------
averageList: 1D array_like, str, or None
List of variables (strings).
"""
if averageList is not None:
averageList = _check_list_of_string(averageList, 'averageList')
else:
averageList = []
for var in self._ds.data_vars:
original_output = self._ds[var].attrs.pop('original_output', None)
if original_output == 'average' or var in averageList:
ds_tmp = self._ds[var].drop('time').isel(time=slice(1, None))
self._ds[var] = ds_tmp.rename({'time': 'time_midp'})
if original_output is not None:
self._ds[var].attrs['original_output'] = original_output
return self
def manipulate_coords(self, fillna=False, coords1Dfrom2D=False,
coords2Dfrom1D=False, coordsUVfromG=False):
"""
Manipulate coordinates to make them compatible with OceanSpy.
Parameters
----------
fillna: bool
If True, fill NaNs in 2D coordinates
(e.g., NaNs are created by MITgcm exch2).
coords1Dfrom2D: bool
If True, infer 1D coordinates from 2D coordinates (mean of 2D).
Use with rectilinear grid only.
coords2Dfrom1D: bool
If True, infer 2D coordinates from 1D coordinates (brodacast 1D).
coordsUVfromCG: bool
If True, compute missing coords (U and V points) from G points.
References
----------
Grid:
https://mitgcm.readthedocs.io/en/latest/algorithm/horiz-grid.html
"""
# Copy because the dataset will change
self = _copy.copy(self)
# Coordinates are dimensions only
self._ds = self._ds.reset_coords()
# Fill nans (e.g., because of exch2)
if fillna:
coords = ['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV']
dims = ['X', 'Y', 'Xp1', 'Yp1', 'Xp1', 'Y', 'X', 'Yp1']
for i, (coord, dim) in enumerate(zip(coords, dims)):
if coord in self._ds.variables:
ds_tmp = self._ds[coord].ffill(dim).bfill(dim).persist()
self._ds[coord] = ds_tmp
# Get U and V by rolling G
if coordsUVfromG:
for i, (point_pos, dim2roll) in enumerate(zip(['U', 'V'],
['Yp1', 'Xp1'])):
for dim in ['Y', 'X']:
coord = self._ds[dim+'G'].rolling(**{dim2roll: 2})
coord = coord.mean().dropna(dim2roll)
coord = coord.drop(coord.coords).rename({dim2roll:
dim2roll[0]})
self._ds[dim+point_pos] = coord
if 'units' in self._ds[dim+'G'].attrs:
units = self._ds[dim+'G'].attrs['units']
self._ds[dim+point_pos].attrs['units'] = units
# For cartesian grid we can use 1D coordinates
if coords1Dfrom2D:
# Take mean
self._ds['Y'] = self._ds['YC'].mean('X',
keep_attrs=True).persist()
self._ds['X'] = self._ds['XC'].mean('Y',
keep_attrs=True).persist()
self._ds['Yp1'] = self._ds['YG'].mean('Xp1',
keep_attrs=True).persist()
self._ds['Xp1'] = self._ds['XG'].mean('Yp1',
keep_attrs=True).persist()
# Get 2D coordinates broadcasting 1D
if coords2Dfrom1D:
# Broadcast
self._ds['YC'], self._ds['XC'] = _xr.broadcast(self._ds['Y'],
self._ds['X'])
self._ds['YG'], self._ds['XG'] = _xr.broadcast(self._ds['Yp1'],
self._ds['Xp1'])
self._ds['YU'], self._ds['XU'] = _xr.broadcast(self._ds['Y'],
self._ds['Xp1'])
self._ds['YV'], self._ds['XV'] = _xr.broadcast(self._ds['Yp1'],
self._ds['X'])
# Add units
dims2 = ['YC', 'XC', 'YG', 'XG',
'YU', 'XU', 'YV', 'XV']
dims1 = ['Y', 'X', 'Yp1', 'Xp1',
'Y', 'Xp1', 'Yp1', 'X']
for i, (D2, D1) in enumerate(zip(dims2, dims1)):
if 'units' in self._ds[D1].attrs:
self._ds[D2].attrs['units'] = self._ds[D1].attrs['units']
# Set 2D coordinates
self._ds = self._ds.set_coords(['YC', 'XC',
'YG', 'XG',
'YU', 'XU',
'YV', 'XV'])
# Attributes (use xmitgcm)
try:
from xmitgcm import variables
if self.parameters['rSphere'] is None:
coords = variables.horizontal_coordinates_cartesian
add_coords = _OrderedDict(
XU=dict(attrs=dict(standard_name="longitude_at_u_location",
long_name="longitude",
units="degrees_east",
coordinate="YU XU")),
YU=dict(attrs=dict(standard_name="latitude_at_u_location",
long_name="latitude",
units="degrees_north",
coordinate="YU XU")),
XV=dict(attrs=dict(standard_name="longitude_at_v_location",
long_name="longitude",
units="degrees_east",
coordinate="YV XV")),
YV=dict(attrs=dict(standard_name="latitude_at_v_location",
long_name="latitude",
units="degrees_north",
coordinate="YV XV")))
else:
coords = variables.horizontal_coordinates_spherical
add_coords = _OrderedDict(
XU=dict(attrs=dict(standard_name=("plane_x_coordinate"
"_at_u_location"),
long_name="x coordinate",
units="m",
coordinate="YU XU")),
YU=dict(attrs=dict(standard_name=("plane_y_coordinate"
"_at_u_location"),
long_name="y coordinate",
units="m",
coordinate="YU XU")),
XV=dict(attrs=dict(standard_name=("plane_x_coordinate"
"_at_v_location"),
long_name="x coordinate",
units="m",
coordinate="YV XV")),
YV=dict(attrs=dict(standard_name=("plane_y_coordinate"
"_at_v_location"),
long_name="y coordinate",
units="m",
coordinate="YV XV")))
coords = _OrderedDict(list(coords.items())
+ list(add_coords.items()))
for var in coords:
attrs = coords[var]['attrs']
for attr in attrs:
if attr not in self._ds[var].attrs:
self._ds[var].attrs[attr] = attrs[attr]
except ImportErrror: # pragma: no cover
pass
return self
# =====
# UTILS
# =====
def _store_as_global_attr(self, name, attr, overwrite):
"""
Store an OceanSpy attribute as dataset global attribute.
Parameters
----------
name: str
Name of the attribute. Attribute is stored as OceanSpy_+name.
attr: str or dict
Attribute to store
overwrite: bool or None
If None, raises error if attr has been previously set.
If True, overwrite previous attributes.
If False, combine with previous attributes.
"""
# Attribute name
name = 'OceanSpy_'+name
if overwrite is None and name in self._ds.attrs:
raise ValueError("[{}] has been previously set: "
"`overwrite` must be bool"
"".format(name.replace("OceanSpy_", "")))
# Copy because attributes are added to _ds
self = _copy.copy(self)
# Store
if not overwrite and name in self._ds.attrs:
prev_attr = self._ds.attrs[name]
if prev_attr[0] == "{" and prev_attr[-1] == "}":
attr = {**eval(prev_attr), **attr}
else:
attr = prev_attr + '_' + attr
self._ds.attrs[name] = str(attr)
return self
def _read_from_global_attr(self, name):
"""
Read an OceanSpy attribute stored as dataset global attribute.
Parameters
----------
name: str
Name of the attribute.
Attribute is decoded from 'OceanSpy_'+name.
Returns
-------
attr: str or dict
Attribute that has been decoded.
"""
# Attribute name
name = 'OceanSpy_'+name
# Check if attributes exists
if name not in self._ds.attrs:
return None
# Read attribute
attr = self._ds.attrs[name]
check_dict = (attr[0] == '{' and attr[-1] == '}')
check_list = (attr[0] == '[' and attr[-1] == ']')
if check_dict or check_list:
attr = eval(attr)
return attr
# ===========
# SHORTCUTS
# ===========
@property
def subsample(self):
"""
Access :py:mod:`oceanspy.subsample` functions.
"""
return _subsampleMethods(self)
@property
def compute(self):
"""
Access :py:mod:`oceanspy.compute` functions,
and merge the computed Dataset into the OceanDataset.
Set overwrite=True
to overwrite DataArrays already existing in the OceanDataset.
"""
return _computeMethods(self)
@property
def plot(self):
"""
Access :py:mod:`oceanspy.plot` functions.
"""
return _plotMethods(self)
@property
def animate(self):
"""
Access :py:mod:`oceanspy.animate` functions.
"""
return _animateMethods(self)
| [
"oceanspy.DEFAULT_PARAMETERS.keys",
"xarray.broadcast",
"numpy.column_stack",
"oceanspy.AVAILABLE_PARAMETERS.keys",
"xarray.zeros_like",
"warnings.warn",
"xarray.DataArray",
"dask.diagnostics.ProgressBar",
"copy.copy"
] | [((30794, 30810), 'copy.copy', '_copy.copy', (['self'], {}), '(self)\n', (30804, 30810), True, 'import copy as _copy\n'), ((38187, 38203), 'copy.copy', '_copy.copy', (['self'], {}), '(self)\n', (38197, 38203), True, 'import copy as _copy\n'), ((24240, 24257), 'xarray.zeros_like', '_xr.zeros_like', (['Y'], {}), '(Y)\n', (24254, 24257), True, 'import xarray as _xr\n'), ((24480, 24525), 'numpy.column_stack', '_np.column_stack', (['(x_stack, y_stack, z_stack)'], {}), '((x_stack, y_stack, z_stack))\n', (24496, 24525), True, 'import numpy as _np\n'), ((32879, 32922), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Y']", "self._ds['X']"], {}), "(self._ds['Y'], self._ds['X'])\n", (32892, 32922), True, 'import xarray as _xr\n'), ((33027, 33074), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['Xp1']"], {}), "(self._ds['Yp1'], self._ds['Xp1'])\n", (33040, 33074), True, 'import xarray as _xr\n'), ((33179, 33224), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Y']", "self._ds['Xp1']"], {}), "(self._ds['Y'], self._ds['Xp1'])\n", (33192, 33224), True, 'import xarray as _xr\n'), ((33329, 33374), 'xarray.broadcast', '_xr.broadcast', (["self._ds['Yp1']", "self._ds['X']"], {}), "(self._ds['Yp1'], self._ds['X'])\n", (33342, 33374), True, 'import xarray as _xr\n'), ((12160, 12185), 'oceanspy.DEFAULT_PARAMETERS.keys', 'DEFAULT_PARAMETERS.keys', ([], {}), '()\n', (12183, 12185), False, 'from oceanspy import DEFAULT_PARAMETERS, AVAILABLE_PARAMETERS, TYPE_PARAMETERS\n'), ((27607, 27621), 'dask.diagnostics.ProgressBar', '_ProgressBar', ([], {}), '()\n', (27619, 27621), True, 'from dask.diagnostics import ProgressBar as _ProgressBar\n'), ((28581, 28595), 'dask.diagnostics.ProgressBar', '_ProgressBar', ([], {}), '()\n', (28593, 28595), True, 'from dask.diagnostics import ProgressBar as _ProgressBar\n'), ((12526, 12553), 'oceanspy.AVAILABLE_PARAMETERS.keys', 'AVAILABLE_PARAMETERS.keys', ([], {}), '()\n', (12551, 12553), False, 'from oceanspy import 
DEFAULT_PARAMETERS, AVAILABLE_PARAMETERS, TYPE_PARAMETERS\n'), ((16434, 16474), 'xarray.DataArray', '_xr.DataArray', (['midp'], {'dims': "(_dim + '_midp')"}), "(midp, dims=_dim + '_midp')\n", (16447, 16474), True, 'import xarray as _xr\n'), ((21242, 21321), 'warnings.warn', '_warnings.warn', (['"""cartopy is not available, so projection is None"""'], {'stacklevel': '(2)'}), "('cartopy is not available, so projection is None', stacklevel=2)\n", (21256, 21321), True, 'import warnings as _warnings\n')] |
import argparse
import os
import random
import torch
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.animation as animation
from . import model
from .utils import general, landmark_tools
from .utils.dataset import Dataset, load_imagelist_landmark
import pickle
import pickle5
import cv2
from glob import glob
from tqdm import tqdm
parser = argparse.ArgumentParser()
# --- I/O paths ---
parser.add_argument('--image_path', type=str, default=None, help="path to image")
parser.add_argument('--landmark_path', type=str, default=None, help="path to landmark")
parser.add_argument('--Gckpt', type=str, default=None, help="Generator Checkpoint")
parser.add_argument('--save_dir', type=str, default=None, help="Folder to save generated images")
# --- Generation settings ---
parser.add_argument('--num_patch', type=int, default=1, help="Number of patch to modify for a image")
# --- Network / patch geometry (must match the trained checkpoint) ---
parser.add_argument('--image_size', default=64, type=int, help="image patch size")
parser.add_argument('--hidden_size', default=64, type=int, help="network hidden unit size")
parser.add_argument('--num_input_channel', default=3, type=int, help="for RGB, it is 3")
parser.add_argument('--patch_size_ratio', default=15, type=int)
parser.add_argument('--patch_filter_magic', default=1, type=float)
# --- Hardware ---
parser.add_argument('--gpu', default='', type=str, help="the id of gpu(s) to use")
args = parser.parse_args()
def process_image(image_path, landmark_path, save_dir, num_patch=1):
    """
    Synthesize acne on `num_patch` facial patches of one image.

    Loads the image and its landmarks, samples patches via landmark-based
    triangle filtering, runs the generator `netG` on each patch, pastes the
    synthesized patch back into the image, and saves the result plus a
    4-panel comparison figure.

    Uses module globals `args`, `device` and `netG` set in __main__.

    Parameters
    ----------
    image_path: str
        Path to the input image.
    landmark_path: str
        Path to the pickled landmark list for the image.
    save_dir: str
        Folder where outputs are written.
    num_patch: int
        Number of patches to modify for this image.
    """
    # Load image and landmarks; triangle params are computed once per image.
    img = plt.imread(image_path)
    with open(landmark_path, 'rb') as fp:
        try:
            landmark_list = pickle.load(fp)
        except ValueError:
            # Fall back to pickle5 for protocol-5 pickles on older Pythons.
            landmark_list = pickle5.load(fp)
    # tri_params: (tri_points, simplices, triangle_weight, named_lm)
    tri_params = landmark_tools.obtain_preprocess_triangles(img, landmark_list)
    for I in range(num_patch):
        ptr, ori_patch = landmark_tools.generate_filtered_patch(
            img, tri_params,
            size_ratio=args.patch_size_ratio,
            filter_magic=args.patch_filter_magic)
        origin_size = ori_patch.shape[:2]
        # Resize to network input size and convert to a NCHW float tensor.
        resized_patch = cv2.resize(ori_patch, (args.image_size, args.image_size))
        patch = torch.FloatTensor(resized_patch) / 255.
        patch = patch.permute([2, 0, 1])
        patch = torch.unsqueeze(patch, 0)  # 1, 3, H, W
        # BUG FIX: Tensor.to() is not in-place and its result was discarded,
        # leaving the input on CPU while the model may live on GPU.
        patch = patch.to(device)
        # Inference only: no gradients needed.
        with torch.no_grad():
            synpatch, modify, mask = netG(patch)

        def _to_numpy_image(tensor):
            # NCHW tensor -> HWC numpy image (first batch element).
            # (Renamed from `process_image`, which shadowed this function.)
            arr = general.to_numpy(tensor)
            arr = np.transpose(arr, [0, 2, 3, 1])
            return arr[0]
        synpatch = _to_numpy_image(synpatch)
        modify = _to_numpy_image(modify)
        mask = _to_numpy_image(mask)
        # Place back to original image and save.
        # NOTE(review): cv2.resize expects dsize as (width, height) while
        # origin_size is (height, width); this only works because the
        # generated patches are square — TODO confirm.
        synpatch_fullsize = cv2.resize(synpatch, origin_size)
        p = (synpatch_fullsize * 255).astype(img.dtype)
        size = origin_size[0] // 2
        synimage = img.copy()
        synimage[ptr[1]-size:ptr[1]+size, ptr[0]-size:ptr[0]+size] = p
        fname = os.path.basename(image_path)
        fname = '.'.join(fname.split('.')[:-1])  # remove file extension
        plt.imsave(os.path.join(save_dir, fname+'_syn%d.jpg' % I), synimage)
        # Save a side-by-side comparison of inputs and outputs.
        fig, axes = plt.subplots(ncols=4, figsize=[16, 4])
        axes[0].imshow(resized_patch)
        axes[0].set_title("no-acne patch")
        axes[1].imshow(synpatch)
        axes[1].set_title("synthesized patch")
        axes[2].imshow(modify)
        axes[2].set_title("acne pixels")
        axes[3].imshow(mask, cmap='gray')
        axes[3].set_title("mask")
        plt.savefig(os.path.join(save_dir, fname+'_compare%d.jpg' % I))
        plt.close(fig)
if __name__ == "__main__":
    # Restrict visible GPUs before torch initializes CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = len(args.gpu) > 0
    device = torch.device("cuda:0" if use_gpu else "cpu")
    # Build the generator and restore its checkpoint in eval mode.
    netG = model.Generator(args.hidden_size,
                           num_input_channel=args.num_input_channel)
    ckpt = torch.load(args.Gckpt, map_location=device)
    netG.load_state_dict(ckpt)
    netG.eval()
    netG.to(device)
    os.makedirs(args.save_dir, exist_ok=True)
    process_image(args.image_path, args.landmark_path,
                  args.save_dir, args.num_patch)
| [
"pickle5.load",
"os.makedirs",
"argparse.ArgumentParser",
"torch.unsqueeze",
"torch.load",
"matplotlib.pyplot.imread",
"pickle.load",
"torch.FloatTensor",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.basename",
"cv2.resize",
"numpy.transpose",
"matplotlib.pyplot.subplots"
] | [((364, 389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (387, 389), False, 'import argparse\n'), ((1513, 1535), 'matplotlib.pyplot.imread', 'plt.imread', (['image_path'], {}), '(image_path)\n', (1523, 1535), True, 'import matplotlib.pyplot as plt\n'), ((4084, 4127), 'torch.load', 'torch.load', (['args.Gckpt'], {'map_location': 'device'}), '(args.Gckpt, map_location=device)\n', (4094, 4127), False, 'import torch\n'), ((4201, 4242), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {'exist_ok': '(True)'}), '(args.save_dir, exist_ok=True)\n', (4212, 4242), False, 'import os\n'), ((2243, 2300), 'cv2.resize', 'cv2.resize', (['ori_patch', '(args.image_size, args.image_size)'], {}), '(ori_patch, (args.image_size, args.image_size))\n', (2253, 2300), False, 'import cv2\n'), ((2416, 2441), 'torch.unsqueeze', 'torch.unsqueeze', (['patch', '(0)'], {}), '(patch, 0)\n', (2431, 2441), False, 'import torch\n'), ((2924, 2957), 'cv2.resize', 'cv2.resize', (['synpatch', 'origin_size'], {}), '(synpatch, origin_size)\n', (2934, 2957), False, 'import cv2\n'), ((3169, 3197), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (3185, 3197), False, 'import os\n'), ((3394, 3432), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)', 'figsize': '[16, 4]'}), '(ncols=4, figsize=[16, 4])\n', (3406, 3432), True, 'import matplotlib.pyplot as plt\n'), ((3825, 3839), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3834, 3839), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1637), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1633, 1637), False, 'import pickle\n'), ((2318, 2350), 'torch.FloatTensor', 'torch.FloatTensor', (['resized_patch'], {}), '(resized_patch)\n', (2335, 2350), False, 'import torch\n'), ((2630, 2664), 'numpy.transpose', 'np.transpose', (['tensor', '[0, 2, 3, 1]'], {}), '(tensor, [0, 2, 3, 1])\n', (2642, 2664), True, 'import numpy as np\n'), ((3290, 3338), 'os.path.join', 
'os.path.join', (['save_dir', "(fname + '_syn%d.jpg' % I)"], {}), "(save_dir, fname + '_syn%d.jpg' % I)\n", (3302, 3338), False, 'import os\n'), ((3764, 3816), 'os.path.join', 'os.path.join', (['save_dir', "(fname + '_compare%d.jpg' % I)"], {}), "(save_dir, fname + '_compare%d.jpg' % I)\n", (3776, 3816), False, 'import os\n'), ((1693, 1709), 'pickle5.load', 'pickle5.load', (['fp'], {}), '(fp)\n', (1705, 1709), False, 'import pickle5\n')] |
#coding:utf-8
'''
Created on 2015年8月13日
@author: melon
'''
import xlrd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import os
import fnmatch
from matplotlib.font_manager import FontProperties
from matplotlib.lines import lineStyles
from matplotlib.pyplot import gca
from pprint import pprint
xlsx_path = "./data/packet_loss.xls"
def trim_blank(x, y):
    """Remove paired entries that are not numeric.

    Walks the two parallel lists in lockstep and removes, in place, every
    position where either ``x[index]`` or ``y[index]`` cannot be parsed as a
    float (e.g. blank spreadsheet cells read back as ``''``).  Both lists are
    mutated and also returned as a tuple for convenience.

    Note: only the first ``len(y)`` positions are examined, so trailing
    elements of a longer ``x`` are left untouched (same as the original
    behaviour).

    :param x: list of values (mutated in place)
    :param y: list of values parallel to ``x`` (mutated in place)
    :return: tuple ``(x, y)`` with non-numeric pairs removed
    """
    index = 0
    while index < len(y):
        try:
            float(y[index])
            float(x[index])
        except ValueError:
            # Drop the bad pair.  Do NOT advance: the next element has just
            # shifted into the current slot.  (The old implementation
            # backtracked to the previously validated index instead, which
            # re-checked an already-good element after every removal.)
            x.pop(index)
            y.pop(index)
        else:
            index += 1
    return (x, y)
if __name__ == '__main__':
    x_max_len = 105

    # Per-series configuration replacing five copy-pasted load/plot stanzas:
    # (x column, y column, length of the unit suffix to strip from the raw
    #  cell text, divisor applied after the *8 bit conversion, legend label,
    #  color, line style, marker or None, main-axes linewidth, inset linewidth)
    series_specs = [
        (1, 2, 1, 1000.0, '1Kbps', '#1f77b4', '-', None, 1.5, 1.5),
        (5, 6, 1, 10000.0, '10Kbps', '#aec7e8', '-.', None, 2, 2),
        (9, 10, 2, 100.0, '100Kbps', '#ff7f0e', '--', None, 2, 1.5),
        (13, 14, 2, 1000.0, '1Mbps', 'red', ':', None, 2, 2),
        (17, 18, 2, 10000.0, '10Mbps', '#2ca02c', '-', '.', 2, 1.5),
    ]

    gar_xlsx = xlrd.open_workbook(xlsx_path)
    gar_sheet0 = gar_xlsx.sheet_by_index(0)

    def load_series(xcol, ycol, suffix_len, divisor):
        """Read one (send-rate, loss-rate) series from the first sheet.

        The x column holds strings like '123K'; the unit suffix is stripped,
        the value converted from bytes to bits (*8) and scaled by `divisor`.
        The y column (loss fraction) is scaled to percent after blank rows
        are dropped via trim_blank().
        """
        x = gar_sheet0.col_values(xcol, 1, x_max_len)
        x = [int(v[:-suffix_len]) for v in x if v]
        x = [(v * 8) / divisor for v in x]
        y = gar_sheet0.col_values(ycol, 1, len(x) + 1)
        (x, y) = trim_blank(x, y)
        y = [v * 100 for v in y]  # fraction -> percent
        return np.array(x), np.array(y)

    # Load everything up front, as the original script did.
    series = []
    for (xcol, ycol, slen, div, label, color, ls, marker, lw_main, lw_inset) in series_specs:
        data_x, data_y = load_series(xcol, ycol, slen, div)
        series.append((data_x, data_y, label, color, ls, marker, lw_main, lw_inset))

    # Main axes: one curve per send rate.
    for (data_x, data_y, label, color, ls, marker, lw_main, _) in series:
        kwargs = {} if marker is None else {'marker': marker}
        plt.plot(data_x, data_y, color=color, linestyle=ls, linewidth=lw_main,
                 label=label, **kwargs)
    plt.xlim(0, 9)
    plt.ylim(0, 90)
    plt.xlabel('Send Rate (Times)')
    plt.ylabel('Packet Loss Rate (%)')
    plt.legend(loc=5, fontsize=10)

    # Inset zoom axes drawn over the main axes.
    # NOTE(review): the 'axisbg' keyword was removed in matplotlib >= 2.2;
    # newer versions require facecolor='w'.  Kept as-is for compatibility
    # with the matplotlib version this script was written against.
    a = plt.axes([0.42, 0.4, .2, .2], axisbg='w')
    for (data_x, data_y, label, color, ls, marker, _, lw_inset) in series:
        kwargs = {} if marker is None else {'marker': marker}
        plt.plot(data_x, data_y, color=color, linestyle=ls, linewidth=lw_inset,
                 **kwargs)
    plt.xlim(2.01, 2.11)
    plt.ylim(42.5, 45.5)
    plt.title('Zoom to rectangle', fontsize=10)
    plt.xticks([])
    plt.yticks([])
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"xlrd.open_workbook",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.py... | [((820, 849), 'xlrd.open_workbook', 'xlrd.open_workbook', (['xlsx_path'], {}), '(xlsx_path)\n', (838, 849), False, 'import xlrd\n'), ((1184, 1206), 'numpy.array', 'np.array', (['data_1kbps_x'], {}), '(data_1kbps_x)\n', (1192, 1206), True, 'import numpy as np\n'), ((1226, 1248), 'numpy.array', 'np.array', (['data_1kbps_y'], {}), '(data_1kbps_y)\n', (1234, 1248), True, 'import numpy as np\n'), ((1596, 1619), 'numpy.array', 'np.array', (['data_10kbps_x'], {}), '(data_10kbps_x)\n', (1604, 1619), True, 'import numpy as np\n'), ((1640, 1663), 'numpy.array', 'np.array', (['data_10kbps_y'], {}), '(data_10kbps_y)\n', (1648, 1663), True, 'import numpy as np\n'), ((1964, 1988), 'numpy.array', 'np.array', (['data_100kbps_x'], {}), '(data_100kbps_x)\n', (1972, 1988), True, 'import numpy as np\n'), ((2010, 2034), 'numpy.array', 'np.array', (['data_100kbps_y'], {}), '(data_100kbps_y)\n', (2018, 2034), True, 'import numpy as np\n'), ((2383, 2405), 'numpy.array', 'np.array', (['data_1mbps_x'], {}), '(data_1mbps_x)\n', (2391, 2405), True, 'import numpy as np\n'), ((2425, 2447), 'numpy.array', 'np.array', (['data_1mbps_y'], {}), '(data_1mbps_y)\n', (2433, 2447), True, 'import numpy as np\n'), ((2798, 2821), 'numpy.array', 'np.array', (['data_10mbps_x'], {}), '(data_10mbps_x)\n', (2806, 2821), True, 'import numpy as np\n'), ((2842, 2865), 'numpy.array', 'np.array', (['data_10mbps_y'], {}), '(data_10mbps_y)\n', (2850, 2865), True, 'import numpy as np\n'), ((2918, 3020), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1kbps_x', 'data_1kbps_y'], {'color': '"""#1f77b4"""', 'linestyle': '"""-"""', 'linewidth': '(1.5)', 'label': '"""1Kbps"""'}), "(data_1kbps_x, data_1kbps_y, color='#1f77b4', linestyle='-',\n linewidth=1.5, label='1Kbps')\n", (2926, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3125), 'matplotlib.pyplot.plot', 'plt.plot', (['data_10kbps_x', 'data_10kbps_y'], {'color': '"""#aec7e8"""', 'linestyle': '"""-."""', 'linewidth': '(2)', 'label': 
'"""10Kbps"""'}), "(data_10kbps_x, data_10kbps_y, color='#aec7e8', linestyle='-.',\n linewidth=2, label='10Kbps')\n", (3029, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3233), 'matplotlib.pyplot.plot', 'plt.plot', (['data_100kbps_x', 'data_100kbps_y'], {'color': '"""#ff7f0e"""', 'linestyle': '"""--"""', 'linewidth': '(2)', 'label': '"""100Kbps"""'}), "(data_100kbps_x, data_100kbps_y, color='#ff7f0e', linestyle='--',\n linewidth=2, label='100Kbps')\n", (3134, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3331), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1mbps_x', 'data_1mbps_y'], {'color': '"""red"""', 'linestyle': '""":"""', 'linewidth': '(2)', 'label': '"""1Mbps"""'}), "(data_1mbps_x, data_1mbps_y, color='red', linestyle=':', linewidth=\n 2, label='1Mbps')\n", (3242, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3446), 'matplotlib.pyplot.plot', 'plt.plot', (['data_10mbps_x', 'data_10mbps_y'], {'color': '"""#2ca02c"""', 'linestyle': '"""-"""', 'marker': '"""."""', 'linewidth': '(2)', 'label': '"""10Mbps"""'}), "(data_10mbps_x, data_10mbps_y, color='#2ca02c', linestyle='-',\n marker='.', linewidth=2, label='10Mbps')\n", (3339, 3446), True, 'import matplotlib.pyplot as plt\n'), ((3501, 3515), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(9)'], {}), '(0, 9)\n', (3509, 3515), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3534), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(90)'], {}), '(0, 90)\n', (3527, 3534), True, 'import matplotlib.pyplot as plt\n'), ((3538, 3569), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Send Rate (Times)"""'], {}), "('Send Rate (Times)')\n", (3548, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3574, 3608), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Packet Loss Rate (%)"""'], {}), "('Packet Loss Rate (%)')\n", (3584, 3608), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3643), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(5)', 'fontsize': '(10)'}), '(loc=5, 
fontsize=10)\n', (3623, 3643), True, 'import matplotlib.pyplot as plt\n'), ((3756, 3799), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.42, 0.4, 0.2, 0.2]'], {'axisbg': '"""w"""'}), "([0.42, 0.4, 0.2, 0.2], axisbg='w')\n", (3764, 3799), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3889), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1kbps_x', 'data_1kbps_y'], {'color': '"""#1f77b4"""', 'linestyle': '"""-"""', 'linewidth': '(1.5)'}), "(data_1kbps_x, data_1kbps_y, color='#1f77b4', linestyle='-',\n linewidth=1.5)\n", (3810, 3889), True, 'import matplotlib.pyplot as plt\n'), ((3890, 3978), 'matplotlib.pyplot.plot', 'plt.plot', (['data_10kbps_x', 'data_10kbps_y'], {'color': '"""#aec7e8"""', 'linestyle': '"""-."""', 'linewidth': '(2)'}), "(data_10kbps_x, data_10kbps_y, color='#aec7e8', linestyle='-.',\n linewidth=2)\n", (3898, 3978), True, 'import matplotlib.pyplot as plt\n'), ((3979, 4071), 'matplotlib.pyplot.plot', 'plt.plot', (['data_100kbps_x', 'data_100kbps_y'], {'color': '"""#ff7f0e"""', 'linestyle': '"""--"""', 'linewidth': '(1.5)'}), "(data_100kbps_x, data_100kbps_y, color='#ff7f0e', linestyle='--',\n linewidth=1.5)\n", (3987, 4071), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4149), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1mbps_x', 'data_1mbps_y'], {'color': '"""red"""', 'linestyle': '""":"""', 'linewidth': '(2)'}), "(data_1mbps_x, data_1mbps_y, color='red', linestyle=':', linewidth=2)\n", (4080, 4149), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4255), 'matplotlib.pyplot.plot', 'plt.plot', (['data_10mbps_x', 'data_10mbps_y'], {'color': '"""#2ca02c"""', 'linestyle': '"""-"""', 'marker': '"""."""', 'linewidth': '(1.5)'}), "(data_10mbps_x, data_10mbps_y, color='#2ca02c', linestyle='-',\n marker='.', linewidth=1.5)\n", (4162, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4310, 4330), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(2.01)', '(2.11)'], {}), '(2.01, 2.11)\n', (4318, 4330), True, 'import matplotlib.pyplot as plt\n'), ((4334, 
4354), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(42.5)', '(45.5)'], {}), '(42.5, 45.5)\n', (4342, 4354), True, 'import matplotlib.pyplot as plt\n'), ((4358, 4401), 'matplotlib.pyplot.title', 'plt.title', (['"""Zoom to rectangle"""'], {'fontsize': '(10)'}), "('Zoom to rectangle', fontsize=10)\n", (4367, 4401), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4443), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4439, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4462), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4458, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4480, 4482), True, 'import matplotlib.pyplot as plt\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/11_dcmm.ipynb (unless otherwise specified).
__all__ = ['dcmm']
# Internal Cell
#exporti
import numpy as np
from .latent_factor_fxns import forecast_marginal_lf_dcmm, forecast_path_lf_dcmm
from .dglm import bern_dglm, pois_dglm
from .update import update_F
from scipy.special import expit
# Cell
class dcmm:
    """Couples a bernoulli DGLM with a "shifted" poisson DGLM.

    The bernoulli component models whether an observation is zero or
    positive; the poisson component models the positive counts shifted
    down by one (it is updated with ``y - 1`` and forecast samples have
    1 added back before being multiplied by the bernoulli samples).
    """
    def __init__(self,
                 a0_bern = None,
                 R0_bern = None,
                 nregn_bern = 0,
                 ntrend_bern = 0,
                 nlf_bern = 0,
                 nhol_bern = 0,
                 seasPeriods_bern = [],
                 seasHarmComponents_bern = [],
                 deltrend_bern = 1, delregn_bern = 1,
                 delhol_bern = 1,
                 delseas_bern = 1, dellf_bern = 1,
                 a0_pois = None,
                 R0_pois = None,
                 nregn_pois = 0,
                 ntrend_pois = 0,
                 nlf_pois = 0,
                 nhol_pois = 0,
                 seasPeriods_pois = [],
                 seasHarmComponents_pois = [],
                 deltrend_pois = 1, delregn_pois = 1,
                 delhol_pois = 1,
                 delseas_pois = 1, dellf_pois = 1,
                 rho = 1,
                 interpolate=True,
                 adapt_discount=False):
        """
        :param a0_bern: Prior mean vector for bernoulli DGLM
        :param R0_bern: Prior covariance matrix for bernoulli DGLM
        :param nregn_bern: Number of regression components in bernoulli DGLM
        :param ntrend_bern: Number of trend components in bernoulli DGLM
        :param nlf_bern: Number of latent factor components in bernoulli DGLM
        :param nhol_bern: Number of holiday components in bernoulli DGLM
        :param seasPeriods_bern: List of periods of seasonal components in bernoulli DGLM
        :param seasHarmComponents_bern: List of harmonic components included for each period in bernoulli DGLM
        :param deltrend_bern: Discount factor on trend components in bernoulli DGLM
        :param delregn_bern: Discount factor on regression components in bernoulli DGLM
        :param delhol_bern: Discount factor on holiday component in bernoulli DGLM (currently deprecated)
        :param delseas_bern: Discount factor on seasonal components in bernoulli DGLM
        :param dellf_bern: Discount factor on latent factor components in bernoulli DGLM
        :param a0_pois: Prior mean vector for poisson DGLM
        :param R0_pois: Prior covariance matrix for poisson DGLM
        :param nregn_pois: Number of regression components in poisson DGLM
        :param ntrend_pois: Number of trend components in poisson DGLM
        :param nlf_pois: Number of latent factor components in poisson DGLM
        :param nhol_pois: Number of holiday components in poisson DGLM
        :param seasPeriods_pois: List of periods of seasonal components in poisson DGLM
        :param seasHarmComponents_pois: List of harmonic components included for each period in poisson DGLM
        :param deltrend_pois: Discount factor on trend components in poisson DGLM
        :param delregn_pois: Discount factor on regression components in poisson DGLM
        :param delhol_pois: Discount factor on holiday component in poisson DGLM (currently deprecated)
        :param delseas_pois: Discount factor on seasonal components in poisson DGLM
        :param dellf_pois: Discount factor on latent factor components in poisson DGLM
        :param rho: Discount factor for random effects extension in poisson DGLM (smaller rho increases variance)
        :param interpolate: Forwarded unchanged to both component DGLMs
        :param adapt_discount: Forwarded unchanged to both component DGLMs
        """
        self.bern_mod = bern_dglm(a0=a0_bern,
                                  R0=R0_bern,
                                  nregn=nregn_bern,
                                  ntrend=ntrend_bern,
                                  nlf=nlf_bern,
                                  nhol=nhol_bern,
                                  seasPeriods=seasPeriods_bern,
                                  seasHarmComponents=seasHarmComponents_bern,
                                  deltrend=deltrend_bern, delregn=delregn_bern,
                                  delhol=delhol_bern, delseas=delseas_bern,
                                  dellf=dellf_bern,
                                  interpolate=interpolate,
                                  adapt_discount=adapt_discount)
        self.pois_mod = pois_dglm(a0=a0_pois,
                                  R0=R0_pois,
                                  nregn=nregn_pois,
                                  ntrend=ntrend_pois,
                                  nlf=nlf_pois,
                                  nhol=nhol_pois,
                                  seasPeriods=seasPeriods_pois,
                                  seasHarmComponents=seasHarmComponents_pois,
                                  deltrend=deltrend_pois, delregn=delregn_pois,
                                  delhol=delhol_pois, delseas=delseas_pois,
                                  dellf=dellf_pois,
                                  rho=rho,
                                  interpolate=interpolate,
                                  adapt_discount=adapt_discount)
        # Time index, incremented once per observation update.
        self.t = 0

    # X is a list or tuple of length 2. The first component is data for the bernoulli DGLM, the next is for the Poisson DGLM.
    def update(self, y = None, X = None):
        """Update both component DGLMs with one observation.

        y is None: both models see a missing observation.  y == 0: the
        bernoulli model is updated with 0 and the poisson model sees a
        missing value.  y > 0: the poisson model is updated with the
        shifted count y - 1, and the bernoulli model is updated with 1
        only while its forecast is still uncertain (see below).
        """
        X = self.make_pair(X)
        if y is None:
            self.bern_mod.update(y=y)
            self.pois_mod.update(y=y)
        elif y == 0:
            self.bern_mod.update(y = 0, X = X[0])
            self.pois_mod.update(y = np.nan, X = X[1])
        else: # only update the bernoulli model if we have significant uncertainty in the forecast
            # get the lower end forecast on the logit scale
            F = update_F(self.bern_mod, X[0], F=self.bern_mod.F.copy())
            ft, qt = self.bern_mod.get_mean_and_var(F, self.bern_mod.a, self.bern_mod.R)
            fcast_logit_lb = ft - np.sqrt(qt)
            # translate to a probability for a rough idea of whether we're already pretty confident for this forecast
            if expit(fcast_logit_lb) < 0.975:
                self.bern_mod.update(y=1, X = X[0])
            else:
                # Already confident of a success; treat it as missing to
                # avoid further inflating the bernoulli state.
                self.bern_mod.update(y=np.nan, X=X[0])
            self.pois_mod.update(y = y - 1, X = X[1]) # Shifted Y values in the Poisson DGLM
        self.t += 1

    def update_lf_sample(self, y = None, X = None, phi_samps = None, parallel=False):
        """Update with latent factors represented by samples (phi_samps).

        Same zero/positive split as update(), but without the
        uncertainty check: any y > 0 updates the bernoulli model with 1.
        """
        X = self.make_pair(X)
        phi_samps = self.make_pair(phi_samps)
        if y is None:
            self.bern_mod.update_lf_sample(y=y)
            self.pois_mod.update_lf_sample(y=y)
        elif y == 0:
            self.bern_mod.update_lf_sample(y = 0, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
            self.pois_mod.update_lf_sample(y = np.nan, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
        else:
            self.bern_mod.update_lf_sample(y = 1, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
            # Shifted Y values in the Poisson DGLM
            self.pois_mod.update_lf_sample(y =y - 1, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
        self.t += 1

    def update_lf_analytic(self, y = None, X = None, phi_mu = None, phi_sigma = None):
        """Update with latent factors summarized by mean/variance (phi_mu, phi_sigma)."""
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if y is None:
            self.bern_mod.update_lf_analytic(y=y)
            self.pois_mod.update_lf_analytic(y=y)
        elif y == 0:
            self.bern_mod.update_lf_analytic(y = 0, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
            self.pois_mod.update_lf_analytic(y = np.nan, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
        else:
            self.bern_mod.update_lf_analytic(y = 1, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
            # Shifted Y values in the Poisson DGLM
            self.pois_mod.update_lf_analytic(y =y - 1, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
        self.t += 1

    def forecast_marginal(self, k, X = None, nsamps = 1, mean_only = False, state_mean_var = False):
        """k-step-ahead marginal forecast.

        Returns the product of bernoulli and (shifted) poisson forecasts:
        means if mean_only, per-model state (mean, var) pairs if
        state_mean_var, otherwise nsamps joint samples.
        """
        X = self.make_pair(X)
        if mean_only:
            mean_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps, mean_only)
            return mean_bern * (mean_pois + 1)
        elif state_mean_var:
            mv_bern = self.bern_mod.forecast_marginal(k, X[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal(k, X[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            samps_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps)
            samps_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
            return samps_bern * samps_pois

    def forecast_marginal_lf_analytic(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
        """k-step-ahead marginal forecast with analytic latent factors (phi_mu, phi_sigma)."""
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if mean_only:
            mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
            return np.array([[mean_bern * (mean_pois + 1)]])
        elif state_mean_var:
            mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            samps_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], nsamps = nsamps)
            samps_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], nsamps = nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
            return samps_bern * samps_pois

    def forecast_marginal_lf_analytic_new(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
        """Variant of forecast_marginal_lf_analytic that delegates the
        sampling branch to forecast_marginal_lf_dcmm; the mean_only and
        state_mean_var branches are identical to the original method.
        """
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if mean_only:
            mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
            return np.array([[mean_bern * (mean_pois + 1)]])
        elif state_mean_var:
            mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            return forecast_marginal_lf_dcmm(self, k, X[0], phi_mu[0], phi_sigma[0], nsamps=nsamps)

    def forecast_marginal_lf_sample(self, k, X = None, phi_samps = None, nsamps = 1, mean_only = False):
        """k-step-ahead marginal forecast with sampled latent factors."""
        X = self.make_pair(X)
        phi_samps = self.make_pair(phi_samps)
        samps_bern = self.bern_mod.forecast_marginal_lf_sample(k, X[0], phi_samps[0], mean_only)
        samps_pois = self.pois_mod.forecast_marginal_lf_sample(k, X[1], phi_samps[1], mean_only) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois

    def forecast_path_lf_sample(self, k, X = None, phi_samps=None, nsamps = 1):
        """k-step path forecast (nsamps x k samples) with sampled latent factors."""
        X = self.make_pair(X)
        phi_samps = self.make_pair(phi_samps)
        samps_bern = self.bern_mod.forecast_path_lf_sample(k, X[0], phi_samps[0], nsamps)
        samps_pois = self.pois_mod.forecast_path_lf_sample(k, X[1], phi_samps[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois

    def forecast_path(self, k, X = None, nsamps = 1):
        """k-step path forecast (nsamps x k samples)."""
        X = self.make_pair(X)
        samps_bern = self.bern_mod.forecast_path(k, X[0], nsamps)
        samps_pois = self.pois_mod.forecast_path(k, X[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois

    def forecast_path_copula(self, k, X = None, nsamps = 1, **kwargs):
        """k-step path forecast using the copula approximation; kwargs are forwarded."""
        X = self.make_pair(X)
        samps_bern = self.bern_mod.forecast_path_copula(k, X[0], nsamps, **kwargs)
        samps_pois = self.pois_mod.forecast_path_copula(k, X[1], nsamps, **kwargs) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois

    def forecast_path_lf_copula(self, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, **kwargs):
        """k-step copula path forecast with latent factors.

        Special case: when k == 2 and phi_mu is a flat (non-nested)
        list/tuple, the same latent-factor summaries are reused for both
        component models instead of being split via make_pair.
        """
        X = self.make_pair(X)
        if k == 2 and isinstance(phi_mu, (list, tuple)):
            if not isinstance(phi_mu[0], (list, tuple)):
                phi_mu = (phi_mu, phi_mu)
                phi_sigma = (phi_sigma, phi_sigma)
                phi_psi = (phi_psi, phi_psi)
        else:
            phi_mu = self.make_pair(phi_mu)
            phi_sigma = self.make_pair(phi_sigma)
            phi_psi = self.make_pair(phi_psi)
        samps_bern = self.bern_mod.forecast_path_lf_copula(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], phi_psi = phi_psi[0], nsamps = nsamps, **kwargs)
        samps_pois = self.pois_mod.forecast_path_lf_copula(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], phi_psi = phi_psi[1], nsamps = nsamps, **kwargs) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois

    def forecast_path_lf_copula_new(self, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, **kwargs):
        """Variant of forecast_path_lf_copula that delegates to forecast_path_lf_dcmm."""
        X = self.make_pair(X)
        if k == 2 and isinstance(phi_mu, (list, tuple)):
            if not isinstance(phi_mu[0], (list, tuple)):
                phi_mu = (phi_mu, phi_mu)
                phi_sigma = (phi_sigma, phi_sigma)
                phi_psi = (phi_psi, phi_psi)
        else:
            phi_mu = self.make_pair(phi_mu)
            phi_sigma = self.make_pair(phi_sigma)
            phi_psi = self.make_pair(phi_psi)
        return forecast_path_lf_dcmm(self, k, X[0], phi_mu[0], phi_sigma[0], phi_psi[0], nsamps=nsamps, **kwargs)

    def forecast_path_lf_copula_density(self, y, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = (None, None), nsamps = 1, **kwargs):
        """Log-density of an observed path y of length k under the copula forecast.

        Returns (logdens_bern, logdens_pois): the bernoulli density of the
        zero/positive indicator path and the poisson density of the shifted
        counts (with zeros treated as missing).
        """
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        phi_psi = self.make_pair(phi_psi)
        # z is the zero/positive indicator path evaluated by the bernoulli model.
        z = np.zeros([k])
        y = y.reshape(-1)
        z[y > 0] = 1
        logdens_bern = self.bern_mod.forecast_path_lf_copula(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], phi_psi = phi_psi[0], nsamps = nsamps, y = z, **kwargs)
        # Shifted Y values in the Poisson DGLM
        y = y - 1
        y = y.astype('float')
        # 0's in the original data (now -1's) are considered 'missing by the Poisson model
        y[y < 0] = np.nan
        logdens_pois = self.pois_mod.forecast_path_lf_copula(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], phi_psi = phi_psi[1], nsamps = nsamps, y = y, **kwargs)
        return logdens_bern, logdens_pois

    def forecast_state_mean_and_var(self, k = 1, X = None):
        """Return ((mean, var) of bernoulli state, (mean, var) of poisson state) k steps ahead.

        NOTE(review): unlike the other methods, X is not run through
        make_pair here, so callers must pass a 2-element X themselves.
        """
        mean_var_bern = self.bern_mod.forecast_state_mean_and_var(k, X[0])
        mean_var_pois = self.pois_mod.forecast_state_mean_and_var(k, X[1])
        return mean_var_bern, mean_var_pois

    def make_pair(self, x):
        """Return x as a pair: 2-element sequences pass through unchanged,
        anything else is duplicated as (x, x) for the (bernoulli, poisson) models.
        """
        if isinstance(x, (list, tuple)):
            if len(x) == 2:
                return x
            else:
                return (x, x)
        else:
return (x, x) | [
"numpy.sqrt",
"numpy.ones",
"scipy.special.expit",
"numpy.array",
"numpy.zeros"
] | [((14774, 14787), 'numpy.zeros', 'np.zeros', (['[k]'], {}), '([k])\n', (14782, 14787), True, 'import numpy as np\n'), ((9380, 9421), 'numpy.array', 'np.array', (['[[mean_bern * (mean_pois + 1)]]'], {}), '([[mean_bern * (mean_pois + 1)]])\n', (9388, 9421), True, 'import numpy as np\n'), ((10702, 10743), 'numpy.array', 'np.array', (['[[mean_bern * (mean_pois + 1)]]'], {}), '([[mean_bern * (mean_pois + 1)]])\n', (10710, 10743), True, 'import numpy as np\n'), ((11568, 11585), 'numpy.ones', 'np.ones', (['[nsamps]'], {}), '([nsamps])\n', (11575, 11585), True, 'import numpy as np\n'), ((12004, 12024), 'numpy.ones', 'np.ones', (['[nsamps, k]'], {}), '([nsamps, k])\n', (12011, 12024), True, 'import numpy as np\n'), ((12323, 12343), 'numpy.ones', 'np.ones', (['[nsamps, k]'], {}), '([nsamps, k])\n', (12330, 12343), True, 'import numpy as np\n'), ((12693, 12713), 'numpy.ones', 'np.ones', (['[nsamps, k]'], {}), '([nsamps, k])\n', (12700, 12713), True, 'import numpy as np\n'), ((13681, 13701), 'numpy.ones', 'np.ones', (['[nsamps, k]'], {}), '([nsamps, k])\n', (13688, 13701), True, 'import numpy as np\n'), ((5835, 5846), 'numpy.sqrt', 'np.sqrt', (['qt'], {}), '(qt)\n', (5842, 5846), True, 'import numpy as np\n'), ((5973, 5994), 'scipy.special.expit', 'expit', (['fcast_logit_lb'], {}), '(fcast_logit_lb)\n', (5978, 5994), False, 'from scipy.special import expit\n'), ((8733, 8750), 'numpy.ones', 'np.ones', (['[nsamps]'], {}), '([nsamps])\n', (8740, 8750), True, 'import numpy as np\n'), ((10051, 10068), 'numpy.ones', 'np.ones', (['[nsamps]'], {}), '([nsamps])\n', (10058, 10068), True, 'import numpy as np\n')] |
import time
import curses
import sys
import os
import multiprocessing as mp
import pandas as pd
import numpy as np
import emcee
import h5py
from radvel import utils
import radvel
class StateVars(object):
    """Shared mutable progress state for the MCMC status/convergence helpers."""

    def __init__(self):
        self.oac = 0
        self.proceed_started = 0
        # Running history of autocorrelation diagnostics, appended to
        # as convergence checks complete.
        for attr in ('autosamples', 'automean', 'automin', 'automax'):
            setattr(self, attr, [])

    def reset(self):
        """Restore every attribute to its freshly-constructed value."""
        self.__init__()
# Module-level singleton: shared progress/convergence state, mutated in place
# by convergence_check() and the status-message helpers below.
statevars = StateVars()
def isnotebook():
    """Return True when running inside a Jupyter notebook or qtconsole.

    Detection relies on the ``get_ipython`` builtin injected by IPython;
    in a plain interpreter the name does not exist, so the NameError
    branch reports False.
    """
    try:
        shell = get_ipython().__class__.__name__
    except NameError:
        return False  # standard Python interpreter: no IPython shell at all
    # Only the ZMQ-based shell corresponds to a notebook/qtconsole frontend;
    # 'TerminalInteractiveShell' (IPython in a terminal) and any other shell
    # class are treated as non-notebook.  This collapses the original
    # elif/else branches that both returned False.
    return shell == 'ZMQInteractiveShell'
def _closescr():
    """Best-effort teardown of the curses screen (CLI mode only).

    In notebooks there is no curses screen to restore. ``curses.endwin``
    raises ``curses.error`` when no screen was ever initialised; that is
    expected and ignored.  The original bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit, so it is narrowed to ``curses.error``.
    """
    if not isnotebook():
        try:
            curses.endwin()
        except curses.error:
            pass  # no active screen -- nothing to restore
def _progress_bar(step, totsteps, width=50):
fltot = float(totsteps)
numsym = int(np.round(width * (step / fltot)))
bar = ''.join(["=" for s in range(numsym)])
bar += ''.join([" " for s in range(width - numsym)])
msg = "[" + bar + "]"
return(msg)
def _status_message_NB(statevars):
msg1 = (
"{:d}/{:d} ({:3.1f}%) steps complete; "
"Running {:.2f} steps/s; Mean acceptance rate = {:3.1f}%; "
"Min Auto Factor = {:3.0f}; Max Auto Relative-Change = {:5.3}; "
"Min Tz = {:.1f}; Max G-R = {:5.3f}\r"
).format(statevars.ncomplete, statevars.totsteps, statevars.pcomplete, statevars.rate, statevars.ar,
statevars.minafactor, statevars.maxarchange, statevars.mintz, statevars.maxgr)
sys.stdout.write(msg1)
sys.stdout.flush()
def _status_message_CLI(statevars):
    """Render the progress/convergence summary on a curses screen (terminal mode).

    Re-initialises the curses screen on every call, draws a progress bar
    plus the same statistics reported by _status_message_NB, and refreshes.
    The screen handle is stored on statevars.screen so _closescr() can
    tear it down later.
    """
    statevars.screen = curses.initscr()
    statevars.screen.clear()
    # Textual bar reflecting ncomplete/totsteps.
    barline = _progress_bar(statevars.ncomplete, statevars.totsteps)
    msg1 = (
        barline + " {:d}/{:d} ({:3.1f}%) steps complete; "
    ).format(statevars.ncomplete, statevars.totsteps, statevars.pcomplete)
    msg2 = (
        "Running {:.2f} steps/s; Mean acceptance rate = {:3.1f}%; "
        "Min Auto Factor = {:3.0f}; \nMax Auto Relative-Change = {:5.3}; "
        "Min Tz = {:.1f}; Max G-R = {:5.3f}\n"
    ).format(statevars.rate, statevars.ar, statevars.minafactor, statevars.maxarchange,
             statevars.mintz, statevars.maxgr)
    statevars.screen.addstr(0, 0, msg1+ '\n' + msg2)
    statevars.screen.refresh()
def convergence_check(minAfactor, maxArchange, maxGR, minTz, minsteps, minpercent):
    """Check for convergence

    Check for convergence for a list of emcee samplers

    Relies on the module-level ``statevars`` singleton being pre-populated
    (samplers, nburn, ensembles, totsteps, checkinterval, nwalkers,
    interval, burn_complete, mixcount) and updates its diagnostic fields
    in place before printing a status message.

    Args:
        minAfactor (float): Minimum autocorrelation time factor for chains to be deemed well-mixed and halt the MCMC run
        maxArchange (float): Maximum relative change in the autocorrelative time to be deemed well-mixed and
            halt the MCMC run
        maxGR (float): Maximum G-R statistic for chains to be deemed well-mixed and halt the MCMC run
        minTz (int): Minimum Tz to consider well-mixed
        minsteps (int): Minimum number of steps per walker before convergence tests are performed. Convergence checks
            will start after the minsteps threshold or the minpercent threshold has been hit.
        minpercent (float): Minimum percentage of total steps before convergence tests are performed. Convergence checks
            will start after the minsteps threshold or the minpercent threshold has been hit.
    """

    # Pool completed-step counts, acceptance rates, chains and log-probs
    # across all ensembles.
    statevars.ar = 0
    statevars.ncomplete = statevars.nburn
    statevars.lnprob = []
    statevars.autocorrelation = []
    statevars.chains = []
    for i,sampler in enumerate(statevars.samplers):
        statevars.ncomplete += sampler.get_log_prob(flat=True).shape[0]
        statevars.ar += sampler.acceptance_fraction.mean() * 100
        statevars.chains.append(sampler.get_chain()[:,:,:].T)
        statevars.lnprob.append(sampler.get_log_prob(flat=True))

    # NOTE: 'sampler' below is the loop variable leaking out of the for
    # loop above, i.e. the last ensemble's sampler.
    statevars.ar /= statevars.ensembles
    statevars.pcomplete = statevars.ncomplete/float(statevars.totsteps) * 100
    statevars.rate = (statevars.checkinterval*statevars.nwalkers*statevars.ensembles) / statevars.interval

    if statevars.ensembles < 3:
        # if less than 3 ensembles then GR between ensembles does
        # not work so just calculate it on the last sampler
        statevars.tchains = sampler.chain.transpose()

    # Must have completed at least 5% or minsteps steps per walker before
    # attempting to calculate GR
    if statevars.pcomplete < minpercent and sampler.get_log_prob(flat=True).shape[0] <= minsteps*statevars.nwalkers:
        # Too early to test: report sentinel diagnostic values.
        (statevars.ismixed, statevars.minafactor, statevars.maxarchange, statevars.maxgr,
         statevars.mintz) = 0, -1.0, np.inf, np.inf, -1.0
    else:
        (statevars.ismixed, afactor, archange, oac, gr, tz) \
            = convergence_calculate(statevars.chains,
                                    oldautocorrelation=statevars.oac, minAfactor=minAfactor, maxArchange=maxArchange,
                                    maxGR=maxGR, minTz=minTz)
        # Collapse per-parameter diagnostics to the worst-case scalars
        # reported in the status line.
        statevars.mintz = min(tz)
        statevars.maxgr = max(gr)
        statevars.minafactor = np.amin(afactor)
        statevars.maxarchange = np.amax(archange)
        statevars.oac = oac
        if statevars.burn_complete:
            # Record the autocorrelation history for post-run plotting.
            statevars.autosamples.append(len(statevars.chains)*statevars.chains[0].shape[2])
            statevars.automean.append(np.mean(statevars.oac))
            statevars.automin.append(np.amin(statevars.oac))
            statevars.automax.append(np.amax(statevars.oac))

    # Require consecutive passing checks: reset the counter on any failure.
    if statevars.ismixed:
        statevars.mixcount += 1
    else:
        statevars.mixcount = 0

    if isnotebook():
        _status_message_NB(statevars)
    else:
        _status_message_CLI(statevars)
def _domcmc(input_tuple):
"""Function to be run in parallel on different CPUs
Input is a tuple: first element is an emcee sampler object, second is an array of
initial positions, third is number of steps to run before doing a convergence check
"""
sampler = input_tuple[0]
ipos = input_tuple[1]
check_interval = input_tuple[2]
sampler.run_mcmc(ipos, check_interval)
return sampler
def mcmc(post, nwalkers=50, nrun=10000, ensembles=8, checkinterval=50, minAfactor=40, maxArchange=.03, burnAfactor=25,
         burnGR=1.03, maxGR=1.01, minTz=1000, minsteps=1000, minpercent=5, thin=1, serial=False, save=False,
         savename=None, proceed=False, proceedname=None):
    """Run MCMC
    Run MCMC chains using the emcee EnsambleSampler
    Args:
        post (radvel.posterior): radvel posterior object
        nwalkers (int): (optional) number of MCMC walkers
        nrun (int): (optional) number of steps to take
        ensembles (int): (optional) number of ensembles to run. Will be run
            in parallel on separate CPUs
        checkinterval (int): (optional) check MCMC convergence statistics every
            `checkinterval` steps
        minAfactor (float): Minimum autocorrelation time factor to deem chains as well-mixed and halt the MCMC run
        maxArchange (float): Maximum relative change in autocorrelation time to deem chains and well-mixed
        burnAfactor (float): Minimum autocorrelation time factor to stop burn-in period. Burn-in ends once burnGr
            or burnAfactor are reached.
        burnGR (float): (optional) Maximum G-R statistic to stop burn-in period. Burn-in ends once burnGr or
            burnAfactor are reached.
        maxGR (float): (optional) Maximum G-R statistic for chains to be deemed well-mixed and halt the MCMC run
        minTz (int): (optional) Minimum Tz to consider well-mixed
        minsteps (int): Minimum number of steps per walker before convergence tests are performed. Convergence checks
            will start after the minsteps threshold or the minpercent threshold has been hit.
        minpercent (float): Minimum percentage of total steps before convergence tests are performed. Convergence checks
            will start after the minsteps threshold or the minpercent threshold has been hit.
        thin (int): (optional) save one sample every N steps (default=1, save every sample)
        serial (bool): set to true if MCMC should be run in serial
        save (bool): set to true to save MCMC chains that can be continued in a future run
        savename (string): location of h5py file where MCMC chains will be saved for future use
        proceed (bool): set to true to continue a previously saved run
        proceedname (string): location of h5py file with previously MCMC run chains
    Returns:
        DataFrame: DataFrame containing the MCMC samples
    """
    # The whole run lives in one try so Ctrl-C can restore the terminal
    # (curses) instead of leaving it in a broken state.
    try:
        if save and savename is None:
            raise ValueError('save set to true but no savename provided')
        if save:
            h5f = h5py.File(savename, 'a')
        if proceed:
            if proceedname is None:
                raise ValueError('proceed set to true but no proceedname provided')
            else:
                # NOTE(review): opens `savename`, not `proceedname` — looks like
                # resuming assumes both point at the same file; confirm intent.
                h5p = h5py.File(savename, 'r')
                msg = 'Loading chains and run information from previous MCMC'
                print(msg)
                # Restore per-ensemble chains plus the convergence bookkeeping
                # that was written by the `save` branch of a previous run.
                statevars.prechains = []
                statevars.prelog_probs = []
                statevars.preaccepted = []
                statevars.preburned = h5p['burned'][0]
                statevars.minafactor = h5p['crit'][0]
                statevars.maxarchange = h5p['crit'][1]
                statevars.mintz = h5p['crit'][2]
                statevars.maxgr = h5p['crit'][3]
                statevars.autosamples = list(h5p['autosample'])
                statevars.automin = list(h5p['automin'])
                statevars.automean = list(h5p['automean'])
                statevars.automax = list(h5p['automax'])
                # 6 bookkeeping datasets + 3 datasets per ensemble.
                for i in range(0,int((len(h5p.keys()) - 6)/3)):
                    str_chain = str(i) + '_chain'
                    str_log_prob = str(i) + '_log_prob'
                    str_accepted = str(i) + '_accepted'
                    statevars.prechains.append(h5p[str_chain])
                    statevars.prelog_probs.append(h5p[str_log_prob])
                    statevars.preaccepted.append(h5p[str_accepted])
        # check if one or more likelihoods are GPs
        if isinstance(post.likelihood, radvel.likelihood.CompositeLikelihood):
            check_gp = [like for like in post.likelihood.like_list if isinstance(like, radvel.likelihood.GPLikelihood)]
        else:
            check_gp = isinstance(post.likelihood, radvel.likelihood.GPLikelihood)
        # GP likelihoods deadlock under multiprocessing with Apple's
        # Accelerate BLAS, so fall back to serial execution in that case.
        np_info = np.__config__.blas_opt_info
        if 'extra_link_args' in np_info.keys() \
                and check_gp \
                and ('-Wl,Accelerate' in np_info['extra_link_args']) \
                and serial == False:
            print("WARNING: Parallel processing with Gaussian Processes will not work with your current"
                  + " numpy installation. See radvel.readthedocs.io/en/latest/OSX-multiprocessing.html"
                  + " for more details. Running in serial with " + str(ensembles) + " ensembles.")
            serial = True
        statevars.ensembles = ensembles
        statevars.nwalkers = nwalkers
        # One step is consumed by the initial state, hence the -1 — presumably;
        # `num_run` below uses the same (checkinterval - 1) convention.
        statevars.checkinterval = checkinterval - 1
        nrun = int(nrun)
        # Get an initial array value
        pi = post.get_vary_params()
        statevars.ndim = pi.size
        # emcee requires at least 2*ndim walkers for the stretch move.
        if nwalkers < 2 * statevars.ndim:
            print("WARNING: Number of walkers is less than 2 times number of free parameters. " +
                  "Adjusting number of walkers to {}".format(2 * statevars.ndim))
            statevars.nwalkers = 2 * statevars.ndim
        if proceed:
            # A resumed run must match the saved geometry exactly.
            if len(h5p.keys()) != (3 * statevars.ensembles + 6) or h5p['0_chain'].shape[2] != statevars.ndim \
                    or h5p['0_chain'].shape[1] != statevars.nwalkers:
                raise ValueError('nensembles, nwalkers, and the number of ' +
                                 'parameters must be equal to those from previous run.')
        # set up perturbation size
        pscales = []
        for par in post.list_vary_params():
            val = post.params[par].value
            if post.params[par].mcmcscale is None:
                # Heuristic per-parameter scatter for the initial walker ball.
                if par.startswith('per'):
                    pscale = np.abs(val * 1e-5*np.log10(val))
                elif par.startswith('logper'):
                    pscale = np.abs(1e-5 * val)
                elif par.startswith('tc'):
                    pscale = 0.1
                else:
                    pscale = np.abs(0.10 * val)
                # NOTE(review): writes `mcmc_scale` but reads `mcmcscale` —
                # two different attributes; confirm which one is canonical.
                post.params[par].mcmc_scale = pscale
            else:
                pscale = post.params[par].mcmcscale
            pscales.append(pscale)
        pscales = np.array(pscales)
        statevars.samplers = []
        statevars.samples = []
        statevars.initial_positions = []
        for e in range(ensembles):
            pi = post.get_vary_params()
            # Walker ball: best-fit vector plus uniform jitter scaled by pscales.
            p0 = np.vstack([pi]*statevars.nwalkers)
            p0 += [np.random.rand(statevars.ndim)*pscales for i in range(statevars.nwalkers)]
            if not proceed:
                statevars.initial_positions.append(p0)
            else:
                # Resume from the last stored step of the saved chain.
                # NOTE(review): indexes `i` (leaked from an earlier loop), not
                # `e` — looks like a latent bug; confirm against upstream.
                statevars.initial_positions.append(statevars.prechains[i][-1, :, :])
            statevars.samplers.append(emcee.EnsembleSampler(statevars.nwalkers, statevars.ndim, post.logprob_array,
                                                        threads=1))
        if proceed:
            # Rehydrate each sampler's backend with the saved history so
            # iteration counts and acceptance stats continue seamlessly.
            for i, sampler in enumerate(statevars.samplers):
                sampler.backend.grow(statevars.prechains[i].shape[0], None)
                sampler.backend.chain = statevars.prechains[i]
                sampler.backend.log_prob = statevars.prelog_probs[i]
                sampler.backend.accepted = statevars.preaccepted[i]
                sampler.backend.iteration = statevars.prechains[i].shape[0]
        # Number of run/check cycles needed to reach nrun steps per walker.
        num_run = int(np.round(nrun / (checkinterval -1)))
        statevars.totsteps = nrun*statevars.nwalkers*statevars.ensembles
        statevars.mixcount = 0
        statevars.ismixed = 0
        if proceed and statevars.preburned != 0:
            statevars.burn_complete = True
            statevars.nburn = statevars.preburned
        else:
            statevars.burn_complete = False
            statevars.nburn = 0
        statevars.ncomplete = statevars.nburn
        statevars.pcomplete = 0
        statevars.rate = 0
        statevars.ar = 0
        # NOTE(review): these mixed-case names differ from the lowercase
        # `minafactor`/`maxarchange` written by the convergence check — they
        # appear to be distinct attributes; confirm both are intended.
        statevars.minAfactor = -1
        statevars.maxArchange = np.inf
        statevars.mintz = -1
        statevars.maxgr = np.inf
        statevars.t0 = time.time()
        for r in range(num_run):
            t1 = time.time()
            # Build one (sampler, start, nsteps) tuple per ensemble for _domcmc.
            mcmc_input_array = []
            for i, sampler in enumerate(statevars.samplers):
                if sampler.iteration <= 1 or statevars.proceed_started == 0:
                    p1 = statevars.initial_positions[i]
                    statevars.proceed_started = 1
                else:
                    p1 = sampler.get_last_sample()
                # One stored sample seeds the backend before the batched run.
                for sample in sampler.sample(p1, store=True):
                    mcmc_input = (sampler, p1, (checkinterval - 1))
                    mcmc_input_array.append(mcmc_input)
            if serial:
                statevars.samplers = []
                for i in range(ensembles):
                    result = _domcmc(mcmc_input_array[i])
                    statevars.samplers.append(result)
            else:
                # Each ensemble advances independently on its own CPU.
                pool = mp.Pool(statevars.ensembles)
                statevars.samplers = pool.map(_domcmc, mcmc_input_array)
                pool.close()  # terminates worker processes once all work is done
                pool.join()  # waits for all processes to finish before proceeding
            t2 = time.time()
            statevars.interval = t2 - t1
            convergence_check(minAfactor=minAfactor, maxArchange=maxArchange, maxGR=maxGR, minTz=minTz,
                              minsteps=minsteps, minpercent=minpercent)
            if save:
                # Overwrite the checkpoint datasets each cycle: delete any
                # stale copies, then re-create them from the live samplers.
                for i, sampler in enumerate(statevars.samplers):
                    str_chain = str(i) + '_chain'
                    str_log_prob = str(i) + '_log_prob'
                    str_accepted = str(i) + '_accepted'
                    if str_chain in h5f.keys():
                        del h5f[str_chain]
                    if str_log_prob in h5f.keys():
                        del h5f[str_log_prob]
                    if str_accepted in h5f.keys():
                        del h5f[str_accepted]
                    if 'crit' in h5f.keys():
                        del h5f['crit']
                    if 'autosample' in h5f.keys():
                        del h5f['autosample']
                    if 'automin' in h5f.keys():
                        del h5f['automin']
                    if 'automean' in h5f.keys():
                        del h5f['automean']
                    if 'automax' in h5f.keys():
                        del h5f['automax']
                    if 'burned' in h5f.keys():
                        del h5f['burned']
                    h5f.create_dataset(str_chain, data=sampler.get_chain())
                    h5f.create_dataset(str_log_prob, data=sampler.get_log_prob())
                    h5f.create_dataset(str_accepted, data=sampler.backend.accepted)
                h5f.create_dataset('crit', data=[statevars.minafactor, statevars.maxarchange, statevars.mintz,
                                                 statevars.maxgr])
                h5f.create_dataset('autosample', data=statevars.autosamples)
                h5f.create_dataset('automin', data=statevars.automin)
                h5f.create_dataset('automean', data=statevars.automean)
                h5f.create_dataset('automax', data=statevars.automax)
                if statevars.burn_complete==True:
                    h5f.create_dataset('burned', data=[statevars.nburn])
                else:
                    h5f.create_dataset('burned', data=[0])
            # Burn-in complete after maximum G-R statistic first reaches burnGR or minAfactor reaches burnAfactor
            # reset samplers
            if not statevars.burn_complete and (statevars.maxgr <= burnGR or burnAfactor <= statevars.minafactor):
                for i, sampler in enumerate(statevars.samplers):
                    statevars.initial_positions[i] = sampler.get_last_sample()
                    sampler.reset()
                    statevars.samplers[i] = sampler
                msg = (
                    "\nDiscarding burn-in now that the chains are marginally "
                    "well-mixed\n"
                )
                print(msg)
                statevars.nburn = statevars.ncomplete
                statevars.burn_complete = True
            # Require two consecutive passing convergence checks before halting.
            if statevars.mixcount >= 2:
                tf = time.time()
                tdiff = tf - statevars.t0
                tdiff,units = utils.time_print(tdiff)
                msg = (
                    "\nChains are well-mixed after {:d} steps! MCMC completed in "
                    "{:3.1f} {:s}"
                ).format(statevars.ncomplete, tdiff, units)
                _closescr()
                print(msg)
                break
        print("\n")
        if statevars.ismixed and statevars.mixcount < 2:
            msg = (
                "MCMC: WARNING: chains did not pass 2 consecutive convergence "
                "tests. They may be marginally well=mixed."
            )
            _closescr()
            print(msg)
        elif not statevars.ismixed:
            msg = (
                "MCMC: WARNING: chains did not pass convergence tests. They are "
                "likely not well-mixed."
            )
            _closescr()
            print(msg)
        # Flatten (ensembles x walkers x steps) into one row per sample.
        preshaped = np.dstack(statevars.chains)
        df = pd.DataFrame(
            preshaped.reshape(preshaped.shape[0], preshaped.shape[1]*preshaped.shape[2]).transpose(),
            columns=post.list_vary_params())
        df['lnprobability'] = np.hstack(statevars.lnprob)
        df = df.iloc[::thin]
        statevars.factor = [minAfactor] * len(statevars.autosamples)
        return df
    except KeyboardInterrupt:
        # Restore the terminal that the curses status display took over.
        curses.endwin()
def convergence_calculate(chains, oldautocorrelation, minAfactor, maxArchange, minTz, maxGR):
    """Calculate Convergence Criterion
    Calculates the Gelman-Rubin statistic, autocorrelation time factor,
    relative change in autocorrellation time, and the number of
    independent draws for each parameter, as defined by Ford et
    al. (2006) (http://adsabs.harvard.edu/abs/2006ApJ...642..505F).
    The chain is considered well-mixed if all parameters have a
    Gelman-Rubin statistic of <= maxGR, the min autocorrelation time
    factor >= minAfactor, a max relative change in autocorrelation
    time <= maxArchange, and >= minTz independent draws.
    Args:
        chains (array): A 3 dimensional array of parameter values
        oldautocorrelation (float): previously calculated autocorrelation time
        minAfactor (float): minimum autocorrelation
            time factor to consider well-mixed
        maxArchange (float): maximum relative change in
            autocorrelation time to consider well-mixed
        minTz (int): minimum Tz to consider well-mixed
        maxGR (float): maximum Gelman-Rubin statistic to
            consider well-mixed
    Returns:
        tuple: tuple containing:
            ismixed (bool):
                Are the chains well-mixed?
            afactor (array):
                A matrix containing the
                autocorrelation time factor for each parameter and ensemble combination
            archange (matrix):
                A matrix containing the relative
                change in the autocorrelation time factor for each parameter and ensemble combination
            autocorrelation (matrix):
                A matrix containing the autocorrelation time for each parameter and ensemble combination
            gelmanrubin (array):
                An NPARS element array containing the
                Gelman-Rubin statistic for each parameter (equation
                25)
            Tz (array):
                An NPARS element array containing the number
                of independent draws for each parameter (equation 26)
    History:
        2010/03/01:
            Written: <NAME> - The Ohio State University
        2012/10/08:
            Ported to Python by <NAME> - University of Hawaii,
            Institute for Astronomy
        2016/04/20:
            Adapted for use in RadVel. Removed "angular" parameter.
        2019/10/24:
            Adapted to calculate and consider autocorrelation times
    """
    # Flatten each ensemble's (npars, nwalkers, nsteps) chain to 2-D before
    # stacking, so the G-R statistic treats all walkers of an ensemble as one
    # long chain. Shallow list copy is enough: elements are rebound, not mutated.
    gr_chains = chains.copy()
    for i in range(0, len(chains)):
        gr_chains[i] = gr_chains[i].reshape(gr_chains[i].shape[0], gr_chains[i].shape[1]*gr_chains[i].shape[2])
    pars = np.dstack(gr_chains)
    sz = pars.shape
    msg = 'MCMC: GELMAN_RUBIN: ERROR: pars must have 3 dimensions'
    assert pars.ndim == 3, msg
    nsteps = float(sz[1])
    nchains = float(sz[2])
    msg = 'MCMC: GELMAN_RUBIN: ERROR: NSTEPS must be greater than 1'
    assert nsteps > 1, msg
    # Equation 21: W(z) in Ford 2006
    variances = np.var(pars, axis=1, dtype=np.float64)
    withinChainVariances = np.mean(variances, axis=1)
    # Equation 23: B(z) in Ford 2006
    means = np.mean(pars, axis=1)
    betweenChainVariances = np.var(means, axis=1, dtype=np.float64) * nsteps
    varianceofmeans = np.var(means, axis=1, dtype=np.float64) / (nchains-1)
    varEstimate = (
        (1.0 - 1.0/nsteps) * withinChainVariances
        + 1.0 / nsteps * betweenChainVariances
    )
    bz = varianceofmeans * nsteps
    # Equation 25: Rhat(z) in Ford 2006
    gelmanrubin = np.sqrt(varEstimate/withinChainVariances)
    # Equation 26: T(z) in Ford 2006
    vbz = varEstimate / bz
    tz = nchains*nsteps*vbz[vbz < 1]
    if tz.size == 0:
        tz = [-1]  # sentinel: no parameter passed the vbz < 1 cut
    # Autocorrelation criteria work on the raw (unflattened) per-ensemble
    # chains, reordered to emcee's expected (nsteps, nwalkers, npars) layout.
    chains = np.dstack(chains)
    chains = np.swapaxes(chains, 0, 2)
    autocorrelation = emcee.autocorr.integrated_time(chains, tol=0)
    # Factor = chain length / integrated autocorrelation time.
    afactor = np.divide(chains.shape[0], autocorrelation)
    archange = np.divide(np.abs(np.subtract(autocorrelation, oldautocorrelation)), oldautocorrelation)
    # well-mixed criteria: every statistic must clear its threshold
    ismixed = min(tz) > minTz and max(gelmanrubin) < maxGR and \
        np.amin(afactor) > minAfactor and np.amax(archange) < maxArchange
    return (ismixed, afactor, archange, autocorrelation, gelmanrubin, tz)
| [
"numpy.log10",
"numpy.sqrt",
"numpy.random.rand",
"numpy.hstack",
"curses.endwin",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.divide",
"numpy.mean",
"numpy.subtract",
"radvel.utils.time_print",
"numpy.vstack",
"sys.stdout.flush",
"numpy.round",
"numpy.abs",
"numpy.amin",
"h5py.Fi... | [((1783, 1805), 'sys.stdout.write', 'sys.stdout.write', (['msg1'], {}), '(msg1)\n', (1799, 1805), False, 'import sys\n'), ((1810, 1828), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1826, 1828), False, 'import sys\n'), ((1891, 1907), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (1905, 1907), False, 'import curses\n'), ((22995, 23015), 'numpy.dstack', 'np.dstack', (['gr_chains'], {}), '(gr_chains)\n', (23004, 23015), True, 'import numpy as np\n'), ((23365, 23403), 'numpy.var', 'np.var', (['pars'], {'axis': '(1)', 'dtype': 'np.float64'}), '(pars, axis=1, dtype=np.float64)\n', (23371, 23403), True, 'import numpy as np\n'), ((23425, 23451), 'numpy.mean', 'np.mean', (['variances'], {'axis': '(1)'}), '(variances, axis=1)\n', (23432, 23451), True, 'import numpy as np\n'), ((23478, 23504), 'numpy.mean', 'np.mean', (['variances'], {'axis': '(1)'}), '(variances, axis=1)\n', (23485, 23504), True, 'import numpy as np\n'), ((23555, 23576), 'numpy.mean', 'np.mean', (['pars'], {'axis': '(1)'}), '(pars, axis=1)\n', (23562, 23576), True, 'import numpy as np\n'), ((24033, 24076), 'numpy.sqrt', 'np.sqrt', (['(varEstimate / withinChainVariances)'], {}), '(varEstimate / withinChainVariances)\n', (24040, 24076), True, 'import numpy as np\n'), ((24230, 24247), 'numpy.dstack', 'np.dstack', (['chains'], {}), '(chains)\n', (24239, 24247), True, 'import numpy as np\n'), ((24261, 24286), 'numpy.swapaxes', 'np.swapaxes', (['chains', '(0)', '(2)'], {}), '(chains, 0, 2)\n', (24272, 24286), True, 'import numpy as np\n'), ((24310, 24355), 'emcee.autocorr.integrated_time', 'emcee.autocorr.integrated_time', (['chains'], {'tol': '(0)'}), '(chains, tol=0)\n', (24340, 24355), False, 'import emcee\n'), ((24373, 24416), 'numpy.divide', 'np.divide', (['chains.shape[0]', 'autocorrelation'], {}), '(chains.shape[0], autocorrelation)\n', (24382, 24416), True, 'import numpy as np\n'), ((1110, 1142), 'numpy.round', 'np.round', (['(width * (step / fltot))'], {}), '(width * 
(step / fltot))\n', (1118, 1142), True, 'import numpy as np\n'), ((5306, 5322), 'numpy.amin', 'np.amin', (['afactor'], {}), '(afactor)\n', (5313, 5322), True, 'import numpy as np\n'), ((5355, 5372), 'numpy.amax', 'np.amax', (['archange'], {}), '(archange)\n', (5362, 5372), True, 'import numpy as np\n'), ((12830, 12847), 'numpy.array', 'np.array', (['pscales'], {}), '(pscales)\n', (12838, 12847), True, 'import numpy as np\n'), ((14696, 14707), 'time.time', 'time.time', ([], {}), '()\n', (14705, 14707), False, 'import time\n'), ((19901, 19928), 'numpy.dstack', 'np.dstack', (['statevars.chains'], {}), '(statevars.chains)\n', (19910, 19928), True, 'import numpy as np\n'), ((20133, 20160), 'numpy.hstack', 'np.hstack', (['statevars.lnprob'], {}), '(statevars.lnprob)\n', (20142, 20160), True, 'import numpy as np\n'), ((23604, 23643), 'numpy.var', 'np.var', (['means'], {'axis': '(1)', 'dtype': 'np.float64'}), '(means, axis=1, dtype=np.float64)\n', (23610, 23643), True, 'import numpy as np\n'), ((23674, 23713), 'numpy.var', 'np.var', (['means'], {'axis': '(1)', 'dtype': 'np.float64'}), '(means, axis=1, dtype=np.float64)\n', (23680, 23713), True, 'import numpy as np\n'), ((968, 983), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (981, 983), False, 'import curses\n'), ((8992, 9016), 'h5py.File', 'h5py.File', (['savename', '"""a"""'], {}), "(savename, 'a')\n", (9001, 9016), False, 'import h5py\n'), ((13045, 13081), 'numpy.vstack', 'np.vstack', (['([pi] * statevars.nwalkers)'], {}), '([pi] * statevars.nwalkers)\n', (13054, 13081), True, 'import numpy as np\n'), ((14005, 14041), 'numpy.round', 'np.round', (['(nrun / (checkinterval - 1))'], {}), '(nrun / (checkinterval - 1))\n', (14013, 14041), True, 'import numpy as np\n'), ((14759, 14770), 'time.time', 'time.time', ([], {}), '()\n', (14768, 14770), False, 'import time\n'), ((15854, 15865), 'time.time', 'time.time', ([], {}), '()\n', (15863, 15865), False, 'import time\n'), ((20318, 20333), 'curses.endwin', 
'curses.endwin', ([], {}), '()\n', (20331, 20333), False, 'import curses\n'), ((24450, 24498), 'numpy.subtract', 'np.subtract', (['autocorrelation', 'oldautocorrelation'], {}), '(autocorrelation, oldautocorrelation)\n', (24461, 24498), True, 'import numpy as np\n'), ((24627, 24643), 'numpy.amin', 'np.amin', (['afactor'], {}), '(afactor)\n', (24634, 24643), True, 'import numpy as np\n'), ((24661, 24678), 'numpy.amax', 'np.amax', (['archange'], {}), '(archange)\n', (24668, 24678), True, 'import numpy as np\n'), ((5569, 5591), 'numpy.mean', 'np.mean', (['statevars.oac'], {}), '(statevars.oac)\n', (5576, 5591), True, 'import numpy as np\n'), ((5630, 5652), 'numpy.amin', 'np.amin', (['statevars.oac'], {}), '(statevars.oac)\n', (5637, 5652), True, 'import numpy as np\n'), ((5691, 5713), 'numpy.amax', 'np.amax', (['statevars.oac'], {}), '(statevars.oac)\n', (5698, 5713), True, 'import numpy as np\n'), ((9198, 9222), 'h5py.File', 'h5py.File', (['savename', '"""r"""'], {}), "(savename, 'r')\n", (9207, 9222), False, 'import h5py\n'), ((13398, 13491), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['statevars.nwalkers', 'statevars.ndim', 'post.logprob_array'], {'threads': '(1)'}), '(statevars.nwalkers, statevars.ndim, post.\n logprob_array, threads=1)\n', (13419, 13491), False, 'import emcee\n'), ((15568, 15596), 'multiprocessing.Pool', 'mp.Pool', (['statevars.ensembles'], {}), '(statevars.ensembles)\n', (15575, 15596), True, 'import multiprocessing as mp\n'), ((18954, 18965), 'time.time', 'time.time', ([], {}), '()\n', (18963, 18965), False, 'import time\n'), ((19038, 19061), 'radvel.utils.time_print', 'utils.time_print', (['tdiff'], {}), '(tdiff)\n', (19054, 19061), False, 'from radvel import utils\n'), ((13099, 13129), 'numpy.random.rand', 'np.random.rand', (['statevars.ndim'], {}), '(statevars.ndim)\n', (13113, 13129), True, 'import numpy as np\n'), ((12489, 12508), 'numpy.abs', 'np.abs', (['(1e-05 * val)'], {}), '(1e-05 * val)\n', (12495, 12508), True, 'import numpy 
as np\n'), ((12398, 12411), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (12406, 12411), True, 'import numpy as np\n'), ((12635, 12652), 'numpy.abs', 'np.abs', (['(0.1 * val)'], {}), '(0.1 * val)\n', (12641, 12652), True, 'import numpy as np\n')] |
import string
import re
import numpy as np
import pandas as pd
import json
import pickle
import nltk
from nltk.stem import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.tag import pos_tag
from nltk.tag import str2tuple
from nltk.chunk import ne_chunk
import spacy
from spacy import displacy
from gensim import corpora, models, similarities, matutils
from gensim.models.keyedvectors import KeyedVectors
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve,f1_score, fbeta_score
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
import tensorflow
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Input, load_model
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, concatenate
import keras.backend.tensorflow_backend as tb
# Keras/TensorFlow graph-scope workaround so the globally loaded model can be
# called from Flask request handlers — presumably needed because serving is
# single-process here (see `threaded=False` in the __main__ guard); confirm.
tb._SYMBOLIC_SCOPE.value = True
from flask import Flask,url_for,render_template,request
from flaskext.markdown import Markdown
# Scrollable container that wraps the displaCy-rendered entity HTML.
HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem">{}</div>"""
'''Load word to word encoding crosswalks'''
# Unpickle the word<->index lookup tables; exec() binds each pickle to a
# module-level name matching its file stem (word2idx, idx2word).
for file in ['word2idx','idx2word']:
    with open(f"{file}.pickle", "rb") as pfile:
        exec(f"{file} = pickle.load(pfile)")
'''Load created embeddings matrix'''
with open("bio_embeddings.pickle", "rb") as pfile:
    bio_embbeding_matrix = pickle.load(pfile)
'''Get pretrained BiLSTM modelmodel'''
# NOTE(review): `global` at module scope is a no-op — `model` is module-global
# regardless; this line can be removed without effect.
global model
model = tensorflow.keras.models.load_model('hospitalnotes_model.h5')
app = Flask(__name__)
Markdown(app)  # registers the markdown template filter on the app
@app.route('/')
def home():
    """Render and return the landing page template."""
    landing_page = render_template('home.html')
    return landing_page
@app.route('/extract',methods=['GET','POST'])
def extract():
    '''Function to intake raw hospital note text, process it and output HTML tagged text with problems, treatments and tests'''
    # --- Per-request string helpers ---------------------------------------
    stemmer = PorterStemmer()
    # Compiled once and reused for every line (the original recompiled the
    # pattern on each re.split call and never used this compiled copy).
    spaces = re.compile(r'\ +')
    stemfunc = lambda x: stemmer.stem(x).lower()
    # Normalize punctuation so each mark becomes its own whitespace-separated token.
    puncfix = lambda x: x.replace("’","").replace(":"," : ").replace("“"," “ ").replace("?”"," ? ” ").replace("?"," ? ").replace("!"," ! ").replace("."," . ").replace(","," , ")
    # --- Tokenize raw text -------------------------------------------------
    raw_text = request.form['rawtext']
    note_tokens_func = raw_text.split('\n')
    all_note_tokens_func = [spaces.split(puncfix(ln)) for ln in note_tokens_func]
    # --- Turn tokens into (word, POS) tuples; keep full and stemmed copies --
    data_func = []
    data_stem_func = []
    for i, line in enumerate(all_note_tokens_func):
        ln = []
        ln_stem = []
        for j, word in enumerate(line):
            if all_note_tokens_func[i][j] == '':
                all_note_tokens_func[i][j] = ' '
            # Tag once per token (the tag is computed from the unstemmed token
            # in both copies, so a single call preserves behavior).
            tag = pos_tag([all_note_tokens_func[i][j]])[0][1]
            ln.append((all_note_tokens_func[i][j], tag))
            ln_stem.append((stemfunc(all_note_tokens_func[i][j]), tag))
        data_func.append(ln)
        data_stem_func.append(ln_stem)
    # --- Index-encode the tokenized document -------------------------------
    document_idx = []
    for s in data_stem_func:
        line_idx = []
        for w in s:
            try:
                line_idx.append(word2idx[w[0]])
            except KeyError:
                # Out-of-vocabulary tokens map to the padding index.
                line_idx.append(word2idx['_padding'])
        document_idx.append(line_idx)
    # --- Pad each line to a uniform 60-token sequence ----------------------
    # 11555 - 2 is presumably the vocabulary's padding index — confirm against
    # the word2idx pickle used at training time.
    document_idx_eval = pad_sequences(maxlen=60, sequences=document_idx, padding="post", value=11555 - 2)
    document_idx_df = pd.DataFrame(document_idx).reset_index()
    document_idx_eval = np.array(document_idx_eval)
    # --- BiLSTM inference: one (word, predicted-tag-id) pair per token -----
    res_func_sentence = []
    for i, doc in enumerate(document_idx_eval):
        line_res = []
        p = model.predict(np.array([document_idx_eval[i]]))
        p_arg = np.argmax(p, axis=-1)
        for j, word in enumerate(document_idx_eval[i]):
            nn = []
            nn.append(idx2word[word])
            nn.append(p_arg[0][j])
            line_res.append(nn)
        res_func_sentence.append(line_res)
    # --- Recover the original (unstemmed) words for display ----------------
    f_display_text = []
    for i in list(document_idx_df['index']):
        words_only = []
        for item in (data_func[i][0:60]):
            words_only.append(item[0])
        f_display_text.append(words_only)
    # Collapse the model's tag ids to three entity labels (+ 'outside').
    idx2tag_simple = {0: 'problem', 1: 'test', 2: 'treatment', 3: 'problem', 4: 'test'
                      , 5: 'treatment', 6: 'outside'}
    # --- Compute character spans of each predicted tag in the joined line --
    f_tags_translate = []
    f_test_spans = []
    f_full_sents = []
    for i, line in enumerate(document_idx_eval):
        line_tags = []
        sent_pos = 0
        s = []
        for j, word in enumerate(f_display_text[i]):
            if word != '_padding':
                line_tags.append(idx2tag_simple[res_func_sentence[i][j][1]])
                length = len(word)
                if j == 0:
                    s.append((0, 0+length))
                    sent_pos += length
                else:
                    # +1 accounts for the single space inserted by ' '.join.
                    s.append((sent_pos+1, sent_pos+1+length))
                    sent_pos += length+1
        f_full_sents.append(' '.join(f_display_text[i]))
        f_tags_translate.append(line_tags)
        f_test_spans.append(s)
    # --- Link predicted entities to their character spans ------------------
    f_ents = []
    for i, line in enumerate(f_test_spans):
        sent_ents = []
        for j, span in enumerate(line):
            if f_tags_translate[i][j] != 'outside':
                sent_ents.append({'start': span[0], 'end': span[1], 'label': f_tags_translate[i][j]})
        f_ents.append(sent_ents)
    # --- Final sentence/entity structures in displaCy's manual format ------
    f_spacy_sentences = []
    for i, sent in enumerate(f_full_sents):
        newsent = [{'text': sent, 'ents': f_ents[i], 'title': None}]
        f_spacy_sentences.append(newsent)
    # --- Render HTML-tagged sentences with spaCy displaCy ------------------
    html_text = ''
    for sentence in f_spacy_sentences:
        html_text += displacy.render(sentence, style='ent', manual=True, options={'colors': {
            "PROBLEM": "#7aecec",
            "TREATMENT": "#bfeeb7",
            "TEST": "#aa9cfc"
        }})
    html_text = html_text.replace("\n\n", "\n")
    result = HTML_WRAPPER.format(html_text)
    return render_template('result.html', rawtext=raw_text, result=result)
if __name__ == '__main__':
    # threaded=False: the module-level Keras model / TF session is shared and
    # presumably not safe across Flask worker threads (see the
    # tb._SYMBOLIC_SCOPE workaround at import time) — confirm before enabling.
    app.run(debug=True, threaded=False)
"flask.render_template",
"flask.Flask",
"re.compile",
"nltk.tag.pos_tag",
"pickle.load",
"nltk.stem.PorterStemmer",
"numpy.argmax",
"spacy.displacy.render",
"numpy.array",
"tensorflow.keras.models.load_model",
"flaskext.markdown.Markdown",
"pandas.DataFrame",
"keras.preprocessing.sequence.pa... | [((1691, 1751), 'tensorflow.keras.models.load_model', 'tensorflow.keras.models.load_model', (['"""hospitalnotes_model.h5"""'], {}), "('hospitalnotes_model.h5')\n", (1725, 1751), False, 'import tensorflow\n'), ((1763, 1778), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1768, 1778), False, 'from flask import Flask, url_for, render_template, request\n'), ((1779, 1792), 'flaskext.markdown.Markdown', 'Markdown', (['app'], {}), '(app)\n', (1787, 1792), False, 'from flaskext.markdown import Markdown\n'), ((1611, 1629), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (1622, 1629), False, 'import pickle\n'), ((1833, 1861), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (1848, 1861), False, 'from flask import Flask, url_for, render_template, request\n'), ((2143, 2158), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2156, 2158), False, 'from nltk.stem import PorterStemmer\n'), ((2172, 2190), 're.compile', 're.compile', (['"""\\\\ +"""'], {}), "('\\\\ +')\n", (2182, 2190), False, 'import re\n'), ((3769, 3855), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', ([], {'maxlen': '(60)', 'sequences': 'document_idx', 'padding': '"""post"""', 'value': '(11555 - 2)'}), "(maxlen=60, sequences=document_idx, padding='post', value=\n 11555 - 2)\n", (3782, 3855), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3935, 3962), 'numpy.array', 'np.array', (['document_idx_eval'], {}), '(document_idx_eval)\n', (3943, 3962), True, 'import numpy as np\n'), ((7470, 7533), 'flask.render_template', 'render_template', (['"""result.html"""'], {'rawtext': 'raw_text', 'result': 'result'}), "('result.html', rawtext=raw_text, result=result)\n", (7485, 7533), False, 'from flask import Flask, url_for, render_template, request\n'), ((4239, 4260), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(-1)'}), '(p, axis=-1)\n', (4248, 4260), True, 'import numpy 
as np\n'), ((6901, 7044), 'spacy.displacy.render', 'displacy.render', (['sentence'], {'style': '"""ent"""', 'manual': '(True)', 'options': "{'colors': {'PROBLEM': '#7aecec', 'TREATMENT': '#bfeeb7', 'TEST': '#aa9cfc'}}"}), "(sentence, style='ent', manual=True, options={'colors': {\n 'PROBLEM': '#7aecec', 'TREATMENT': '#bfeeb7', 'TEST': '#aa9cfc'}})\n", (6916, 7044), False, 'from spacy import displacy\n'), ((3870, 3896), 'pandas.DataFrame', 'pd.DataFrame', (['document_idx'], {}), '(document_idx)\n', (3882, 3896), True, 'import pandas as pd\n'), ((4188, 4220), 'numpy.array', 'np.array', (['[document_idx_eval[i]]'], {}), '([document_idx_eval[i]])\n', (4196, 4220), True, 'import numpy as np\n'), ((3013, 3050), 'nltk.tag.pos_tag', 'pos_tag', (['[all_note_tokens_func[i][j]]'], {}), '([all_note_tokens_func[i][j]])\n', (3020, 3050), False, 'from nltk.tag import pos_tag\n'), ((3125, 3162), 'nltk.tag.pos_tag', 'pos_tag', (['[all_note_tokens_func[i][j]]'], {}), '([all_note_tokens_func[i][j]])\n', (3132, 3162), False, 'from nltk.tag import pos_tag\n')] |
import logging
import math
from typing import Dict
import numpy as np
from pyquaternion import Quaternion
from paralleldomain.utilities.transformation import Transformation
# Module-level logger, named after this module so applications can configure
# it hierarchically.
logger = logging.getLogger(__name__)
class CoordinateSystem:
    """A 3D coordinate convention described by a 3-character axis string.

    Each character names the direction of one axis: F(ront), L(eft) and U(p)
    map to the positive unit axes, while B(ack), R(ight) and D(own) map to
    their negations. E.g. "FLU" means x-Front / y-Left / z-Up.
    """

    # Maps each direction letter to its 3D unit vector (FLU positive identity
    # columns, BRD their negations).
    _axis_char_map: Dict[str, np.ndarray] = dict(
        **{character: axis for character, axis in zip("FLU", np.identity(3))},
        **{character: axis for character, axis in zip("BRD", -np.identity(3))},
    )

    def __init__(self, axis_directions: str):
        """Create a coordinate system from a 3-letter axis string (e.g. "RDF")."""
        self.axis_directions = axis_directions
        self._base_matrix = self._create_base_matrix(axis_directions=axis_directions)

    @staticmethod
    def _create_base_matrix(axis_directions: str) -> np.ndarray:
        """Return a 4x4 homogeneous matrix whose first 3 columns are the axes.

        Raises:
            ValueError: if the string is not exactly 3 characters long or the
                chosen axes are linearly dependent (zero determinant).
        """
        if len(axis_directions) != 3:
            raise ValueError("The axis string needs to have exactly 3 orthogonal values from {RUBLDF}!")
        axis_directions = axis_directions.upper()
        base_change = np.identity(4)
        for i, direction in enumerate(axis_directions):
            base_change[:3, i] = CoordinateSystem._axis_char_map[direction]
        # The determinant is exactly 0.0 for degenerate choices (e.g. "FFU",
        # "FBU"), so exact float comparison is safe here.
        if np.linalg.det(base_change) == 0.0:
            raise ValueError(f"{axis_directions} is not a valid coordinate system!")
        return base_change

    def __gt__(self, other: "CoordinateSystem") -> Transformation:
        """Return the base change taking this system into `other`.

        Overloads `a > b` to mean "base change from a to b" (see
        get_base_change_from_to). The transpose serves as the inverse of
        `other`'s basis — valid because the basis columns are unit axes.
        """
        return Transformation.from_transformation_matrix(mat=(other._base_matrix.transpose() @ self._base_matrix))

    def __lt__(self, other: "CoordinateSystem") -> Transformation:
        """Return the base change from `other` into this system (mirror of `>`)."""
        return other > self

    @staticmethod
    def get_base_change_from_to(from_axis_directions: str, to_axis_directions: str) -> Transformation:
        """Convenience wrapper: base change between two axis strings."""
        return CoordinateSystem(from_axis_directions) > CoordinateSystem(to_axis_directions)

    @staticmethod
    def print_convention():
        """Log the unit vectors assigned to the canonical F/L/U directions."""
        logger.info(f"Front axis: {CoordinateSystem._axis_char_map['F']}")
        logger.info(f"Left axis: {CoordinateSystem._axis_char_map['L']}")
        logger.info(f"Up axis: {CoordinateSystem._axis_char_map['U']}")

    def quaternion_from_rpy(self, roll: float, pitch: float, yaw: float, degrees: bool = False, order: str = "xyz"):
        """Build a quaternion from roll/pitch/yaw given in this system.

        Roll, pitch and yaw are rotations about this system's front, left and
        up axes respectively (the canonical FLU axes mapped into this system).
        `order` is a permutation of "xyz" selecting the multiplication order of
        the three single-axis quaternions.

        Args:
            roll: rotation about the front axis (radians unless degrees=True).
            pitch: rotation about the left axis (radians unless degrees=True).
            yaw: rotation about the up axis (radians unless degrees=True).
            degrees: interpret the angles as degrees instead of radians.
            order: multiplication order of the axis rotations, e.g. "zyx".
        """
        # Express the canonical FLU axes in this coordinate system.
        transform = CoordinateSystem("FLU") > self
        front = transform.rotation @ CoordinateSystem._axis_char_map["F"].reshape((3, 1))
        left = transform.rotation @ CoordinateSystem._axis_char_map["L"].reshape((3, 1))
        up = transform.rotation @ CoordinateSystem._axis_char_map["U"].reshape((3, 1))
        rotations = {
            "x": Quaternion(axis=front, radians=roll if not degrees else math.radians(roll)),
            "y": Quaternion(axis=left, radians=pitch if not degrees else math.radians(pitch)),
            "z": Quaternion(axis=up, radians=yaw if not degrees else math.radians(yaw)),
        }
        # Compose the single-axis quaternions in the requested order,
        # starting from the identity quaternion.
        q = Quaternion()
        for rot in order:
            q = q * rotations[rot]
        return q
# Canonical internal convention: x points front, y left, z up ("FLU" maps to +identity axes).
INTERNAL_COORDINATE_SYSTEM = CoordinateSystem("FLU")
| [
"logging.getLogger",
"numpy.identity",
"math.radians",
"numpy.linalg.det",
"pyquaternion.Quaternion"
] | [((185, 212), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'import logging\n'), ((933, 947), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (944, 947), True, 'import numpy as np\n'), ((2759, 2771), 'pyquaternion.Quaternion', 'Quaternion', ([], {}), '()\n', (2769, 2771), False, 'from pyquaternion import Quaternion\n'), ((1091, 1117), 'numpy.linalg.det', 'np.linalg.det', (['base_change'], {}), '(base_change)\n', (1104, 1117), True, 'import numpy as np\n'), ((350, 364), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (361, 364), True, 'import numpy as np\n'), ((2532, 2550), 'math.radians', 'math.radians', (['roll'], {}), '(roll)\n', (2544, 2550), False, 'import math\n'), ((2626, 2645), 'math.radians', 'math.radians', (['pitch'], {}), '(pitch)\n', (2638, 2645), False, 'import math\n'), ((2717, 2734), 'math.radians', 'math.radians', (['yaw'], {}), '(yaw)\n', (2729, 2734), False, 'import math\n'), ((430, 444), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (441, 444), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
import math
def deskew():
    """Warp the skewed image back with the inverse of the recovered homography M and display it."""
    height, width = orig_image.shape[0], orig_image.shape[1]
    rectified = cv2.warpPerspective(skewed_image, np.linalg.inv(M), (width, height))
    plt.imshow(rectified, 'gray')
    plt.show()


# Load both frames as single-channel (grayscale) images.
orig_image = cv2.imread(r'../images/example4/1.jpg', 0)
skewed_image = cv2.imread(r'../images/example4/2.jpg', 0)

# SURF keypoint detection and descriptor extraction on each image.
detector = cv2.xfeatures2d.SURF_create(400)
kp1, des1 = detector.detectAndCompute(orig_image, None)
kp2, des2 = detector.detectAndCompute(skewed_image, None)

# Approximate nearest-neighbour matching via a FLANN KD-tree index.
FLANN_INDEX_KDTREE = 0
matcher = cv2.FlannBasedMatcher(
    dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
    dict(checks=50),
)
matches = matcher.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only when it clearly beats the runner-up.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
    sin_term = M[0, 1]
    cos_term = M[0, 0]
    scale_recovered = math.sqrt(sin_term * sin_term + cos_term * cos_term)
    theta_recovered = math.atan2(sin_term, cos_term) * 180 / math.pi
    print("Calculated scale difference: %.2f\nCalculated rotation difference: %.2f" % (scale_recovered, theta_recovered))
    deskew()
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
| [
"matplotlib.pyplot.imshow",
"cv2.findHomography",
"math.sqrt",
"cv2.xfeatures2d.SURF_create",
"numpy.linalg.inv",
"cv2.FlannBasedMatcher",
"math.atan2",
"cv2.imread",
"numpy.float32",
"matplotlib.pyplot.show"
] | [((264, 305), 'cv2.imread', 'cv2.imread', (['"""../images/example4/1.jpg"""', '(0)'], {}), "('../images/example4/1.jpg', 0)\n", (274, 305), False, 'import cv2\n'), ((322, 363), 'cv2.imread', 'cv2.imread', (['"""../images/example4/2.jpg"""', '(0)'], {}), "('../images/example4/2.jpg', 0)\n", (332, 363), False, 'import cv2\n'), ((373, 405), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', (['(400)'], {}), '(400)\n', (400, 405), False, 'import cv2\n'), ((635, 685), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (656, 685), False, 'import cv2\n'), ((208, 234), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_out', '"""gray"""'], {}), "(im_out, 'gray')\n", (218, 234), True, 'from matplotlib import pyplot as plt\n'), ((239, 249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (247, 249), True, 'from matplotlib import pyplot as plt\n'), ((1157, 1210), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (1175, 1210), False, 'import cv2\n'), ((1403, 1431), 'math.sqrt', 'math.sqrt', (['(ss * ss + sc * sc)'], {}), '(ss * ss + sc * sc)\n', (1412, 1431), False, 'import math\n'), ((142, 158), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (155, 158), True, 'import numpy as np\n'), ((944, 990), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good]'], {}), '([kp1[m.queryIdx].pt for m in good])\n', (954, 990), True, 'import numpy as np\n'), ((1050, 1096), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good]'], {}), '([kp2[m.trainIdx].pt for m in good])\n', (1060, 1096), True, 'import numpy as np\n'), ((1453, 1471), 'math.atan2', 'math.atan2', (['ss', 'sc'], {}), '(ss, sc)\n', (1463, 1471), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""EIP Phase 2 Final Project
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qqGmnSn-lZOg81BgjtMmTjf9NwH35fkf
"""
import time
from glob import glob
import PIL
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, \
ZeroPadding2D, LeakyReLU
from tensorflow.keras.optimizers import Adam
from imageio import imresize, imread
import os
def get_image_list(path='./images', young_lower=18, young_upper=29, old_lower=30, old_upper=85):
    """Scan *path* and bucket image filenames into 'young' and 'old' age groups.

    Filenames are expected to start with the subject's age followed by an
    underscore (UTKFace-style, e.g. ``23_1_0_2017.jpg``). Files whose leading
    token is not an integer are skipped instead of aborting the whole scan
    (the original raised ValueError on e.g. a stray README or hidden file).

    Args:
        path: directory containing the image files.
        young_lower/young_upper: inclusive age range for the 'young' bucket.
        old_lower/old_upper: inclusive age range for the 'old' bucket.

    Returns:
        dict with keys 'young' and 'old', each a list of matching filenames.
    """
    ages = {
        'young': [],
        'old': []
    }
    for image in os.listdir(path):
        try:
            age = int(image.split('_')[0])
        except ValueError:
            # Filename does not carry an age prefix -- ignore it.
            continue
        if young_lower <= age <= young_upper:
            ages['young'].append(image)
        if old_lower <= age <= old_upper:
            ages['old'].append(image)
    return ages
# Eagerly index the dataset at import time (side effect: reads ./images).
# NOTE(review): this result is not referenced anywhere below -- the training
# path calls load_images() directly; confirm before removing.
image_dataset = get_image_list()
from sklearn.utils import shuffle
def generator(gen_func, *args):
    """Wrap *gen_func* as an endless generator, re-invoking it for every item requested."""
    while True:
        batch = gen_func(*args)
        yield batch
def load_images(shuffle_data=True):
    """Load and resize every 'young' (X1) and 'old' (X2) face image from ./images.

    Bug fix: the original body called ``cv2.imread``/``cv2.resize`` but ``cv2``
    is never imported in this module (NameError at runtime). The module's own
    ``imread``/``imresize`` imports are used instead, consistent with
    ``load_test_batch`` below.

    Args:
        shuffle_data: kept for interface compatibility (currently unused, as in
            the original where the shuffle call was commented out).

    Returns:
        Tuple ``(young, old)`` of float32 arrays scaled to [-1, 1],
        each of shape (N, 128, 128, 3).
    """
    size = 128

    def _read_group(group):
        # Read every file of one age bucket and resize it to size x size.
        images = []
        for name in get_image_list()[group]:
            image = imread('images/' + name, mode='RGB')
            images.append(imresize(image, (size, size)))
        return np.stack(images).astype(np.float32)

    X1 = _read_group('young')
    X2 = _read_group('old')
    return (X1 / 127.5) - 1.0, (X2 / 127.5) - 1.0
# def load_test_batch(data_dir, batch_size=2, shuffle_data=True):
# X1 = []
# X2 = []
# size = 128
# images = get_image_list()['young']
# to_create = images[:batch_size]
# for img in to_create:
# image = cv2.imread('images/' + img)
# image = cv2.resize(image, (size, size))
# X1.append(image)
# images = get_image_list()['old']
# to_create = images[:batch_size]
# for img in to_create:
# image = cv2.imread('images/' + img)
# image = cv2.resize(image, (size, size))
# X2.append(image)
# X1 = np.stack(X1).astype(np.float32)
# X2 = np.stack(X2).astype(np.float32)
# if shuffle_data:
# X1, X2 = shuffle(X1, X2)
# return (X1.astype(np.float32) / 127.5) - 1., (X2.astype(np.float32) / 127.5) - 1.
class InstanceNormalization(tf.keras.layers.Layer):
    """Instance normalization layer.

    Normalizes activations along ``axis`` per sample and applies a learned
    per-channel scale and offset.
    """

    def __init__(self, axis=1, epsilon=1e-5):
        """
        Args:
            axis: axis over which mean/variance are computed (default 1).
            epsilon: small constant added to the variance for numerical stability.
        """
        super(InstanceNormalization, self).__init__()
        self.epsilon = epsilon
        self.axis = axis

    def build(self, shape):
        # One learned scale/offset per channel (last dimension of the input).
        self.scale = self.add_weight(
            name='scale',
            shape=shape[-1:],
            initializer=tf.keras.initializers.RandomNormal(0.0, 0.002),
            trainable=True,
        )
        self.offset = self.add_weight(name='offset', shape=shape[-1:], initializer='zeros', trainable=True)

    def call(self, inputs):
        # Bug fix: honour the configured axis instead of the hard-coded axes=[1].
        # (Every call site in this file passes axis=1, so behaviour is unchanged.)
        mean, variance = tf.nn.moments(inputs, axes=[self.axis], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        x = (inputs - mean) * inv
        return self.scale * x + self.offset
def residual_block(x):
    """Two conv+batch-norm stages (128 filters each) with an additive skip connection."""
    shortcut = x
    y = Conv2D(filters=128, kernel_size=3, strides=1, padding="same")(x)
    y = BatchNormalization(axis=3, momentum=0.9, epsilon=1e-5)(y)
    y = Activation('relu')(y)
    y = Conv2D(filters=128, kernel_size=3, strides=1, padding="same")(y)
    y = BatchNormalization(axis=3, momentum=0.9, epsilon=1e-5)(y)
    return Add()([y, shortcut])
def build_generator():
    """Build the CycleGAN generator: conv encoder, 6 residual blocks, deconv decoder.

    Works on 128x128 RGB images; the final tanh keeps outputs in [-1, 1],
    matching the image scaling used elsewhere in this file.
    """
    input_shape = (128, 128, 3)
    residual_blocks = 6
    input_layer = Input(shape=input_shape)

    # --- Encoder: three convolution stages (the last two downsample) ---
    net = Conv2D(filters=32, kernel_size=7, strides=1, padding="same")(input_layer)
    net = InstanceNormalization(axis=1)(net)
    net = Activation("relu")(net)

    net = Conv2D(filters=64, kernel_size=3, strides=2, padding="same")(net)
    net = InstanceNormalization(axis=1)(net)
    net = Activation("relu")(net)

    net = Conv2D(filters=128, kernel_size=3, strides=2, padding="same")(net)
    net = InstanceNormalization(axis=1)(net)
    net = Activation("relu")(net)

    # --- Transformer: residual blocks at the bottleneck ---
    for _ in range(residual_blocks):
        net = residual_block(net)

    # --- Decoder: two transposed-convolution upsampling stages ---
    for filters in (64, 32):
        net = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, padding='same', use_bias=False)(net)
        net = InstanceNormalization(axis=1)(net)
        net = Activation("relu")(net)

    # Project back to 3 channels and squash into [-1, 1].
    net = Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(net)
    output = Activation('tanh')(net)

    return Model(inputs=[input_layer], outputs=[output])
def build_discriminator():
    """Build the discriminator: four strided conv stages ending in a sigmoid patch map."""
    input_shape = (128, 128, 3)
    hidden_layers = 3
    input_layer = Input(shape=input_shape)

    net = ZeroPadding2D(padding=(1, 1))(input_layer)

    # First convolution stage (no normalization on this stage).
    net = Conv2D(filters=64, kernel_size=4, strides=2, padding="valid")(net)
    net = LeakyReLU(alpha=0.2)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)

    # Hidden stages double the channel count each time: 128, 256, 512.
    for i in range(1, hidden_layers + 1):
        net = Conv2D(filters=2 ** i * 64, kernel_size=4, strides=2, padding="valid")(net)
        net = InstanceNormalization(axis=1)(net)
        net = LeakyReLU(alpha=0.2)(net)
        net = ZeroPadding2D(padding=(1, 1))(net)

    # One sigmoid score per receptive-field patch.
    output = Conv2D(filters=1, kernel_size=4, strides=1, activation="sigmoid")(net)

    return Model(inputs=[input_layer], outputs=[output])
# def load_images(data_dir):
# imagesA = glob(data_dir + '/testA/*.*')
# imagesB = glob(data_dir + '/testB/*.*')
# allImagesA = []
# allImagesB = []
# for index, filename in enumerate(imagesA):
# imgA = imread(filename, mode='RGB')
# imgB = imread(imagesB[index], mode='RGB')
# imgA = imresize(imgA, (128, 128))
# imgB = imresize(imgB, (128, 128))
# if np.random.random() > 0.5:
# imgA = np.fliplr(imgA)
# imgB = np.fliplr(imgB)
# allImagesA.append(imgA)
# allImagesB.append(imgB)
# # Normalize images
# allImagesA = np.array(allImagesA) / 127.5 - 1.
# allImagesB = np.array(allImagesB) / 127.5 - 1.
# return allImagesA, allImagesB
def load_test_batch(data_dir, batch_size=5):
    """Sample *batch_size* images (with replacement) from *data_dir* for each domain.

    Bug fix: the original called ``glob(data_dir)`` with no wildcard, which
    returns (at most) the directory path itself rather than the files inside
    it, so the subsequent ``imread`` failed. Glob the directory contents
    instead.

    Args:
        data_dir: directory containing the image files.
        batch_size: number of images to draw per domain.

    Returns:
        Tuple ``(batchA, batchB)`` of arrays scaled to [-1, 1],
        each of shape (batch_size, 128, 128, 3).
    """
    files = glob(os.path.join(data_dir, '*'))
    imagesA = np.random.choice(files, batch_size)
    imagesB = np.random.choice(files, batch_size)
    allA = []
    allB = []
    for pathA, pathB in zip(imagesA, imagesB):
        # Load both picks and resize to the network input resolution.
        allA.append(imresize(imread(pathA, mode='RGB').astype(np.float32), (128, 128)))
        allB.append(imresize(imread(pathB, mode='RGB').astype(np.float32), (128, 128)))
    return np.array(allA) / 127.5 - 1.0, np.array(allB) / 127.5 - 1.0
def save_images(originalA, generatedB, recosntructedA, originalB, generatedA, reconstructedB, path):
    """Save a 2x3 comparison grid of original/generated/reconstructed images to *path*.

    Top row: domain A original, generated, reconstructed.
    Bottom row: domain B original, generated, reconstructed.
    (Parameter name ``recosntructedA`` keeps the original's spelling for
    call-site compatibility.)
    """
    panels = [
        (originalA, "Original"),
        (generatedB, "Generated"),
        (recosntructedA, "Reconstructed"),
        (originalB, "Original"),
        (generatedA, "Generated"),
        (reconstructedB, "Reconstructed"),
    ]
    fig = plt.figure()
    for position, (image, label) in enumerate(panels, start=1):
        ax = fig.add_subplot(2, 3, position)
        ax.imshow(image)
        ax.axis("off")
        ax.set_title(label)
    plt.savefig(path)
    # Close the figure so repeated calls from the training loop do not
    # accumulate open figures (the original leaked one figure per call).
    plt.close(fig)
# def write_log(callback, name, loss, batch_no):
# """
# Write training summary to TensorBoard
# """
# summary = tf.Summary()
# summary_value = summary.value.add()
# summary_value.simple_value = loss
# summary_value.tag = name
# callback.writer.add_summary(summary, batch_no)
# callback.writer.flush()
# --- Script configuration ---
data_dir = "./images"  # directory scanned for face images
batch_size = 1  # images per training step
epochs = 50  # number of passes over the paired domain data
mode = 'predict'  # 'train' fits the CycleGAN; 'predict' runs the saved weights
# Top-level driver: either train the two-generator/two-discriminator CycleGAN
# or run inference with previously saved generator weights.
if mode == 'train':
    """
    Load dataset
    """
    imagesA, imagesB = load_images()
    # Define the common optimizer
    common_optimizer = Adam(0.0002, 0.5)
    # Build and compile generator networks
    discriminatorA = build_discriminator()
    discriminatorB = build_discriminator()
    discriminatorA.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
    discriminatorB.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
    # Build generator networks
    generatorAToB = build_generator()
    generatorBToA = build_generator()
    """
    Create an adversarial network
    """
    inputA = Input(shape=(128, 128, 3))
    inputB = Input(shape=(128, 128, 3))
    # Generated images using both of the generator networks
    generatedB = generatorAToB(inputA)
    generatedA = generatorBToA(inputB)
    # Reconstruct images back to original images (cycle consistency)
    reconstructedA = generatorBToA(generatedB)
    reconstructedB = generatorAToB(generatedA)
    # Identity mappings (each generator applied to its own source domain)
    generatedAId = generatorBToA(inputA)
    generatedBId = generatorAToB(inputB)
    # Make both of the discriminator networks non-trainable
    # (only the generators are updated through the combined model)
    discriminatorA.trainable = False
    discriminatorB.trainable = False
    probsA = discriminatorA(generatedA)
    probsB = discriminatorB(generatedB)
    adversarial_model = Model(inputs=[inputA, inputB],
                              outputs=[probsA, probsB, reconstructedA, reconstructedB,
                                       generatedAId, generatedBId])
    # Cycle-consistency terms (mae) are weighted 10x relative to the adversarial terms.
    adversarial_model.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
                              loss_weights=[1, 1, 10.0, 10.0, 1.0, 1.0],
                              optimizer=common_optimizer)
    # tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()), write_images=True, write_grads=True,
    #                           write_graph=True)
    # tensorboard.set_model(generatorAToB)
    # tensorboard.set_model(generatorBToA)
    # tensorboard.set_model(discriminatorA)
    # tensorboard.set_model(discriminatorB)
    # Patch-wise labels; 7x7x1 is presumably the discriminator's output grid
    # for 128x128 inputs -- TODO confirm against build_discriminator's shapes.
    real_labels = np.ones((batch_size, 7, 7, 1))
    fake_labels = np.zeros((batch_size, 7, 7, 1))
    for epoch in range(epochs):
        print("Epoch:{}".format(epoch))
        dis_losses = []
        gen_losses = []
        num_batches = int(min(imagesA.shape[0], imagesB.shape[0]) / batch_size)
        print("Number of batches:{}".format(num_batches))
        for index in range(num_batches):
            #print("Batch:{}".format(index))
            # Sample images
            batchA = imagesA[index * batch_size:(index + 1) * batch_size]
            batchB = imagesB[index * batch_size:(index + 1) * batch_size]
            # Translate images to opposite domain
            generatedB = generatorAToB.predict(batchA)
            generatedA = generatorBToA.predict(batchB)
            # Train the discriminator A on real and fake images
            dALoss1 = discriminatorA.train_on_batch(batchA, real_labels)
            dALoss2 = discriminatorA.train_on_batch(generatedA, fake_labels)
            # Train the discriminator B on real and fake images
            dBLoss1 = discriminatorB.train_on_batch(batchB, real_labels)
            dbLoss2 = discriminatorB.train_on_batch(generatedB, fake_labels)
            # Calculate the total discriminator loss
            d_loss = 0.5 * np.add(0.5 * np.add(dALoss1, dALoss2), 0.5 * np.add(dBLoss1, dbLoss2))
            """
            Train the generator networks
            """
            g_loss = adversarial_model.train_on_batch([batchA, batchB],
                                                      [real_labels, real_labels, batchA, batchB, batchA, batchB])
            dis_losses.append(d_loss)
            gen_losses.append(g_loss)
            if index % 1000 == 0:
                print("d_loss:{}".format(d_loss))
                print("g_loss:{}".format(g_loss))
        """
        Save losses to Tensorboard after each epoch
        """
        # write_log(tensorboard, 'discriminator_loss', np.mean(dis_losses), epoch)
        # write_log(tensorboard, 'generator_loss', np.mean(gen_losses), epoch)
        # Sample and save images after every 10 epochs
        if epoch % 10 == 0:
            # Get a batch of test data
            # NOTE(review): load_test_batch requires a positional data_dir
            # argument -- as written this call raises TypeError whenever
            # epoch % 10 == 0. Compare the predict branch below, which
            # passes data_dir=data_dir.
            batchA, batchB = load_test_batch(batch_size=2)
            # Generate images
            generatedB = generatorAToB.predict(batchA)
            generatedA = generatorBToA.predict(batchB)
            # Get reconstructed images
            reconsA = generatorBToA.predict(generatedB)
            reconsB = generatorAToB.predict(generatedA)
            # Save original, generated and reconstructed images
            for i in range(len(generatedA)):
                save_images(originalA=batchA[i], generatedB=generatedB[i], recosntructedA=reconsA[i],
                            originalB=batchB[i], generatedA=generatedA[i], reconstructedB=reconsB[i],
                            path="results/gen_{}_{}".format(epoch, i))
    # Save models
    generatorAToB.save_weights("generatorAToB.h5")
    generatorBToA.save_weights("generatorBToA.h5")
    discriminatorA.save_weights("discriminatorA.h5")
    discriminatorB.save_weights("discriminatorB.h5")
elif mode == 'predict':
    # Build generator networks and restore the trained weights
    generatorAToB = build_generator()
    generatorBToA = build_generator()
    generatorAToB.load_weights("generatorAToB.h5")
    generatorBToA.load_weights("generatorBToA.h5")
    # Get a batch of test data
    batchA, batchB = load_test_batch(data_dir=data_dir, batch_size=2)
    # Save images
    generatedB = generatorAToB.predict(batchA)
    generatedA = generatorBToA.predict(batchB)
    reconsA = generatorBToA.predict(generatedB)
    reconsB = generatorAToB.predict(generatedA)
    for i in range(len(generatedA)):
        save_images(originalA=batchA[i], generatedB=generatedB[i], recosntructedA=reconsA[i],
                    originalB=batchB[i], generatedA=generatedA[i], reconstructedB=reconsB[i],
                    path="results/test_{}".format(i))
| [
"tensorflow.nn.moments",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"numpy.stack",
"glob.glob",
"matplotlib.pyplot.savefig",
"numpy.ones",
"tensorflow.keras.initializers.RandomNormal",
"numpy.add",
"numpy.random.choice",
"tens... | [((753, 769), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (763, 769), False, 'import os\n'), ((4083, 4107), 'tensorflow.keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4088, 4107), False, 'from tensorflow.keras import Input, Model\n'), ((5293, 5338), 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[output]'}), '(inputs=[input_layer], outputs=[output])\n', (5298, 5338), False, 'from tensorflow.keras import Input, Model\n'), ((5555, 5579), 'tensorflow.keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (5560, 5579), False, 'from tensorflow.keras import Input, Model\n'), ((6225, 6270), 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[output]'}), '(inputs=[input_layer], outputs=[output])\n', (6230, 6270), False, 'from tensorflow.keras import Input, Model\n'), ((7105, 7119), 'glob.glob', 'glob', (['data_dir'], {}), '(data_dir)\n', (7109, 7119), False, 'from glob import glob\n'), ((7134, 7148), 'glob.glob', 'glob', (['data_dir'], {}), '(data_dir)\n', (7138, 7148), False, 'from glob import glob\n'), ((7164, 7201), 'numpy.random.choice', 'np.random.choice', (['imagesA', 'batch_size'], {}), '(imagesA, batch_size)\n', (7180, 7201), True, 'import numpy as np\n'), ((7216, 7253), 'numpy.random.choice', 'np.random.choice', (['imagesB', 'batch_size'], {}), '(imagesB, batch_size)\n', (7232, 7253), True, 'import numpy as np\n'), ((7800, 7812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7810, 7812), True, 'import matplotlib.pyplot as plt\n'), ((8489, 8506), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (8500, 8506), True, 'import matplotlib.pyplot as plt\n'), ((9063, 9080), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.0002)', '(0.5)'], {}), '(0.0002, 0.5)\n', (9067, 9080), False, 'from tensorflow.keras.optimizers import Adam\n'), ((9562, 9588), 'tensorflow.keras.Input', 'Input', ([], 
{'shape': '(128, 128, 3)'}), '(shape=(128, 128, 3))\n', (9567, 9588), False, 'from tensorflow.keras import Input, Model\n'), ((9602, 9628), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(128, 128, 3)'}), '(shape=(128, 128, 3))\n', (9607, 9628), False, 'from tensorflow.keras import Input, Model\n'), ((10236, 10356), 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[inputA, inputB]', 'outputs': '[probsA, probsB, reconstructedA, reconstructedB, generatedAId, generatedBId]'}), '(inputs=[inputA, inputB], outputs=[probsA, probsB, reconstructedA,\n reconstructedB, generatedAId, generatedBId])\n', (10241, 10356), False, 'from tensorflow.keras import Input, Model\n'), ((10984, 11014), 'numpy.ones', 'np.ones', (['(batch_size, 7, 7, 1)'], {}), '((batch_size, 7, 7, 1))\n', (10991, 11014), True, 'import numpy as np\n'), ((11033, 11064), 'numpy.zeros', 'np.zeros', (['(batch_size, 7, 7, 1)'], {}), '((batch_size, 7, 7, 1))\n', (11041, 11064), True, 'import numpy as np\n'), ((3300, 3346), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs'], {'axes': '[1]', 'keepdims': '(True)'}), '(inputs, axes=[1], keepdims=True)\n', (3313, 3346), True, 'import tensorflow as tf\n'), ((3361, 3399), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['(variance + self.epsilon)'], {}), '(variance + self.epsilon)\n', (3374, 3399), True, 'import tensorflow as tf\n'), ((3547, 3608), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, strides=1, padding='same')\n", (3553, 3608), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3622, 3677), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'momentum': '(0.9)', 'epsilon': '(1e-05)'}), '(axis=3, momentum=0.9, epsilon=1e-05)\n', (3640, 3677), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, 
Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3692, 3710), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3702, 3710), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3727, 3788), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, strides=1, padding='same')\n", (3733, 3788), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3804, 3859), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'momentum': '(0.9)', 'epsilon': '(1e-05)'}), '(axis=3, momentum=0.9, epsilon=1e-05)\n', (3822, 3859), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3876, 3881), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (3879, 3881), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4147, 4207), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(7)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=32, kernel_size=7, strides=1, padding='same')\n", (4153, 4207), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4270, 4288), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4280, 4288), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4329, 4389), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': 
'"""same"""'}), "(filters=64, kernel_size=3, strides=2, padding='same')\n", (4335, 4389), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4442, 4460), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4452, 4460), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4501, 4562), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, strides=2, padding='same')\n", (4507, 4562), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4615, 4633), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4625, 4633), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4788, 4877), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(filters=64, kernel_size=3, strides=2, padding='same',\n use_bias=False)\n", (4803, 4877), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4926, 4944), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4936, 4944), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((4984, 5073), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(filters=32, kernel_size=3, strides=2, 
padding='same',\n use_bias=False)\n", (4999, 5073), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5122, 5140), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5132, 5140), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5182, 5241), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(3)', 'kernel_size': '(7)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=3, kernel_size=7, strides=1, padding='same')\n", (5188, 5241), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5258, 5276), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (5268, 5276), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5589, 5618), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (5602, 5618), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5671, 5732), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(4)', 'strides': '(2)', 'padding': '"""valid"""'}), "(filters=64, kernel_size=4, strides=2, padding='valid')\n", (5677, 5732), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5744, 5764), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5753, 5764), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((5777, 5806), 
'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (5790, 5806), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((6143, 6208), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1)', 'kernel_size': '(4)', 'strides': '(1)', 'activation': '"""sigmoid"""'}), "(filters=1, kernel_size=4, strides=1, activation='sigmoid')\n", (6149, 6208), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((1688, 1700), 'numpy.stack', 'np.stack', (['X1'], {}), '(X1)\n', (1696, 1700), True, 'import numpy as np\n'), ((1729, 1741), 'numpy.stack', 'np.stack', (['X2'], {}), '(X2)\n', (1737, 1741), True, 'import numpy as np\n'), ((5899, 5969), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 ** i * 64)', 'kernel_size': '(4)', 'strides': '(2)', 'padding': '"""valid"""'}), "(filters=2 ** i * 64, kernel_size=4, strides=2, padding='valid')\n", (5905, 5969), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((6030, 6050), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6039, 6050), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((6067, 6096), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (6080, 6096), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add, Conv2DTranspose, ZeroPadding2D, LeakyReLU\n'), ((3074, 3120), 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', (['(0.0)', '(0.002)'], {}), '(0.0, 0.002)\n', (3108, 3120), True, 'import tensorflow as tf\n'), ((7597, 7611), 'numpy.array', 
'np.array', (['allA'], {}), '(allA)\n', (7605, 7611), True, 'import numpy as np\n'), ((7627, 7641), 'numpy.array', 'np.array', (['allB'], {}), '(allB)\n', (7635, 7641), True, 'import numpy as np\n'), ((7382, 7412), 'imageio.imread', 'imread', (['imagesA[i]'], {'mode': '"""RGB"""'}), "(imagesA[i], mode='RGB')\n", (7388, 7412), False, 'from imageio import imresize, imread\n'), ((7469, 7499), 'imageio.imread', 'imread', (['imagesB[i]'], {'mode': '"""RGB"""'}), "(imagesB[i], mode='RGB')\n", (7475, 7499), False, 'from imageio import imresize, imread\n'), ((12274, 12298), 'numpy.add', 'np.add', (['dALoss1', 'dALoss2'], {}), '(dALoss1, dALoss2)\n', (12280, 12298), True, 'import numpy as np\n'), ((12306, 12330), 'numpy.add', 'np.add', (['dBLoss1', 'dbLoss2'], {}), '(dBLoss1, dbLoss2)\n', (12312, 12330), True, 'import numpy as np\n')] |
import logging
import itertools
import os
import numpy as np
from typing import List, Text, Optional, Union, Any
import matplotlib
from rasa.constants import RESULTS_FILE
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
# At first, matplotlib will be initialized with default OS-specific available backend
# if that didn't happen, we'll try to set it up manually
if matplotlib.get_backend() is None:  # pragma: no cover
    # Idiom fix: the original tested `is not None` with an empty `pass` branch
    # and did the work in `else`; invert the condition and drop the dead branch.
    try:
        # If the `tkinter` package is available, we can use the `TkAgg` backend
        import tkinter  # noqa: F401

        matplotlib.use("TkAgg")
    except ImportError:
        # No display toolkit available -- fall back to the non-interactive backend.
        matplotlib.use("agg")
def plot_confusion_matrix(
    confusion_matrix: np.ndarray,
    classes: Union[np.ndarray, List[Text]],
    normalize: bool = False,
    title: Text = "Confusion matrix",
    color_map: Any = None,
    zmin: int = 1,
    output_file: Optional[Text] = None,
) -> None:
    """Print and plot the provided confusion matrix.

    Normalization can be applied by setting `normalize=True`.

    Args:
        confusion_matrix: confusion matrix to plot
        classes: class labels
        normalize: If set to true, normalization will be applied.
        title: title of the plot
        color_map: color mapping
        zmin: lower bound for the logarithmic colour scale
        output_file: output file to save plot to
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    # Capture the colour-scale upper bound before any normalization below.
    zmax = confusion_matrix.max()
    plt.clf()
    plt.imshow(
        confusion_matrix,
        interpolation="nearest",
        cmap=color_map or plt.cm.Blues,
        aspect="auto",
        norm=LogNorm(vmin=zmin, vmax=zmax),
    )
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90)
    plt.yticks(ticks, classes)

    if normalize:
        # Note: normalization only affects the cell labels below; the image
        # itself was already drawn from the raw counts.
        row_totals = confusion_matrix.sum(axis=1)[:, np.newaxis]
        confusion_matrix = confusion_matrix.astype("float") / row_totals
        logger.info(f"Normalized confusion matrix: \n{confusion_matrix}")
    else:
        logger.info(f"Confusion matrix, without normalization: \n{confusion_matrix}")

    # Write each cell value, switching text colour for legibility on dark cells.
    threshold = confusion_matrix.max() / 2.0
    for row in range(confusion_matrix.shape[0]):
        for col in range(confusion_matrix.shape[1]):
            cell = confusion_matrix[row, col]
            plt.text(
                col,
                row,
                cell,
                horizontalalignment="center",
                color="white" if cell > threshold else "black",
            )

    plt.ylabel("True label")
    plt.xlabel("Predicted label")

    # save confusion matrix to file before showing it
    if output_file:
        figure = plt.gcf()
        figure.set_size_inches(20, 20)
        figure.savefig(output_file, bbox_inches="tight")
def plot_histogram(
    hist_data: List[List[float]], title: Text, output_file: Optional[Text] = None
) -> None:
    """Plot a histogram of the confidence distribution of the predictions in two columns.

    Args:
        hist_data: histogram data
        output_file: output file to save the plot ot
    """
    import matplotlib.pyplot as plt

    plt.gcf().clear()

    # Wine-ish colour for the confidences of hits,
    # blue-ish colour for the confidences of misses.
    bar_colors = ["#009292", "#920000"]
    # 20 equal-width bins covering the confidence range (0, 1].
    bin_edges = [0.05 * step for step in range(1, 21)]

    plt.xlim([0, 1])
    plt.hist(hist_data, bins=bin_edges, color=bar_colors)
    plt.xticks(bin_edges)
    plt.title(title)
    plt.xlabel("Confidence")
    plt.ylabel("Number of Samples")
    plt.legend(["hits", "misses"])

    # Persist the figure before it is (potentially) shown elsewhere.
    if output_file:
        figure = plt.gcf()
        figure.set_size_inches(10, 10)
        figure.savefig(output_file, bbox_inches="tight")
def plot_curve(
    output_directory: Text,
    number_of_examples: List[int],
    x_label_text: Text,
    y_label_text: Text,
    graph_path: Text,
) -> None:
    """Plot the results from a model comparison.

    Args:
        output_directory: Output directory to save resulting plots to
        number_of_examples: Number of examples per run
        x_label_text: text for the x axis
        y_label_text: text for the y axis
        graph_path: output path of the plot
    """
    import matplotlib.pyplot as plt

    axes = plt.gca()

    # load results from file
    results = io_utils.read_json_file(os.path.join(output_directory, RESULTS_FILE))
    x_values = number_of_examples

    # compute mean of all the runs for different configs
    for label, runs in results.items():
        if not runs:
            continue
        run_mean = np.mean(runs, axis=0)
        run_std = np.std(runs, axis=0)
        axes.plot(x_values, run_mean, label=label, marker=".")
        # Shade one standard deviation around the mean curve.
        lower_band = [m - s for m, s in zip(run_mean, run_std)]
        upper_band = [m + s for m, s in zip(run_mean, run_std)]
        axes.fill_between(x_values, lower_band, upper_band, color="#6b2def", alpha=0.2)

    axes.legend(loc=4)
    axes.set_xlabel(x_label_text)
    axes.set_ylabel(y_label_text)
    plt.savefig(graph_path, format="pdf")
    logger.info(f"Comparison graph saved to '{graph_path}'.")
| [
"logging.getLogger",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.LogNorm",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.get_backend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"matplotlib.pyplot.... | [((216, 243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'import logging\n'), ((392, 416), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (414, 416), False, 'import matplotlib\n'), ((1468, 1477), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1475, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1725), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1718, 1725), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1744), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1834), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(90)'}), '(tick_marks, classes, rotation=90)\n', (1800, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1870), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (1849, 1870), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (2588, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (2617, 2636), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3407), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (3399, 3407), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3456), 'matplotlib.pyplot.hist', 'plt.hist', (['hist_data'], {'bins': 'bins', 'color': 'colors'}), '(hist_data, bins=bins, color=colors)\n', (3420, 3456), True, 'import matplotlib.pyplot as plt\n'), ((3461, 3477), 'matplotlib.pyplot.xticks', 'plt.xticks', (['bins'], {}), '(bins)\n', (3471, 3477), True, 'import matplotlib.pyplot as plt\n'), ((3482, 3498), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3491, 3498), 
True, 'import matplotlib.pyplot as plt\n'), ((3503, 3527), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence"""'], {}), "('Confidence')\n", (3513, 3527), True, 'import matplotlib.pyplot as plt\n'), ((3532, 3563), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Samples"""'], {}), "('Number of Samples')\n", (3542, 3563), True, 'import matplotlib.pyplot as plt\n'), ((3568, 3598), 'matplotlib.pyplot.legend', 'plt.legend', (["['hits', 'misses']"], {}), "(['hits', 'misses'])\n", (3578, 3598), True, 'import matplotlib.pyplot as plt\n'), ((4263, 4272), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4270, 4272), True, 'import matplotlib.pyplot as plt\n'), ((4979, 5016), 'matplotlib.pyplot.savefig', 'plt.savefig', (['graph_path'], {'format': '"""pdf"""'}), "(graph_path, format='pdf')\n", (4990, 5016), True, 'import matplotlib.pyplot as plt\n'), ((586, 609), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (600, 609), False, 'import matplotlib\n'), ((2370, 2506), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'confusion_matrix[i, j]'], {'horizontalalignment': '"""center"""', 'color': "('white' if confusion_matrix[i, j] > thresh else 'black')"}), "(j, i, confusion_matrix[i, j], horizontalalignment='center', color=\n 'white' if confusion_matrix[i, j] > thresh else 'black')\n", (2378, 2506), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2735), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2733, 2735), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3643), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3641, 3643), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4382), 'os.path.join', 'os.path.join', (['output_directory', 'RESULTS_FILE'], {}), '(output_directory, RESULTS_FILE)\n', (4350, 4382), False, 'import os\n'), ((4569, 4597), 'numpy.mean', 'np.mean', (['data[label]'], {'axis': '(0)'}), '(data[label], axis=0)\n', (4576, 4597), True, 'import numpy as np\n'), ((4612, 4639), 'numpy.std', 
'np.std', (['data[label]'], {'axis': '(0)'}), '(data[label], axis=0)\n', (4618, 4639), True, 'import numpy as np\n'), ((642, 663), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (656, 663), False, 'import matplotlib\n'), ((1668, 1697), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': 'zmin', 'vmax': 'zmax'}), '(vmin=zmin, vmax=zmax)\n', (1675, 1697), False, 'from matplotlib.colors import LogNorm\n'), ((3183, 3192), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3190, 3192), True, 'import matplotlib.pyplot as plt\n')] |
"""
Train swimmer body using ES algorithm (from ESTorch).
"""
import click
import os
import sys
import time
import shutil
import torch
import numpy as np
from mpi4py import MPI
from dowel import logger, tabular
from dm_control import suite
from acme import wrappers
from garage import wrap_experiment, rollout
from garage.envs.dm_control import DMControlEnv
from garage.experiment import deterministic
from garage.experiment.snapshotter import Snapshotter
from garage.torch.policies import GaussianMLPPolicy
from neurobo.ncap.swimmer import SwimmerPolicy
import neurobo.ncap.envs
import estorch
@click.command(context_settings={'show_default': True})
@click.option('--job', default='0', help='Job id on cluster.')
@click.option('--task', default='swim', type=click.Choice(['swim', 'navigate']), help='Task name.')
@click.option('--n_epochs', default=2, help='Num epochs total.')
@click.option('--n_proc', default=1, help='Num of processes to use for distributed training.')
@click.option('--population_size', default=8, help='Num perturbations per epoch. Must be even and multiple of number of processes.')
@click.option('--sigma', default=0.02, help='Standard deviation of Gaussian noise perturbation.')
@click.option('--optim', default='adam', type=click.Choice(['adam', 'sgd']), help='Name of optimizer.')
@click.option('--lr', default=0.01, help='Learning rate of optimizer.')
@click.option('--l2_coeff', default=0.005, help='L2 regularization coefficient.')
@click.option('--n_evals', default=1, help='Num evaluation episodes per epoch.')
@click.option('--policy', default='mlp', type=click.Choice(['mlp', 'ncap']), help='Policy network architecture.')
@click.option('--policy_hidden_sizes', default='64,64', help='Policy network hidden layers sizes.')
@click.option('--ncap_opts', default='111', help='Bit string for NCAP options [ws, sign, mag].')
@click.option('--osc_period', default=60, help='Swimmer oscilator period.')
@click.option('--seed', default=1, help='Random seed.')
@wrap_experiment(
  prefix='experiments/swimmer/swimmer_es',
  name_parameters='passed',
  snapshot_mode='gap_and_last',
  snapshot_gap=5,
)
def swimmer_es(
  ctxt,
  job,
  task,
  n_epochs,
  n_proc,
  population_size,
  sigma,
  optim,
  lr,
  l2_coeff,
  n_evals,
  policy,
  policy_hidden_sizes,
  ncap_opts,
  osc_period,
  seed,
):
  """Train a swimmer body with the ES algorithm (ESTorch), optionally via MPI.

  Each MPI worker derives its own seed and environment; rank 0 (with an
  MPI parent) acts as the logging manager while the other workers delete
  their snapshot directories. Snapshots of the policy (and the best policy
  seen so far) are saved every epoch through the experiment snapshotter.
  """
  assert population_size % n_proc == 0 and population_size % 2 == 0
  # Extract MPI multi-process info.
  comm = MPI.COMM_WORLD
  rank = comm.Get_rank()
  size = comm.Get_size()
  seed = 1000 * seed + rank  # Worker-specific seed based on task id.
  deterministic.set_seed(seed)
  env = DMControlEnv(suite.load('swimmer', task, task_kwargs={'random': seed}))

  # Select policy architecture: a plain Gaussian MLP or the NCAP swimmer.
  assert policy in ('mlp', 'ncap')
  if policy == 'mlp':
    Policy = GaussianMLPPolicy
    policy_kwargs = dict(
      env_spec=env.spec,
      hidden_sizes=tuple(int(h) for h in policy_hidden_sizes.split(',')),
      hidden_nonlinearity=torch.tanh,
      output_nonlinearity=None,
    )
  elif policy == 'ncap':
    # ncap_opts is a 3-char bit string: [weight sharing, constraints, constant init].
    assert len(ncap_opts) == 3
    sharing, constraints, constant_init = tuple(bool(int(x)) for x in ncap_opts)
    Policy = SwimmerPolicy
    policy_kwargs = dict(
      env_spec=env.spec,
      n_joints=env.spec.action_space.shape[0],
      oscillator_period=osc_period,
      use_task=None,
      use_weight_sharing=sharing,
      use_weight_constraints=constraints,
      use_weight_constant_init=constant_init,
    )

  assert optim in ('adam', 'sgd')
  if optim == 'adam':
    optimizer = torch.optim.Adam
  elif optim == 'sgd':
    optimizer = torch.optim.SGD

  class Agent():
    # Per-worker rollout agent with its own environment instance.
    def __init__(self, device=torch.device('cpu')):
      self.device = device
      self.env = DMControlEnv(suite.load('swimmer', task, task_kwargs={'random': seed}))
    def rollout(self, policy):
      # Average episodic return over n_evals deterministic evaluation episodes.
      total_reward = 0.
      for _ in range(n_evals):
        data = rollout(self.env, policy, deterministic=True)
        total_reward += sum(data['rewards'])
      return total_reward / n_evals

  start_time = time.time()
  epoch_time = time.time()
  snapshotter = Snapshotter(
    snapshot_dir=ctxt.snapshot_dir,
    snapshot_mode=ctxt.snapshot_mode,
    snapshot_gap=ctxt.snapshot_gap,
  )

  class ES(estorch.ES):
    # Subclass only to add logging/snapshotting after each ES step.
    def log(self):
      nonlocal epoch_time, start_time
      now = time.time()
      logger.log(f'Time {now - start_time:.2f} s')
      logger.log(f'EpochTime {now - epoch_time:.2f} s')
      epoch_time = now
      epoch = self.step + 1
      tabular.record('Epoch', epoch)
      tabular.record('TotalEnvSteps', epoch * population_size * n_evals * env.spec.max_episode_length)
      tabular.record('CurrentReturn', self.episode_reward)
      tabular.record('PopulationAvgReturn', np.mean(self.population_returns))
      tabular.record('PopulationMinReturn', np.min(self.population_returns))
      tabular.record('PopulationMaxReturn', np.max(self.population_returns))
      logger.log(tabular)
      logger.dump_all(epoch)
      tabular.clear()
      snapshotter.save_itr_params(
        epoch,
        dict(
          policy_cls=Policy,
          policy_kwargs=policy_kwargs,
          policy_dict=es.policy.state_dict(),
          best_policy_dict=es.best_policy_dict,
        ),
      )

  es = ES(
    policy=Policy,
    agent=Agent,
    # BUG FIX: previously hard-coded torch.optim.Adam here, which silently
    # ignored the --optim option; use the optimizer selected above instead.
    optimizer=optimizer,
    population_size=population_size,
    sigma=sigma,
    device=torch.device('cpu'),
    policy_kwargs=policy_kwargs,
    agent_kwargs=dict(),
    optimizer_kwargs=dict(lr=lr, weight_decay=l2_coeff),
  )

  if rank == 0 and os.getenv('MPI_PARENT') is not None:
    # Manager process is only one that logs.
    logger.log(f'Training started for {n_epochs} epochs on worker {rank} of {size} ...')
  elif n_proc > 1:
    # Worker processes shouldn't have a log dir.
    shutil.rmtree(ctxt.snapshot_dir)
  es.train(n_steps=n_epochs, n_proc=n_proc, hwthread=True)
  logger.log(f'Training done.')
# Entry point: click supplies all CLI options when invoked as a script.
if __name__ == '__main__':
  swimmer_es()
| [
"click.Choice",
"dowel.logger.log",
"numpy.mean",
"garage.wrap_experiment",
"click.option",
"dowel.tabular.clear",
"dowel.logger.dump_all",
"numpy.max",
"garage.experiment.deterministic.set_seed",
"numpy.min",
"garage.rollout",
"click.command",
"dowel.tabular.record",
"time.time",
"torch... | [((599, 653), 'click.command', 'click.command', ([], {'context_settings': "{'show_default': True}"}), "(context_settings={'show_default': True})\n", (612, 653), False, 'import click\n'), ((655, 716), 'click.option', 'click.option', (['"""--job"""'], {'default': '"""0"""', 'help': '"""Job id on cluster."""'}), "('--job', default='0', help='Job id on cluster.')\n", (667, 716), False, 'import click\n'), ((818, 881), 'click.option', 'click.option', (['"""--n_epochs"""'], {'default': '(2)', 'help': '"""Num epochs total."""'}), "('--n_epochs', default=2, help='Num epochs total.')\n", (830, 881), False, 'import click\n'), ((883, 981), 'click.option', 'click.option', (['"""--n_proc"""'], {'default': '(1)', 'help': '"""Num of processes to use for distributed training."""'}), "('--n_proc', default=1, help=\n 'Num of processes to use for distributed training.')\n", (895, 981), False, 'import click\n'), ((978, 1119), 'click.option', 'click.option', (['"""--population_size"""'], {'default': '(8)', 'help': '"""Num perturbations per epoch. Must be even and multiple of number of processes."""'}), "('--population_size', default=8, help=\n 'Num perturbations per epoch. 
Must be even and multiple of number of processes.'\n )\n", (990, 1119), False, 'import click\n'), ((1111, 1212), 'click.option', 'click.option', (['"""--sigma"""'], {'default': '(0.02)', 'help': '"""Standard deviation of Gaussian noise perturbation."""'}), "('--sigma', default=0.02, help=\n 'Standard deviation of Gaussian noise perturbation.')\n", (1123, 1212), False, 'import click\n'), ((1313, 1383), 'click.option', 'click.option', (['"""--lr"""'], {'default': '(0.01)', 'help': '"""Learning rate of optimizer."""'}), "('--lr', default=0.01, help='Learning rate of optimizer.')\n", (1325, 1383), False, 'import click\n'), ((1385, 1470), 'click.option', 'click.option', (['"""--l2_coeff"""'], {'default': '(0.005)', 'help': '"""L2 regularization coefficient."""'}), "('--l2_coeff', default=0.005, help='L2 regularization coefficient.'\n )\n", (1397, 1470), False, 'import click\n'), ((1467, 1546), 'click.option', 'click.option', (['"""--n_evals"""'], {'default': '(1)', 'help': '"""Num evaluation episodes per epoch."""'}), "('--n_evals', default=1, help='Num evaluation episodes per epoch.')\n", (1479, 1546), False, 'import click\n'), ((1662, 1765), 'click.option', 'click.option', (['"""--policy_hidden_sizes"""'], {'default': '"""64,64"""', 'help': '"""Policy network hidden layers sizes."""'}), "('--policy_hidden_sizes', default='64,64', help=\n 'Policy network hidden layers sizes.')\n", (1674, 1765), False, 'import click\n'), ((1762, 1862), 'click.option', 'click.option', (['"""--ncap_opts"""'], {'default': '"""111"""', 'help': '"""Bit string for NCAP options [ws, sign, mag]."""'}), "('--ncap_opts', default='111', help=\n 'Bit string for NCAP options [ws, sign, mag].')\n", (1774, 1862), False, 'import click\n'), ((1859, 1933), 'click.option', 'click.option', (['"""--osc_period"""'], {'default': '(60)', 'help': '"""Swimmer oscilator period."""'}), "('--osc_period', default=60, help='Swimmer oscilator period.')\n", (1871, 1933), False, 'import click\n'), ((1935, 1989), 
'click.option', 'click.option', (['"""--seed"""'], {'default': '(1)', 'help': '"""Random seed."""'}), "('--seed', default=1, help='Random seed.')\n", (1947, 1989), False, 'import click\n'), ((1991, 2124), 'garage.wrap_experiment', 'wrap_experiment', ([], {'prefix': '"""experiments/swimmer/swimmer_es"""', 'name_parameters': '"""passed"""', 'snapshot_mode': '"""gap_and_last"""', 'snapshot_gap': '(5)'}), "(prefix='experiments/swimmer/swimmer_es', name_parameters=\n 'passed', snapshot_mode='gap_and_last', snapshot_gap=5)\n", (2006, 2124), False, 'from garage import wrap_experiment, rollout\n'), ((2581, 2609), 'garage.experiment.deterministic.set_seed', 'deterministic.set_seed', (['seed'], {}), '(seed)\n', (2603, 2609), False, 'from garage.experiment import deterministic\n'), ((4004, 4015), 'time.time', 'time.time', ([], {}), '()\n', (4013, 4015), False, 'import time\n'), ((4031, 4042), 'time.time', 'time.time', ([], {}), '()\n', (4040, 4042), False, 'import time\n'), ((4060, 4174), 'garage.experiment.snapshotter.Snapshotter', 'Snapshotter', ([], {'snapshot_dir': 'ctxt.snapshot_dir', 'snapshot_mode': 'ctxt.snapshot_mode', 'snapshot_gap': 'ctxt.snapshot_gap'}), '(snapshot_dir=ctxt.snapshot_dir, snapshot_mode=ctxt.\n snapshot_mode, snapshot_gap=ctxt.snapshot_gap)\n', (4071, 4174), False, 'from garage.experiment.snapshotter import Snapshotter\n'), ((5858, 5887), 'dowel.logger.log', 'logger.log', (['f"""Training done."""'], {}), "(f'Training done.')\n", (5868, 5887), False, 'from dowel import logger, tabular\n'), ((2632, 2689), 'dm_control.suite.load', 'suite.load', (['"""swimmer"""', 'task'], {'task_kwargs': "{'random': seed}"}), "('swimmer', task, task_kwargs={'random': seed})\n", (2642, 2689), False, 'from dm_control import suite\n'), ((5606, 5695), 'dowel.logger.log', 'logger.log', (['f"""Training started for {n_epochs} epochs on worker {rank} of {size} ..."""'], {}), "(\n f'Training started for {n_epochs} epochs on worker {rank} of {size} ...')\n", (5616, 5695), False, 
'from dowel import logger, tabular\n'), ((762, 796), 'click.Choice', 'click.Choice', (["['swim', 'navigate']"], {}), "(['swim', 'navigate'])\n", (774, 796), False, 'import click\n'), ((1254, 1283), 'click.Choice', 'click.Choice', (["['adam', 'sgd']"], {}), "(['adam', 'sgd'])\n", (1266, 1283), False, 'import click\n'), ((1593, 1622), 'click.Choice', 'click.Choice', (["['mlp', 'ncap']"], {}), "(['mlp', 'ncap'])\n", (1605, 1622), False, 'import click\n'), ((3621, 3640), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3633, 3640), False, 'import torch\n'), ((4281, 4292), 'time.time', 'time.time', ([], {}), '()\n', (4290, 4292), False, 'import time\n'), ((4299, 4343), 'dowel.logger.log', 'logger.log', (['f"""Time {now - start_time:.2f} s"""'], {}), "(f'Time {now - start_time:.2f} s')\n", (4309, 4343), False, 'from dowel import logger, tabular\n'), ((4350, 4399), 'dowel.logger.log', 'logger.log', (['f"""EpochTime {now - epoch_time:.2f} s"""'], {}), "(f'EpochTime {now - epoch_time:.2f} s')\n", (4360, 4399), False, 'from dowel import logger, tabular\n'), ((4458, 4488), 'dowel.tabular.record', 'tabular.record', (['"""Epoch"""', 'epoch'], {}), "('Epoch', epoch)\n", (4472, 4488), False, 'from dowel import logger, tabular\n'), ((4495, 4596), 'dowel.tabular.record', 'tabular.record', (['"""TotalEnvSteps"""', '(epoch * population_size * n_evals * env.spec.max_episode_length)'], {}), "('TotalEnvSteps', epoch * population_size * n_evals * env.\n spec.max_episode_length)\n", (4509, 4596), False, 'from dowel import logger, tabular\n'), ((4598, 4650), 'dowel.tabular.record', 'tabular.record', (['"""CurrentReturn"""', 'self.episode_reward'], {}), "('CurrentReturn', self.episode_reward)\n", (4612, 4650), False, 'from dowel import logger, tabular\n'), ((4889, 4908), 'dowel.logger.log', 'logger.log', (['tabular'], {}), '(tabular)\n', (4899, 4908), False, 'from dowel import logger, tabular\n'), ((4916, 4938), 'dowel.logger.dump_all', 'logger.dump_all', (['epoch'], {}), 
'(epoch)\n', (4931, 4938), False, 'from dowel import logger, tabular\n'), ((4945, 4960), 'dowel.tabular.clear', 'tabular.clear', ([], {}), '()\n', (4958, 4960), False, 'from dowel import logger, tabular\n'), ((5360, 5379), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5372, 5379), False, 'import torch\n'), ((5520, 5543), 'os.getenv', 'os.getenv', (['"""MPI_PARENT"""'], {}), "('MPI_PARENT')\n", (5529, 5543), False, 'import os\n'), ((5763, 5795), 'shutil.rmtree', 'shutil.rmtree', (['ctxt.snapshot_dir'], {}), '(ctxt.snapshot_dir)\n', (5776, 5795), False, 'import shutil\n'), ((3700, 3757), 'dm_control.suite.load', 'suite.load', (['"""swimmer"""', 'task'], {'task_kwargs': "{'random': seed}"}), "('swimmer', task, task_kwargs={'random': seed})\n", (3710, 3757), False, 'from dm_control import suite\n'), ((3861, 3906), 'garage.rollout', 'rollout', (['self.env', 'policy'], {'deterministic': '(True)'}), '(self.env, policy, deterministic=True)\n', (3868, 3906), False, 'from garage import wrap_experiment, rollout\n'), ((4695, 4727), 'numpy.mean', 'np.mean', (['self.population_returns'], {}), '(self.population_returns)\n', (4702, 4727), True, 'import numpy as np\n'), ((4773, 4804), 'numpy.min', 'np.min', (['self.population_returns'], {}), '(self.population_returns)\n', (4779, 4804), True, 'import numpy as np\n'), ((4850, 4881), 'numpy.max', 'np.max', (['self.population_returns'], {}), '(self.population_returns)\n', (4856, 4881), True, 'import numpy as np\n')] |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from extensions.ops.normalize import NormalizeOp
from mo.front.common.layout import get_features_dim
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Graph
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.const import Const
class L2NormToNorm(MiddleReplacementPattern):
    """Fuses the decomposed L2-normalization subgraph into a single Normalize op.

    Matches the pattern ``Mul(input, Pow(Maximum(ReduceSum(Pow(input, 2)), eps), -0.5))``
    (i.e. ``x / max(sqrt(sum(x^2)), eps)``) and replaces it with one NormalizeOp
    node plus a constant weights node of ones.
    """
    enabled = True
    force_clean_up = True
    def run_after(self):
        # Run after the pre-middle phase boundary.
        from extensions.middle.pass_separator import PreMiddleStart
        return [PreMiddleStart]
    def run_before(self):
        # Run before the main middle phase starts.
        from extensions.middle.pass_separator import MiddleStart
        return [MiddleStart]
    def pattern(self):
        """Describe the subgraph to match: square -> sum -> maximum -> rsqrt -> mul."""
        return dict(
            nodes=[
                ('input', dict(kind='data')),
                ('l2_normalize', dict(kind='op', op='Mul')),
                ('l2_normalize_data', dict(kind='data')),
                ('maximum', dict(kind='op', op='Maximum')),
                ('maximum_data', dict(kind='data')),
                ('maximum_y_data', dict(kind='data')),
                # Pow exponent -0.5 implements the reciprocal square root.
                ('rsqrt_pow', dict(kind='data', value=lambda x: np.all(x == -0.5) if x is not None else False)),
                ('rsqrt', dict(kind='op', op='Pow')),
                ('rsqrt_data', dict(kind='data')),
                # Pow exponent 2 implements the element-wise square.
                ('square_pow', dict(kind='data', value=lambda x: np.all(x == 2) if x is not None else False)),
                ('square', dict(kind='op', op='Pow')),
                ('square_data', dict(kind='data')),
                ('sum', dict(kind='op', op='ReduceSum')),
                ('sum_data', dict(kind='data')),
            ],
            edges=[
                ('input', 'square', {'in': 0}),
                ('square_pow', 'square', {'in': 1}),
                ('square', 'square_data'),
                ('square_data', 'sum'),
                ('sum', 'sum_data'),
                ('maximum_y_data', 'maximum'),
                ('sum_data', 'maximum'),
                ('maximum', 'maximum_data'),
                ('maximum_data', 'rsqrt', {'in': 0}),
                ('rsqrt_pow', 'rsqrt', {'in': 1}),
                ('rsqrt', 'rsqrt_data'),
                ('rsqrt_data', 'l2_normalize'),
                ('input', 'l2_normalize'),
                ('l2_normalize', 'l2_normalize_data'),
            ]
        )
    def replace_pattern(self, graph: Graph, match: dict):
        """Replace the matched subgraph with NormalizeOp(eps=y) and ones weights.

        The epsilon `y` is taken from whichever Maximum input carries a
        constant scalar value; if neither does, the transform is skipped.
        """
        y = match['maximum'].in_port(0).data.get_value()
        if y is None:
            y = match['maximum'].in_port(1).data.get_value()
        if y is None or y.shape != ():
            log.debug('The value of the "maximum_y_data" is not defined or is not constant')
            return
        normalize_input_node = match['square'].in_port(0).get_source().node
        normalize_node = NormalizeOp(graph, {'name': normalize_input_node.soft_get('name') + '/Normalize', 'eps': y,
                                    'across_spatial': 0, 'channel_shared': 0}).create_node()
        # Weights are all ones over the last input dimension (assumed to be the
        # channel axis for this pattern — per-channel scale of 1).
        weights_node = Const(graph, {'value': np.ones(shape=int64_array([match['input'].shape[-1]]),
                                                       dtype=match['input'].data_type)}).create_node()
        # the normalize_input_node has 2 consumers so it is necessary to disconnect output port first
        normalize_input_node.out_port(0).disconnect()
        normalize_input_node.out_port(0).get_connection().set_destination(normalize_node.in_port(0))
        weights_node.out_port(0).get_connection().set_destination(normalize_node.in_port(1))
        match['l2_normalize'].out_port(0).get_connection().set_source(normalize_node.out_port(0))
| [
"numpy.all",
"logging.debug",
"mo.front.common.partial_infer.utils.int64_array"
] | [((3146, 3231), 'logging.debug', 'log.debug', (['"""The value of the "maximum_y_data" is not defined or is not constant"""'], {}), '(\'The value of the "maximum_y_data" is not defined or is not constant\'\n )\n', (3155, 3231), True, 'import logging as log\n'), ((3603, 3642), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (["[match['input'].shape[-1]]"], {}), "([match['input'].shape[-1]])\n", (3614, 3642), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((1711, 1728), 'numpy.all', 'np.all', (['(x == -0.5)'], {}), '(x == -0.5)\n', (1717, 1728), True, 'import numpy as np\n'), ((1930, 1944), 'numpy.all', 'np.all', (['(x == 2)'], {}), '(x == 2)\n', (1936, 1944), True, 'import numpy as np\n')] |
import pandas as pd
import tensorflow as tf
import numpy as np
class DatasetCreation():
    """Converts a saved inputs/targets dictionary (.npy) into a CSV file and a
    TFRecord file of serialized ``tf.train.Example`` protos.

    Attributes:
        input_dictionay: path to a .npy file containing a pickled dict with
            "inputs" and "targets" sequences of equal length.
        output_tfr_file: path of the TFRecord file to write.
        output_dictionary_csv: path of the intermediate CSV file to write.
    """

    def __init__(self, input_dictionay, output_tfr_file, output_dictionary_csv):
        self.input_dictionay = input_dictionay
        self.output_tfr_file = output_tfr_file
        self.output_dictionary_csv = output_dictionary_csv

    def run(self):
        """Load the dictionary, dump it to CSV, then write each row as a
        tf.train.Example (utf-8 encoded bytes features) to the TFRecord file.
        """
        read_dictionary = np.load(self.input_dictionay, allow_pickle='TRUE').item()
        dframe = pd.DataFrame(read_dictionary)
        header = ["inputs", "targets"]
        print(len(read_dictionary["inputs"]))
        print(len(read_dictionary["targets"]))
        # BUG FIX: the original referenced the module-level globals
        # `output_dictionary_csv` / `output_tfr_file` here instead of the
        # instance attributes, so the class only worked when this script's
        # __main__ block happened to define globals with those names.
        dframe.to_csv(self.output_dictionary_csv, columns=header, index=False)
        csv = pd.read_csv(self.output_dictionary_csv).values
        with tf.io.TFRecordWriter(self.output_tfr_file) as writer:
            for row in csv:
                # All columns but the last form the input; the last is the target.
                inputs, targets = str(row[:-1]), str(row[-1])
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            "inputs": tf.train.Feature(
                                bytes_list=tf.train.BytesList(value=[inputs.encode('utf-8')])),
                            "targets": tf.train.Feature(
                                bytes_list=tf.train.BytesList(value=[targets.encode('utf-8')])),
                        }
                    )
                )
                writer.write(example.SerializeToString())
if __name__ == "__main__":
input_dictionay = "utils/dic_data.npy"
output_dictionary_csv = "utils/output.csv"
output_tfr_file = "pegasus/data/testdata/emails_complains_pattern.tfrecords"
dc = DatasetCreation(input_dictionay, output_tfr_file, output_dictionary_csv)
dc.run()
| [
"pandas.DataFrame",
"numpy.load",
"tensorflow.io.TFRecordWriter",
"pandas.read_csv"
] | [((446, 475), 'pandas.DataFrame', 'pd.DataFrame', (['read_dictionary'], {}), '(read_dictionary)\n', (458, 475), True, 'import pandas as pd\n'), ((696, 730), 'pandas.read_csv', 'pd.read_csv', (['output_dictionary_csv'], {}), '(output_dictionary_csv)\n', (707, 730), True, 'import pandas as pd\n'), ((751, 788), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['output_tfr_file'], {}), '(output_tfr_file)\n', (771, 788), True, 'import tensorflow as tf\n'), ((371, 421), 'numpy.load', 'np.load', (['self.input_dictionay'], {'allow_pickle': '"""TRUE"""'}), "(self.input_dictionay, allow_pickle='TRUE')\n", (378, 421), True, 'import numpy as np\n')] |
import streamlit as st
import IPython.display as ipd
from pydub import AudioSegment
import numpy as np
from PIL import Image
from load_css import local_css
import pandas as pd
import os
import datetime
# librosa is a Python library for analyzing audio and music. It can be used to extract the data from the audio files we will see it later.
import librosa
import librosa.display
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# to play the audio files
from IPython.display import Audio
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import *
import base64
@st.cache(allow_output_mutation=True)
def get_base64_of_bin_file(bin_file):
    """Return the contents of *bin_file* as a base64-encoded text string."""
    with open(bin_file, 'rb') as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode()
@st.cache(allow_output_mutation=True)
def load_our_model():
    """Load (and cache) the pre-trained Keras covid-detection model from disk."""
    return tf.keras.models.load_model('covidtest.h5')
return model
local_css("style.css")
st.markdown(" <h1 style='text-align: center; color: black;'><span class='highlight slateblue'>Corona Detection App</span></h1>", unsafe_allow_html=True)
st.markdown("\n")
st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>To Know about the working of App and to Display Wave Plot, please click</span></h3>", unsafe_allow_html=True)
st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>on Expand to show option Button below.</span></h3>", unsafe_allow_html=True)
my_expander = st.beta_expander("Expand to show option", expanded=False)
with my_expander:
choice = st.multiselect("Enter Your Choice", ('How does it work ?', 'Display Wave Plot'))
if 'How does it work ?' in choice:
st.markdown("<div><span class='highlight blue'>Hello and Welcome to our AI enabled Covid Detection App.Let us describe you how it works :- </span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'>• Upload an audio of about three seconds in which your cough sound can be heard clearly </span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'> by clicking on the Browse Files button </span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'>• Once the file is uploaded the AI Model will display the result on the screen.. </span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'>• Once your result is displayed and you want to obtain a prediction for any other audio file then</span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'> it is recommended to reload the page. </span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'>• At last, we wish you to stay healthy and Covid Negative. Don't forget to wear Mask and</span></div>", unsafe_allow_html=True)
st.markdown("<div><span class='highlight blue'> maintain Social Distancing.</span><div>", unsafe_allow_html=True)
st.markdown("\n")
st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>Upload Your Audio File Below</span></h3>", unsafe_allow_html=True)
st.markdown(" <h5 style='text-align: center; color: black;'><span class='highlight slateblue'>The audio file should be of about three seconds containing the cough sound.</span></h5>", unsafe_allow_html=True)
def set_png_as_page_bg(png_file):
    """Inject CSS that paints *png_file* as the full-page background.

    The image is inlined as a base64 data URI so no static file
    hosting is needed.
    """
    encoded = get_base64_of_bin_file(png_file)
    css = '''
    <style>
    body {
    background-image: url("data:image/png;base64,%s");
    background-size: cover;
    }
    </style>
    ''' % encoded
    st.markdown(css, unsafe_allow_html=True)
set_png_as_page_bg('abcd.png')
# NOTE(review): mid-file import; kept in place to preserve the script's flow.
from numpy import load
# Rebuild the label encoder and feature scaler from the training artefacts so
# predictions can be decoded and inputs normalised the same way as in training.
Y = load('Y.npy', allow_pickle = True)
encoder = OneHotEncoder()
Y = encoder.fit_transform(np.array(Y).reshape(-1,1)).toarray()
tr = load('tr.npy')
te = load('te.npy')
scaler = StandardScaler()
tr = scaler.fit_transform(tr)
te = scaler.transform(te)
# Wall-clock time at startup, later formatted into a unique temp-file name.
directory = datetime.datetime.now().time()
st.set_option('deprecation.showPyplotGlobalUse', False)
uploaded_file = st.file_uploader("Insert File", type="mp3")
if uploaded_file is not None:
    # Convert the uploaded mp3 to a timestamp-named wav file and play it back.
    audio_path = directory.strftime('%Y%m%d%H%M%S') + ".wav"
    file_var = AudioSegment.from_mp3(uploaded_file)
    file_var.export(audio_path, format='wav')
    st.audio(audio_path)
def extract_features(data, sample_rate):
# ZCR
result = np.array([])
zcr = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
result=np.hstack((result, zcr)) # stacking horizontally
# Chroma_stft
stft = np.abs(librosa.stft(data))
chroma_stft = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
result = np.hstack((result, chroma_stft)) # stacking horizontally
# MFCC
mfcc = np.mean(librosa.feature.mfcc(y=data, sr=sample_rate).T, axis=0)
result = np.hstack((result, mfcc)) # stacking horizontally
# Root Mean Square Value
rms = np.mean(librosa.feature.rms(y=data).T, axis=0)
result = np.hstack((result, rms)) # stacking horizontally
# MelSpectogram
mel = np.mean(librosa.feature.melspectrogram(y=data, sr=sample_rate).T, axis=0)
result = np.hstack((result, mel)) # stacking horizontally
return result
    def create_waveplot(data, sr):
        """Render a waveform plot of *data* into the Streamlit page."""
        plt.figure(figsize=(10, 3))
        plt.title('Waveplot for audio that you uploaded.', size=15)
        # NOTE(review): librosa.display.waveplot was removed in librosa 0.10
        # (replaced by waveshow) — confirm the pinned librosa version.
        librosa.display.waveplot(data, sr=sr)
        plt.show()
        # st.pyplot() with no argument re-renders the current global figure.
        st.pyplot()
def get_features(data, sample_rate):
# duration and offset are used to take care of the no audio in start and the ending of each audio files as seen above.
res1 = extract_features(data, sample_rate)
result = np.array(res1)
return result
    X = []
    # Load ~2 s of audio, skipping the first 0.6 s of (usually silent) lead-in.
    data, sample_rate = librosa.load(audio_path, duration=2.0, offset=0.6)
    if 'Display Wave Plot' in choice:
        create_waveplot(data, sample_rate)
    feature = get_features(data, sample_rate)
    for ele in feature:
        X.append(ele)
    # Shape the feature vector as a single (1, n_features, 1) model input,
    # scaled with the same scaler that was fitted on the training data.
    X = np.array(X)
    X = X.reshape(1, -1)
    X = scaler.transform(X)
    X = np.expand_dims(X, axis=2)
    present_model = load_our_model()
    prediction = present_model.predict(X)
    # Map the one-hot model output back to its class label.
    prediction_pred = encoder.inverse_transform(prediction)
    if str(prediction_pred[0][0]) == 'covid':
        st.markdown("<div><span class='highlight coral'>Our AI model has predicted you as Covid positive. So, we highly recommend you to visit your</span></div>", unsafe_allow_html=True)
        st.markdown("<div><span class='highlight coral'> nearest hopital</span></div>", unsafe_allow_html=True)
    else:
        st.markdown("<div><span class='highlight green'>According to our AI Model you are Covid Negative. Always wear a mask while going </span></div>", unsafe_allow_html=True)
        st.markdown("<div><span class='highlight green'> outside your home and maintain Social Distancing. Be Safe and Be Healthy.</span></div>", unsafe_allow_html=True)
    st.markdown("\n")
    st.markdown("<div><span class='highlight blue'>Before uploading any next audio file please reload the page.</span></div>", unsafe_allow_html=True)
    st.markdown("\n")
    st.markdown("<div><span class='highlight blue'>Thank You for using our Covid Detection App.</span></div>", unsafe_allow_html=True)
    # Clean up the temporary wav file.
    os.remove(audio_path)
else:
    st.write("Please upload a .mp3 file")
# Footer expander holding the developers' LinkedIn badges.
my_expander = st.beta_expander("Connect with the Developers", expanded=True)
with my_expander:
st.markdown(" [](https://www.linkedin.com/in/ashish-arya-65923b16b/) [](https://www.linkedin.com/in/anshul-chaudhary-2001/)", unsafe_allow_html=True) | [
"numpy.hstack",
"base64.b64encode",
"librosa.feature.zero_crossing_rate",
"librosa.feature.mfcc",
"streamlit.audio",
"numpy.array",
"streamlit.multiselect",
"tensorflow.keras.models.load_model",
"librosa.display.waveplot",
"librosa.feature.rms",
"os.remove",
"librosa.load",
"streamlit.cache"... | [((711, 747), 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), '(allow_output_mutation=True)\n', (719, 747), True, 'import streamlit as st\n'), ((897, 933), 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), '(allow_output_mutation=True)\n', (905, 933), True, 'import streamlit as st\n'), ((1030, 1052), 'load_css.local_css', 'local_css', (['"""style.css"""'], {}), "('style.css')\n", (1039, 1052), False, 'from load_css import local_css\n'), ((1057, 1219), 'streamlit.markdown', 'st.markdown', (['""" <h1 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>Corona Detection App</span></h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n " <h1 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>Corona Detection App</span></h1>"\n , unsafe_allow_html=True)\n', (1068, 1219), True, 'import streamlit as st\n'), ((1211, 1228), 'streamlit.markdown', 'st.markdown', (['"""\n"""'], {}), "('\\n')\n", (1222, 1228), True, 'import streamlit as st\n'), ((1230, 1443), 'streamlit.markdown', 'st.markdown', (['""" <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>To Know about the working of App and to Display Wave Plot, please click</span></h3>"""'], {'unsafe_allow_html': '(True)'}), '(\n " <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>To Know about the working of App and to Display Wave Plot, please click</span></h3>"\n , unsafe_allow_html=True)\n', (1241, 1443), True, 'import streamlit as st\n'), ((1435, 1615), 'streamlit.markdown', 'st.markdown', (['""" <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>on Expand to show option Button below.</span></h3>"""'], {'unsafe_allow_html': '(True)'}), '(\n " <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>on Expand to show option Button below.</span></h3>"\n , 
unsafe_allow_html=True)\n', (1446, 1615), True, 'import streamlit as st\n'), ((1625, 1682), 'streamlit.beta_expander', 'st.beta_expander', (['"""Expand to show option"""'], {'expanded': '(False)'}), "('Expand to show option', expanded=False)\n", (1641, 1682), True, 'import streamlit as st\n'), ((3148, 3318), 'streamlit.markdown', 'st.markdown', (['""" <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>Upload Your Audio File Below</span></h3>"""'], {'unsafe_allow_html': '(True)'}), '(\n " <h3 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>Upload Your Audio File Below</span></h3>"\n , unsafe_allow_html=True)\n', (3159, 3318), True, 'import streamlit as st\n'), ((3310, 3527), 'streamlit.markdown', 'st.markdown', (['""" <h5 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>The audio file should be of about three seconds containing the cough sound.</span></h5>"""'], {'unsafe_allow_html': '(True)'}), '(\n " <h5 style=\'text-align: center; color: black;\'><span class=\'highlight slateblue\'>The audio file should be of about three seconds containing the cough sound.</span></h5>"\n , unsafe_allow_html=True)\n', (3321, 3527), True, 'import streamlit as st\n'), ((3913, 3945), 'numpy.load', 'load', (['"""Y.npy"""'], {'allow_pickle': '(True)'}), "('Y.npy', allow_pickle=True)\n", (3917, 3945), False, 'from numpy import load\n'), ((3959, 3974), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (3972, 3974), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((4045, 4059), 'numpy.load', 'load', (['"""tr.npy"""'], {}), "('tr.npy')\n", (4049, 4059), False, 'from numpy import load\n'), ((4066, 4080), 'numpy.load', 'load', (['"""te.npy"""'], {}), "('te.npy')\n", (4070, 4080), False, 'from numpy import load\n'), ((4093, 4109), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4107, 4109), False, 'from 
sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((4217, 4272), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (4230, 4272), True, 'import streamlit as st\n'), ((4292, 4335), 'streamlit.file_uploader', 'st.file_uploader', (['"""Insert File"""'], {'type': '"""mp3"""'}), "('Insert File', type='mp3')\n", (4308, 4335), True, 'import streamlit as st\n'), ((7634, 7696), 'streamlit.beta_expander', 'st.beta_expander', (['"""Connect with the Developers"""'], {'expanded': '(True)'}), "('Connect with the Developers', expanded=True)\n", (7650, 7696), True, 'import streamlit as st\n'), ((968, 1010), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""covidtest.h5"""'], {}), "('covidtest.h5')\n", (994, 1010), True, 'import tensorflow as tf\n'), ((1716, 1801), 'streamlit.multiselect', 'st.multiselect', (['"""Enter Your Choice"""', "('How does it work ?', 'Display Wave Plot')"], {}), "('Enter Your Choice', ('How does it work ?', 'Display Wave Plot')\n )\n", (1730, 1801), True, 'import streamlit as st\n'), ((1840, 2028), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>Hello and Welcome to our AI enabled Covid Detection App.Let us describe you how it works :- </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>Hello and Welcome to our AI enabled Covid Detection App.Let us describe you how it works :- </span></div>"\n , unsafe_allow_html=True)\n', (1851, 2028), True, 'import streamlit as st\n'), ((2024, 2208), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>• Upload an audio of about three seconds in which your cough sound can be heard clearly </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>• Upload an audio of about three seconds in which your cough sound can be heard clearly </span></div>"\n , 
unsafe_allow_html=True)\n', (2035, 2208), True, 'import streamlit as st\n'), ((2204, 2341), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'> by clicking on the Browse Files button </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'> by clicking on the Browse Files button </span></div>"\n , unsafe_allow_html=True)\n', (2215, 2341), True, 'import streamlit as st\n'), ((2337, 2514), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>• Once the file is uploaded the AI Model will display the result on the screen.. </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>• Once the file is uploaded the AI Model will display the result on the screen.. </span></div>"\n , unsafe_allow_html=True)\n', (2348, 2514), True, 'import streamlit as st\n'), ((2510, 2703), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>• Once your result is displayed and you want to obtain a prediction for any other audio file then</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>• Once your result is displayed and you want to obtain a prediction for any other audio file then</span></div>"\n , unsafe_allow_html=True)\n', (2521, 2703), True, 'import streamlit as st\n'), ((2699, 2834), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'> it is recommended to reload the page. </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'> it is recommended to reload the page. </span></div>"\n , unsafe_allow_html=True)\n', (2710, 2834), True, 'import streamlit as st\n'), ((2830, 3014), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>• At last, we wish you to stay healthy and Covid Negative. 
Don\'t forget to wear Mask and</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>• At last, we wish you to stay healthy and Covid Negative. Don\'t forget to wear Mask and</span></div>"\n , unsafe_allow_html=True)\n', (2841, 3014), True, 'import streamlit as st\n'), ((3010, 3133), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'> maintain Social Distancing.</span><div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'> maintain Social Distancing.</span><div>"\n , unsafe_allow_html=True)\n', (3021, 3133), True, 'import streamlit as st\n'), ((3129, 3146), 'streamlit.markdown', 'st.markdown', (['"""\n"""'], {}), "('\\n')\n", (3140, 3146), True, 'import streamlit as st\n'), ((3787, 3835), 'streamlit.markdown', 'st.markdown', (['page_bg_img'], {'unsafe_allow_html': '(True)'}), '(page_bg_img, unsafe_allow_html=True)\n', (3798, 3835), True, 'import streamlit as st\n'), ((4441, 4477), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['uploaded_file'], {}), '(uploaded_file)\n', (4462, 4477), False, 'from pydub import AudioSegment\n'), ((4527, 4547), 'streamlit.audio', 'st.audio', (['audio_path'], {}), '(audio_path)\n', (4535, 4547), True, 'import streamlit as st\n'), ((6057, 6107), 'librosa.load', 'librosa.load', (['audio_path'], {'duration': '(2.0)', 'offset': '(0.6)'}), '(audio_path, duration=2.0, offset=0.6)\n', (6069, 6107), False, 'import librosa\n'), ((6287, 6298), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (6295, 6298), True, 'import numpy as np\n'), ((6357, 6382), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(2)'}), '(X, axis=2)\n', (6371, 6382), True, 'import numpy as np\n'), ((7217, 7234), 'streamlit.markdown', 'st.markdown', (['"""\n"""'], {}), "('\\n')\n", (7228, 7234), True, 'import streamlit as st\n'), ((7238, 7394), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>Before uploading any next audio file 
please reload the page.</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>Before uploading any next audio file please reload the page.</span></div>"\n , unsafe_allow_html=True)\n', (7249, 7394), True, 'import streamlit as st\n'), ((7388, 7405), 'streamlit.markdown', 'st.markdown', (['"""\n"""'], {}), "('\\n')\n", (7399, 7405), True, 'import streamlit as st\n'), ((7409, 7549), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight blue\'>Thank You for using our Covid Detection App.</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight blue\'>Thank You for using our Covid Detection App.</span></div>"\n , unsafe_allow_html=True)\n', (7420, 7549), True, 'import streamlit as st\n'), ((7543, 7564), 'os.remove', 'os.remove', (['audio_path'], {}), '(audio_path)\n', (7552, 7564), False, 'import os\n'), ((7580, 7617), 'streamlit.write', 'st.write', (['"""Please upload a .mp3 file"""'], {}), "('Please upload a .mp3 file')\n", (7588, 7617), True, 'import streamlit as st\n'), ((7721, 8093), 'streamlit.markdown', 'st.markdown', (['""" [](https://www.linkedin.com/in/ashish-arya-65923b16b/) [](https://www.linkedin.com/in/anshul-chaudhary-2001/)"""'], {'unsafe_allow_html': '(True)'}), "(\n ' [](https://www.linkedin.com/in/ashish-arya-65923b16b/) [](https://www.linkedin.com/in/anshul-chaudhary-2001/)'\n , unsafe_allow_html=True)\n", (7732, 8093), True, 'import streamlit as st\n'), ((4183, 4206), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4204, 4206), False, 'import datetime\n'), ((4623, 4635), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4631, 4635), True, 'import numpy as np\n'), ((4725, 4749), 'numpy.hstack', 'np.hstack', (['(result, zcr)'], {}), '((result, zcr))\n', (4734, 4749), True, 'import numpy as np\n'), ((4946, 4978), 'numpy.hstack', 'np.hstack', (['(result, chroma_stft)'], {}), '((result, chroma_stft))\n', (4955, 4978), True, 'import numpy as 
np\n'), ((5111, 5136), 'numpy.hstack', 'np.hstack', (['(result, mfcc)'], {}), '((result, mfcc))\n', (5120, 5136), True, 'import numpy as np\n'), ((5271, 5295), 'numpy.hstack', 'np.hstack', (['(result, rms)'], {}), '((result, rms))\n', (5280, 5295), True, 'import numpy as np\n'), ((5448, 5472), 'numpy.hstack', 'np.hstack', (['(result, mel)'], {}), '((result, mel))\n', (5457, 5472), True, 'import numpy as np\n'), ((5571, 5598), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (5581, 5598), True, 'import matplotlib.pyplot as plt\n'), ((5606, 5665), 'matplotlib.pyplot.title', 'plt.title', (['"""Waveplot for audio that you uploaded."""'], {'size': '(15)'}), "('Waveplot for audio that you uploaded.', size=15)\n", (5615, 5665), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5710), 'librosa.display.waveplot', 'librosa.display.waveplot', (['data'], {'sr': 'sr'}), '(data, sr=sr)\n', (5697, 5710), False, 'import librosa\n'), ((5718, 5728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5726, 5728), True, 'import matplotlib.pyplot as plt\n'), ((5736, 5747), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (5745, 5747), True, 'import streamlit as st\n'), ((5982, 5996), 'numpy.array', 'np.array', (['res1'], {}), '(res1)\n', (5990, 5996), True, 'import numpy as np\n'), ((6574, 6762), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight coral\'>Our AI model has predicted you as Covid positive. So, we highly recommend you to visit your</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight coral\'>Our AI model has predicted you as Covid positive. 
So, we highly recommend you to visit your</span></div>"\n , unsafe_allow_html=True)\n', (6585, 6762), True, 'import streamlit as st\n'), ((6758, 6865), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight coral\'> nearest hopital</span></div>"""'], {'unsafe_allow_html': '(True)'}), '("<div><span class=\'highlight coral\'> nearest hopital</span></div>",\n unsafe_allow_html=True)\n', (6769, 6865), True, 'import streamlit as st\n'), ((6878, 7056), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight green\'>According to our AI Model you are Covid Negative. Always wear a mask while going </span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight green\'>According to our AI Model you are Covid Negative. Always wear a mask while going </span></div>"\n , unsafe_allow_html=True)\n', (6889, 7056), True, 'import streamlit as st\n'), ((7052, 7223), 'streamlit.markdown', 'st.markdown', (['"""<div><span class=\'highlight green\'> outside your home and maintain Social Distancing. Be Safe and Be Healthy.</span></div>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<div><span class=\'highlight green\'> outside your home and maintain Social Distancing. 
Be Safe and Be Healthy.</span></div>"\n , unsafe_allow_html=True)\n', (7063, 7223), True, 'import streamlit as st\n'), ((861, 883), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (877, 883), False, 'import base64\n'), ((4818, 4836), 'librosa.stft', 'librosa.stft', (['data'], {}), '(data)\n', (4830, 4836), False, 'import librosa\n'), ((4657, 4699), 'librosa.feature.zero_crossing_rate', 'librosa.feature.zero_crossing_rate', ([], {'y': 'data'}), '(y=data)\n', (4691, 4699), False, 'import librosa\n'), ((4867, 4918), 'librosa.feature.chroma_stft', 'librosa.feature.chroma_stft', ([], {'S': 'stft', 'sr': 'sample_rate'}), '(S=stft, sr=sample_rate)\n', (4894, 4918), False, 'import librosa\n'), ((5039, 5083), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'data', 'sr': 'sample_rate'}), '(y=data, sr=sample_rate)\n', (5059, 5083), False, 'import librosa\n'), ((5216, 5243), 'librosa.feature.rms', 'librosa.feature.rms', ([], {'y': 'data'}), '(y=data)\n', (5235, 5243), False, 'import librosa\n'), ((5366, 5420), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'data', 'sr': 'sample_rate'}), '(y=data, sr=sample_rate)\n', (5396, 5420), False, 'import librosa\n'), ((4002, 4013), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (4010, 4013), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
testOverrideSet.py
Created by <NAME> on 27 Sep 2015.
Licensed under a 3-clause BSD license.
Revision history:
27 Sep 2015 <NAME>
Initial version
"""
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import warnings
from Totoro.db import getConnectionFull
from Totoro.dbclasses.plate_utils import removeOrphanedSets
from Totoro.bin.overrideSet import main
from Totoro.dbclasses import Exposure, Set, fromPlateID
# Module-level fixtures: one shared test-database connection and session
# used by every test case below.
db, Session, plateDB, mangaDB = getConnectionFull('test')
session = db.Session()
class TestOverrideSet(unittest.TestCase):
    """Integration tests for the Totoro ``overrideSet`` command-line tool.

    The tests mutate the test database directly, so ``setUpClass`` /
    ``tearDown`` restore plates 7495 and 8549 to a known fixture state
    before and after every test.

    NOTE(review): this module is Python-2 only as written —
    ``['-v', 'good'] + map(str, exps)`` raises TypeError under Python 3
    (``map`` returns an iterator), and ``assertItemsEqual`` was renamed
    ``assertCountEqual`` in Python 3.
    """

    @classmethod
    def setUpClass(cls):
        """Restores plate 7495."""
        # Expected set membership for mangaDB exposures 17-28 of plate 7495.
        setPK = [1, 1, 1, 2, 2, 2, 3, 4, 3, 4, 3, 4]
        with session.begin():
            for ii, expPK in enumerate(range(17, 29)):
                exp = session.query(db.mangaDB.Exposure).get(expPK)
                ss = session.query(db.mangaDB.Set).get(setPK[ii])
                if ss is None:
                    session.add(db.mangaDB.Set(pk=setPK[ii]))
                    session.flush()
                exp.set_pk = setPK[ii]
                exp.exposure_status_pk = 4
                session.flush()
            for sPK in setPK:
                ss = session.query(db.mangaDB.Set).get(sPK)
                ss.set_status_pk = 0
        # Restore plate 8549: recreate its sets with their fixture statuses
        # and detach any exposures currently attached to them.
        plate8549_setPKs = [268, 269, 276, 280, 281]
        plate8549_setStatus = [1, None, 1, None, 1]
        with session.begin():
            for ii, setPK in enumerate(plate8549_setPKs):
                ss = session.query(db.mangaDB.Set).get(setPK)
                if ss is None:
                    session.add(
                        db.mangaDB.Set(pk=setPK,
                                       set_status_pk=plate8549_setStatus[ii]))
                    session.flush()
                    continue
                else:
                    ss.set_status_pk = plate8549_setStatus[ii]
                    session.flush()
                for exp in ss.exposures:
                    exp.set_pk = None
        # Reattach plate 8549's exposures to their fixture sets; exposure
        # 84468 deliberately has no status.
        plate8549_exposures = [84465, 84588, 84667, 84468, 84462, 84664, 84670,
                               84658, 84661, 84591, 84672, 84675]
        plate8549_exposure_set = [268, 268, 268, 269, 276, 276, 276, 280, 280,
                                  281, 281, 281]
        with session.begin():
            for ii, expPK in enumerate(plate8549_exposures):
                exp = session.query(db.plateDB.Exposure).get(expPK)
                if expPK == 84468:
                    exp.mangadbExposure[0].exposure_status_pk = None
                else:
                    exp.mangadbExposure[0].exposure_status_pk = 4
                exp.mangadbExposure[0].set_pk = plate8549_exposure_set[ii]
            # Drop the set some tests create, if it survived a previous run.
            ss = session.query(db.mangaDB.Set).get(312)
            if ss is not None:
                session.delete(ss)
        removeOrphanedSets()
    def tearDown(self):
        """Similar to set up."""
        # Re-run the fixture restoration after every test.
        self.setUpClass()
    def testOverrideGoodOneSet(self):
        """Test overriding all the exposures in a set to Override Good."""
        exps = [177773, 177774, 177775]
        main(argv=['-v', 'good'] + map(str, exps))
        for exp in exps:
            totExp = Exposure(exp, format='exposure_no', parent='plateDB')
            self.assertEqual(totExp._mangaExposure.set_pk, 1)
            self.assertEqual(totExp._mangaExposure.status.label,
                             'Override Good')
            self.assertEqual(totExp._mangaExposure.set.status.label,
                             'Override Good')
        # Checks that the set is overriden good
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(1)
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Good')
    def testOverrideGoodSeveralSet(self):
        """Test overriding exposures from different sets into a good set."""
        exps = [177773, 177774, 177778]
        main(argv=['-v', 'good'] + map(str, exps))
        setPKs = []
        for exp in exps:
            totExp = Exposure(exp, format='exposure_no', parent='plateDB')
            setPK = totExp._mangaExposure.set_pk
            self.assertIsNotNone(setPK)
            setPKs.append(setPK)
            self.assertEqual(totExp._mangaExposure.status.label,
                             'Override Good')
            self.assertEqual(totExp._mangaExposure.set.status.label,
                             'Override Good')
        # All three exposures must have landed in the same (new) set.
        self.assertEqual(len(np.unique(setPKs)), 1)
        # Checks that the set is overriden good
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(setPKs[0])
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Good')
        for setPK in [1, 2, 3, 4]:
            ss = Set(setPK, format='pk')
            status = ss.getStatus()[0]
            if setPK in [1, 2]:
                self.assertEqual(status, 'Unplugged')
            else:
                self.assertEqual(status, 'Good')
                self.assertEqual(ss.status.label, 'Excellent')
        plate = fromPlateID(7495)
        self.assertEqual(len(plate.sets), 5)
    def testOverrideBadOneSet(self):
        """Test overriding all the exposures in a set to Override Bad."""
        exps = [177773, 177774, 177775]
        main(argv=['-v', 'bad'] + map(str, exps))
        for exp in exps:
            totExp = Exposure(exp, format='exposure_no', parent='plateDB')
            self.assertEqual(totExp._mangaExposure.set_pk, 1)
            self.assertEqual(totExp._mangaExposure.status.label,
                             'Override Bad')
            self.assertEqual(totExp._mangaExposure.set.status.label,
                             'Override Bad')
        # Checks that the set is overriden good
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(1)
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Bad')
        plate = fromPlateID(7495)
        self.assertEqual(len(plate.sets), 4)
        # Checks that the plate completion takes into account that there is one
        # fewer valid set.
        self.assertLessEqual(plate.getPlateCompletion(), 1.5)
    def testOverrideBadSeveralSet(self):
        """Test overriding exposures from different sets into a bad set."""
        exps = [177773, 177774, 177778]
        # Overriding should emit a warning about the completion change.
        with warnings.catch_warnings(record=True) as ww:
            main(argv=['-v', 'bad'] + map(str, exps))
            warnMessages = '\n'.join([str(ww[ii].message)
                                      for ii in range(len(ww))])
            self.assertIn('plate completion has changed from 1.85 to 0.91.',
                          warnMessages)
        setPKs = []
        for exp in exps:
            totExp = Exposure(exp, format='exposure_no', parent='plateDB')
            setPK = totExp._mangaExposure.set_pk
            self.assertIsNotNone(setPK)
            setPKs.append(setPK)
            self.assertEqual(totExp._mangaExposure.status.label,
                             'Override Bad')
            self.assertEqual(totExp._mangaExposure.set.status.label,
                             'Override Bad')
        self.assertEqual(len(np.unique(setPKs)), 1)
        # Checks that the set is overriden bad
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(setPKs[0])
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Bad')
        for setPK in [1, 2, 3, 4]:
            ss = Set(setPK, format='pk')
            status = ss.getStatus()[0]
            if setPK in [1, 2]:
                self.assertEqual(status, 'Unplugged')
            else:
                self.assertEqual(status, 'Good')
                self.assertEqual(ss.status.label, 'Excellent')
        plate = fromPlateID(7495)
        self.assertEqual(len(plate.sets), 5)
        self.assertLessEqual(plate.getPlateCompletion(), 1.)
    def testRemoveSet(self):
        """Test removing overridden set."""
        # We override a set as bad
        exps = [177773, 177774, 177778]
        overridenSetPK = main(argv=['-v', 'bad'] + map(str, exps))
        # Checks that the new overridden set is 297
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(overridenSetPK)
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Bad')
        # Now we remove the set
        main(argv=['-v', 'remove', str(overridenSetPK)])
        # Check that set_pk 297 has been removed
        with session.begin():
            ss = session.query(db.mangaDB.Set).get(overridenSetPK)
            self.assertIsNone(ss)
        # Checks that all exposures don't have exposure status or set_pk
        with session.begin():
            for exp in exps:
                ee = session.query(db.plateDB.Exposure).filter(
                    db.plateDB.Exposure.exposure_no == exp).one()
                mangaDBexp = ee.mangadbExposure[0]
                self.assertIsNone(mangaDBexp.set_pk)
                self.assertIsNone(mangaDBexp.exposure_status_pk)
        # Now we repeat the test but using --reload. The plate should be left
        # in the original state but with sets 1 and 2 exchanged.
        overridenSetPK = main(argv=['-v', 'bad'] + map(str, exps))
        # Now we remove the set
        main(argv=['-v', 'remove', '--reload', str(overridenSetPK)])
        # Checks that all exposures don't have exposure status or set_pk
        expSetPKs = [1, 1, 2]
        with session.begin():
            for ii, exp in enumerate(exps):
                ee = session.query(db.plateDB.Exposure).filter(
                    db.plateDB.Exposure.exposure_no == exp).one()
                mangaDBexp = ee.mangadbExposure[0]
                self.assertEqual(mangaDBexp.set_pk, expSetPKs[ii])
                self.assertEqual(mangaDBexp.exposure_status_pk, 4)
        setStatus = [1, None]
        with session.begin():
            for ii, setPK in enumerate([1, 2]):
                ss = session.query(db.mangaDB.Set).get(setPK)
                self.assertEqual(ss.set_status_pk, setStatus[ii])
    def testInfo(self):
        """Test getting information from a set."""
        # Checks a fake set
        exps = [177773, 177774, 177778]
        (status, code,
         statusMock, codeMock) = main(argv=['-v', 'info'] + map(str, exps))
        self.assertEqual(status, 'Bad')
        self.assertEqual(code, 2)
        self.assertIsNone(statusMock)
        self.assertIsNone(codeMock)
        # Now we check a real good one
        exps = [177773, 177774, 177775]
        (status, code,
         statusMock, codeMock) = main(argv=['-v', 'info'] + map(str, exps))
        self.assertEqual(status, 'Excellent')
        self.assertEqual(code, 10)
        self.assertEqual(statusMock, 'Good')
        self.assertEqual(codeMock, 0)
        # Lets override the first example as bad
        exps = [177773, 177774, 177778]
        main(argv=['-v', 'bad'] + map(str, exps))
        (status, code,
         statusMock, codeMock) = main(argv=['-v', 'info'] + map(str, exps))
        self.assertEqual(status, 'Override Bad')
        self.assertEqual(code, 10)
        self.assertEqual(statusMock, 'Bad')
        self.assertEqual(codeMock, 2)
        # For the previous test the status of the exposures has been internally
        # changed to None. That change should not be recorded in the DB. Let's
        # check.
        for exp in exps:
            totExp = Exposure(exp, format='exposure_no')
            self.assertEqual(totExp._mangaExposure.status.label,
                             'Override Bad')
    def testEmptySet(self):
        """Tests when one of the original sets in empty after overriding."""
        exps = [198371, 198447, 198258]
        main(argv=['-v', 'good'] + map(str, exps))
        with session.begin():
            exp198371 = session.query(db.plateDB.Exposure).filter(
                db.plateDB.Exposure.exposure_no == 198371).one()
            ss = exp198371.mangadbExposure[0].set
            self.assertIsNotNone(ss)
            self.assertEqual(ss.status.label, 'Override Good')
            self.assertItemsEqual([exp.platedbExposure.exposure_no
                                   for exp in ss.exposures], exps)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"Totoro.dbclasses.fromPlateID",
"Totoro.dbclasses.Set",
"numpy.unique",
"Totoro.db.getConnectionFull",
"warnings.catch_warnings",
"Totoro.dbclasses.Exposure",
"unittest.main",
"Totoro.dbclasses.plate_utils.removeOrphanedSets"
] | [((559, 584), 'Totoro.db.getConnectionFull', 'getConnectionFull', (['"""test"""'], {}), "('test')\n", (576, 584), False, 'from Totoro.db import getConnectionFull\n'), ((12412, 12427), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12425, 12427), False, 'import unittest\n'), ((2923, 2943), 'Totoro.dbclasses.plate_utils.removeOrphanedSets', 'removeOrphanedSets', ([], {}), '()\n', (2941, 2943), False, 'from Totoro.dbclasses.plate_utils import removeOrphanedSets\n'), ((5167, 5184), 'Totoro.dbclasses.fromPlateID', 'fromPlateID', (['(7495)'], {}), '(7495)\n', (5178, 5184), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((6062, 6079), 'Totoro.dbclasses.fromPlateID', 'fromPlateID', (['(7495)'], {}), '(7495)\n', (6073, 6079), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((7910, 7927), 'Totoro.dbclasses.fromPlateID', 'fromPlateID', (['(7495)'], {}), '(7495)\n', (7921, 7927), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((3282, 3335), 'Totoro.dbclasses.Exposure', 'Exposure', (['exp'], {'format': '"""exposure_no"""', 'parent': '"""plateDB"""'}), "(exp, format='exposure_no', parent='plateDB')\n", (3290, 3335), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((4129, 4182), 'Totoro.dbclasses.Exposure', 'Exposure', (['exp'], {'format': '"""exposure_no"""', 'parent': '"""plateDB"""'}), "(exp, format='exposure_no', parent='plateDB')\n", (4137, 4182), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((4871, 4894), 'Totoro.dbclasses.Set', 'Set', (['setPK'], {'format': '"""pk"""'}), "(setPK, format='pk')\n", (4874, 4894), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((5480, 5533), 'Totoro.dbclasses.Exposure', 'Exposure', (['exp'], {'format': '"""exposure_no"""', 'parent': '"""plateDB"""'}), "(exp, format='exposure_no', parent='plateDB')\n", (5488, 5533), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((6468, 6504), 
'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (6491, 6504), False, 'import warnings\n'), ((6876, 6929), 'Totoro.dbclasses.Exposure', 'Exposure', (['exp'], {'format': '"""exposure_no"""', 'parent': '"""plateDB"""'}), "(exp, format='exposure_no', parent='plateDB')\n", (6884, 6929), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((7614, 7637), 'Totoro.dbclasses.Set', 'Set', (['setPK'], {'format': '"""pk"""'}), "(setPK, format='pk')\n", (7617, 7637), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((11603, 11638), 'Totoro.dbclasses.Exposure', 'Exposure', (['exp'], {'format': '"""exposure_no"""'}), "(exp, format='exposure_no')\n", (11611, 11638), False, 'from Totoro.dbclasses import Exposure, Set, fromPlateID\n'), ((4561, 4578), 'numpy.unique', 'np.unique', (['setPKs'], {}), '(setPKs)\n', (4570, 4578), True, 'import numpy as np\n'), ((7306, 7323), 'numpy.unique', 'np.unique', (['setPKs'], {}), '(setPKs)\n', (7315, 7323), True, 'import numpy as np\n')] |
from __future__ import annotations
from typing import List
import cv2
import numpy as np
from . import utils
from .models import PredictedFrames, PredictedSubtitle
from .opencv_adapter import Capture
from paddleocr import PaddleOCR
class Video:
    """Extracts hard-coded subtitles from a video by running PaddleOCR over
    its frames and merging similar consecutive predictions into SRT entries.

    Typical usage: construct with model paths, call run_ocr(), then
    get_subtitles().
    """
    # Per-instance attribute annotations.
    path: str
    lang: str
    use_fullframe: bool
    det_model_dir: str
    rec_model_dir: str
    num_frames: int
    fps: float
    height: int
    ocr: PaddleOCR
    pred_frames: List[PredictedFrames]
    pred_subs: List[PredictedSubtitle]

    def __init__(self, path: str, det_model_dir: str, rec_model_dir: str):
        """Store model paths and read frame count, fps and height from the video."""
        self.path = path
        self.det_model_dir = det_model_dir
        self.rec_model_dir = rec_model_dir
        with Capture(path) as v:
            self.num_frames = int(v.get(cv2.CAP_PROP_FRAME_COUNT))
            self.fps = v.get(cv2.CAP_PROP_FPS)
            self.height = int(v.get(cv2.CAP_PROP_FRAME_HEIGHT))

    def run_ocr(self, use_gpu: bool, lang: str, time_start: str, time_end: str,
                conf_threshold: int, use_fullframe: bool, brightness_threshold: int,
                similar_image_threshold: int, similar_pixel_threshold: int,
                frames_to_skip: int,
                crop_x: int, crop_y: int, crop_width: int, crop_height: int) -> None:
        """Run OCR over frames in [time_start, time_end) and fill self.pred_frames.

        conf_threshold is a percentage (0-100); it is converted to a fraction
        before being handed to PredictedFrames.  brightness_threshold masks out
        pixels darker than the threshold in all three channels.
        similar_image_threshold skips OCR on a frame whose greyscale diff from
        the previous processed frame has fewer than that many changed pixels.
        frames_to_skip processes only every (frames_to_skip + 1)-th frame.
        The crop_* arguments, when all non-zero, select a sub-rectangle of each
        frame; otherwise the bottom third of the frame is used unless
        use_fullframe is set.

        Raises ValueError when time_start is later than time_end.
        """
        conf_threshold_percent = float(conf_threshold/100)
        self.lang = lang
        self.use_fullframe = use_fullframe
        self.pred_frames = []
        ocr = PaddleOCR(lang=self.lang, rec_model_dir=self.rec_model_dir, det_model_dir=self.det_model_dir, use_gpu=use_gpu)
        # Empty time strings mean "start of video" / "end of video".
        ocr_start = utils.get_frame_index(time_start, self.fps) if time_start else 0
        ocr_end = utils.get_frame_index(time_end, self.fps) if time_end else self.num_frames
        if ocr_end < ocr_start:
            raise ValueError('time_start is later than time_end')
        num_ocr_frames = ocr_end - ocr_start
        crop_x_end = None
        crop_y_end = None
        if crop_x and crop_y and crop_width and crop_height:
            crop_x_end = crop_x + crop_width
            crop_y_end = crop_y + crop_height
        # get frames from ocr_start to ocr_end
        with Capture(self.path) as v:
            v.set(cv2.CAP_PROP_POS_FRAMES, ocr_start)
            prev_grey = None
            predicted_frames = None
            modulo = frames_to_skip + 1
            for i in range(num_ocr_frames):
                if i % modulo == 0:
                    frame = v.read()[1]
                    if not self.use_fullframe:
                        if crop_x_end and crop_y_end:
                            frame = frame[crop_y:crop_y_end, crop_x:crop_x_end]
                        else:
                            # only use bottom third of the frame by default
                            frame = frame[self.height // 3:, :]
                    if brightness_threshold:
                        # Keep only pixels at least as bright as the threshold
                        # in every channel; everything else is zeroed.
                        frame = cv2.bitwise_and(frame, frame, mask=cv2.inRange(frame, (brightness_threshold, brightness_threshold, brightness_threshold), (255, 255, 255)))
                    if similar_image_threshold:
                        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                        if prev_grey is not None:
                            # Binarize the per-pixel difference, then count
                            # changed pixels to decide whether to re-run OCR.
                            _, absdiff = cv2.threshold(cv2.absdiff(prev_grey, grey), similar_pixel_threshold, 255, cv2.THRESH_BINARY)
                            if np.count_nonzero(absdiff) < similar_image_threshold:
                                # Frame is "the same" as the last one: extend
                                # the current prediction instead of OCR-ing.
                                predicted_frames.end_index = i + ocr_start
                                prev_grey = grey
                                continue
                        prev_grey = grey
                    predicted_frames = PredictedFrames(i + ocr_start, ocr.ocr(frame), conf_threshold_percent)
                    self.pred_frames.append(predicted_frames)
                else:
                    # Still advance the capture position for skipped frames.
                    v.read()

    def get_subtitles(self, sim_threshold: int) -> str:
        """Return the OCR results formatted as an SRT subtitle string."""
        self._generate_subtitles(sim_threshold)
        return ''.join(
            '{}\n{} --> {}\n{}\n\n'.format(
                i,
                utils.get_srt_timestamp(sub.index_start, self.fps),
                utils.get_srt_timestamp(sub.index_end, self.fps),
                sub.text)
            for i, sub in enumerate(self.pred_subs))

    def _generate_subtitles(self, sim_threshold: int) -> None:
        """Convert self.pred_frames into merged PredictedSubtitle objects.

        Raises AttributeError when run_ocr() has not been called yet.
        """
        self.pred_subs = []
        if self.pred_frames is None:
            raise AttributeError(
                'Please call self.run_ocr() first to perform ocr on frames')
        #max_frame_merge_diff = int(0.09 * self.fps)
        for frame in self.pred_frames:
            self._append_sub(PredictedSubtitle([frame], sim_threshold))

    def _append_sub(self, sub: PredictedSubtitle) -> None:
        """Append sub to self.pred_subs, merging it with similar trailing subs."""
        if len(sub.text) == 0:
            return
        # merge new sub to the last subs if they are similar and within 0.09 seconds apart
        while self.pred_subs and sub.is_similar_to(self.pred_subs[-1]):
            ls = self.pred_subs[-1]
            del self.pred_subs[-1]
            sub = PredictedSubtitle(ls.frames + sub.frames, sub.sim_threshold)
        self.pred_subs.append(sub)
| [
"cv2.inRange",
"numpy.count_nonzero",
"cv2.cvtColor",
"paddleocr.PaddleOCR",
"cv2.absdiff"
] | [((1396, 1511), 'paddleocr.PaddleOCR', 'PaddleOCR', ([], {'lang': 'self.lang', 'rec_model_dir': 'self.rec_model_dir', 'det_model_dir': 'self.det_model_dir', 'use_gpu': 'use_gpu'}), '(lang=self.lang, rec_model_dir=self.rec_model_dir, det_model_dir=\n self.det_model_dir, use_gpu=use_gpu)\n', (1405, 1511), False, 'from paddleocr import PaddleOCR\n'), ((3049, 3088), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3061, 3088), False, 'import cv2\n'), ((2864, 2971), 'cv2.inRange', 'cv2.inRange', (['frame', '(brightness_threshold, brightness_threshold, brightness_threshold)', '(255, 255, 255)'], {}), '(frame, (brightness_threshold, brightness_threshold,\n brightness_threshold), (255, 255, 255))\n', (2875, 2971), False, 'import cv2\n'), ((3194, 3222), 'cv2.absdiff', 'cv2.absdiff', (['prev_grey', 'grey'], {}), '(prev_grey, grey)\n', (3205, 3222), False, 'import cv2\n'), ((3304, 3329), 'numpy.count_nonzero', 'np.count_nonzero', (['absdiff'], {}), '(absdiff)\n', (3320, 3329), True, 'import numpy as np\n')] |
################################################################################
##
## 2018/05/02
##
## Author: <NAME>, IB²
## Version: 1.0
## Python: 3.6
##
## This implements a random forest in order to predict digenic effect of DIDA
## combinations as discussed in this paper.
##
## https://academic.oup.com/nar/article/45/15/e140/3894171
##
## It performs stratified cross-validations and averages results over a given
## amount of repeats. dida_v2_full.csv is an instance of valid CSV file.
##
################################################################################
import sys
import time
import pandas as pd
from math import sqrt
from numpy import array, concatenate, dot, diag, mean, std
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_curve, roc_auc_score, auc, matthews_corrcoef
def main(f_name, n_trees, n_epochs, threshold, selector):
    """
    Loads csv, launches cross-validation, displays scores
    f_name: str, path to reach the .csv file to evaluate predictor
    n_trees: int, amount of trees in forest
    n_epochs: int, amount of cross-validation to perform
    threshold: see getScores
    selector: str, boolean vector representing features to take into account
    """
    # .csv file MUST contains these columns
    features = [
        'CADD1', 'CADD2', 'RecA', 'EssA',
        'CADD3', 'CADD4', 'RecB', 'EssB',
        'Distance', 'Path', 'CoExp', 'AllelicState'
    ]
    assert len(selector) == len(features), "Features selector must fit features amount."
    # Keep only the features whose selector bit is '1'.
    to_keep = [f for i, f in enumerate(features) if selector[i] == '1']
    # Csv gathering, it needs to be ordered.
    df_data = pd.read_csv(f_name)
    X = array(df_data[to_keep])
    # TD: true digenic, CO: composite, UK: unknown
    y = array(
        df_data['DE'].replace('TD',1).replace('CO',0).replace('UK',-1).astype(int)
    )
    gene_pairs = array(df_data['Pair'])
    # Drop the 'unknown' (UK, encoded -1) rows from all three arrays.
    X, y, gene_pairs = X[y != -1], y[y != -1], gene_pairs[y != -1]
    print('Training on subspace {', ', '.join( to_keep ), '}.' )

    def getScores(pred, real, threshold=0.5):
        """
        Returns evaluation metrics to evaluate one cross-validation:
        Sensitivity, Specificity, Matthews_corrcoef and Area under the curve
        pred: probability vector predicted by the random forest
        real: true labels vector
        threshold: tuning parameter to favoritize one class
        """
        # NOTE(review): with these definitions 'tp' counts (real==0, pred<thr)
        # and 'tn' counts (real==1, pred>=thr), i.e. tp/tn are swapped relative
        # to the standard convention where class 1 is positive.  As a result
        # 'sen' is actually the negative predictive value and 'spe' the
        # precision; MCC is unaffected (the formula is symmetric under the
        # swap) and AUC is computed independently.  TODO confirm intent
        # against the referenced paper before renaming.
        if len(pred) != len(real):
            raise Exception("ERROR: input vectors have differente len!")
        aucScore = roc_auc_score(real, pred)
        tp, fp, fn, tn = (
            sum(r == 0 and p < threshold for r, p in zip(real, pred)),
            sum(r == 0 and p >= threshold for r, p in zip(real, pred)),
            sum(r == 1 and p < threshold for r, p in zip(real, pred)),
            sum(r == 1 and p >= threshold for r, p in zip(real, pred))
        )
        sen = tp / (tp + fn)
        spe = tn / (tn + fp)
        mcc = (
            (tp * tn - fn * fp) / sqrt(
                (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
            )
        )
        return sen, spe, mcc, aucScore

    def LOGO_crossValidation(X, y, groups, n_trees=100, n_epochs=50, threshold=0.5):
        """
        Stratified cross-validation.
        X: Design matrix
        y: label vector
        groups: Gene pair vector to define training groups
        N: number of cross validations to perform
        threshold: see getScores
        """
        logo = LeaveOneGroupOut()
        clf = RandomForestClassifier(
            n_estimators=n_trees,
            max_depth=10,
            criterion='gini',
            min_samples_split=2,
            min_samples_leaf=2,
            bootstrap=True,
            n_jobs=1
        )
        # Vectors to compute final results
        sum_sen, sum_spe, sum_mcc, sum_auc = [], [], [], []
        for i in range(n_epochs):
            start_time = time.time()
            values_t, values_p = [], []
            print("#"*10, "Trial %i" % i, "#"*10)
            # We leave one group out
            for train_index, test_index in logo.split(X, y, groups):
                X_fit, y_fit, X_train, y_train = (
                    X[train_index], y[train_index],
                    X[test_index], y[test_index]
                )
                clf = clf.fit(X_fit, y_fit)
                y_predicted = clf.predict_proba(X_train)
                # predictions are concatenated
                values_t, values_p = values_t + [yi for yi in y_train], values_p + [yi for yi in y_predicted[:,1]]
            # Score once per epoch over the concatenated held-out predictions.
            # NOTE(review): local 'auc' shadows the sklearn.metrics.auc import.
            sen, spe, mcc, auc = getScores(values_p, values_t, threshold)
            sum_sen.append(sen)
            sum_spe.append(spe)
            sum_mcc.append(mcc)
            sum_auc.append(auc)
            print('Duration:', round( (time.time() - start_time) * 100) / 100, 's')
            print('sen-spe-mcc-auc')
            print('-'.join(map(lambda x: str(round(x*100)/100), [sen, spe, mcc, auc])))
        # Averages and spreads over all epochs.
        print('Sensitivity: %f, std: %f' % (mean(sum_sen), std(sum_sen)) )
        print('Specificity: %f, std: %f' % (mean(sum_spe), std(sum_spe)) )
        print('MCC: %f, std: %f' % (mean(sum_mcc), std(sum_mcc)) )
        print('AUC: %f, std: %f' % (mean(sum_auc), std(sum_auc)) )

    LOGO_crossValidation(X, y, gene_pairs, n_trees, n_epochs, threshold)
if __name__ == "__main__":
    # Args parsing
    # Flag help
    if sys.argv[1] in ("-h", "help"):
        print("Usage: python random_forest.py {file_name}.csv n_trees n_epochs threshold feature_selector \n \
file: a csv file with DIDAID, features, DE, gene pair \n \
n_trees: How many trees are contained in a forest. \n \
n_epochs: How many pass you want to do. \n \
threshold: threshold separation between classes in interval (0,1) \n \
feature_selector: binary string of size #features corresponding to activated features.")
    else:
        # Arguments control
        f_name = sys.argv[1]
        assert len(f_name) >= 4 and f_name[-4:] == '.csv', f'Arg 1 must be a .csv file. Found: {f_name}'
        n_trees = sys.argv[2]
        try:
            n_trees = int(n_trees)
        except ValueError:
            raise Exception(f'Arg 2 must be an integer. Found: {n_trees}')
        n_epochs = sys.argv[3]
        try:
            n_epochs = int(n_epochs)
        except ValueError:
            # Fixed: `raise f'...'` raised a bare string, which is a TypeError
            # in Python 3 — wrap in Exception, consistent with the arg-2 check.
            raise Exception(f'Arg 3 must be an integer. Found: {n_epochs}')
        threshold = sys.argv[4]
        try:
            threshold = float(threshold)
        except ValueError:
            # Fixed: same bare-string raise bug as above.
            raise Exception(f'Arg 4 must be a float. Found: {threshold}')
        selector = sys.argv[5]
        assert all(c in ('0', '1') for c in selector), f'Arg 5 must be a selector composed of 0s and 1s. Found: {selector}'
        main(f_name, n_trees, n_epochs, threshold, selector)
| [
"numpy.mean",
"pandas.read_csv",
"sklearn.model_selection.LeaveOneGroupOut",
"math.sqrt",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"numpy.std",
"time.time"
] | [((1832, 1851), 'pandas.read_csv', 'pd.read_csv', (['f_name'], {}), '(f_name)\n', (1843, 1851), True, 'import pandas as pd\n'), ((1863, 1886), 'numpy.array', 'array', (['df_data[to_keep]'], {}), '(df_data[to_keep])\n', (1868, 1886), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((2066, 2088), 'numpy.array', 'array', (["df_data['Pair']"], {}), "(df_data['Pair'])\n", (2071, 2088), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((2748, 2773), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['real', 'pred'], {}), '(real, pred)\n', (2761, 2773), False, 'from sklearn.metrics import roc_curve, roc_auc_score, auc, matthews_corrcoef\n'), ((3725, 3743), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (3741, 3743), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((3759, 3906), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'max_depth': '(10)', 'criterion': '"""gini"""', 'min_samples_split': '(2)', 'min_samples_leaf': '(2)', 'bootstrap': '(True)', 'n_jobs': '(1)'}), "(n_estimators=n_trees, max_depth=10, criterion='gini',\n min_samples_split=2, min_samples_leaf=2, bootstrap=True, n_jobs=1)\n", (3781, 3906), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3220, 3271), 'math.sqrt', 'sqrt', (['((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))'], {}), '((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n', (3224, 3271), False, 'from math import sqrt\n'), ((4175, 4186), 'time.time', 'time.time', ([], {}), '()\n', (4184, 4186), False, 'import time\n'), ((5309, 5322), 'numpy.mean', 'mean', (['sum_sen'], {}), '(sum_sen)\n', (5313, 5322), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5324, 5336), 'numpy.std', 'std', (['sum_sen'], {}), '(sum_sen)\n', (5327, 5336), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5385, 5398), 'numpy.mean', 'mean', (['sum_spe'], 
{}), '(sum_spe)\n', (5389, 5398), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5400, 5412), 'numpy.std', 'std', (['sum_spe'], {}), '(sum_spe)\n', (5403, 5412), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5461, 5474), 'numpy.mean', 'mean', (['sum_mcc'], {}), '(sum_mcc)\n', (5465, 5474), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5476, 5488), 'numpy.std', 'std', (['sum_mcc'], {}), '(sum_mcc)\n', (5479, 5488), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5537, 5550), 'numpy.mean', 'mean', (['sum_auc'], {}), '(sum_auc)\n', (5541, 5550), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5552, 5564), 'numpy.std', 'std', (['sum_auc'], {}), '(sum_auc)\n', (5555, 5564), False, 'from numpy import array, concatenate, dot, diag, mean, std\n'), ((5090, 5101), 'time.time', 'time.time', ([], {}), '()\n', (5099, 5101), False, 'import time\n')] |
import os
import numpy as np
# Directory of this module; the Praat scripts invoked below live alongside it.
path_praat_script=os.path.dirname(os.path.abspath(__file__))
def multi_find(s, r):
    """
    Return a list of every index in ``s`` where substring ``r`` starts.

    Overlapping occurrences are all reported.  Internal helper used to
    decode the Formants file generated by Praat.

    :param s: string to scan
    :param r: substring to search for
    :returns: list of starting indices (empty when there is no match)
    """
    # Removed dead code from the original: an unused `n = -1` assignment and
    # a no-op `i = i + 1` inside the for loop (the loop variable is
    # reassigned on each iteration anyway).
    r_len = len(r)
    # Slices shorter than r near the end of s simply never compare equal,
    # so scanning every start position preserves the original behavior.
    return [i for i in range(len(s)) if s[i:i + r_len] == r]
def praat_vuv(audio_filaname, resultsp, resultst, time_stepF0=0, minf0=75, maxf0=600, maxVUVPeriod=0.02, averageVUVPeriod=0.01):
    """
    Invoke the vuv_praat Praat script to compute pitch and voicing decisions
    for a wav file.

    Results are written to two text files (pitch and voiced/unvoiced
    decisions), which can later be read with read_textgrid_trans and decodeF0.

    :param audio_filaname: Full path to the wav file
    :param resultsp: Full path to the resulting file with the pitch
    :param resultst: Full path to the resulting file with the voiced/unvoiced decisions
    :param time_stepF0: time step for pitch computation; 0 lets Praat use 0.75 / minf0
    :param minf0: minimum pitch frequency in Hz (default 75)
    :param maxf0: maximum pitch frequency in Hz (default 600)
    :param maxVUVPeriod: maximum interval considered part of a larger voiced interval (default 0.02)
    :param averageVUVPeriod: half of this value extends a voiced interval beyond its endpoints (default 0.01)
    :returns: nothing
    """
    script = path_praat_script + '/vuv_praat.praat'
    arguments = [
        audio_filaname, resultsp, resultst,
        str(minf0), str(maxf0),
        str(time_stepF0), str(maxVUVPeriod), str(averageVUVPeriod),
    ]
    os.system('praat ' + script + ' ' + ' '.join(arguments))
def praat_formants(audio_filename, results_filename, sizeframe, step, n_formants=5, max_formant=5500):
    """
    Invoke the FormantsPraat Praat script to compute formants for a wav file.

    The results are written to a text file that can be read back with
    decodeFormants.

    :param audio_filename: Full path to the wav file
    :param results_filename: Full path to the resulting file with the formants
    :param sizeframe: window size (half of it is passed to the script)
    :param step: time step between formant estimates
    :param n_formants: number of formants to look for (default 5)
    :param max_formant: maximum formant frequency to look for (default 5500)
    :returns: nothing
    """
    script = path_praat_script + '/FormantsPraat.praat'
    arguments = [
        audio_filename, results_filename,
        str(n_formants), str(max_formant),
        str(float(sizeframe)/2), str(float(step)),
    ]
    # formant extraction praat
    os.system('praat ' + script + ' ' + ' '.join(arguments))
def read_textgrid_trans(file_textgrid, data_audio, fs, win_trans=0.04):
    """
    This function reads a text file with the text grid with voiced/unvoiced
    decisions then finds the onsets (unvoiced -> voiced) and
    offsets (voiced -> unvoiced) and then reads the audio data to returns
    lists of segments of lenght win_trans around these transitions.
    :param file_textgrid: The text file with the text grid with voicing decisions.
    :param data_audio: the audio signal.
    :param fs: sampling frequency of the audio signal.
    :param win_trans: the transition window lenght, default 0.04
    :returns segments: List with both onset and offset transition segments.
    :returns segments_onset: List with onset transition segments
    :returns segments_offset: List with offset transition segments
    """
    segments=[]
    segments_onset=[]
    segments_offset=[]
    # State carried across lines: the previous '"V"'/'"U"' marker seen, and
    # the previous raw line (the line before a marker holds its start time).
    prev_trans=""
    prev_line=0
    with open(file_textgrid) as fp:
        for line in fp:
            line = line.strip('\n')
            if line in ('"V"', '"U"'):
                # prev_line holds the time stamp preceding this marker;
                # convert it to a (0-based) sample index.
                transVal=int(float(prev_line)*fs)-1
                # Cut a window of +/- win_trans seconds around the transition.
                segment=data_audio[int(transVal-win_trans*fs):int(transVal+win_trans*fs)]
                segments.append(segment)
                # A 'V' marker following 'V' (or the file start) is treated
                # as an onset; one following 'U' as an offset.
                if prev_trans in ('"V"', ""):
                    segments_onset.append(segment)
                elif prev_trans=='"U"':
                    segments_offset.append(segment)
                prev_trans=line
            prev_line=line
    return segments,segments_onset,segments_offset
def decodeF0(fileTxt, len_signal=0, time_stepF0=0):
    """
    Read a pitch file produced by praat_vuv and return its contents.

    By default returns two arrays: the pitch values and their time stamps.
    When both len_signal and time_stepF0 are given, instead returns a full
    pitch contour over the whole signal, with zeros at unvoiced frames.

    :param fileTxt: pitch file generated by praat_vuv
    :param len_signal: length of the audio signal (0 disables contour mode)
    :param time_stepF0: time step between pitch values (used in contour mode)
    :returns pitch: numpy array of pitch values (or full contour)
    :returns time_voiced: time stamp for each pitch value (or the full grid)
    """
    # An empty file means no voiced frames at all.
    if os.stat(fileTxt).st_size == 0:
        return np.array([0]), np.array([0])
    raw = np.loadtxt(fileTxt)
    if raw.ndim > 1:
        # Column 0: time stamps, column 1: pitch values.
        stamps, values = raw[:, 0], raw[:, 1]
    elif raw.ndim == 1:
        # A single data point: first datum is the stamp, second the pitch.
        stamps, values = raw[0], raw[1]
    if len_signal > 0:
        n_frames = int(len_signal / time_stepF0)
        grid = np.linspace(0.0, len_signal, n_frames)
        contour = np.zeros(int(n_frames))
        # Place each voiced pitch value at the nearest grid point.
        if raw.ndim > 1:
            for idx, stamp in enumerate(stamps):
                nearest = np.argmin(np.abs(grid - stamp))
                contour[nearest] = values[idx]
        else:
            nearest = np.argmin(np.abs(grid - stamps))
            contour[nearest] = values
        return contour, grid
    return values, stamps
def decodeFormants(fileTxt):
    """
    Read the praat textgrid file for formants and return the arrays.

    :param fileTxt: file with the formants, generated with praat_formants
    :returns F1: numpy array containing the values for the first formant
    :returns F2: numpy array containing the values for the second formant
    """
    # Fixed: the file handle was opened and never closed (resource leak);
    # a with-block now guarantees it is released.
    with open(fileTxt) as fid:
        datam = fid.read()
    end_line1 = multi_find(datam, '\n')
    F1 = []
    F2 = []
    # The first lines of the Praat output are a header — start at line 10.
    ji = 10
    while ji < len(end_line1) - 1:
        line1 = datam[end_line1[ji]+1:end_line1[ji+1]]
        # NOTE(review): lines whose content is '3', '4' or '5' presumably mark
        # records carrying formant values at fixed offsets — confirm against
        # the FormantsPraat script output format.
        if line1 in ('3', '4', '5'):
            F1.append(float(datam[end_line1[ji+1]+1:end_line1[ji+2]]))
            F2.append(float(datam[end_line1[ji+3]+1:end_line1[ji+4]]))
        ji = ji + 1
    return np.asarray(F1), np.asarray(F2)
| [
"numpy.abs",
"os.stat",
"numpy.asarray",
"numpy.array",
"numpy.linspace",
"os.path.abspath",
"os.system",
"numpy.loadtxt"
] | [((64, 89), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import os\n'), ((1920, 1938), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1929, 1938), False, 'import os\n'), ((2800, 2818), 'os.system', 'os.system', (['command'], {}), '(command)\n', (2809, 2818), False, 'import os\n'), ((5050, 5069), 'numpy.loadtxt', 'np.loadtxt', (['fileTxt'], {}), '(fileTxt)\n', (5060, 5069), True, 'import numpy as np\n'), ((6495, 6509), 'numpy.asarray', 'np.asarray', (['F1'], {}), '(F1)\n', (6505, 6509), True, 'import numpy as np\n'), ((6514, 6528), 'numpy.asarray', 'np.asarray', (['F2'], {}), '(F2)\n', (6524, 6528), True, 'import numpy as np\n'), ((5443, 5481), 'numpy.linspace', 'np.linspace', (['(0.0)', 'len_signal', 'n_frames'], {}), '(0.0, len_signal, n_frames)\n', (5454, 5481), True, 'import numpy as np\n'), ((4971, 4987), 'os.stat', 'os.stat', (['fileTxt'], {}), '(fileTxt)\n', (4978, 4987), False, 'import os\n'), ((5009, 5022), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5017, 5022), True, 'import numpy as np\n'), ((5024, 5037), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5032, 5037), True, 'import numpy as np\n'), ((5695, 5718), 'numpy.abs', 'np.abs', (['(t - time_voiced)'], {}), '(t - time_voiced)\n', (5701, 5718), True, 'import numpy as np\n'), ((5614, 5632), 'numpy.abs', 'np.abs', (['(t - time_p)'], {}), '(t - time_p)\n', (5620, 5632), True, 'import numpy as np\n')] |
# Copyright 2019-2020 Toyota Research Institute. All rights reserved.
"""
Defines a new XAS Spectrum object built on top of Pymatgen's
Spectrum object.
"""
import os
import numpy as np
from pymatgen.core.structure import Structure
from trixs.spectra.core import XAS_Spectrum, XAS_Collation
from trixs.spectra.spectrum_io import parse_spectrum
from copy import deepcopy
from numpy import eye
from pytest import fixture, raises
from json import loads, dumps
# Directory containing this test module, and the bundled test-data folder.
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, 'test_files')
@fixture
def fake_structure():
    """Yield a minimal single-H-atom structure on an identity lattice."""
    yield Structure(eye(3), ['H'], np.array([[0, 0, 0]]))
@fixture
def fake_spectrum(fake_structure):
    """Return an XAS_Spectrum with random x/y data on the fake structure."""
    xs = np.random.uniform(size=100)
    ys = np.random.uniform(size=100)
    return XAS_Spectrum(xs, ys, structure=fake_structure, absorbing_site=0)
def test_instantiate_XAS_spectra(fake_structure):
    """Building an XAS_Spectrum from raw arrays yields an XAS_Spectrum."""
    energies = np.random.uniform(size=100)
    intensities = np.random.uniform(size=100)
    spec = XAS_Spectrum(energies, intensities, fake_structure, 0)
    assert isinstance(spec, XAS_Spectrum)
def test_XAS_full_spec_attributes():
    """Column accessors (E, Enorm, k, mu, mu0, chi) map onto full_spectrum columns."""
    x = np.random.uniform(size=100)
    y = np.random.uniform(size=100)
    structure = Structure.from_file(os.path.join(TEST_FILE_DIR, 'Cu_structure.cif'))
    full_spectrum = np.random.uniform(size=(100, 6))
    spec = XAS_Spectrum(x, y, structure, 0, full_spectrum=full_spectrum)
    assert isinstance(spec, XAS_Spectrum)
    columns = (spec.E, spec.Enorm, spec.k, spec.mu, spec.mu0, spec.chi)
    for col_idx, column in enumerate(columns):
        assert np.array_equal(column, full_spectrum[:, col_idx])
    assert spec.abs_idx == 0
    assert isinstance(spec.as_dict(), dict)
def test_exceptions(fake_spectrum):
    """Full-spectrum accessors raise ValueError on a spectrum built without
    full_spectrum data; an unknown normalization mode raises
    NotImplementedError."""
    with raises(ValueError):
        fake_spectrum.E()
    with raises(ValueError):
        fake_spectrum.mu()
    with raises(ValueError):
        fake_spectrum.Enorm()
    with raises(ValueError):
        fake_spectrum.mu0()
    with raises(ValueError):
        fake_spectrum.k()
    with raises(ValueError):
        fake_spectrum.chi()
    with raises(ValueError):
        fake_spectrum.shifted_Enorm(shift=0)
    with raises(NotImplementedError):
        fake_spectrum.normalize('zappa')
def test_load_from_doc_and_object():
    """Loading the same sample document three ways (atomate document, dict,
    JSON string) yields equivalent full spectra with the expected values."""
    with open(os.path.join(TEST_FILE_DIR, 'sample_spectrum_e.txt'), 'r') as f:
        data = loads(f.readline())
    spec1 = XAS_Spectrum.from_atomate_document(data)
    spec2 = XAS_Spectrum.load_from_object(data)
    # Round-trip through a JSON string as well.
    line = dumps(data)
    spec3 = XAS_Spectrum.load_from_object(line)
    for spec in [spec1, spec2, spec3]:
        assert isinstance(spec,XAS_Spectrum)
        assert spec.has_full_spectrum()
        # First-row values of each column in the sample file.
        assert spec.E[0] == 8334.08
        assert spec.Enorm[0] == -9.293
        assert spec.k[0] == -0.8
        assert spec.mu[0] == 0.0519168
        assert spec.mu0[0] == 0.0795718
        assert spec.chi[0] == -0.027655
        assert len(spec.E) == 100
        assert len(spec.Enorm) == 100
        assert len(spec.mu) == 100
        assert len(spec.mu0) == 100
        assert len(spec.k) == 100
        assert len(spec.chi) == 100
    # shifted_Enorm(1) should equal Enorm + 1 elementwise.
    enorm = spec1.Enorm
    sub_enorm = np.add(enorm,1)
    assert np.isclose(sub_enorm,spec.shifted_Enorm(1)).all()
def test_XAS_gradient():
    """dy should approximate the analytic derivative cos(x) of y = sin(x)."""
    xs = np.linspace(0, 1, 500)
    spec = XAS_Spectrum(xs, np.sin(xs))
    assert np.isclose(spec.dy, np.cos(xs), atol=.001).all()
def test_XAS_Spectrum_methods(fake_spectrum):
    """Exercises repr/as_dict/as_str, shifted_x, get_peak_idx and the four
    normalization modes (sum, max, minmax, l2)."""
    assert isinstance(str(fake_spectrum), str)
    # The structure's species ('H') should appear in the repr.
    assert 'H' in str(fake_spectrum)
    assert isinstance(fake_spectrum.as_dict(), dict)
    assert isinstance(fake_spectrum.as_str(),str)
    assert fake_spectrum.has_full_spectrum() is False
    X = np.linspace(0,1,100)
    Y = np.sin(X)
    assert np.isclose(np.add(fake_spectrum.x, 10), fake_spectrum.shifted_x(10)).all()
    fake_spectrum_2 = XAS_Spectrum(X,Y)
    # sin is increasing on [0, 1], so the peak is the last index.
    assert fake_spectrum_2.get_peak_idx() == 99
    fake_spectrum_2.normalize('sum')
    assert np.isclose(np.sum(fake_spectrum_2.y), 1.0)
    fake_spectrum_2.normalize('max')
    assert np.isclose(np.max(fake_spectrum_2.y), 1.0)
    # Reset and shift up
    fake_spectrum_2 = XAS_Spectrum(X,Y+.2)
    fake_spectrum_2.normalize('minmax')
    assert np.isclose(np.min(fake_spectrum_2.y), 0)
    assert np.isclose(np.max(fake_spectrum_2.y), 1.0)
    fake_spectrum_2.normalize('l2')
    assert np.isclose(np.linalg.norm(fake_spectrum_2.y, ord=2),1.0)
def test_XAS_shift():
    """A spectrum shifted down by 5 in x is realigned to within 0.1."""
    reference = parse_spectrum(os.path.join(TEST_FILE_DIR, 'sample_spectrum_c.txt'), kind='json')
    shifted = deepcopy(reference)
    shifted.x = np.subtract(shifted.x, 5.0)
    assert abs(5.0 + shifted.get_shift_alignment(reference, fidelity=20, iterations=3)[0]) < .1
def test_projection():
    """project_to_x_range should interpolate within the original domain,
    pad with zeros on the left, and extrapolate on the right."""
    x = np.linspace(0, np.pi / 2, 100)
    y = np.sin(x)
    spec = XAS_Spectrum(x, y)
    # Test that interpolation method works
    new_xvals = np.linspace(0, np.pi / 2, 300)
    new_yvals = spec.project_to_x_range(new_xvals)
    # Every 3rd projected point lines up with an original sample.
    assert (max(np.abs(y - new_yvals[::3])) < 1e-2)
    new_xvals = np.linspace(-np.pi / 2, np.pi, 300)
    new_yvals = spec.project_to_x_range(new_xvals)
    true_yvals = np.sin(new_xvals)
    # Test that it pads to 0 on the left edge
    assert np.isclose(0.0, np.abs(new_yvals[:33])).all()
    # Test that it extrapolates correctly for a few points
    assert (np.isclose(new_yvals[200:205], true_yvals[200:205])).all()
def test_broaden_spectrum():
    """broaden_spectrum_mult scales the x-range symmetrically about its
    midpoint: +5% widens, 0 is a no-op, -5% narrows."""
    x = np.linspace(0, 1, 100)
    y = np.sin(x)
    spec = XAS_Spectrum(x, y)
    assert x[-1]-x[0] == 1.0
    # Zero broadening must leave the range unchanged.
    spec.broaden_spectrum_mult(0)
    assert spec.x[-1]-spec.x[0] == 1.0
    spec.broaden_spectrum_mult(.05)
    assert np.isclose(spec.x[-1] - spec.x[0], 1.05)
    # The widening is symmetric: +0.025 at each end.
    assert spec.x[-1] == 1.025
    assert spec.x[0] ==-.025
    x = np.linspace(0, 1, 100)
    y = np.sin(x)
    spec = XAS_Spectrum(x, y)
    spec.broaden_spectrum_mult(-.05)
    assert np.isclose(spec.x[-1] - spec.x[0], .95)
    assert spec.x[-1] ==.975
    assert spec.x[0] ==.025
def test_sanity_check():
    """sanity_check rejects decreasing and near-flat spectra, accepts an
    increasing one."""
    xs = np.linspace(0, 1, 100)
    decreasing = XAS_Spectrum(x=xs, y=np.linspace(.5, -.5, 100))
    assert decreasing.sanity_check() is False
    increasing = XAS_Spectrum(x=xs, y=xs)
    assert increasing.sanity_check() is True
    tiny = XAS_Spectrum(x=xs, y=np.linspace(-0.001, 0.001, 100))
    assert tiny.sanity_check() is False
def test_simple_XAS_collation(fake_structure):
    """A bare collation reports no spectra, no bader data, and no features."""
    col = XAS_Collation(fake_structure)
    assert isinstance(XAS_Collation(fake_structure), XAS_Collation)
    checks = [
        col.has_mp_spectra(),
        col.has_mp_bader(),
        col.has_oqmd_bader(),
        col.has_bader(),
        col.has_feff_spectra(),
        col.has_features(),
        col.has_spectra(),
    ]
    assert not all(checks)
def test_full_XAS_collation(fake_structure):
    """A fully-populated collation reports every has_* check as True."""
    # NOTE(review): `fake_spectrum` below is the module-level fixture
    # *function* object, not a spectrum instance — presumably the has_*
    # checks only test list truthiness, so this still passes.  Confirm, or
    # request the fixture as a test parameter instead.
    col = XAS_Collation(fake_structure,
                        mp_id='frank',
                        oqmd_id='zappa',
                        mp_spectra=[fake_spectrum],
                        feff_spectra=[fake_spectrum],
                        icsd_ids=[1,2,3],
                        mp_bader=[1],
                        oqmd_bader=[1],
                        coordination_numbers=[1])
    all_true = [col.has_mp_spectra(),
                col.has_mp_bader(),
                col.has_oqmd_bader(),
                col.has_bader(),
                col.has_feff_spectra(),
                col.has_spectra(),
                col.has_features()]
    assert all(all_true)
| [
"pymatgen.core.structure.Structure",
"trixs.spectra.core.XAS_Spectrum.load_from_object",
"trixs.spectra.core.XAS_Spectrum",
"numpy.array",
"copy.deepcopy",
"numpy.sin",
"numpy.linalg.norm",
"trixs.spectra.core.XAS_Spectrum.from_atomate_document",
"trixs.spectra.core.XAS_Collation",
"json.dumps",
... | [((470, 495), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (485, 495), False, 'import os\n'), ((512, 548), 'os.path.join', 'os.path.join', (['TEST_DIR', '"""test_files"""'], {}), "(TEST_DIR, 'test_files')\n", (524, 548), False, 'import os\n'), ((596, 602), 'numpy.eye', 'eye', (['(3)'], {}), '(3)\n', (599, 602), False, 'from numpy import eye\n'), ((636, 657), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (644, 657), True, 'import numpy as np\n'), ((759, 786), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (776, 786), True, 'import numpy as np\n'), ((795, 822), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (812, 822), True, 'import numpy as np\n'), ((835, 897), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y'], {'structure': 'fake_structure', 'absorbing_site': '(0)'}), '(x, y, structure=fake_structure, absorbing_site=0)\n', (847, 897), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((982, 1009), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (999, 1009), True, 'import numpy as np\n'), ((1018, 1045), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (1035, 1045), True, 'import numpy as np\n'), ((1082, 1132), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y', 'fake_structure', 'absorbing_site'], {}), '(x, y, fake_structure, absorbing_site)\n', (1094, 1132), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((1223, 1250), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (1240, 1250), True, 'import numpy as np\n'), ((1259, 1286), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (1276, 1286), True, 'import numpy as np\n'), ((1418, 1450), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100, 6)'}), 
'(size=(100, 6))\n', (1435, 1450), True, 'import numpy as np\n'), ((1463, 1537), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y', 'structure', 'absorbing_site'], {'full_spectrum': 'full_spectrum'}), '(x, y, structure, absorbing_site, full_spectrum=full_spectrum)\n', (1475, 1537), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((1593, 1636), 'numpy.array_equal', 'np.array_equal', (['spec.E', 'full_spectrum[:, 0]'], {}), '(spec.E, full_spectrum[:, 0])\n', (1607, 1636), True, 'import numpy as np\n'), ((1648, 1695), 'numpy.array_equal', 'np.array_equal', (['spec.Enorm', 'full_spectrum[:, 1]'], {}), '(spec.Enorm, full_spectrum[:, 1])\n', (1662, 1695), True, 'import numpy as np\n'), ((1707, 1750), 'numpy.array_equal', 'np.array_equal', (['spec.k', 'full_spectrum[:, 2]'], {}), '(spec.k, full_spectrum[:, 2])\n', (1721, 1750), True, 'import numpy as np\n'), ((1762, 1806), 'numpy.array_equal', 'np.array_equal', (['spec.mu', 'full_spectrum[:, 3]'], {}), '(spec.mu, full_spectrum[:, 3])\n', (1776, 1806), True, 'import numpy as np\n'), ((1818, 1863), 'numpy.array_equal', 'np.array_equal', (['spec.mu0', 'full_spectrum[:, 4]'], {}), '(spec.mu0, full_spectrum[:, 4])\n', (1832, 1863), True, 'import numpy as np\n'), ((1875, 1920), 'numpy.array_equal', 'np.array_equal', (['spec.chi', 'full_spectrum[:, 5]'], {}), '(spec.chi, full_spectrum[:, 5])\n', (1889, 1920), True, 'import numpy as np\n'), ((2694, 2734), 'trixs.spectra.core.XAS_Spectrum.from_atomate_document', 'XAS_Spectrum.from_atomate_document', (['data'], {}), '(data)\n', (2728, 2734), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((2747, 2782), 'trixs.spectra.core.XAS_Spectrum.load_from_object', 'XAS_Spectrum.load_from_object', (['data'], {}), '(data)\n', (2776, 2782), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((2795, 2806), 'json.dumps', 'dumps', (['data'], {}), '(data)\n', (2800, 2806), False, 'from json import loads, dumps\n'), 
((2819, 2854), 'trixs.spectra.core.XAS_Spectrum.load_from_object', 'XAS_Spectrum.load_from_object', (['line'], {}), '(line)\n', (2848, 2854), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((3464, 3480), 'numpy.add', 'np.add', (['enorm', '(1)'], {}), '(enorm, 1)\n', (3470, 3480), True, 'import numpy as np\n'), ((3577, 3599), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(500)'], {}), '(0, 1, 500)\n', (3588, 3599), True, 'import numpy as np\n'), ((3606, 3615), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (3612, 3615), True, 'import numpy as np\n'), ((3630, 3639), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (3636, 3639), True, 'import numpy as np\n'), ((3652, 3670), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['X', 'Y'], {}), '(X, Y)\n', (3664, 3670), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((4035, 4057), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (4046, 4057), True, 'import numpy as np\n'), ((4064, 4073), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (4070, 4073), True, 'import numpy as np\n'), ((4184, 4202), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['X', 'Y'], {}), '(X, Y)\n', (4196, 4202), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((4483, 4507), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['X', '(Y + 0.2)'], {}), '(X, Y + 0.2)\n', (4495, 4507), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((4887, 4902), 'copy.deepcopy', 'deepcopy', (['spec1'], {}), '(spec1)\n', (4895, 4902), False, 'from copy import deepcopy\n'), ((4918, 4943), 'numpy.subtract', 'np.subtract', (['spec1.x', '(5.0)'], {}), '(spec1.x, 5.0)\n', (4929, 4943), True, 'import numpy as np\n'), ((5069, 5099), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', '(100)'], {}), '(0, np.pi / 2, 100)\n', (5080, 5099), True, 'import numpy as np\n'), ((5108, 5117), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (5114, 5117), True, 
'import numpy as np\n'), ((5130, 5148), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y'], {}), '(x, y)\n', (5142, 5148), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((5210, 5240), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', '(300)'], {}), '(0, np.pi / 2, 300)\n', (5221, 5240), True, 'import numpy as np\n'), ((5361, 5396), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', 'np.pi', '(300)'], {}), '(-np.pi / 2, np.pi, 300)\n', (5372, 5396), True, 'import numpy as np\n'), ((5465, 5482), 'numpy.sin', 'np.sin', (['new_xvals'], {}), '(new_xvals)\n', (5471, 5482), True, 'import numpy as np\n'), ((5756, 5778), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (5767, 5778), True, 'import numpy as np\n'), ((5787, 5796), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (5793, 5796), True, 'import numpy as np\n'), ((5808, 5826), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y'], {}), '(x, y)\n', (5820, 5826), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((5979, 6019), 'numpy.isclose', 'np.isclose', (['(spec.x[-1] - spec.x[0])', '(1.05)'], {}), '(spec.x[-1] - spec.x[0], 1.05)\n', (5989, 6019), True, 'import numpy as np\n'), ((6090, 6112), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (6101, 6112), True, 'import numpy as np\n'), ((6121, 6130), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (6127, 6130), True, 'import numpy as np\n'), ((6142, 6160), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', (['x', 'y'], {}), '(x, y)\n', (6154, 6160), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((6210, 6250), 'numpy.isclose', 'np.isclose', (['(spec.x[-1] - spec.x[0])', '(0.95)'], {}), '(spec.x[-1] - spec.x[0], 0.95)\n', (6220, 6250), True, 'import numpy as np\n'), ((6342, 6364), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (6353, 6364), True, 'import numpy as np\n'), 
((6373, 6400), 'numpy.linspace', 'np.linspace', (['(0.5)', '(-0.5)', '(100)'], {}), '(0.5, -0.5, 100)\n', (6384, 6400), True, 'import numpy as np\n'), ((6414, 6436), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', ([], {'x': 'X', 'y': 'Y'}), '(x=X, y=Y)\n', (6426, 6436), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((6498, 6520), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', ([], {'x': 'X', 'y': 'X'}), '(x=X, y=X)\n', (6510, 6520), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((6574, 6605), 'numpy.linspace', 'np.linspace', (['(-0.001)', '(0.001)', '(100)'], {}), '(-0.001, 0.001, 100)\n', (6585, 6605), True, 'import numpy as np\n'), ((6621, 6643), 'trixs.spectra.core.XAS_Spectrum', 'XAS_Spectrum', ([], {'x': 'X', 'y': 'Y'}), '(x=X, y=Y)\n', (6633, 6643), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((6747, 6776), 'trixs.spectra.core.XAS_Collation', 'XAS_Collation', (['fake_structure'], {}), '(fake_structure)\n', (6760, 6776), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((7195, 7399), 'trixs.spectra.core.XAS_Collation', 'XAS_Collation', (['fake_structure'], {'mp_id': '"""frank"""', 'oqmd_id': '"""zappa"""', 'mp_spectra': '[fake_spectrum]', 'feff_spectra': '[fake_spectrum]', 'icsd_ids': '[1, 2, 3]', 'mp_bader': '[1]', 'oqmd_bader': '[1]', 'coordination_numbers': '[1]'}), "(fake_structure, mp_id='frank', oqmd_id='zappa', mp_spectra=[\n fake_spectrum], feff_spectra=[fake_spectrum], icsd_ids=[1, 2, 3],\n mp_bader=[1], oqmd_bader=[1], coordination_numbers=[1])\n", (7208, 7399), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((669, 704), 'pymatgen.core.structure.Structure', 'Structure', (['lattice', 'species', 'coords'], {}), '(lattice, species, coords)\n', (678, 704), False, 'from pymatgen.core.structure import Structure\n'), ((1324, 1371), 'os.path.join', 'os.path.join', (['TEST_FILE_DIR', '"""Cu_structure.cif"""'], {}), 
"(TEST_FILE_DIR, 'Cu_structure.cif')\n", (1336, 1371), False, 'import os\n'), ((2042, 2060), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2048, 2060), False, 'from pytest import fixture, raises\n'), ((2097, 2115), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2103, 2115), False, 'from pytest import fixture, raises\n'), ((2153, 2171), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2159, 2171), False, 'from pytest import fixture, raises\n'), ((2212, 2230), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2218, 2230), False, 'from pytest import fixture, raises\n'), ((2269, 2287), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2275, 2287), False, 'from pytest import fixture, raises\n'), ((2324, 2342), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2330, 2342), False, 'from pytest import fixture, raises\n'), ((2381, 2399), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2387, 2399), False, 'from pytest import fixture, raises\n'), ((2456, 2483), 'pytest.raises', 'raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2462, 2483), False, 'from pytest import fixture, raises\n'), ((4311, 4336), 'numpy.sum', 'np.sum', (['fake_spectrum_2.y'], {}), '(fake_spectrum_2.y)\n', (4317, 4336), True, 'import numpy as np\n'), ((4403, 4428), 'numpy.max', 'np.max', (['fake_spectrum_2.y'], {}), '(fake_spectrum_2.y)\n', (4409, 4428), True, 'import numpy as np\n'), ((4566, 4591), 'numpy.min', 'np.min', (['fake_spectrum_2.y'], {}), '(fake_spectrum_2.y)\n', (4572, 4591), True, 'import numpy as np\n'), ((4618, 4643), 'numpy.max', 'np.max', (['fake_spectrum_2.y'], {}), '(fake_spectrum_2.y)\n', (4624, 4643), True, 'import numpy as np\n'), ((4709, 4749), 'numpy.linalg.norm', 'np.linalg.norm', (['fake_spectrum_2.y'], {'ord': '(2)'}), '(fake_spectrum_2.y, ord=2)\n', (4723, 4749), True, 'import numpy as np\n'), ((4807, 4859), 'os.path.join', 
'os.path.join', (['TEST_FILE_DIR', '"""sample_spectrum_c.txt"""'], {}), "(TEST_FILE_DIR, 'sample_spectrum_c.txt')\n", (4819, 4859), False, 'import os\n'), ((6799, 6828), 'trixs.spectra.core.XAS_Collation', 'XAS_Collation', (['fake_structure'], {}), '(fake_structure)\n', (6812, 6828), False, 'from trixs.spectra.core import XAS_Spectrum, XAS_Collation\n'), ((2581, 2633), 'os.path.join', 'os.path.join', (['TEST_FILE_DIR', '"""sample_spectrum_e.txt"""'], {}), "(TEST_FILE_DIR, 'sample_spectrum_e.txt')\n", (2593, 2633), False, 'import os\n'), ((3698, 3732), 'numpy.isclose', 'np.isclose', (['dy', 'Yprime'], {'atol': '(0.001)'}), '(dy, Yprime, atol=0.001)\n', (3708, 3732), True, 'import numpy as np\n'), ((5308, 5334), 'numpy.abs', 'np.abs', (['(y - new_yvals[::3])'], {}), '(y - new_yvals[::3])\n', (5314, 5334), True, 'import numpy as np\n'), ((5658, 5709), 'numpy.isclose', 'np.isclose', (['new_yvals[200:205]', 'true_yvals[200:205]'], {}), '(new_yvals[200:205], true_yvals[200:205])\n', (5668, 5709), True, 'import numpy as np\n'), ((4097, 4124), 'numpy.add', 'np.add', (['fake_spectrum.x', '(10)'], {}), '(fake_spectrum.x, 10)\n', (4103, 4124), True, 'import numpy as np\n'), ((5557, 5579), 'numpy.abs', 'np.abs', (['new_yvals[:33]'], {}), '(new_yvals[:33])\n', (5563, 5579), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/12/2 20:33
# @author :Mo
# @function :topic model of NMF
from macropodus.preprocess.tools_ml import extract_chinese, tfidf_fit
from macropodus.data.words_common.stop_words import stop_words
from macropodus.preprocess.tools_ml import macropodus_cut
from macropodus.preprocess.tools_ml import cut_sentence
# sklearn
from sklearn.decomposition import NMF
import numpy as np
class NMFSum:
    """Extractive summarizer that ranks sentences via NMF topic decomposition."""

    def __init__(self):
        # Shared stop-word table; ``algorithm`` is a legacy tag (note it says
        # 'lsi' although the decomposition actually used below is NMF).
        self.stop_words = stop_words.values()
        self.algorithm = 'lsi'

    def summarize(self, text, num=320, topic_min=5, judge_topic="all"):
        """
        Rank the sentences of a document by NMF topic importance.

        :param text: text or list, input docs (a str is split into sentences)
        :param num: int, number or amount of sentences to return
        :param topic_min: int, upper bound on the NMF topic count
        :param judge_topic: truthy -> rank sentences inside the single
                            dominant topic; falsy -> score each sentence by
                            its strongest topic
        :return: list of (score, sentence) tuples, best first
        """
        # sentence segmentation
        if type(text) is str:
            self.sentences = cut_sentence(text)
        elif type(text) is list:
            self.sentences = text
        else:
            raise RuntimeError("text type must be list or str")
        # tokenize each sentence, keeping only non-blank tokens
        tokens_per_sent = []
        for sentence in self.sentences:
            words = macropodus_cut(extract_chinese(sentence))
            tokens_per_sent.append([w for w in words if w.strip()])
        n_sentences = len(tokens_per_sent)
        # drop stop words, then re-join into space-separated strings for tf-idf
        filtered = [[w for w in toks if w not in self.stop_words]
                    for toks in tokens_per_sent]
        self.sentences_cut = [" ".join(toks) for toks in filtered]
        # tf-idf weights per sentence
        sen_tfidf = tfidf_fit(self.sentences_cut)
        # heuristic topic count: at most topic_min, at most half the sentences
        topic_num = min(topic_min, int(n_sentences / 2))
        decomposer = NMF(n_components=topic_num, max_iter=320)
        _weights = decomposer.fit_transform(sen_tfidf.T)  # basis / weight matrix (unused)
        coeffs = decomposer.components_                   # coefficient matrix: topics x sentences
        if judge_topic:
            ### Scheme 1: select the single dominant topic, rank its sentences.
            ##################################################################################
            topic_scores = np.sum(coeffs, axis=-1)
            # per column (sentence), rank the topic_num topics, best first
            top_rank = coeffs.argsort(axis=0)[-topic_num:][::-1]
            # NOTE(review): as written this marks entries equal to topic index
            # 0 -- kept exactly as in the original implementation.
            exist = (top_rank <= 0) * 1.0
            topic_counts = np.dot(exist, np.ones(top_rank.shape[1]))
            # normalize both signals before combining them
            topic_counts /= np.sum(topic_counts, axis=-1)
            topic_scores /= np.sum(topic_scores, axis=-1)
            # dominant topic = best combined count + score
            dominant = np.argmax(topic_counts + topic_scores)
            per_sentence = coeffs[dominant].tolist()
            res_combine = {self.sentences[i]: per_sentence[i]
                           for i in range(n_sentences)}
            #####################################################################################
        else:
            ### Scheme 2: score every sentence by its strongest topic, any topic.
            res_combine = {}
            for i in range(n_sentences):
                column = coeffs[:, i]
                res_combine[self.sentences[i]] = column[np.argmax(column)]
        ranked = sorted(res_combine.items(), key=lambda d: d[1], reverse=True)
        score_sen = [(sc, sent) for sent, sc in ranked]
        num_min = min(num, int(n_sentences * 0.6))
        return score_sen[0:num_min]
if __name__ == '__main__':
    nmf = NMFSum()
    # NOTE(review): only the LAST ``doc`` assignment below takes effect; the
    # two earlier sample documents are dead stores kept for manual testing.
    doc = "多知网5月26日消息,今日,方直科技发公告,拟用自有资金人民币1.2亿元," \
          "与深圳嘉道谷投资管理有限公司、深圳嘉道功程股权投资基金(有限合伙)共同发起设立嘉道方直教育产业投资基金(暂定名)。" \
          "该基金认缴出资总规模为人民币3.01亿元。" \
          "基金的出资方式具体如下:出资进度方面,基金合伙人的出资应于基金成立之日起四年内分四期缴足,每期缴付7525万元;" \
          "各基金合伙人每期按其出资比例缴付。合伙期限为11年,投资目标为教育领域初创期或成长期企业。" \
          "截止公告披露日,深圳嘉道谷投资管理有限公司股权结构如下:截止公告披露日,深圳嘉道功程股权投资基金产权结构如下:" \
          "公告还披露,方直科技将探索在中小学教育、在线教育、非学历教育、学前教育、留学咨询等教育行业其他分支领域的投资。" \
          "方直科技2016年营业收入9691万元,营业利润1432万元,归属于普通股股东的净利润1847万元。(多知网 黎珊)}}"
    doc = "和投票目标的等级来决定新的等级.简单的说。" \
          "是上世纪90年代末提出的一种计算网页权重的算法! " \
          "当时,互联网技术突飞猛进,各种网页网站爆炸式增长。" \
          "业界急需一种相对比较准确的网页重要性计算方法。" \
          "是人们能够从海量互联网世界中找出自己需要的信息。" \
          "百度百科如是介绍他的思想:PageRank通过网络浩瀚的超链接关系来确定一个页面的等级。" \
          "Google把从A页面到B页面的链接解释为A页面给B页面投票。" \
          "Google根据投票来源甚至来源的来源,即链接到A页面的页面。" \
          "一个高等级的页面可以使其他低等级页面的等级提升。" \
          "具体说来就是,PageRank有两个基本思想,也可以说是假设。" \
          "即数量假设:一个网页被越多的其他页面链接,就越重)。" \
          "质量假设:一个网页越是被高质量的网页链接,就越重要。" \
          "总的来说就是一句话,从全局角度考虑,获取重要的信。"
    doc = '早年林志颖带kimi上《爸爸去哪儿》的时候,当时遮遮掩掩的林志颖老婆低调探班,总让人觉得格外神秘,大概是特别不喜欢' \
          '在公众面前曝光自己日常的那种人。可能这么些年过去,心态不断调整过了,至少在微博上,陈若仪越来越放得开,晒自己带' \
          '娃照顾双子星的点滴,也晒日常自己的护肤心得,时不时安利一些小东西。都快晚上十点半,睡美容觉的最佳时候,结果才带' \
          '完一天娃的陈若仪还是不忘先保养自己,敷起了面膜。泡完澡,这次用的是一个稍微平价的面膜,脸上、甚至仔细到脖子上都' \
          '抹上了。陈若仪也是多此一举,特别说自己不是裸体,是裹着浴巾的,谁在意这个呀,目光完全被你那又长又扑闪的睫毛给吸' \
          '引住了。这也太吓人吧,怎么能够长那么长那么密那么翘。嫉妒地说一句,真的很像种的假睫毛呐。陈若仪的睫毛应该是天生' \
          '的基础好吧,要不然也不会遗传给小孩,一家子都是睫毛精,几个儿子现在这么小都是长睫毛。只是陈若仪现在这个完美状态,' \
          '一定是后天再经过悉心的呵护培养。网友已经迫不及待让她教教怎么弄睫毛了,陈若仪也是答应地好好的。各种私人物品主动' \
          '揭秘,安利一些品牌给大家,虽然一再强调是自己的日常小物,还是很让人怀疑,陈若仪是不是在做微商当网红呐,网友建议' \
          '她开个店,看这回复,也是很有意愿了。她应该不缺这个钱才对。隔三差五介绍下自己用的小刷子之类,陈若仪乐于向大家传' \
          '授自己的保养呵护之道。她是很容易就被晒出斑的肤质,去海岛参加婚礼,都要必备这几款超爱用的防晒隔离。日常用的、太' \
          '阳大时候用的,好几个种类,活得相当精致。你们按照自己的需要了解一下。画眉毛,最爱用的是intergrate的眉笔。也是个' \
          '念旧的人,除了Dior,陈若仪的另一个眉粉其中一个是她高中就开始用的Kate。一般都是大学才开始化妆修饰自己,感受得到' \
          '陈若仪从小就很爱美。各种小零小碎的化妆品,已经买过七八次的粉红胡椒抛光美体油,每天洗完澡陈若仪都会喷在肚子、大' \
          '腿、屁股和膝盖手肘,说是能保持肌肤的平滑紧致程度。每安利一样东西,总有网友要在下面问其他问题咋个办,真是相当信' \
          '任陈若仪了。每次她也很耐心的解答,"去黑头我用的是SUQQU洁面去角质按摩膏磨砂洁面洗面奶,"一定要先按摩再用。她自己' \
          '已经回购过好几次,意思是你们再了解一下。了解归了解,买不买随意。毕竟像她另一个爱用的达尔肤面膜,效果好是好,价' \
          '格据说比sk2都还要贵,不是大多数人日常能够消费得起的,大家就看个热闹就好了,还是多买多试多用才能找到最适合自己的' \
          '护肤方法。'
    # Renamed from ``sum`` -- the original shadowed the builtin ``sum``.
    summary = nmf.summarize(doc, num=320)
    for score_sentence in summary:
        print(score_sentence)
| [
"sklearn.decomposition.NMF",
"macropodus.preprocess.tools_ml.tfidf_fit",
"numpy.ones",
"numpy.argmax",
"macropodus.data.words_common.stop_words.stop_words.values",
"numpy.sum",
"numpy.dot",
"macropodus.preprocess.tools_ml.cut_sentence",
"macropodus.preprocess.tools_ml.extract_chinese"
] | [((502, 521), 'macropodus.data.words_common.stop_words.stop_words.values', 'stop_words.values', ([], {}), '()\n', (519, 521), False, 'from macropodus.data.words_common.stop_words import stop_words\n'), ((1565, 1594), 'macropodus.preprocess.tools_ml.tfidf_fit', 'tfidf_fit', (['self.sentences_cut'], {}), '(self.sentences_cut)\n', (1574, 1594), False, 'from macropodus.preprocess.tools_ml import extract_chinese, tfidf_fit\n'), ((1712, 1753), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'topic_num', 'max_iter': '(320)'}), '(n_components=topic_num, max_iter=320)\n', (1715, 1753), False, 'from sklearn.decomposition import NMF\n'), ((940, 958), 'macropodus.preprocess.tools_ml.cut_sentence', 'cut_sentence', (['text'], {}), '(text)\n', (952, 958), False, 'from macropodus.preprocess.tools_ml import cut_sentence\n'), ((2080, 2106), 'numpy.sum', 'np.sum', (['res_nmf_h'], {'axis': '(-1)'}), '(res_nmf_h, axis=-1)\n', (2086, 2106), True, 'import numpy as np\n'), ((2327, 2359), 'numpy.ones', 'np.ones', (['res_nmf_h_soft.shape[1]'], {}), '(res_nmf_h_soft.shape[1])\n', (2334, 2359), True, 'import numpy as np\n'), ((2388, 2409), 'numpy.dot', 'np.dot', (['exist', 'factor'], {}), '(exist, factor)\n', (2394, 2409), True, 'import numpy as np\n'), ((2457, 2487), 'numpy.sum', 'np.sum', (['topic_t_count'], {'axis': '(-1)'}), '(topic_t_count, axis=-1)\n', (2463, 2487), True, 'import numpy as np\n'), ((2517, 2547), 'numpy.sum', 'np.sum', (['topic_t_score'], {'axis': '(-1)'}), '(topic_t_score, axis=-1)\n', (2523, 2547), True, 'import numpy as np\n'), ((2675, 2696), 'numpy.argmax', 'np.argmax', (['topic_t_tc'], {}), '(topic_t_tc)\n', (2684, 2696), True, 'import numpy as np\n'), ((3367, 3387), 'numpy.argmax', 'np.argmax', (['res_row_i'], {}), '(res_row_i)\n', (3376, 3387), True, 'import numpy as np\n'), ((1175, 1200), 'macropodus.preprocess.tools_ml.extract_chinese', 'extract_chinese', (['sentence'], {}), '(sentence)\n', (1190, 1200), False, 'from macropodus.preprocess.tools_ml 
import extract_chinese, tfidf_fit\n')] |
# -*- coding: utf-8 -*-
"""
=========================================================
19 - "Perpendicular" Burning Ship: hidden Koch snowflakes
=========================================================
Another hidden feature in this fractal: here, a reminiscence of Koch snowflakes
close to a miniature copy of the set (a "mini"), at a depth of 1.e-53.
Reference:
`fractalshades.models.Perturbation_perpendicular_burning_ship`
"""
import os
import numpy as np
import fractalshades as fs
import fractalshades.models as fsm
import fractalshades.colors as fscolors
from fractalshades.postproc import (
Postproc_batch,
Continuous_iter_pp,
DEM_normal_pp,
DEM_pp,
Raw_pp,
)
from fractalshades.colors.layers import (
Color_layer,
Bool_layer,
Normal_map_layer,
Virtual_layer,
Blinn_lighting,
)
def plot(plot_dir):
    """Compute and render the "Perpendicular Burning Ship" deep-zoom image.

    Parameters
    ----------
    plot_dir : str
        Directory used both for the fractal computation files and the
        rendered output.
    """
    fs.settings.enable_multithreading = True
    fs.settings.inspect_calc = True
    # A simple showcase using perturbation technique
    calc_name = 'test'
    # _1 = 'Zoom parameters'
    # Zoom centre given as strings (arbitrary precision, see ``dps`` below);
    # ``dx`` is the view width -- a depth of ~1e-53.
    x = '-1.47214775731981401403314210855688086942157169819838263666455234'
    y = '-0.000000821699995458791163928571540134217349033686644929036252293894057'
    dx = '1.00371044925664e-53'
    theta_deg = 90.0
    xy_ratio = 1.8
    dps = 60
    nx = 2400
    # _1b = 'Skew parameters /!\\ Re-run when modified!'
    has_skew = True
    skew_00 = 0.13539222675927937
    skew_01 = 0.8700072011411545
    skew_10 = -1.153798854345238
    skew_11 = -0.028164925269682
    # _2 = 'Calculation parameters'
    max_iter = 20000
    # _3 = 'Bilinear series parameters'
    eps = 1e-06
    # _4 = 'Plotting parameters: base field'
    base_layer = 'distance_estimation'
    interior_color = (0.6627451181411743, 0.4313725531101227, 0.0)
    colormap = fscolors.Fractal_colormap(
        colors=[
            [0. , 0.02745098, 0.39215686],
            [1. , 1. , 0.37254903]
        ],
        kinds=['Lch'],
        grad_npts=[40],
        grad_funcs=['1-(1-x)**2'],
        extent='repeat'
    )
    invert_cmap = False
    DEM_min = 1e-07
    cmap_z_kind = 'relative'
    zmin = 0.
    zmax = 1.
    # _5 = 'Plotting parameters: shading'
    shade_kind = 'glossy'
    gloss_intensity = 100.0
    light_angle_deg = 35.0
    light_color = (1.0, 1.0, 1.0)
    gloss_light_color = (1.0, 1.0, 1.0)
    # Run the calculation
    fractal = fsm.Perturbation_perpendicular_burning_ship(plot_dir)
    # f.clean_up()
    fractal.zoom(precision=dps, x=x, y=y, dx=dx, nx=nx, xy_ratio=xy_ratio,
            theta_deg=theta_deg, projection="cartesian", antialiasing=False,
            has_skew=has_skew, skew_00=skew_00, skew_01=skew_01,
            skew_10=skew_10, skew_11=skew_11
    )
    fractal.calc_std_div(
        calc_name=calc_name,
        subset=None,
        max_iter=max_iter,
        M_divergence=1.e3,
        BLA_params={"eps": eps},
    )
    # Reuse a previous computation when its result files are still present.
    if fractal.res_available():
        print("RES AVAILABLE, no compute")
    else:
        print("RES NOT AVAILABLE, clean-up")
        fractal.clean_up(calc_name)
    fractal.run()
    # Post-processing: build the fields the plot layers will consume.
    pp = Postproc_batch(fractal, calc_name)
    if base_layer == "continuous_iter":
        pp.add_postproc(base_layer, Continuous_iter_pp())
    elif base_layer == "distance_estimation":
        # distance estimation also needs the continuous-iteration field
        pp.add_postproc("continuous_iter", Continuous_iter_pp())
        pp.add_postproc(base_layer, DEM_pp())
    # stop_reason != 1 marks interior (non-diverging) pixels
    pp.add_postproc("interior", Raw_pp("stop_reason",
                                       func=lambda x: x != 1))
    if shade_kind != "None":
        pp.add_postproc("DEM_map", DEM_normal_pp(kind="potential"))
    plotter = fs.Fractal_plotter(pp)
    plotter.add_layer(Bool_layer("interior", output=False))
    if shade_kind != "None":
        plotter.add_layer(Normal_map_layer(
            "DEM_map", max_slope=60, output=False
        ))
    if base_layer != 'continuous_iter':
        plotter.add_layer(
            Virtual_layer("continuous_iter", func=None, output=False)
        )
    # Map the base field through log (optionally inverted); DEM values are
    # clipped to DEM_min so that log stays finite.
    sign = {False: 1., True: -1.}[invert_cmap]
    if base_layer == 'distance_estimation':
        cmap_func = lambda x: sign * np.where(
            np.isinf(x),
            np.log(DEM_min),
            np.log(np.clip(x, DEM_min, None))
        )
    else:
        cmap_func = lambda x: sign * np.log(x)
    plotter.add_layer(Color_layer(
        base_layer,
        func=cmap_func,
        colormap=colormap,
        probes_z=[zmin, zmax],
        probes_kind=cmap_z_kind,
        output=True))
    plotter[base_layer].set_mask(
        plotter["interior"], mask_color=interior_color
    )
    if shade_kind != "None":
        # Blinn-Phong style lighting: a diffuse source plus, for "glossy",
        # an additional specular source.
        light = Blinn_lighting(0.6, np.array([1., 1., 1.]))
        light.add_light_source(
            k_diffuse=0.8,
            k_specular=.0,
            shininess=350.,
            angles=(light_angle_deg, 20.),
            coords=None,
            color=np.array(light_color))
        if shade_kind == "glossy":
            light.add_light_source(
                k_diffuse=0.2,
                k_specular=gloss_intensity,
                shininess=400.,
                angles=(light_angle_deg, 20.),
                coords=None,
                color=np.array(gloss_light_color))
        plotter[base_layer].shade(plotter["DEM_map"], light)
    plotter.plot()
if __name__ == "__main__":
    # Some magic to get the directory for plotting: with a name that matches
    # the file, or a temporary dir if we are building the documentation
    # (where ``__file__`` is undefined and raises NameError).
    # The try body is kept minimal so that a NameError raised *inside*
    # plot() is not silently mistaken for a missing __file__.
    try:
        realpath = os.path.realpath(__file__)
    except NameError:
        import tempfile
        with tempfile.TemporaryDirectory() as plot_dir:
            fs.utils.exec_no_output(plot, plot_dir)
    else:
        plot_dir = os.path.splitext(realpath)[0]
        plot(plot_dir)
| [
"numpy.clip",
"fractalshades.models.Perturbation_perpendicular_burning_ship",
"numpy.log",
"numpy.array",
"fractalshades.Fractal_plotter",
"fractalshades.postproc.Postproc_batch",
"fractalshades.colors.layers.Color_layer",
"fractalshades.colors.layers.Virtual_layer",
"fractalshades.colors.layers.Boo... | [((1759, 1931), 'fractalshades.colors.Fractal_colormap', 'fscolors.Fractal_colormap', ([], {'colors': '[[0.0, 0.02745098, 0.39215686], [1.0, 1.0, 0.37254903]]', 'kinds': "['Lch']", 'grad_npts': '[40]', 'grad_funcs': "['1-(1-x)**2']", 'extent': '"""repeat"""'}), "(colors=[[0.0, 0.02745098, 0.39215686], [1.0, 1.0,\n 0.37254903]], kinds=['Lch'], grad_npts=[40], grad_funcs=['1-(1-x)**2'],\n extent='repeat')\n", (1784, 1931), True, 'import fractalshades.colors as fscolors\n'), ((2365, 2418), 'fractalshades.models.Perturbation_perpendicular_burning_ship', 'fsm.Perturbation_perpendicular_burning_ship', (['plot_dir'], {}), '(plot_dir)\n', (2408, 2418), True, 'import fractalshades.models as fsm\n'), ((3096, 3130), 'fractalshades.postproc.Postproc_batch', 'Postproc_batch', (['fractal', 'calc_name'], {}), '(fractal, calc_name)\n', (3110, 3130), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((3602, 3624), 'fractalshades.Fractal_plotter', 'fs.Fractal_plotter', (['pp'], {}), '(pp)\n', (3620, 3624), True, 'import fractalshades as fs\n'), ((3424, 3468), 'fractalshades.postproc.Raw_pp', 'Raw_pp', (['"""stop_reason"""'], {'func': '(lambda x: x != 1)'}), "('stop_reason', func=lambda x: x != 1)\n", (3430, 3468), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((3650, 3686), 'fractalshades.colors.layers.Bool_layer', 'Bool_layer', (['"""interior"""'], {'output': '(False)'}), "('interior', output=False)\n", (3660, 3686), False, 'from fractalshades.colors.layers import Color_layer, Bool_layer, Normal_map_layer, Virtual_layer, Blinn_lighting\n'), ((4297, 4420), 'fractalshades.colors.layers.Color_layer', 'Color_layer', (['base_layer'], {'func': 'cmap_func', 'colormap': 'colormap', 'probes_z': '[zmin, zmax]', 'probes_kind': 'cmap_z_kind', 'output': '(True)'}), '(base_layer, func=cmap_func, colormap=colormap, 
probes_z=[zmin,\n zmax], probes_kind=cmap_z_kind, output=True)\n', (4308, 4420), False, 'from fractalshades.colors.layers import Color_layer, Bool_layer, Normal_map_layer, Virtual_layer, Blinn_lighting\n'), ((5491, 5517), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5507, 5517), False, 'import os\n'), ((3212, 3232), 'fractalshades.postproc.Continuous_iter_pp', 'Continuous_iter_pp', ([], {}), '()\n', (3230, 3232), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((3554, 3585), 'fractalshades.postproc.DEM_normal_pp', 'DEM_normal_pp', ([], {'kind': '"""potential"""'}), "(kind='potential')\n", (3567, 3585), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((3744, 3799), 'fractalshades.colors.layers.Normal_map_layer', 'Normal_map_layer', (['"""DEM_map"""'], {'max_slope': '(60)', 'output': '(False)'}), "('DEM_map', max_slope=60, output=False)\n", (3760, 3799), False, 'from fractalshades.colors.layers import Color_layer, Bool_layer, Normal_map_layer, Virtual_layer, Blinn_lighting\n'), ((3903, 3960), 'fractalshades.colors.layers.Virtual_layer', 'Virtual_layer', (['"""continuous_iter"""'], {'func': 'None', 'output': '(False)'}), "('continuous_iter', func=None, output=False)\n", (3916, 3960), False, 'from fractalshades.colors.layers import Color_layer, Bool_layer, Normal_map_layer, Virtual_layer, Blinn_lighting\n'), ((4651, 4676), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4659, 4676), True, 'import numpy as np\n'), ((5537, 5563), 'os.path.splitext', 'os.path.splitext', (['realpath'], {}), '(realpath)\n', (5553, 5563), False, 'import os\n'), ((3323, 3343), 'fractalshades.postproc.Continuous_iter_pp', 'Continuous_iter_pp', ([], {}), '()\n', (3341, 3343), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((3381, 3389), 
'fractalshades.postproc.DEM_pp', 'DEM_pp', ([], {}), '()\n', (3387, 3389), False, 'from fractalshades.postproc import Postproc_batch, Continuous_iter_pp, DEM_normal_pp, DEM_pp, Raw_pp\n'), ((4264, 4273), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (4270, 4273), True, 'import numpy as np\n'), ((4875, 4896), 'numpy.array', 'np.array', (['light_color'], {}), '(light_color)\n', (4883, 4896), True, 'import numpy as np\n'), ((5649, 5678), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5676, 5678), False, 'import tempfile\n'), ((5704, 5743), 'fractalshades.utils.exec_no_output', 'fs.utils.exec_no_output', (['plot', 'plot_dir'], {}), '(plot, plot_dir)\n', (5727, 5743), True, 'import fractalshades as fs\n'), ((4121, 4132), 'numpy.isinf', 'np.isinf', (['x'], {}), '(x)\n', (4129, 4132), True, 'import numpy as np\n'), ((4145, 4160), 'numpy.log', 'np.log', (['DEM_min'], {}), '(DEM_min)\n', (4151, 4160), True, 'import numpy as np\n'), ((5175, 5202), 'numpy.array', 'np.array', (['gloss_light_color'], {}), '(gloss_light_color)\n', (5183, 5202), True, 'import numpy as np\n'), ((4180, 4205), 'numpy.clip', 'np.clip', (['x', 'DEM_min', 'None'], {}), '(x, DEM_min, None)\n', (4187, 4205), True, 'import numpy as np\n')] |
import copy
import os
import sys
import time
from collections import OrderedDict, defaultdict
import yaml
import torch
import numpy as np
from hyperopt import hp
import ray
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune import register_trainable, run_experiments, run
from ray import tune
# sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
# sys.path.insert(1, os.path.dirname(__file__))
from augmentations_tuner.fastautoaugment.FastAutoAugment.archive import remove_deplicates, policy_decoder
from dataloaders import detection_augment_list
from augmentations_tuner.fastautoaugment.FastAutoAugment.common import get_logger, add_filehandler
from augmentations_tuner.fastautoaugment.FastAutoAugment.data import get_data
from networks import get_model, num_class
from augmentations_tuner.fastautoaugment.FastAutoAugment.train import train_and_eval
from theconf import Config as C, ConfigArgumentParser
import json
from pystopwatch2 import PyStopwatch
from easydict import EasyDict as edict
# Global stopwatch used to time the major phases of the search
# (pre-training, policy search, final training).
w = PyStopwatch()
from objectdetection.csv_eval import evaluate
# Per-fold record of top-1 validation scores.  ``defaultdict(list)`` gives
# every new key an empty list; the original ``lambda: list`` made the default
# value the ``list`` *type* itself, which breaks ``.append`` on fresh keys.
top1_valid_by_cv = defaultdict(list)
# Module-level logger shared by the whole augmentation-search pipeline.
logger = get_logger('Fast AutoAugment')
def _get_path(dataset, model, tag):
    """Return the checkpoint file path for a (dataset, model, tag) triple."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    ckpt_name = 'models/%s_%s_%s.pt' % (dataset, model, tag)
    return os.path.join(base_dir, 'FastAutoAugment', ckpt_name)  # TODO
# @ray.remote(num_gpus=4, max_calls=1) #TODO: change to num_gpus=1 ???
# @ray.remote
def train_model(config, dataroot, augment, cv_ratio_test, cv_fold, save_path=None, skip_exist=False):
    """Train (or only evaluate) one cross-validation fold.

    :param config: dict, experiment configuration pushed into theconf
    :param dataroot: str, dataset root directory
    :param augment: augmentation policy attached to the global config
    :param cv_ratio_test: float, validation split ratio
    :param cv_fold: int, index of the CV fold
    :param save_path: str or None, checkpoint path
    :param skip_exist: bool, when True run evaluation only (``only_eval``)
    :return: tuple ``(model_name, cv_fold, result)``
    """
    # theconf keeps a process-global singleton: make sure it exists, then
    # attach the experiment configuration and the augmentation policy to it
    # before training reads them.
    C.get()
    C.get().conf = config
    C.get().aug = augment
    result = train_and_eval(config, None, dataroot, cv_ratio_test, cv_fold, save_path=save_path, only_eval=skip_exist)
    return C.get()['model'], cv_fold, result
# def eval_tta(config, augment, reporter):
def eval_tta(config, augment):
    """Evaluate one candidate augmentation policy on a held-out split.

    Loads the pre-trained fold checkpoint named in ``augment['save_path']``,
    applies the policy decoded from ``augment`` and reports the mean mAP to
    Ray Tune.

    :param config: dict, experiment configuration pushed into theconf
    :param augment: dict, sampled policy parameters plus fold metadata
                    (``cv_ratio_test``, ``cv_fold``, ``save_path``,
                    ``working_dir``, ``num_op``, ``num_policy``)
    :return: float, mean mAP over the evaluated policies
    """
    # NOTE(review): mutates the caller's dict -- forces a single policy per trial
    augment['num_policy'] = 1 # TODO remove
    C.get()
    C.get().conf = config
    cv_ratio_test, cv_fold, save_path = augment['cv_ratio_test'], augment['cv_fold'], augment['save_path']
    print(augment)
    # setup - provided augmentation rules
    C.get().aug = policy_decoder(augment, augment['num_policy'], augment['num_op'])
    # eval
    ckpt = torch.load(save_path)
    model = get_model(ckpt['model_specs']['name'], len(ckpt['labels']), ckpt['model_specs']['training_configs'], local_rank=ckpt['devices']['gpu_index']) #TODO: get model configuration from Retinanet
    # checkpoint may store either {'model': state_dict, ...} or a bare state_dict
    if 'model' in ckpt:
        model.load_state_dict(ckpt['model'])
    else:
        model.load_state_dict(ckpt)
    model.eval()
    dataroot = os.path.join(augment['working_dir'], ckpt['model_specs']['data']['home_path'])
    mAPs = []
    start_t = time.time()
    for _ in range(augment['num_policy']): # TODO
        train_dataset, test_dataset = get_data(ckpt['model_specs']['data']['annotation_type'], dataroot, augment,
                                   split=cv_ratio_test, split_idx=cv_fold)
        mAP = evaluate(train_dataset, model) #TODO: adjust from train to testing on randomely selected perecentage every time
        mAPs.append(mAP)
        # free the datasets before the next policy iteration
        del train_dataset, test_dataset
    # elapsed wall time scaled by GPU count, reported as "gpu seconds"
    gpu_secs = (time.time() - start_t) * torch.cuda.device_count()
    tune.report(top1_valid=np.mean(mAPs), elapsed_time=gpu_secs)
    return np.mean(mAPs)
def augsearch(args=None, paths_ls=None):
    """Search detection augmentation policies with Ray Tune + HyperOpt.

    Pre-trains (or reuses) one model per cross-validation fold, then runs a
    HyperOpt search over augmentation policies scored by ``eval_tta`` and
    collects the best policies across folds.

    :param args: config object (easydict); when None, loaded from a
                 hard-coded YAML path (machine-specific -- see NOTE below)
    :param paths_ls: optional list of per-fold checkpoint paths; when None,
                     default paths are derived via ``_get_path``
    :return: list of decoded augmentation policies (best per fold)
    """
    if args is None:
        # NOTE(review): hard-coded, machine-specific default config path
        d = yaml.load(open('/home/noam/ZazuML/augmentations_tuner/fastautoaugment/confs/resnet50.yaml'), Loader=yaml.FullLoader)
        # from argparse import Namespace
        # args = Namespace(**d)
        args = edict(d)
        args.redis = 'gpu-cloud-vnode30.dakao.io:23655'
        args.per_class = True
        args.resume = True
        args.smoke_test = True
    if args.decay > 0:
        logger.info('decay=%.4f' % args.decay)
        args['optimizer']['decay'] = args.decay
    add_filehandler(logger, os.path.join('augmentations_tuner/fastautoaugment/FastAutoAugment/models', '%s_%s_cv%.1f.log' % (
        args['dataset'], args['model'], args.cv_ratio)))
    logger.info('initialize ray...')
    ray.init(num_cpus=1, num_gpus=1)
    # smoke tests keep only 2 policies per fold instead of 10
    num_result_per_cv = 10 if not args.smoke_test else 2
    cv_num = 5 if paths_ls is None else len(paths_ls)
    args.version = 1
    args._timestamp = '2020/08/30 20:40:10'
    args.config = '/home/noam/ZazuML/augmentations_tuner/fastautoaugment/confs/resnet50.yaml'
    copied_args = copy.deepcopy(args)
    copied_args = copied_args  # NOTE(review): redundant self-assignment
    logger.info('search augmentation policies, dataset=%s model=%s' % (args['dataset'], args['model']))
    logger.info('----- Train without Augmentations ratio(test)=%.1f -----' % (args.cv_ratio))
    w.start(tag='train_no_aug')
    if paths_ls is None:
        paths_ls = [_get_path(args['dataset'], args['model'], 'ratio%.1f_fold%d' % (args.cv_ratio, i)) for i
                    in
                    range(cv_num)]
    print(paths_ls)
    logger.info('getting results...')
    # Pre-train one model per fold (sequentially; ray.remote is commented out
    # above train_model).  skip_exist=True in smoke tests -> evaluation only.
    pretrain_results = [
        train_model(copy.deepcopy(copied_args), args.dataroot, args['aug'], args.cv_ratio, i, save_path=paths_ls[i],
                    skip_exist=args.smoke_test)
        for i in range(cv_num)]
    logger.info('processed in %.4f secs' % w.pause('train_no_aug'))
    if args.until == 1:
        sys.exit(0)
    logger.info('----- Search Test-Time Augmentation Policies -----')
    w.start(tag='search')
    ops = detection_augment_list()
    # HyperOpt search space: per (policy, op) slot an op choice plus
    # continuous probability and magnitude ("level") in [0, 1].
    space = {}
    for i in range(args.num_policy):
        for j in range(args.num_op):
            space['policy_%d_%d' % (i, j)] = hp.choice('policy_%d_%d' % (i, j), list(range(0, len(ops))))
            # NOTE(review): label has a stray space ('prob_%d_ %d') -- labels
            # only, the dict keys used by policy_decoder are well-formed
            space['prob_%d_%d' % (i, j)] = hp.uniform('prob_%d_ %d' % (i, j), 0.0, 1.0)
            space['level_%d_%d' % (i, j)] = hp.uniform('level_%d_ %d' % (i, j), 0.0, 1.0)
    def eval_t(augs):
        # Tune trainable: evaluate one sampled policy on a fresh copy of args.
        print(augs)
        return eval_tta(copy.deepcopy(copied_args), augs)
    final_policy_set = []
    total_computation = 0
    reward_attr = 'top1_valid' # top1_valid or minus_loss
    for _ in range(1): # run multiple times.
        for cv_fold in range(cv_num):
            name = "search_%s_%s_fold%d_ratio%.1f" % (
                args['dataset'], args['model'], cv_fold, args.cv_ratio)
            print(name)
            algo = HyperOptSearch(space, max_concurrent=1, metric=reward_attr)
            aug_config = {
                'working_dir': os.getcwd(), 'save_path': paths_ls[cv_fold],
                'cv_ratio_test': args.cv_ratio, 'cv_fold': cv_fold,
                'num_op': args.num_op, 'num_policy': args.num_policy
            }
            num_samples = 4 if args.smoke_test else args.num_search
            print(aug_config)
            # eval_t(aug_config)
            results = run(eval_t, search_alg=algo, config=aug_config, num_samples=num_samples,
                          resources_per_trial={'gpu': 1}, stop={'training_iteration': args.num_policy})
            # rank trials by the reward metric, best first
            dataframe = results.dataframe().sort_values(reward_attr, ascending=False)
            total_computation = dataframe['elapsed_time'].sum()
            # keep the top num_result_per_cv policies of this fold
            for i in range(num_result_per_cv):
                config_dict = dataframe.loc[i].filter(like='config').to_dict()
                # strip the 'config/' prefix Ray Tune adds to column names
                new_keys = [x.replace('config/', '') for x in config_dict.keys()]
                new_config_dict = {}
                for key in new_keys:
                    new_config_dict[key] = config_dict['config/' + key]
                final_policy = policy_decoder(new_config_dict, args.num_policy, args.num_op)
                logger.info('top1_valid=%.4f %s' % (dataframe.loc[i]['top1_valid'].item(), final_policy))
                final_policy = remove_deplicates(final_policy)
                final_policy_set.extend(final_policy)
    logger.info(json.dumps(final_policy_set))
    logger.info('final_policy=%d' % len(final_policy_set))
    logger.info('processed in %.4f secs, gpu hours=%.4f' % (w.pause('search'), total_computation / 3600.))
    logger.info('----- Train with Augmentations model=%s dataset=%s aug=%s ratio(test)=%.1f -----' % (
        args['model'], args['dataset'], args.aug, args.cv_ratio))
    w.start(tag='train_aug')
    return final_policy_set
if __name__ == '__main__':
augsearch = AugSearch()
| [
"torch.cuda.device_count",
"augmentations_tuner.fastautoaugment.FastAutoAugment.archive.remove_deplicates",
"sys.exit",
"copy.deepcopy",
"ray.init",
"augmentations_tuner.fastautoaugment.FastAutoAugment.data.get_data",
"numpy.mean",
"dataloaders.detection_augment_list",
"json.dumps",
"augmentations... | [((1026, 1039), 'pystopwatch2.PyStopwatch', 'PyStopwatch', ([], {}), '()\n', (1037, 1039), False, 'from pystopwatch2 import PyStopwatch\n'), ((1105, 1131), 'collections.defaultdict', 'defaultdict', (['(lambda : list)'], {}), '(lambda : list)\n', (1116, 1131), False, 'from collections import OrderedDict, defaultdict\n'), ((1143, 1173), 'augmentations_tuner.fastautoaugment.FastAutoAugment.common.get_logger', 'get_logger', (['"""Fast AutoAugment"""'], {}), "('Fast AutoAugment')\n", (1153, 1173), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.common import get_logger, add_filehandler\n'), ((1571, 1578), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1576, 1578), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((1645, 1755), 'augmentations_tuner.fastautoaugment.FastAutoAugment.train.train_and_eval', 'train_and_eval', (['config', 'None', 'dataroot', 'cv_ratio_test', 'cv_fold'], {'save_path': 'save_path', 'only_eval': 'skip_exist'}), '(config, None, dataroot, cv_ratio_test, cv_fold, save_path=\n save_path, only_eval=skip_exist)\n', (1659, 1755), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.train import train_and_eval\n'), ((1921, 1928), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1926, 1928), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((2141, 2206), 'augmentations_tuner.fastautoaugment.FastAutoAugment.archive.policy_decoder', 'policy_decoder', (['augment', "augment['num_policy']", "augment['num_op']"], {}), "(augment, augment['num_policy'], augment['num_op'])\n", (2155, 2206), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.archive import remove_deplicates, policy_decoder\n'), ((2230, 2251), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (2240, 2251), False, 'import torch\n'), ((2600, 2678), 'os.path.join', 'os.path.join', (["augment['working_dir']", "ckpt['model_specs']['data']['home_path']"], {}), "(augment['working_dir'], 
ckpt['model_specs']['data']['home_path'])\n", (2612, 2678), False, 'import os\n'), ((2707, 2718), 'time.time', 'time.time', ([], {}), '()\n', (2716, 2718), False, 'import time\n'), ((3309, 3322), 'numpy.mean', 'np.mean', (['mAPs'], {}), '(mAPs)\n', (3316, 3322), True, 'import numpy as np\n'), ((4085, 4117), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)', 'num_gpus': '(1)'}), '(num_cpus=1, num_gpus=1)\n', (4093, 4117), False, 'import ray\n'), ((4408, 4427), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (4421, 4427), False, 'import copy\n'), ((5407, 5431), 'dataloaders.detection_augment_list', 'detection_augment_list', ([], {}), '()\n', (5429, 5431), False, 'from dataloaders import detection_augment_list\n'), ((1583, 1590), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1588, 1590), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((1609, 1616), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1614, 1616), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((1933, 1940), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1938, 1940), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((2127, 2134), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (2132, 2134), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((2808, 2927), 'augmentations_tuner.fastautoaugment.FastAutoAugment.data.get_data', 'get_data', (["ckpt['model_specs']['data']['annotation_type']", 'dataroot', 'augment'], {'split': 'cv_ratio_test', 'split_idx': 'cv_fold'}), "(ckpt['model_specs']['data']['annotation_type'], dataroot, augment,\n split=cv_ratio_test, split_idx=cv_fold)\n", (2816, 2927), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.data import get_data\n'), ((2988, 3018), 'objectdetection.csv_eval.evaluate', 'evaluate', (['train_dataset', 'model'], {}), '(train_dataset, model)\n', (2996, 3018), False, 'from objectdetection.csv_eval import evaluate\n'), ((3207, 3232), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (3230, 3232), False, 'import torch\n'), ((3603, 3611), 'easydict.EasyDict', 'edict', (['d'], {}), '(d)\n', (3608, 3611), True, 'from easydict import EasyDict as edict\n'), ((3888, 4037), 'os.path.join', 'os.path.join', (['"""augmentations_tuner/fastautoaugment/FastAutoAugment/models"""', "('%s_%s_cv%.1f.log' % (args['dataset'], args['model'], args.cv_ratio))"], {}), "('augmentations_tuner/fastautoaugment/FastAutoAugment/models', \n '%s_%s_cv%.1f.log' % (args['dataset'], args['model'], args.cv_ratio))\n", (3900, 4037), False, 'import os\n'), ((5287, 5298), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5295, 5298), False, 'import sys\n'), ((7754, 7782), 'json.dumps', 'json.dumps', (['final_policy_set'], {}), '(final_policy_set)\n', (7764, 7782), False, 'import json\n'), ((1252, 1278), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1268, 1278), False, 'import os\n'), ((1762, 1769), 'theconf.Config.get', 'C.get', ([], {}), '()\n', (1767, 1769), True, 'from theconf import Config as C, ConfigArgumentParser\n'), ((3182, 3193), 'time.time', 'time.time', ([], {}), '()\n', (3191, 3193), False, 'import time\n'), ((3260, 3273), 'numpy.mean', 'np.mean', (['mAPs'], {}), '(mAPs)\n', (3267, 3273), True, 'import numpy as np\n'), ((5670, 5714), 'hyperopt.hp.uniform', 'hp.uniform', (["('prob_%d_ %d' % (i, j))", '(0.0)', '(1.0)'], {}), "('prob_%d_ %d' % (i, j), 0.0, 1.0)\n", (5680, 5714), False, 'from hyperopt import hp\n'), ((5759, 5804), 'hyperopt.hp.uniform', 'hp.uniform', (["('level_%d_ %d' % (i, j))", '(0.0)', '(1.0)'], {}), "('level_%d_ %d' % (i, j), 0.0, 1.0)\n", (5769, 5804), False, 'from hyperopt import hp\n'), ((5872, 5898), 'copy.deepcopy', 'copy.deepcopy', (['copied_args'], {}), '(copied_args)\n', (5885, 5898), False, 'import copy\n'), ((6272, 6331), 'ray.tune.suggest.hyperopt.HyperOptSearch', 'HyperOptSearch', (['space'], {'max_concurrent': '(1)', 'metric': 'reward_attr'}), '(space, 
max_concurrent=1, metric=reward_attr)\n', (6286, 6331), False, 'from ray.tune.suggest.hyperopt import HyperOptSearch\n'), ((6739, 6898), 'ray.tune.run', 'run', (['eval_t'], {'search_alg': 'algo', 'config': 'aug_config', 'num_samples': 'num_samples', 'resources_per_trial': "{'gpu': 1}", 'stop': "{'training_iteration': args.num_policy}"}), "(eval_t, search_alg=algo, config=aug_config, num_samples=num_samples,\n resources_per_trial={'gpu': 1}, stop={'training_iteration': args.\n num_policy})\n", (6742, 6898), False, 'from ray.tune import register_trainable, run_experiments, run\n'), ((5000, 5026), 'copy.deepcopy', 'copy.deepcopy', (['copied_args'], {}), '(copied_args)\n', (5013, 5026), False, 'import copy\n'), ((6390, 6401), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6399, 6401), False, 'import os\n'), ((7451, 7512), 'augmentations_tuner.fastautoaugment.FastAutoAugment.archive.policy_decoder', 'policy_decoder', (['new_config_dict', 'args.num_policy', 'args.num_op'], {}), '(new_config_dict, args.num_policy, args.num_op)\n', (7465, 7512), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.archive import remove_deplicates, policy_decoder\n'), ((7651, 7682), 'augmentations_tuner.fastautoaugment.FastAutoAugment.archive.remove_deplicates', 'remove_deplicates', (['final_policy'], {}), '(final_policy)\n', (7668, 7682), False, 'from augmentations_tuner.fastautoaugment.FastAutoAugment.archive import remove_deplicates, policy_decoder\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 12 19:49:18 2020
@author: hongxing
"""
import torch
import numpy as np
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
torch.manual_seed(1)    # reproducible: fix the global torch RNG seed so sampling/shuffling repeat across runs
def Graying(img):
    """Collapse a channel-first image to one channel by averaging |channel|.

    Parameters
    ----------
    img : numpy.ndarray
        Channel-first image, shape (C, H, W).  (In this file it is always a
        slice of an array loaded via ``np.load`` — see GrayDataset.)

    Returns
    -------
    numpy.ndarray
        Grayed image with a singleton channel axis, shape (1, H, W).
    """
    # Vectorized equivalent of the original per-channel loop:
    # sum abs(channel) over the channel axis, divide by the channel count.
    # keepdims=True preserves the leading axis, matching grayed[np.newaxis, :].
    return np.mean(np.abs(img), axis=0, keepdims=True)
'''MyDataset'''
class MyDataset(Dataset):
    """Dataset that serves 4-D volumes stored in a ``.npy`` file.

    Each item is one volume (indexed along the first axis) converted to a
    torch tensor.
    """

    def __init__(self, data):
        # `data` is the path of the .npy file to load.
        self.data = np.load(data)

    def __getitem__(self, index):
        # Slice out one volume and hand it to torch without copying.
        return torch.from_numpy(self.data[index, :, :, :])

    def __len__(self):
        return len(self.data)
class GrayDataset(Dataset):
    """Like MyDataset, but collapses each volume's channels via Graying.

    Items come out as single-channel torch tensors of shape (1, H, W).
    """

    def __init__(self, data):
        # `data` is the path of the .npy file to load.
        self.data = np.load(data)

    def __getitem__(self, index):
        volume = self.data[index, :, :, :]
        grayed = Graying(volume)
        return torch.from_numpy(grayed)

    def __len__(self):
        return len(self.data)
'''
dataset=MyDataset('./dataset/MNIST/fgsm/fgsm1.npy')
mstfgsmloader= DataLoader(dataset, batch_size=64, shuffle=True, pin_memory=True)
for inputs in mstfgsmloader:
print(inputs.shape
break
''' | [
"torch.manual_seed",
"numpy.load",
"torch.from_numpy"
] | [((227, 247), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (244, 247), False, 'import torch\n'), ((534, 547), 'numpy.load', 'np.load', (['data'], {}), '(data)\n', (541, 547), True, 'import numpy as np\n'), ((638, 660), 'torch.from_numpy', 'torch.from_numpy', (['hdct'], {}), '(hdct)\n', (654, 660), False, 'import torch\n'), ((818, 831), 'numpy.load', 'np.load', (['data'], {}), '(data)\n', (825, 831), True, 'import numpy as np\n'), ((951, 973), 'torch.from_numpy', 'torch.from_numpy', (['hdct'], {}), '(hdct)\n', (967, 973), False, 'import torch\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.optim
import torch.nn.functional as F
import os
import models
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot, miniImageNet_few_shot, tiered_ImageNet_few_shot
from tqdm import tqdm
import pandas as pd
import argparse
import random
import copy
import warnings
from utils import to_one_hot, AverageMeter, loss_calc
import utils
from methods.baselinetrain_dgl import BaselineTrain
def evaluate(dataloader, params):
    """Load a checkpointed embedding, wrap it in BaselineTrain, and report
    per-submodule top-1/top-5 accuracy over ``dataloader``.

    ``params`` must provide: embedding_load_path, embedding_load_path_version
    (0 or 1), and model (one of 'resnet10', 'resnet10_dgl', 'resnet12',
    'resnet18', 'vgg11').  Batches from ``dataloader`` are assumed to be
    (images, labels) pairs on CPU — they are moved to GPU here.
    """
    print("Loading Model: ", params.embedding_load_path)
    if params.embedding_load_path_version == 0:
        # Version-0 checkpoints store the backbone under 'state' with keys
        # prefixed 'feature.'; strip the prefix and drop everything else.
        state = torch.load(params.embedding_load_path)['state']
        state_keys = list(state.keys())
        #print(state_keys)
        for _, key in enumerate(state_keys):
            if "feature." in key:
                # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                newkey = key.replace("feature.", "")
                state[newkey] = state.pop(key)
            else:
                state.pop(key)
        sd = state
    elif params.embedding_load_path_version == 1:
        # Version-1 checkpoints store the backbone state dict under 'model'.
        sd = torch.load(params.embedding_load_path)
        if 'epoch' in sd:
            print("Model checkpointed at epoch: ", sd['epoch'])
        sd = sd['model']
    # elif params.embedding_load_path_version == 3:
    #     state = torch.load(params.embedding_load_path)
    #     print("Model checkpointed at epoch: ", state['epoch'])
    #     state = state['model']
    #     state_keys = list(state.keys())
    #     for _, key in enumerate(state_keys):
    #         if "module." in key:
    #             # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
    #             newkey = key.replace("module.", "")
    #             state[newkey] = state.pop(key)
    #         else:
    #             state.pop(key)
    #     sd = state
    else:
        raise ValueError("Invalid load path version!")
    # Build the requested backbone architecture.
    if params.model == 'resnet10':
        pretrained_model = models.ResNet10()
        feature_dim = pretrained_model.final_feat_dim
    elif params.model == 'resnet10_dgl':
        pretrained_model = models.ResNet10_dgl()
        feature_dim = pretrained_model.final_feat_dim
    elif params.model == 'resnet12':
        pretrained_model = models.Resnet12(width=1, dropout=0.1)
        feature_dim = pretrained_model.output_size
    elif params.model == 'resnet18':
        pretrained_model = models.resnet18(remove_last_relu=False,
                                        input_high_res=True)
        feature_dim = 512
    elif params.model == 'vgg11':
        pretrained_model = models.VGGn('vgg11', feat_mult=1, num_classes=64, dropout=0.0,nonlin='relu',
                                     loss_sup='pred', dim_in_decoder=200704, num_layers=1,
                                     num_hidden=1024, aux_type='nokland', n_mlp=1, n_conv=0,
                                     pooling='adaptiveavg', bn=True, aux_bn=False)
        pretrained_model.final_feat_dim = 512
        feature_dim = 512
    else:
        raise ValueError("Invalid model!")
    pretrained_model.load_state_dict(sd)
    model = BaselineTrain(pretrained_model, 64)
    # NOTE(review): this second load reads key 'state' from the checkpoint
    # regardless of embedding_load_path_version; a version-1 checkpoint
    # (keyed 'model' above) may not contain 'state' — confirm intended.
    model.load_state_dict(torch.load(params.embedding_load_path)['state'])
    pretrained_model = model
    # n_cnn: number of stacked submodules; accuracy is tracked per submodule.
    N = pretrained_model.n_cnn
    acc_all = []
    pretrained_model.cuda()
    print(pretrained_model)
    total = 0
    correct = 0.0
    # One meter set per submodule, accumulating top-1/top-5 over all batches.
    meters = [utils.AverageMeterSet() for n in range(N)]
    pretrained_model.eval()
    for i, (x, y) in tqdm(enumerate(dataloader)):
        x = x.cuda()
        y = y.cuda()
        for n in range(N):
            # The model returns (scores, aux, features); the features are fed
            # back in as `x` for the next submodule, so iteration order matters.
            scores,_,x = pretrained_model(x, n)
            perf = utils.accuracy(scores.data,
                                  y.data, topk=(1, 5))
            meters[n].update('top1', perf['average'][0].item(), len(x))
            meters[n].update('top5', perf['average'][1].item(), len(x))
            #_, pred = torch.max(scores.data, 1)
            #total += y.size(0)
            #correct += (pred==y).sum().item()
    ###############################################################################################
    # print('Test Acc = %d %%' %
    #      (100 * correct/total))
    #print(correct, total)
    for n in range(N):
        print("Top1 Avg of {:d}'th module: {:f}".format(n, meters[n].__getitem__('top1')))
def main(params):
    """Evaluate the checkpointed embedding on the miniImageNet val and train splits.

    For each split this reseeds every RNG with ``params.seed`` (so both runs
    are deterministic and comparable), builds a simple non-episodic
    dataloader, and runs :func:`evaluate` on it.

    Parameters
    ----------
    params : argparse.Namespace
        Parsed command-line options; ``target_dataset`` is overwritten here.
    """
    # Dead locals (`results`, `shot_done`) and the commented-out dataset
    # dispatch from the original were removed: only miniImageNet is evaluated.
    for split in ['miniImageNet_val', 'miniImageNet_train']:
        params.target_dataset = split
        datamgr = miniImageNet_few_shot
        print(params.target_dataset)

        # Full determinism: cudnn, numpy, torch (CPU + CUDA) and python RNGs.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(params.seed)
        torch.random.manual_seed(params.seed)
        torch.cuda.manual_seed(params.seed)
        random.seed(params.seed)

        dataloader = datamgr.SimpleDataManager(
            params.image_size, params.batch_size).get_data_loader(
            aug=False,
            train_or_val=params.target_dataset == 'miniImageNet_train')
        evaluate(dataloader, params)
if __name__=='__main__':
    # Command-line entry point: parse evaluation options and run main().
    parser = argparse.ArgumentParser(
            description='Evaluation script')
    parser.add_argument('--target_dataset', default='miniImagenet',
                        help='test target dataset')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='Size of batch')
    parser.add_argument('--image_size', type=int, default=224,
                        help='Resolution of the input image')
    parser.add_argument('--train_aug', action='store_true',
                        help='perform data augmentation or not during training ')
    parser.add_argument('--model', default='resnet10',
                        help='backbone architecture')
    parser.add_argument('--seed', default=1, type=int, help='random seed')
    parser.add_argument('--embedding_load_path', type=str,
                        help='path to load embedding')
    parser.add_argument('--embedding_load_path_version', type=int, default=1,
                        help='how to load the embedding')
    params = parser.parse_args()
    main(params)
| [
"torch.random.manual_seed",
"methods.baselinetrain_dgl.BaselineTrain",
"argparse.ArgumentParser",
"utils.accuracy",
"models.Resnet12",
"torch.load",
"models.resnet18",
"random.seed",
"utils.AverageMeterSet",
"numpy.random.seed",
"models.ResNet10",
"models.ResNet10_dgl",
"torch.cuda.manual_se... | [((3279, 3314), 'methods.baselinetrain_dgl.BaselineTrain', 'BaselineTrain', (['pretrained_model', '(64)'], {}), '(pretrained_model, 64)\n', (3292, 3314), False, 'from methods.baselinetrain_dgl import BaselineTrain\n'), ((5867, 5923), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluation script"""'}), "(description='Evaluation script')\n", (5890, 5923), False, 'import argparse\n'), ((2196, 2213), 'models.ResNet10', 'models.ResNet10', ([], {}), '()\n', (2211, 2213), False, 'import models\n'), ((3571, 3594), 'utils.AverageMeterSet', 'utils.AverageMeterSet', ([], {}), '()\n', (3592, 3594), False, 'import utils\n'), ((5469, 5496), 'numpy.random.seed', 'np.random.seed', (['params.seed'], {}), '(params.seed)\n', (5483, 5496), True, 'import numpy as np\n'), ((5505, 5542), 'torch.random.manual_seed', 'torch.random.manual_seed', (['params.seed'], {}), '(params.seed)\n', (5529, 5542), False, 'import torch\n'), ((5551, 5586), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['params.seed'], {}), '(params.seed)\n', (5573, 5586), False, 'import torch\n'), ((5595, 5619), 'random.seed', 'random.seed', (['params.seed'], {}), '(params.seed)\n', (5606, 5619), False, 'import random\n'), ((651, 689), 'torch.load', 'torch.load', (['params.embedding_load_path'], {}), '(params.embedding_load_path)\n', (661, 689), False, 'import torch\n'), ((1246, 1284), 'torch.load', 'torch.load', (['params.embedding_load_path'], {}), '(params.embedding_load_path)\n', (1256, 1284), False, 'import torch\n'), ((2336, 2357), 'models.ResNet10_dgl', 'models.ResNet10_dgl', ([], {}), '()\n', (2355, 2357), False, 'import models\n'), ((3341, 3379), 'torch.load', 'torch.load', (['params.embedding_load_path'], {}), '(params.embedding_load_path)\n', (3351, 3379), False, 'import torch\n'), ((3841, 3889), 'utils.accuracy', 'utils.accuracy', (['scores.data', 'y.data'], {'topk': '(1, 5)'}), '(scores.data, y.data, topk=(1, 5))\n', (3855, 3889), False, 
'import utils\n'), ((2476, 2513), 'models.Resnet12', 'models.Resnet12', ([], {'width': '(1)', 'dropout': '(0.1)'}), '(width=1, dropout=0.1)\n', (2491, 2513), False, 'import models\n'), ((2629, 2689), 'models.resnet18', 'models.resnet18', ([], {'remove_last_relu': '(False)', 'input_high_res': '(True)'}), '(remove_last_relu=False, input_high_res=True)\n', (2644, 2689), False, 'import models\n'), ((2819, 3066), 'models.VGGn', 'models.VGGn', (['"""vgg11"""'], {'feat_mult': '(1)', 'num_classes': '(64)', 'dropout': '(0.0)', 'nonlin': '"""relu"""', 'loss_sup': '"""pred"""', 'dim_in_decoder': '(200704)', 'num_layers': '(1)', 'num_hidden': '(1024)', 'aux_type': '"""nokland"""', 'n_mlp': '(1)', 'n_conv': '(0)', 'pooling': '"""adaptiveavg"""', 'bn': '(True)', 'aux_bn': '(False)'}), "('vgg11', feat_mult=1, num_classes=64, dropout=0.0, nonlin=\n 'relu', loss_sup='pred', dim_in_decoder=200704, num_layers=1,\n num_hidden=1024, aux_type='nokland', n_mlp=1, n_conv=0, pooling=\n 'adaptiveavg', bn=True, aux_bn=False)\n", (2830, 3066), False, 'import models\n')] |
from sklearn.neighbors import KNeighborsClassifier
from rios import rat
import numpy
import osgeo.gdal as gdal
# Train a Mahalanobis-distance KNN classifier from RAT columns of a KEA
# raster and write the predicted class back as a new column.
clumpsImg = './N00E103_10_grid_knn_skl.kea'

# Open dataset in update mode so the output column can be written back.
ratDataset = gdal.Open(clumpsImg, gdal.GA_Update)

# Import Columns
print("Importing Columns")
HH = rat.readColumn(ratDataset, "HH")
HV = rat.readColumn(ratDataset, "HV")
NDVI = rat.readColumn(ratDataset, "NDVI")
NDWI = rat.readColumn(ratDataset, "NDWI")
Training = rat.readColumn(ratDataset, "Training")
LCClass = rat.readColumn(ratDataset, "Class")
ApplyTo = rat.readColumn(ratDataset, "ApplyTo")

# Stack the feature columns into an (nRows, 4) array.
XTrain = numpy.array([HH, HV, NDVI, NDWI])
XTrain = XTrain.transpose()

# BUGFIX: the finite-row mask must be computed ONCE on the unfiltered
# array and applied to features, labels and the Training flag alike —
# the original recomputed it from the already-filtered XTrain, producing
# a mask of the wrong length for LCClass/Training.
finiteTrain = numpy.isfinite(XTrain).all(axis=1)
XTrain = XTrain[finiteTrain]
CTrain = LCClass[finiteTrain]
trainFlag = Training[finiteTrain]

# Keep only Coastal strip (training) rows.
XTrain = XTrain[trainFlag == 1]
CTrain = CTrain[trainFlag == 1]
print(XTrain.shape)
print(CTrain.shape)

nsamples = XTrain.shape[0]

print('Calculate Covariance Matrix')
# Computed manually: numpy.cov on the full sample matrix was too slow and
# memory-hungry for large RATs (see original note).
mean = numpy.mean(XTrain, axis=0)
# Subtract a broadcast copy of the mean from every row, then form
# the sample covariance (divide by n-1).
mean_mat = numpy.outer(numpy.ones((nsamples, 1)), mean)
xTrainCov = XTrain - mean_mat
xTrainCov = numpy.dot(xTrainCov.T, xTrainCov) / (nsamples - 1)
print(xTrainCov)

print('Create Classifier')
neigh = KNeighborsClassifier(n_neighbors=12, metric='mahalanobis', V=xTrainCov)
print('Training Classifier')
neigh.fit(XTrain, CTrain)
print('Calc Accuracy',)
accVal = neigh.score(XTrain, CTrain)
print(' = ', accVal)

# Create array of IDs (to keep track of the original row number).
ID = numpy.arange(Training.shape[0])

# Form array of all columns for prediction.
XClass = numpy.array([HH, HV, NDVI, NDWI])
XClass = XClass.transpose()

# BUGFIX: same single-mask pattern as above so XClass, ID and ApplyTo
# stay aligned row-for-row.
finiteClass = numpy.isfinite(XClass).all(axis=1)
XClass = XClass[finiteClass]
ID = ID[finiteClass]
applyFlag = ApplyTo[finiteClass]

XClass = XClass[applyFlag == 1]
ID = ID[applyFlag == 1]

print('Predicting Classifier')
predClass = neigh.predict(XClass)

print('Create Output Array')
# Rows that were not classified (non-finite or outside ApplyTo) stay -1.
outLabels = numpy.zeros_like(ApplyTo, dtype=numpy.int16)
outLabels[...] = -1
outLabels[ID] = predClass

print("Writing Columns")
rat.writeColumn(ratDataset, "ClassOut", outLabels)
ratDataset = None
| [
"osgeo.gdal.Open",
"numpy.mean",
"rios.rat.writeColumn",
"numpy.ones",
"rios.rat.readColumn",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"numpy.dot",
"numpy.isfinite",
"numpy.zeros_like",
"numpy.arange"
] | [((184, 220), 'osgeo.gdal.Open', 'gdal.Open', (['clumpsImg', 'gdal.GA_Update'], {}), '(clumpsImg, gdal.GA_Update)\n', (193, 220), True, 'import osgeo.gdal as gdal\n'), ((271, 303), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""HH"""'], {}), "(ratDataset, 'HH')\n", (285, 303), False, 'from rios import rat\n'), ((309, 341), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""HV"""'], {}), "(ratDataset, 'HV')\n", (323, 341), False, 'from rios import rat\n'), ((349, 383), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""NDVI"""'], {}), "(ratDataset, 'NDVI')\n", (363, 383), False, 'from rios import rat\n'), ((391, 425), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""NDWI"""'], {}), "(ratDataset, 'NDWI')\n", (405, 425), False, 'from rios import rat\n'), ((437, 475), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""Training"""'], {}), "(ratDataset, 'Training')\n", (451, 475), False, 'from rios import rat\n'), ((486, 521), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""Class"""'], {}), "(ratDataset, 'Class')\n", (500, 521), False, 'from rios import rat\n'), ((532, 569), 'rios.rat.readColumn', 'rat.readColumn', (['ratDataset', '"""ApplyTo"""'], {}), "(ratDataset, 'ApplyTo')\n", (546, 569), False, 'from rios import rat\n'), ((608, 641), 'numpy.array', 'numpy.array', (['[HH, HV, NDVI, NDWI]'], {}), '([HH, HV, NDVI, NDWI])\n', (619, 641), False, 'import numpy\n'), ((1095, 1121), 'numpy.mean', 'numpy.mean', (['XTrain'], {'axis': '(0)'}), '(XTrain, axis=0)\n', (1105, 1121), False, 'import numpy\n'), ((1377, 1448), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(12)', 'metric': '"""mahalanobis"""', 'V': 'xTrainCov'}), "(n_neighbors=12, metric='mahalanobis', V=xTrainCov)\n", (1397, 1448), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1659, 1690), 'numpy.arange', 'numpy.arange', (['Training.shape[0]'], {}), '(Training.shape[0])\n', (1671, 
1690), False, 'import numpy\n'), ((1729, 1762), 'numpy.array', 'numpy.array', (['[HH, HV, NDVI, NDWI]'], {}), '([HH, HV, NDVI, NDWI])\n', (1740, 1762), False, 'import numpy\n'), ((2108, 2152), 'numpy.zeros_like', 'numpy.zeros_like', (['ApplyTo'], {'dtype': 'numpy.int16'}), '(ApplyTo, dtype=numpy.int16)\n', (2124, 2152), False, 'import numpy\n'), ((2224, 2274), 'rios.rat.writeColumn', 'rat.writeColumn', (['ratDataset', '"""ClassOut"""', 'outLabels'], {}), "(ratDataset, 'ClassOut', outLabels)\n", (2239, 2274), False, 'from rios import rat\n'), ((1204, 1229), 'numpy.ones', 'numpy.ones', (['(nsamples, 1)'], {}), '((nsamples, 1))\n', (1214, 1229), False, 'import numpy\n'), ((1277, 1310), 'numpy.dot', 'numpy.dot', (['xTrainCov.T', 'xTrainCov'], {}), '(xTrainCov.T, xTrainCov)\n', (1286, 1310), False, 'import numpy\n'), ((710, 732), 'numpy.isfinite', 'numpy.isfinite', (['XTrain'], {}), '(XTrain)\n', (724, 732), False, 'import numpy\n'), ((763, 785), 'numpy.isfinite', 'numpy.isfinite', (['XTrain'], {}), '(XTrain)\n', (777, 785), False, 'import numpy\n'), ((1805, 1827), 'numpy.isfinite', 'numpy.isfinite', (['XClass'], {}), '(XClass)\n', (1819, 1827), False, 'import numpy\n'), ((1849, 1871), 'numpy.isfinite', 'numpy.isfinite', (['XClass'], {}), '(XClass)\n', (1863, 1871), False, 'import numpy\n')] |
import numpy as np
import torch
from torch.autograd import Variable
from utils import *
def getSurrogateloss(model, states, actions, advantages, logProbabilityOld):
    """Mean surrogate policy loss: -advantage * exp(logp_new - logp_old)."""
    log_prob_new = model.getLogProbabilityDensity(states, Variable(actions))
    ratio = torch.exp(log_prob_new - Variable(logProbabilityOld))
    surrogate = -advantages.squeeze() * ratio
    return surrogate.mean()
def FisherVectorProduct(v, model, states, actions, logProbabilityOld, damping):
    """Hessian-vector product of the mean KL with v, plus damping * v.

    Uses the double-backward trick: differentiate (grad(KL) . v) once more
    with respect to the model parameters.
    """
    kl = model.meanKlDivergence(states, actions, logProbabilityOld)
    # First-order gradient of the KL, kept differentiable for the second pass.
    kl_grads = torch.autograd.grad(kl, model.parameters(),
                                    retain_graph=True, create_graph=True)
    flat_kl_grad = torch.cat([g.view(-1) for g in kl_grads])
    grad_dot_v = (flat_kl_grad * v).sum()
    hv_grads = torch.autograd.grad(grad_dot_v, model.parameters())
    flat_hv = torch.cat([g.contiguous().view(-1) for g in hv_grads]).data
    return flat_hv + damping * v
def trpo_step(model, states, actions, advantages, max_kl, damping):
    """One TRPO update: natural-gradient step direction via conjugate
    gradients, scaled to the KL trust region, then a backtracking line
    search before committing the new parameters.  Returns the surrogate
    loss at the OLD parameters."""
    fixed_log_prob = model.getLogProbabilityDensity(Variable(states),
                                                    actions).detach()

    def surrogate(m):
        # Surrogate loss evaluated with log-probs frozen at the old policy.
        return getSurrogateloss(m, states, actions, advantages,
                                fixed_log_prob)

    def fvp(vec):
        # Damped Fisher(-KL Hessian) vector product.
        return FisherVectorProduct(vec, model, states, actions,
                                   fixed_log_prob, damping)

    loss = surrogate(model)
    grads = torch.autograd.grad(loss, model.parameters())
    flat_loss_grad = torch.cat([g.view(-1) for g in grads])

    # Approximately solve F * step = -g.
    step_dir = conjugate_gradients(fvp, -flat_loss_grad, 10)
    shs = 0.5 * (step_dir * fvp(step_dir)).sum(0, keepdim=True)
    lagrange_mult = torch.sqrt(shs / max_kl)
    full_step = step_dir / lagrange_mult[0]
    neg_g_dot_step = (-flat_loss_grad * step_dir).sum(0, keepdim=True)

    prev_params = get_flat_params_from(model)
    success, new_params = linesearch(model, surrogate, prev_params, full_step,
                                     neg_g_dot_step / lagrange_mult[0])
    set_flat_params_to(model, new_params)
    return loss
def conjugate_gradients(Avp, b, nsteps, residual_tol=1e-10):
    """Solve A x = b with the conjugate-gradient method.

    Parameters
    ----------
    Avp : callable
        Returns the matrix-vector product A @ p for a vector p (A itself is
        never materialized; it must be symmetric positive-definite).
    b : torch.Tensor
        Right-hand side (1-D).
    nsteps : int
        Maximum number of CG iterations.
    residual_tol : float
        Stop early once the squared residual norm falls below this.

    Returns
    -------
    torch.Tensor
        Approximate solution x, same dtype/device as ``b``.
    """
    # zeros_like (not torch.zeros(b.size())) so x matches b's dtype and
    # device; otherwise the in-place updates below fail for float64/GPU b.
    x = torch.zeros_like(b)
    r = b.clone()   # residual b - A x (x starts at 0)
    p = b.clone()   # search direction
    rdotr = torch.dot(r, r)
    for _ in range(nsteps):       # dead `t = i` bookkeeping removed
        Ap = Avp(p)
        alpha = rdotr / torch.dot(p, Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = torch.dot(r, r)
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
        if rdotr < residual_tol:
            break
    return x
def linesearch(model,
               f,
               x,
               fullstep,
               expected_improve_rate,
               max_backtracks=10,
               accept_ratio=.1):
    """Backtracking line search along ``fullstep`` from flat params ``x``.

    Halves the step each backtrack; accepts the first candidate whose
    actual improvement is positive and exceeds ``accept_ratio`` of the
    expected (linearized) improvement.  Returns (accepted?, params) —
    the model is left holding the last candidate tried.
    """
    baseline = f(model).data
    for backtrack in range(max_backtracks):
        step_scale = .5 ** backtrack
        candidate = x + step_scale * fullstep
        set_flat_params_to(model, candidate)
        actual_improve = baseline - f(model).data
        expected_improve = expected_improve_rate * step_scale
        ratio = actual_improve / expected_improve
        if ratio.item() > accept_ratio and actual_improve.item() > 0:
            return True, candidate
    return False, x
| [
"torch.autograd.Variable",
"torch.sqrt",
"numpy.arange",
"torch.dot"
] | [((1906, 1930), 'torch.sqrt', 'torch.sqrt', (['(shs / max_kl)'], {}), '(shs / max_kl)\n', (1916, 1930), False, 'import torch\n'), ((2410, 2425), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2419, 2425), False, 'import torch\n'), ((217, 234), 'torch.autograd.Variable', 'Variable', (['actions'], {}), '(actions)\n', (225, 234), False, 'from torch.autograd import Variable\n'), ((2588, 2603), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2597, 2603), False, 'import torch\n'), ((2500, 2518), 'torch.dot', 'torch.dot', (['p', '_Avp'], {}), '(p, _Avp)\n', (2509, 2518), False, 'import torch\n'), ((3074, 3099), 'numpy.arange', 'np.arange', (['max_backtracks'], {}), '(max_backtracks)\n', (3083, 3099), True, 'import numpy as np\n'), ((299, 326), 'torch.autograd.Variable', 'Variable', (['logProbabilityOld'], {}), '(logProbabilityOld)\n', (307, 326), False, 'from torch.autograd import Variable\n'), ((1068, 1084), 'torch.autograd.Variable', 'Variable', (['states'], {}), '(states)\n', (1076, 1084), False, 'from torch.autograd import Variable\n')] |
"""This module is the GUI frontend for darksky.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# python2
try:
from Tkinter import (Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button,
Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton,
BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S,
StringVar, IntVar, Radiobutton, IntVar)
import ttk
import tkFileDialog as filedialog
from tkFont import Font
# python3
except:
from tkinter import (Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button,
Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton,
BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S,
StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar)
from tkinter.font import Font
import matplotlib
matplotlib.use('TkAgg')
from PIL import Image, ImageTk, ImageOps
from numpy import loadtxt
import webbrowser
import threading
from multiprocessing import Pool
import sys, os
import time
import logging
logger = logging.getLogger()
import skyglow.constants as constants
import skyglow.darksky as darksky
class SkyglowEstimationToolbox:
    """Main class that establishes the SET GUI.

    Builds the input panel, image canvas, and help menu, and drives the four
    workflows (single skyglow map, kernel library, multi-kernel maps, and
    hemispherical visualization) by running :mod:`skyglow.darksky` work in
    background daemon threads while a Tk-side poller keeps the progress
    window responsive.
    """

    def __init__(self, root):
        """Lay out the root window, paned frames, image canvas, and help menu.

        Args:
            root: The Tk root window that hosts the application.
        """
        self.root = root

        # Radio action buttons (created later in main_screen).
        self.action = None
        self.sgmap_single_btn, self.krn_lib_btn, self.multi_map_btn = None, None, None

        # Tk variables backing the file/folder entry fields.
        self.file_log_var = StringVar()
        self.csv_file_var = StringVar()
        self.krn_folder_var = StringVar()
        self.output_folder_var = StringVar()
        self.sgmap_folder_var = StringVar()
        self.krn_ent_var = StringVar()
        self.krn_var, self.hem_var = IntVar(), IntVar()

        # Widget handles are pre-declared so remove_all()/checkbtn_val() can
        # reference them safely before main_screen() builds them.
        self.img, self.cdiag = None, None
        self.lat_lbl, self.lat_entry = None, None
        self.k_lbl, self.k_entry = None, None
        self.zen_lbl, self.zen_entry = None, None
        self.azi_lbl, self.azi_entry = None, None
        self.krn_lvl, self.krn_entry, self.krn_btn = None, None, None
        self.txt_redir, self.prg_log = None, None
        self.map_btn, self.gen_krn_btn = None, None

        # Sets window title, size, and icon on screen.
        self.root.title("Skyglow Estimation Toolbox (SET)")
        self.root.geometry('%dx%d+%d+%d' % (constants.SW*0.75, constants.SH*0.75, 25, 25))
        self.root.iconbitmap(os.path.join(os.getcwd(), constants.ICO))
        self.root.resizable(False, False)
        self.root.update_idletasks()

        # Creates three paned windows for the main screen.
        base = PanedWindow()
        base.pack(fill=BOTH, expand=1)
        sub1 = PanedWindow(base, orient=VERTICAL, height=self.root.winfo_height()*3/4)
        base.add(sub1)
        sub2 = PanedWindow(sub1, orient=HORIZONTAL, height=self.root.winfo_height()/5)
        sub1.add(sub2)

        # Creates frame for holding inputs.
        self.input_frame = Frame(sub2)
        sub2.add(self.input_frame)

        # Creates frame for bottom half of main screen.
        self.img_frame = Frame(sub1, bd=2, bg='white', relief="sunken")
        sub1.add(self.img_frame)

        # Creates canvas for displaying images.
        self.img_canvas = Canvas(self.img_frame, bd=2, relief="groove",
                                 width=constants.SW*0.6, height=self.root.winfo_height()*3/4*0.9)
        self.img_canvas.place(relx=.5, rely=.5, anchor=CENTER)

        # Creates help button for link to documentation, instructions, and about.
        self.help_btn = Menubutton(self.input_frame, text="Help", relief="raised",
                                   bd=2, width=8, pady=1)
        self.help_btn.grid(column=4, columnspan=1, row=0)
        self.help_btn_menu = Menu(self.help_btn, tearoff=0)
        doc = 'https://github.com/NASA-DEVELOP'
        self.help_btn_menu.add_command(label="Documentation", command=lambda: self.open_url(doc))
        self.help_btn_menu.add_command(label="Instructions", command=self.instructions)
        self.help_btn_menu.add_separator()
        self.help_btn_menu.add_command(label="About", command=self.about)
        self.help_btn["menu"] = self.help_btn_menu

    def main_screen(self):
        """Set up input GUI and image display screen."""
        self.action = IntVar()
        btn_width = int(constants.SW/60)
        file_width = int(constants.SW/18)
        lbl_width = int(constants.SW/60)
        gen_width = int(constants.SW/42)
        radio_font = Font(family='TkDefaultFont', size=12)

        # One radio button per workflow; selecting one rebuilds the form.
        self.sgmap_single_btn = Radiobutton(self.input_frame, text="Generate Artificial Skyglow Map", font=radio_font,
                                           width=btn_width, variable=self.action, value='sng',
                                           command=self.sng_popup)
        self.krn_lib_btn = Radiobutton(self.input_frame, text="Generate Kernel Library", font=radio_font,
                                      width=btn_width, variable=self.action, value='krn',
                                      command=self.krn_popup)
        self.multi_map_btn = Radiobutton(self.input_frame, text="Generate Maps from Multiple Kernels", font=radio_font,
                                        width=btn_width, variable=self.action, value='mul',
                                        command=self.mul_popup)
        self.hem_map_btn = Radiobutton(self.input_frame, text="Generate Hemispherical Visualization", font=radio_font,
                                      width=btn_width, variable=self.action, value='hem',
                                      command=self.hem_popup)

        # Place the workflow selectors across the top row.
        self.sgmap_single_btn.grid(column=0, columnspan=1, row=0)
        self.krn_lib_btn.grid(column=1, columnspan=1, row=0)
        self.multi_map_btn.grid(column=2, columnspan=1, row=0)
        self.hem_map_btn.grid(column=3, columnspan=1, row=0)

        # VIIRS Image Reference File
        self.file_lbl = Label(self.input_frame, text="Image File:", width=lbl_width, anchor=E)
        self.file_log = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                              textvariable=self.file_log_var)
        self.browse_btn = Button(self.input_frame, text="Browse", command=self.import_viirs)

        # Angles CSV File
        self.csv_file_lbl = Label(self.input_frame, text="Angles CSV File:", width=lbl_width, anchor=E)
        self.csv_file_log = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                                  textvariable=self.csv_file_var)
        self.csv_browse_btn = Button(self.input_frame, text="Browse", command=self.import_csv)

        # Multiple Maps from Kernel library
        self.mul_file_lbl = Label(self.input_frame, text="Kernel Folder:", width=lbl_width, anchor=E)
        self.mul_file_log = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                                  textvariable=self.krn_folder_var)
        self.mul_browse_btn = Button(self.input_frame, text="Browse", command=self.import_krn_folder)

        # MultiKrn Map Output Location
        self.output_lbl = Label(self.input_frame, text="Output Location:", width=lbl_width, anchor=E)
        self.output_log = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                                textvariable=self.output_folder_var)
        self.output_btn = Button(self.input_frame, text="Browse", command=self.import_out_folder)

        # Hemisphere Output Location
        self.sgmap_folder_lbl = Label(self.input_frame, text="Skyglow Map Location:", width=lbl_width, anchor=E)
        self.sgmap_folder_log = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                                      textvariable=self.sgmap_folder_var)
        self.sgmap_folder_btn = Button(self.input_frame, text="Browse", command=self.import_sgmap_folder)

        # Import Kernel Checkbutton
        self.check_lbl = Label(self.input_frame, text="Import Kernel:", width=lbl_width, anchor=E)
        self.krn_chk = Checkbutton(self.input_frame, anchor=W, variable=self.krn_var,
                                   command=self.checkbtn_val)
        self.hem_chk_lbl = Label(self.input_frame, text="Generate kernels for hemisphere:", width=lbl_width, anchor=E)
        self.hem_chk = Checkbutton(self.input_frame, anchor=W, variable=self.hem_var)

        # Region Latitude (deg), Grand Teton National Park = 43.7904 degrees N
        self.lat_lbl = Label(self.input_frame, text="Latitude (deg):", width=lbl_width, anchor=E)
        self.lat_entry = Entry(self.input_frame, width=btn_width, bd=2, relief="sunken")
        self.lon_lbl = Label(self.input_frame, text="Longitude (deg):", width=lbl_width, anchor=E)
        self.lon_entry = Entry(self.input_frame, width=btn_width, bd=2, relief="sunken")

        # Atmospheric Clarity Parameter, REF 2, Eq. 12, p. 645
        self.k_lbl = Label(self.input_frame, text="Atmospheric Clarity Parameter:",
                           width=btn_width, anchor=E)
        self.k_entry = Entry(self.input_frame, width=btn_width, bd=2, relief="sunken")

        # Zenith angle (deg), z, REF 2, Fig. 6, p.648
        self.zen_lbl = Label(self.input_frame, text="Zenith Angle (deg):", width=lbl_width, anchor=E)
        self.zen_entry = Entry(self.input_frame, width=btn_width, bd=2, relief="sunken")

        # Azimuth angle (deg)
        self.azi_lbl = Label(self.input_frame, text="Azimuth Angle (deg):", width=lbl_width, anchor=E)
        self.azi_entry = Entry(self.input_frame, width=btn_width, bd=2, relief="sunken")

        # Pre-built kernel file picker (shown when "Import Kernel" is checked).
        self.krn_lbl = Label(self.input_frame, text="Kernel File:", width=lbl_width, anchor=E)
        self.krn_ent = Entry(self.input_frame, width=file_width, bd=2, relief="sunken",
                             textvariable=self.krn_ent_var)
        self.krn_btn = Button(self.input_frame, text="Browse", command=self.import_krn)

        # Generate Artificial Skyglow Map Button
        self.map_btn = Button(self.input_frame, text="Generate Artificial Skyglow Map",
                              width=gen_width, command=self.generate_map)
        # Generate Kernel Library button for SET
        self.gen_krn_btn = Button(self.input_frame, text="Generate Kernel Library",
                                  width=gen_width, command=self.generate_krn)
        # Generate Maps from Multiple Kernels button
        self.mul_map_btn = Button(self.input_frame, text="Generate Maps from Multiple Kernels",
                                  width=gen_width, command=self.generate_mmap)
        # Generate Hemispherical Visualization Display of Skyglow
        self.hem_gen_btn = Button(self.input_frame, text="Generate Hemisphere",
                                  width=gen_width, command=self.generate_hem)

    def import_viirs(self):
        """Import a VIIRS DNB file and preview it on the canvas."""
        # Allows user to search through his directory for VIIRS Image file.
        file_types = [('TIFF Files', '*.tif'), ('All files', '*')]
        file_name = filedialog.askopenfilename(initialdir='/', title="Select file", filetypes=file_types)
        self.file_log_var.set(file_name)

        # Checks to see if file is empty. If not, displays image on canvas.
        if file_name:
            pilimg = Image.open(file_name)
            pilimg_width, pilimg_height = pilimg.size
            # Drop tiles that fall outside the image bounds (guards against
            # malformed tile metadata in some GeoTIFFs).
            pilimg.tile = [t for t in pilimg.tile if t[1][2] < pilimg_width and t[1][3] < pilimg_height]
            canvas_size = (self.img_canvas.winfo_width(), self.img_canvas.winfo_height())
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
            # Pillow versions need Image.LANCZOS here.
            pilimg_r = pilimg.resize(canvas_size, Image.ANTIALIAS)
            pilimg_col = ImageOps.colorize(ImageOps.grayscale(pilimg_r), (0, 0, 0), (255, 255, 255))
            pilimg_cont = ImageOps.autocontrast(pilimg_col, cutoff=.4, ignore=None)
            self.img = ImageTk.PhotoImage(pilimg_cont)
            self.img_canvas.create_image(canvas_size[0]/2, canvas_size[1]/2, image=self.img)
        else:
            print('File is empty.')

    def import_csv(self):
        """Import CSV file of (zenith, azimuth) angle pairs."""
        file_types = [('CSV Files', '*.csv'), ('All files', '*')]
        file_name = filedialog.askopenfilename(initialdir='/', title="Select file", filetypes=file_types)
        self.csv_file_var.set(file_name)
        # BUG FIX: was "file_name is ''" — identity comparison with a string
        # literal is unreliable (and always False for a cancelled dialog that
        # returns a new object); use truthiness instead.
        if not file_name:
            print('File is empty.')

    def import_krn_folder(self):
        """Import kernel folder."""
        krn_dir = filedialog.askdirectory(initialdir='/', title="Select kernel folder")
        self.krn_folder_var.set(krn_dir)
        # BUG FIX: was "krn_dir is ''" (identity test on a string literal).
        if not krn_dir:
            print('Directory is empty.')

    def import_out_folder(self):
        """Import skyglow output folder."""
        output_dir = filedialog.askdirectory(initialdir='/', title="Select output folder")
        self.output_folder_var.set(output_dir)
        # BUG FIX: was "output_dir is ''" (identity test on a string literal).
        if not output_dir:
            print('Directory is empty.')

    def import_krn(self):
        """Import existing kernel tif."""
        file_types = [('TIFF Files', '*.tif'), ('All files', '*')]
        file_name = filedialog.askopenfilename(initialdir='/', title="Select file", filetypes=file_types)
        self.krn_ent_var.set(file_name)

    def import_sgmap_folder(self):
        """Import skyglow map folder for hemisphere building."""
        sgmap_dir = filedialog.askdirectory(initialdir='/', title="Select skyglow map folder")
        self.sgmap_folder_var.set(sgmap_dir)
        # BUG FIX: was "sgmap_dir is ''" (identity test on a string literal).
        if not sgmap_dir:
            print('Directory is empty.')

    def sng_popup(self):
        """Show the form for the single-map workflow."""
        self.remove_all()
        self.check_lbl.grid(column=0, row=2)
        self.krn_chk.place(relx=.22, rely=.41, anchor=CENTER)
        self.file_lbl.grid(column=0, row=1)
        self.file_log.grid(column=1, columnspan=3, row=1)
        self.browse_btn.grid(column=4, row=1, sticky=W, padx=3)
        self.lat_lbl.grid(column=0, row=3)
        self.lat_entry.grid(column=1, row=3)
        self.k_lbl.grid(column=2, row=3)
        self.k_entry.grid(column=3, row=3)
        self.zen_lbl.grid(column=0, row=4)
        self.zen_entry.grid(column=1, row=4)
        self.azi_lbl.grid(column=2, row=4)
        self.azi_entry.grid(column=3, row=4)
        self.map_btn.grid(column=1, columnspan=3, row=5, sticky=N+S+E+W)

    def krn_popup(self):
        """Show the form for the kernel-library workflow."""
        self.remove_all()
        # latitude
        self.lat_lbl.grid(column=0, row=3)
        self.lat_entry.grid(column=1, row=3)
        # atmospheric clarity
        self.k_lbl.grid(column=2, row=3)
        self.k_entry.grid(column=3, row=3)
        # angles file
        self.csv_file_lbl.grid(column=0, row=1)
        self.csv_file_log.grid(column=1, columnspan=3, row=1)
        self.csv_browse_btn.grid(column=4, row=1, sticky=W, padx=3)
        # input VIIRS image
        self.file_lbl.grid(column=0, row=2)
        self.file_log.grid(column=1, columnspan=3, row=2)
        self.browse_btn.grid(column=4, row=2, sticky=W, padx=3)
        self.hem_chk_lbl.grid(column=0, row=4)
        self.hem_chk.place(relx=.21, rely=.69)
        self.gen_krn_btn.grid(column=1, columnspan=3, row=5, sticky=N+S+E+W)

    def mul_popup(self):
        """Show the form for the multiple-maps workflow."""
        self.remove_all()
        # Kernel folder location
        self.mul_file_lbl.grid(column=0, row=1)
        self.mul_file_log.grid(column=1, columnspan=3, row=1)
        self.mul_browse_btn.grid(column=4, row=1, sticky=W, padx=3)
        # input VIIRS image
        self.file_lbl.grid(column=0, row=2)
        self.file_log.grid(column=1, columnspan=3, row=2)
        self.browse_btn.grid(column=4, row=2, sticky=W, padx=3)
        # Choose output location
        self.output_lbl.grid(column=0, row=3)
        self.output_log.grid(column=1, columnspan=3, row=3)
        self.output_btn.grid(column=4, row=3, sticky=W, padx=3)
        # Generate map from kernel folder
        self.mul_map_btn.grid(column=1, columnspan=3, row=4, sticky=N+S+E+W)

    def hem_popup(self):
        """Show the form for the hemisphere workflow."""
        self.remove_all()
        # Skyglow Map Folder
        self.sgmap_folder_lbl.grid(column=0, row=1)
        self.sgmap_folder_log.grid(column=1, columnspan=3, row=1)
        self.sgmap_folder_btn.grid(column=4, row=1, sticky=W, padx=3)
        # Latitude entry
        self.lat_lbl.grid(column=0, row=3)
        self.lat_entry.grid(column=1, row=3)
        # Longitude entry
        self.lon_lbl.grid(column=2, row=3)
        self.lon_entry.grid(column=3, row=3)
        # Generate Hemispherical Visualization button
        self.hem_gen_btn.grid(column=1, columnspan=3, row=4, sticky=N+S+E+W)

    def remove_all(self):
        """Remove all existing GUI elements before opening a new tab."""
        self.check_lbl.grid_remove()
        self.krn_chk.place_forget()
        self.hem_chk.place_forget()
        self.hem_chk_lbl.grid_remove()
        self.file_lbl.grid_remove()
        self.file_log.grid_remove()
        self.browse_btn.grid_remove()
        self.krn_lbl.grid_remove()
        self.krn_ent.grid_remove()
        self.krn_btn.grid_remove()
        self.lat_lbl.grid_remove()
        self.lat_entry.grid_remove()
        self.k_lbl.grid_remove()
        self.k_entry.grid_remove()
        self.zen_lbl.grid_remove()
        self.zen_entry.grid_remove()
        self.azi_lbl.grid_remove()
        self.azi_entry.grid_remove()
        self.map_btn.grid_remove()
        self.gen_krn_btn.grid_remove()
        self.mul_map_btn.grid_remove()
        self.csv_file_lbl.grid_remove()
        self.csv_file_log.grid_remove()
        self.csv_browse_btn.grid_remove()
        self.mul_file_lbl.grid_remove()
        self.mul_file_log.grid_remove()
        self.mul_browse_btn.grid_remove()
        self.output_lbl.grid_remove()
        self.output_log.grid_remove()
        self.output_btn.grid_remove()
        self.hem_gen_btn.grid_remove()
        self.lon_lbl.grid_remove()
        self.lon_entry.grid_remove()
        self.sgmap_folder_lbl.grid_remove()
        self.sgmap_folder_log.grid_remove()
        self.sgmap_folder_btn.grid_remove()

    def checkbtn_val(self):
        """Change interface based on whether Import Kernel is checked."""
        # Import Kernel File widgets when Kernel Checkbutton is marked.
        if self.krn_var.get():
            self.lat_lbl.grid_remove()
            self.lat_entry.grid_remove()
            self.k_lbl.grid_remove()
            self.k_entry.grid_remove()
            self.zen_lbl.grid_remove()
            self.zen_entry.grid_remove()
            self.azi_lbl.grid_remove()
            self.azi_entry.grid_remove()
            self.krn_lbl.grid(column=0, row=2)
            self.krn_ent.grid(column=1, columnspan=3, row=2)
            self.krn_btn.grid(column=4, row=2, sticky=W, padx=3)
            self.krn_chk.place_forget()
            self.krn_chk.place(relx=0.19, rely=.5)
        # Input parameter widgets when Kernel Checkbutton is unmarked.
        else:
            self.krn_lbl.grid_remove()
            self.krn_ent.grid_remove()
            self.krn_btn.grid_remove()
            self.lat_lbl.grid(column=0, row=3)
            self.lat_entry.grid(column=1, row=3)
            self.k_lbl.grid(column=2, row=3)
            self.k_entry.grid(column=3, row=3)
            self.zen_lbl.grid(column=0, row=4)
            self.zen_entry.grid(column=1, row=4)
            self.azi_lbl.grid(column=2, row=4)
            self.azi_entry.grid(column=3, row=4)
            self.krn_chk.place_forget()
            self.krn_chk.place(relx=0.22, rely=.41, anchor=CENTER)

    @staticmethod
    def open_url(url):
        """Open a url in the default web browser."""
        webbrowser.open_new(url)

    def instructions(self):
        """Open instructions window."""
        # Instantiates separate Toplevel instruction window.
        instr_window = Toplevel(self.root)
        instr_window.geometry('550x575+25+25')
        instr_window.title('Instructions')
        instr_window.wm_iconbitmap(constants.ICO)
        instr_window.resizable(False, False)

        # Creates Scrollbar and Frame for containing other widgets.
        instr_scroll = Scrollbar(instr_window)
        instr_scroll.pack(fill=Y, side="right")
        instr_frame = Frame(instr_window, bg='white')
        instr_frame.pack(fill=BOTH, side="left")

        # Adds instruction text from constants and adds image of Cinzano's diagram.
        instr = Text(instr_frame, width=65, height=40, padx=10, pady=5, bd=0, wrap="word")
        instr.insert("end", constants.INSTR)
        cdiagram_file = Image.open("./static/cinzano_diagram.PNG")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
        # Pillow versions need Image.LANCZOS here.
        cdiagram_file = cdiagram_file.resize((500, 450), Image.ANTIALIAS)
        self.cdiag = ImageTk.PhotoImage(cdiagram_file)
        instr.image_create("end", image=self.cdiag)
        instr.tag_add("top", "1.0", "4.10")
        instr.tag_config("top", font='Times 12 bold')
        instr.tag_add("body", "5.0", "19.20")
        instr.tag_config("body", font='Times 12')
        instr.insert("end", constants.CDIAG)
        instr.pack()
        instr_scroll.config(command=instr.yview)

    def about(self):
        """Open an about window.

        Window gives authors, SET version number, and icon credit.
        """
        # Instantiates a new Toplevel about window.
        about_window = Toplevel(self.root)
        about_window.geometry('350x335+25+25')
        about_window.title('About')
        about_window.wm_iconbitmap(constants.ICO)
        about_window.resizable(False, False)

        # Adds text to about window.
        about = Text(about_window, width=50, height=30, padx=10, pady=3)
        about.insert("end", constants.ABOUT)
        about.tag_add("abt", "1.0", "21.30")
        about.tag_config("abt", font='Times 10 bold', justify=CENTER)
        about.pack()

    def progress(self):
        """Construct a progress window to monitor darksky."""
        # Instantiates a new Toplevel window and frame for progress bar and loading log.
        self.prg_window = Toplevel(self.root)
        self.prg_window.geometry('650x325+250+250')
        self.prg_window.title('Generating Artificial Skyglow Map...')
        self.prg_window.iconbitmap(constants.ICO)
        self.prg_window.resizable(False, False)
        prg_frame = Frame(self.prg_window)
        prg_frame.pack(fill=BOTH)

        # Creates Scrollbar, Progressbar, and Label for checking progress.
        prg_scroll = Scrollbar(prg_frame)
        prg_scroll.pack(fill=Y, side="right")
        self.prg_bar = ttk.Progressbar(prg_frame, orient=HORIZONTAL, length=750,
                                       mode='indeterminate')
        self.prg_bar.pack()
        self.prg_bar.start()
        prg_lbl_txt = StringVar()
        prg_lbl = Label(prg_frame, textvariable=prg_lbl_txt)
        prg_lbl.pack()

        # Displays message log that prints from log file.
        self.prg_log = Text(prg_frame, width=90, padx=5, pady=5, relief="sunken")
        self.prg_log.pack()
        self.prg_log.insert("end", "*****Progress Log*****\n=======================\n")
        self.prg_log.tag_add("abt", "1.0", "3.0")
        self.prg_log.tag_config("abt", font='Courier 12 bold', justify=CENTER)
        # Redirect logger output and stderr into the progress log widget.
        self.txt_redir = LogRedirector(self.prg_log)
        logger.addHandler(self.txt_redir)
        sys.stderr = StderrRedirector(self.prg_log)
        prg_lbl_txt.set("Start time: " + str(time.asctime()))
        self.no_progress = 0

    def update_progress(self):
        """Update progress window to prevent it from freezing.

        Re-schedules itself on the Tk event loop via ``root.after``; once the
        worker thread finishes (only the main thread remains) it lets the
        progress bar stop and, after a few idle polls, hides the window.
        """
        self.prg_log.update()
        # If only the main thread exists, the worker is done.
        if len(threading.enumerate()) == 1:
            self.prg_bar.stop()
            self.no_progress += 1
            if self.no_progress == 3:
                self.prg_window.withdraw()
        else:
            self.prg_bar.start()
        self.root.after(1000, self.update_progress)

    def generate_map(self):
        """Run darksky.sgmapper in a background thread."""
        # Acquires input arguments.
        lat_in, k_in, zen_in, azi_in, file_in, krn_file_in = 0, 0, 0, 0, '', ''
        if self.krn_var.get():
            krn_file_in = self.krn_ent_var.get()
        else:
            lat_in = float(self.lat_entry.get())
            k_in = float(self.k_entry.get())
            zen_in = float(self.zen_entry.get())
            azi_in = float(self.azi_entry.get())
        file_in = self.file_log_var.get()

        self.progress()

        # BUG FIX: the original passed update_progress() (already called, so
        # target=None) to a throwaway Thread. update_progress re-schedules
        # itself via root.after and must run on the main thread, so call it
        # directly; the model runs in a daemon worker thread.
        self.update_progress()
        t_thread = threading.Thread(target=darksky.sgmapper,
                                    args=(lat_in, k_in, zen_in, azi_in, file_in, krn_file_in))
        t_thread.daemon = True  # setDaemon() is deprecated
        t_thread.start()

    def generate_krn(self):
        """Start kernel generation in background threads."""
        # Acquires input arguments.
        csv_in, file_in, lat_in, k_in, hem = '', '', 0, 0, False
        csv_in = self.csv_file_var.get()
        file_in = self.file_log_var.get()
        lat_in = float(self.lat_entry.get())
        k_in = float(self.k_entry.get())
        hem = self.hem_var.get()

        self.progress()

        # BUG FIX: see generate_map — start the Tk-side progress poller
        # directly instead of wrapping its (None) return value in a Thread.
        self.update_progress()
        with open(csv_in, "rb") as f:
            angle_list = loadtxt(f, delimiter=",", skiprows=1)
        # One daemon worker per (zenith, azimuth) pair.
        for angle_set in angle_list:
            t_thread = threading.Thread(target=darksky.generate_krn,
                                        args=(lat_in, k_in, angle_set[0],
                                              angle_set[1], file_in, hem))
            t_thread.daemon = True  # setDaemon() is deprecated
            t_thread.start()

    def generate_mmap(self):
        """Start brightness map creation from a folder of kernels."""
        # Acquires input arguments.
        krn_folder_in, file_in, output_in = '', '', ''
        krn_folder_in = self.krn_folder_var.get()
        file_in = self.file_log_var.get()
        output_in = self.output_folder_var.get()

        self.progress()

        # BUG FIX: see generate_map — start the Tk-side progress poller
        # directly instead of wrapping its (None) return value in a Thread.
        self.update_progress()
        t_thread = threading.Thread(target=darksky.multisgmapper,
                                    args=(file_in, krn_folder_in, output_in))
        t_thread.daemon = True  # setDaemon() is deprecated
        t_thread.start()

    def generate_hem(self):
        """Generate a hemispherical visualization from existing skyglow maps."""
        sgmap_folder_in, lat_in, lon_in = '', 0, 0
        sgmap_folder_in = self.sgmap_folder_var.get()
        lat_in = float(self.lat_entry.get())
        lon_in = float(self.lon_entry.get())
        darksky.generate_hem(lat_in, lon_in, sgmap_folder_in)
class LogRedirector(logging.Handler):
    """Logging handler that appends formatted records to a Tk text widget.

    The widget is briefly set editable, the formatted line is appended and
    scrolled into view, then the widget is locked again.
    """

    def __init__(self, widget):
        super().__init__()
        self.widget = widget

    def emit(self, record):
        """Format *record* and append it to the widget on its own line."""
        line = self.format(record)
        self.widget.config(state='normal')
        self.widget.insert("end", line + '\n')
        self.widget.see("end")
        self.widget.config(state='disabled')
class StderrRedirector(object):
    """File-like object that forwards writes to a Tk text widget.

    Instances are assigned to ``sys.stderr`` (see the progress window), so
    they must satisfy the minimal writable-stream protocol: ``write`` and
    ``flush``.
    """

    def __init__(self, widget):
        self.widget = widget

    def write(self, msg):
        """Append *msg* to the widget and scroll it into view."""
        self.widget.insert("end", msg)
        self.widget.see("end")

    def flush(self):
        """No-op; required so callers of ``sys.stderr.flush()`` don't crash.

        Anything that does ``print(..., file=sys.stderr, flush=True)`` or
        flushes streams at shutdown would otherwise raise AttributeError.
        """
def main():
    """Configure logging, build the SET GUI, and run the Tk event loop."""
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
    logger.info('Starting SET program')
    # Create the Tk root and attach the toolbox to it.
    app_root = Tk()
    toolbox = SkyglowEstimationToolbox(app_root)
    logger.info('Setting up main screen')
    toolbox.main_screen()
    # Blocks until the window is closed.
    app_root.mainloop()
    # Detach the GUI log handler before reporting shutdown.
    logger.removeHandler(toolbox.txt_redir)
    logger.info('Terminated SET Program')
# Launch the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"tkinter.filedialog.askdirectory",
"webbrowser.open_new",
"tkinter.Button",
"numpy.loadtxt",
"tkinter.Label",
"tkinter.Frame",
"tkinter.Entry",
"PIL.ImageOps.autocontrast",
"tkinter.StringVar",
"tkinter.Menubutton",
"tkinter.filedialog.askopenfilename",
"PIL.ImageTk.Phot... | [((961, 984), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (975, 984), False, 'import matplotlib\n'), ((1171, 1190), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1188, 1190), False, 'import logging\n'), ((27731, 27808), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, format='%(levelname)s: %(message)s')\n", (27750, 27808), False, 'import logging\n'), ((27908, 27912), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (27910, 27912), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1572, 1583), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1581, 1583), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1612, 1623), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1621, 1623), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1654, 1665), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1663, 1665), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1699, 1710), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1708, 1710), False, 'from tkinter import Tk, Toplevel, PanedWindow, 
Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1743, 1754), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1752, 1754), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1783, 1794), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1792, 1794), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((2693, 2706), 'tkinter.PanedWindow', 'PanedWindow', ([], {}), '()\n', (2704, 2706), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((3038, 3049), 'tkinter.Frame', 'Frame', (['sub2'], {}), '(sub2)\n', (3043, 3049), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((3167, 3213), 'tkinter.Frame', 'Frame', (['sub1'], {'bd': '(2)', 'bg': '"""white"""', 'relief': '"""sunken"""'}), "(sub1, bd=2, bg='white', relief='sunken')\n", (3172, 3213), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((3636, 
3721), 'tkinter.Menubutton', 'Menubutton', (['self.input_frame'], {'text': '"""Help"""', 'relief': '"""raised"""', 'bd': '(2)', 'width': '(8)', 'pady': '(1)'}), "(self.input_frame, text='Help', relief='raised', bd=2, width=8,\n pady=1)\n", (3646, 3721), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((3894, 3924), 'tkinter.Menu', 'Menu', (['self.help_btn'], {'tearoff': '(0)'}), '(self.help_btn, tearoff=0)\n', (3898, 3924), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((4434, 4442), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (4440, 4442), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((4630, 4667), 'tkinter.font.Font', 'Font', ([], {'family': '"""TkDefaultFont"""', 'size': '(12)'}), "(family='TkDefaultFont', size=12)\n", (4634, 4667), False, 'from tkinter.font import Font\n'), ((4700, 4872), 'tkinter.Radiobutton', 'Radiobutton', (['self.input_frame'], {'text': '"""Generate Artificial Skyglow Map"""', 'font': 'radio_font', 'width': 'btn_width', 'variable': 'self.action', 'value': '"""sng"""', 'command': 'self.sng_popup'}), "(self.input_frame, text='Generate Artificial Skyglow Map', font=\n radio_font, width=btn_width, variable=self.action, value='sng', command\n =self.sng_popup)\n", (4711, 4872), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, 
VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((4978, 5142), 'tkinter.Radiobutton', 'Radiobutton', (['self.input_frame'], {'text': '"""Generate Kernel Library"""', 'font': 'radio_font', 'width': 'btn_width', 'variable': 'self.action', 'value': '"""krn"""', 'command': 'self.krn_popup'}), "(self.input_frame, text='Generate Kernel Library', font=\n radio_font, width=btn_width, variable=self.action, value='krn', command\n =self.krn_popup)\n", (4989, 5142), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((5240, 5414), 'tkinter.Radiobutton', 'Radiobutton', (['self.input_frame'], {'text': '"""Generate Maps from Multiple Kernels"""', 'font': 'radio_font', 'width': 'btn_width', 'variable': 'self.action', 'value': '"""mul"""', 'command': 'self.mul_popup'}), "(self.input_frame, text='Generate Maps from Multiple Kernels',\n font=radio_font, width=btn_width, variable=self.action, value='mul',\n command=self.mul_popup)\n", (5251, 5414), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((5516, 5691), 'tkinter.Radiobutton', 'Radiobutton', (['self.input_frame'], {'text': '"""Generate Hemispherical Visualization"""', 'font': 'radio_font', 'width': 'btn_width', 'variable': 'self.action', 'value': '"""hem"""', 'command': 'self.hem_popup'}), "(self.input_frame, text='Generate Hemispherical Visualization',\n font=radio_font, width=btn_width, variable=self.action, value='hem',\n command=self.hem_popup)\n", (5527, 5691), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, 
Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6100, 6170), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Image File:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Image File:', width=lbl_width, anchor=E)\n", (6105, 6170), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6195, 6295), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.file_log_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.file_log_var)\n", (6200, 6295), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6343, 6409), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_viirs'}), "(self.input_frame, text='Browse', command=self.import_viirs)\n", (6349, 6409), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6465, 6540), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Angles CSV File:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Angles CSV File:', width=lbl_width, anchor=E)\n", (6470, 6540), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, 
Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6569, 6669), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.csv_file_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.csv_file_var)\n", (6574, 6669), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6721, 6785), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_csv'}), "(self.input_frame, text='Browse', command=self.import_csv)\n", (6727, 6785), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6859, 6932), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Kernel Folder:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Kernel Folder:', width=lbl_width, anchor=E)\n", (6864, 6932), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((6961, 7063), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.krn_folder_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.krn_folder_var)\n", (6966, 7063), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, 
Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7115, 7186), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_krn_folder'}), "(self.input_frame, text='Browse', command=self.import_krn_folder)\n", (7121, 7186), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7253, 7328), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Output Location:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Output Location:', width=lbl_width, anchor=E)\n", (7258, 7328), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7355, 7460), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.output_folder_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.output_folder_var)\n", (7360, 7460), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7500, 7571), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_out_folder'}), "(self.input_frame, text='Browse', command=self.import_out_folder)\n", (7506, 7571), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, 
Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7642, 7727), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Skyglow Map Location:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Skyglow Map Location:', width=lbl_width, anchor=E\n )\n", (7647, 7727), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7755, 7859), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.sgmap_folder_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.sgmap_folder_var)\n", (7760, 7859), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((7905, 7978), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_sgmap_folder'}), "(self.input_frame, text='Browse', command=self.import_sgmap_folder)\n", (7911, 7978), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8041, 8114), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Import Kernel:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Import Kernel:', width=lbl_width, anchor=E)\n", (8046, 8114), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, 
Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8139, 8233), 'tkinter.Checkbutton', 'Checkbutton', (['self.input_frame'], {'anchor': 'W', 'variable': 'self.krn_var', 'command': 'self.checkbtn_val'}), '(self.input_frame, anchor=W, variable=self.krn_var, command=self\n .checkbtn_val)\n', (8150, 8233), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8287, 8383), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Generate kernels for hemisphere:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Generate kernels for hemisphere:', width=\n lbl_width, anchor=E)\n", (8292, 8383), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8403, 8465), 'tkinter.Checkbutton', 'Checkbutton', (['self.input_frame'], {'anchor': 'W', 'variable': 'self.hem_var'}), '(self.input_frame, anchor=W, variable=self.hem_var)\n', (8414, 8465), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8569, 8643), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Latitude (deg):"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Latitude (deg):', width=lbl_width, anchor=E)\n", (8574, 8643), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, 
HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8669, 8732), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'btn_width', 'bd': '(2)', 'relief': '"""sunken"""'}), "(self.input_frame, width=btn_width, bd=2, relief='sunken')\n", (8674, 8732), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8756, 8831), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Longitude (deg):"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Longitude (deg):', width=lbl_width, anchor=E)\n", (8761, 8831), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((8857, 8920), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'btn_width', 'bd': '(2)', 'relief': '"""sunken"""'}), "(self.input_frame, width=btn_width, bd=2, relief='sunken')\n", (8862, 8920), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9006, 9100), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Atmospheric Clarity Parameter:"""', 'width': 'btn_width', 'anchor': 'E'}), "(self.input_frame, text='Atmospheric Clarity Parameter:', width=\n btn_width, anchor=E)\n", (9011, 9100), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, 
Radiobutton, IntVar\n'), ((9134, 9197), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'btn_width', 'bd': '(2)', 'relief': '"""sunken"""'}), "(self.input_frame, width=btn_width, bd=2, relief='sunken')\n", (9139, 9197), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9276, 9354), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Zenith Angle (deg):"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Zenith Angle (deg):', width=lbl_width, anchor=E)\n", (9281, 9354), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9380, 9443), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'btn_width', 'bd': '(2)', 'relief': '"""sunken"""'}), "(self.input_frame, width=btn_width, bd=2, relief='sunken')\n", (9385, 9443), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9498, 9577), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Azimuth Angle (deg):"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Azimuth Angle (deg):', width=lbl_width, anchor=E)\n", (9503, 9577), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9603, 9666), 'tkinter.Entry', 'Entry', (['self.input_frame'], 
{'width': 'btn_width', 'bd': '(2)', 'relief': '"""sunken"""'}), "(self.input_frame, width=btn_width, bd=2, relief='sunken')\n", (9608, 9666), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9691, 9762), 'tkinter.Label', 'Label', (['self.input_frame'], {'text': '"""Kernel File:"""', 'width': 'lbl_width', 'anchor': 'E'}), "(self.input_frame, text='Kernel File:', width=lbl_width, anchor=E)\n", (9696, 9762), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9786, 9885), 'tkinter.Entry', 'Entry', (['self.input_frame'], {'width': 'file_width', 'bd': '(2)', 'relief': '"""sunken"""', 'textvariable': 'self.krn_ent_var'}), "(self.input_frame, width=file_width, bd=2, relief='sunken',\n textvariable=self.krn_ent_var)\n", (9791, 9885), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((9934, 9998), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Browse"""', 'command': 'self.import_krn'}), "(self.input_frame, text='Browse', command=self.import_krn)\n", (9940, 9998), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((10072, 10185), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Generate Artificial Skyglow Map"""', 'width': 
'gen_width', 'command': 'self.generate_map'}), "(self.input_frame, text='Generate Artificial Skyglow Map', width=\n gen_width, command=self.generate_map)\n", (10078, 10185), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((10290, 10394), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Generate Kernel Library"""', 'width': 'gen_width', 'command': 'self.generate_krn'}), "(self.input_frame, text='Generate Kernel Library', width=gen_width,\n command=self.generate_krn)\n", (10296, 10394), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((10519, 10637), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Generate Maps from Multiple Kernels"""', 'width': 'gen_width', 'command': 'self.generate_mmap'}), "(self.input_frame, text='Generate Maps from Multiple Kernels', width=\n gen_width, command=self.generate_mmap)\n", (10525, 10637), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((10762, 10862), 'tkinter.Button', 'Button', (['self.input_frame'], {'text': '"""Generate Hemisphere"""', 'width': 'gen_width', 'command': 'self.generate_hem'}), "(self.input_frame, text='Generate Hemisphere', width=gen_width,\n command=self.generate_hem)\n", (10768, 10862), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, 
Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((11126, 11216), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': 'file_types'}), "(initialdir='/', title='Select file', filetypes=\n file_types)\n", (11152, 11216), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((12240, 12330), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': 'file_types'}), "(initialdir='/', title='Select file', filetypes=\n file_types)\n", (12266, 12330), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((12520, 12589), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': '"""/"""', 'title': '"""Select kernel folder"""'}), "(initialdir='/', title='Select kernel folder')\n", (12543, 12589), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((12798, 12867), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': '"""/"""', 'title': '"""Select output folder"""'}), "(initialdir='/', title='Select output folder')\n", (12821, 12867), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, 
IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((13142, 13232), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': 'file_types'}), "(initialdir='/', title='Select file', filetypes=\n file_types)\n", (13168, 13232), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((13389, 13463), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': '"""/"""', 'title': '"""Select skyglow map folder"""'}), "(initialdir='/', title='Select skyglow map folder')\n", (13412, 13463), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((19796, 19820), 'webbrowser.open_new', 'webbrowser.open_new', (['url'], {}), '(url)\n', (19815, 19820), False, 'import webbrowser\n'), ((19974, 19993), 'tkinter.Toplevel', 'Toplevel', (['self.root'], {}), '(self.root)\n', (19982, 19993), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((20271, 20294), 'tkinter.Scrollbar', 'Scrollbar', (['instr_window'], {}), '(instr_window)\n', (20280, 20294), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((20365, 20396), 'tkinter.Frame', 'Frame', (['instr_window'], {'bg': '"""white"""'}), 
"(instr_window, bg='white')\n", (20370, 20396), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((20547, 20621), 'tkinter.Text', 'Text', (['instr_frame'], {'width': '(65)', 'height': '(40)', 'padx': '(10)', 'pady': '(5)', 'bd': '(0)', 'wrap': '"""word"""'}), "(instr_frame, width=65, height=40, padx=10, pady=5, bd=0, wrap='word')\n", (20551, 20621), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((20691, 20733), 'PIL.Image.open', 'Image.open', (['"""./static/cinzano_diagram.PNG"""'], {}), "('./static/cinzano_diagram.PNG')\n", (20701, 20733), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((20829, 20862), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['cdiagram_file'], {}), '(cdiagram_file)\n', (20847, 20862), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((21434, 21453), 'tkinter.Toplevel', 'Toplevel', (['self.root'], {}), '(self.root)\n', (21442, 21453), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((21686, 21742), 'tkinter.Text', 'Text', (['about_window'], {'width': '(50)', 'height': '(30)', 'padx': '(10)', 'pady': '(3)'}), '(about_window, width=50, height=30, padx=10, pady=3)\n', (21690, 21742), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, 
Radiobutton, IntVar\n'), ((22126, 22145), 'tkinter.Toplevel', 'Toplevel', (['self.root'], {}), '(self.root)\n', (22134, 22145), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((22386, 22408), 'tkinter.Frame', 'Frame', (['self.prg_window'], {}), '(self.prg_window)\n', (22391, 22408), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((22541, 22561), 'tkinter.Scrollbar', 'Scrollbar', (['prg_frame'], {}), '(prg_frame)\n', (22550, 22561), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((22631, 22710), 'tkinter.ttk.Progressbar', 'ttk.Progressbar', (['prg_frame'], {'orient': 'HORIZONTAL', 'length': '(750)', 'mode': '"""indeterminate"""'}), "(prg_frame, orient=HORIZONTAL, length=750, mode='indeterminate')\n", (22646, 22710), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((22824, 22835), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (22833, 22835), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((22854, 22896), 'tkinter.Label', 'Label', 
(['prg_frame'], {'textvariable': 'prg_lbl_txt'}), '(prg_frame, textvariable=prg_lbl_txt)\n', (22859, 22896), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((23023, 23081), 'tkinter.Text', 'Text', (['prg_frame'], {'width': '(90)', 'padx': '(5)', 'pady': '(5)', 'relief': '"""sunken"""'}), "(prg_frame, width=90, padx=5, pady=5, relief='sunken')\n", (23027, 23081), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((24775, 24879), 'threading.Thread', 'threading.Thread', ([], {'target': 'darksky.sgmapper', 'args': '(lat_in, k_in, zen_in, azi_in, file_in, krn_file_in)'}), '(target=darksky.sgmapper, args=(lat_in, k_in, zen_in,\n azi_in, file_in, krn_file_in))\n', (24791, 24879), False, 'import threading\n'), ((26512, 26604), 'threading.Thread', 'threading.Thread', ([], {'target': 'darksky.multisgmapper', 'args': '(file_in, krn_folder_in, output_in)'}), '(target=darksky.multisgmapper, args=(file_in, krn_folder_in,\n output_in))\n', (26528, 26604), False, 'import threading\n'), ((26988, 27041), 'skyglow.darksky.generate_hem', 'darksky.generate_hem', (['lat_in', 'lon_in', 'sgmap_folder_in'], {}), '(lat_in, lon_in, sgmap_folder_in)\n', (27008, 27041), True, 'import skyglow.darksky as darksky\n'), ((27200, 27230), 'logging.Handler.__init__', 'logging.Handler.__init__', (['self'], {}), '(self)\n', (27224, 27230), False, 'import logging\n'), ((1832, 1840), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1838, 1840), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, 
HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((1842, 1850), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1848, 1850), False, 'from tkinter import Tk, Toplevel, PanedWindow, Frame, Label, Entry, Button, Canvas, Scrollbar, Text, Menubutton, Menu, Checkbutton, BOTH, VERTICAL, HORIZONTAL, CENTER, NE, E, W, Y, N, S, StringVar, IntVar, ttk, filedialog, Radiobutton, IntVar\n'), ((11379, 11400), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (11389, 11400), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((11840, 11898), 'PIL.ImageOps.autocontrast', 'ImageOps.autocontrast', (['pilimg_col'], {'cutoff': '(0.4)', 'ignore': 'None'}), '(pilimg_col, cutoff=0.4, ignore=None)\n', (11861, 11898), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((11921, 11952), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['pilimg_cont'], {}), '(pilimg_cont)\n', (11939, 11952), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((25619, 25656), 'numpy.loadtxt', 'loadtxt', (['f'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(f, delimiter=',', skiprows=1)\n", (25626, 25656), False, 'from numpy import loadtxt\n'), ((25742, 25855), 'threading.Thread', 'threading.Thread', ([], {'target': 'darksky.generate_krn', 'args': '(lat_in, k_in, angle_set[0], angle_set[1], file_in, hem)'}), '(target=darksky.generate_krn, args=(lat_in, k_in, angle_set\n [0], angle_set[1], file_in, hem))\n', (25758, 25855), False, 'import threading\n'), ((2510, 2521), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2519, 2521), False, 'import sys, os\n'), ((11760, 11788), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['pilimg_r'], {}), '(pilimg_r)\n', (11778, 11788), False, 'from PIL import Image, ImageTk, ImageOps\n'), ((23781, 23802), 'threading.enumerate', 'threading.enumerate', ([], {}), '()\n', (23800, 23802), False, 'import threading\n'), ((23519, 23533), 'time.asctime', 'time.asctime', ([], {}), '()\n', (23531, 23533), False, 
'import time\n')] |
import torch
import numpy as np
import os
import json
default_device = torch.device(f"cuda:0" if torch.cuda.is_available() else "cpu")
def test(model, iter, repeats=5, steps=50, device=default_device):
res_path = os.path.join(model.model_dir, 'test_results.json')
model.eval()
accs = []
for _ in range(repeats):
eq = 0
total = 0
for data in iter:
data = data.to(device)
y = data.y
data = model(data, steps)
correct = data.y_pred.argmax(dim=1).eq(y.view(-1)).sum()
eq += correct.cpu().detach().numpy()
total += float(y.shape[0])
accs.append(eq / float(total))
acc = np.mean(accs)
std = np.std(accs)
print(f'Test Score: {acc:.4f} (+-{std:.4f})')
with open(res_path, 'w') as f:
json.dump({'score': float(acc), 'std': float(std)}, f, indent=4)
return acc, std
| [
"numpy.mean",
"torch.cuda.is_available",
"os.path.join",
"numpy.std"
] | [((220, 270), 'os.path.join', 'os.path.join', (['model.model_dir', '"""test_results.json"""'], {}), "(model.model_dir, 'test_results.json')\n", (232, 270), False, 'import os\n'), ((697, 710), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (704, 710), True, 'import numpy as np\n'), ((721, 733), 'numpy.std', 'np.std', (['accs'], {}), '(accs)\n', (727, 733), True, 'import numpy as np\n'), ((98, 123), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (121, 123), False, 'import torch\n')] |
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import numpy as np
from py3d import *
if __name__ == "__main__":
print("Testing kdtree in py3d ...")
print("Load a point cloud and paint it gray.")
pcd = read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd")
pcd.paint_uniform_color([0.5, 0.5, 0.5])
pcd_tree = KDTreeFlann(pcd)
print("Paint the 1500th point red.")
pcd.colors[1500] = [1, 0, 0]
print("Find its 200 nearest neighbors, paint blue.")
[k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200)
np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1]
print("Find its neighbors with distance less than 0.2, paint green.")
[k, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[1500], 0.2)
np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0]
print("Visualize the point cloud.")
draw_geometries([pcd])
print("")
| [
"numpy.asarray"
] | [((617, 639), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (627, 639), True, 'import numpy as np\n'), ((817, 839), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (827, 839), True, 'import numpy as np\n')] |
import numpy.testing as npt
from afib.risk_scores import npoaf, NPoafC
def test_npoaf():
tmp = npoaf(64, "None", False)
npt.assert_equal(tmp, 0)
tmp = npoaf(65, "None", False)
npt.assert_equal(tmp, 2)
tmp = npoaf(75, "None", False)
npt.assert_equal(tmp, 3)
tmp = npoaf(75, "Mild", False)
npt.assert_equal(tmp, 4)
tmp = npoaf(75, "Moderate", False)
npt.assert_equal(tmp, 6)
tmp = npoaf(75, "Severe", False)
npt.assert_equal(tmp, 6)
tmp = npoaf(75, "Severe", True)
npt.assert_equal(tmp, 7)
def test_NPoafC():
model = NPoafC()
tmp = model.score({"age": 64,
"mvd": "None",
"lad": False})
npt.assert_almost_equal(tmp, 0, decimal=3)
tmp = model.score({"age": 65,
"mvd": "None",
"lad": False})
npt.assert_almost_equal(tmp, 2, decimal=3)
tmp = model.score({"age": 75,
"mvd": "None",
"lad": False})
npt.assert_almost_equal(tmp, 3, decimal=3)
tmp = model.score({"age": 75,
"mvd": "Mild",
"lad": False})
npt.assert_almost_equal(tmp, 4, decimal=3)
tmp = model.score({"age": 75,
"mvd": "Moderate",
"lad": False})
npt.assert_almost_equal(tmp, 6, decimal=3)
tmp = model.score({"age": 75,
"mvd": "Severe",
"lad": False})
npt.assert_almost_equal(tmp, 6, decimal=3)
tmp = model.score({"age": 75,
"mvd": "Severe",
"lad": True})
npt.assert_almost_equal(tmp, 7, decimal=3)
| [
"numpy.testing.assert_almost_equal",
"afib.risk_scores.npoaf",
"afib.risk_scores.NPoafC",
"numpy.testing.assert_equal"
] | [((101, 125), 'afib.risk_scores.npoaf', 'npoaf', (['(64)', '"""None"""', '(False)'], {}), "(64, 'None', False)\n", (106, 125), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((130, 154), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(0)'], {}), '(tmp, 0)\n', (146, 154), True, 'import numpy.testing as npt\n'), ((165, 189), 'afib.risk_scores.npoaf', 'npoaf', (['(65)', '"""None"""', '(False)'], {}), "(65, 'None', False)\n", (170, 189), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((194, 218), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(2)'], {}), '(tmp, 2)\n', (210, 218), True, 'import numpy.testing as npt\n'), ((229, 253), 'afib.risk_scores.npoaf', 'npoaf', (['(75)', '"""None"""', '(False)'], {}), "(75, 'None', False)\n", (234, 253), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((258, 282), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(3)'], {}), '(tmp, 3)\n', (274, 282), True, 'import numpy.testing as npt\n'), ((293, 317), 'afib.risk_scores.npoaf', 'npoaf', (['(75)', '"""Mild"""', '(False)'], {}), "(75, 'Mild', False)\n", (298, 317), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((322, 346), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(4)'], {}), '(tmp, 4)\n', (338, 346), True, 'import numpy.testing as npt\n'), ((357, 385), 'afib.risk_scores.npoaf', 'npoaf', (['(75)', '"""Moderate"""', '(False)'], {}), "(75, 'Moderate', False)\n", (362, 385), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((390, 414), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(6)'], {}), '(tmp, 6)\n', (406, 414), True, 'import numpy.testing as npt\n'), ((425, 451), 'afib.risk_scores.npoaf', 'npoaf', (['(75)', '"""Severe"""', '(False)'], {}), "(75, 'Severe', False)\n", (430, 451), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((456, 480), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(6)'], {}), '(tmp, 6)\n', (472, 480), True, 'import 
numpy.testing as npt\n'), ((491, 516), 'afib.risk_scores.npoaf', 'npoaf', (['(75)', '"""Severe"""', '(True)'], {}), "(75, 'Severe', True)\n", (496, 516), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((521, 545), 'numpy.testing.assert_equal', 'npt.assert_equal', (['tmp', '(7)'], {}), '(tmp, 7)\n', (537, 545), True, 'import numpy.testing as npt\n'), ((578, 586), 'afib.risk_scores.NPoafC', 'NPoafC', ([], {}), '()\n', (584, 586), False, 'from afib.risk_scores import npoaf, NPoafC\n'), ((703, 745), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0)'], {'decimal': '(3)'}), '(tmp, 0, decimal=3)\n', (726, 745), True, 'import numpy.testing as npt\n'), ((862, 904), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(2)'], {'decimal': '(3)'}), '(tmp, 2, decimal=3)\n', (885, 904), True, 'import numpy.testing as npt\n'), ((1021, 1063), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(3)'], {'decimal': '(3)'}), '(tmp, 3, decimal=3)\n', (1044, 1063), True, 'import numpy.testing as npt\n'), ((1180, 1222), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(4)'], {'decimal': '(3)'}), '(tmp, 4, decimal=3)\n', (1203, 1222), True, 'import numpy.testing as npt\n'), ((1343, 1385), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(6)'], {'decimal': '(3)'}), '(tmp, 6, decimal=3)\n', (1366, 1385), True, 'import numpy.testing as npt\n'), ((1504, 1546), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(6)'], {'decimal': '(3)'}), '(tmp, 6, decimal=3)\n', (1527, 1546), True, 'import numpy.testing as npt\n'), ((1664, 1706), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(7)'], {'decimal': '(3)'}), '(tmp, 7, decimal=3)\n', (1687, 1706), True, 'import numpy.testing as npt\n')] |
import numpy as np

# Static-typing fixture: these extended-precision aliases (uint128, float96,
# complex512, ...) exist only on platforms whose C toolchain provides the
# corresponding width, so this file is fed to a type checker and the
# reveal_type() output is compared — it is not meant to be executed.
reveal_type(np.uint128())
reveal_type(np.uint256())
reveal_type(np.int128())
reveal_type(np.int256())
reveal_type(np.float80())
reveal_type(np.float96())
reveal_type(np.float128())
reveal_type(np.float256())
reveal_type(np.complex160())
reveal_type(np.complex192())
reveal_type(np.complex256())
reveal_type(np.complex512())
| [
"numpy.uint128",
"numpy.int128",
"numpy.complex160",
"numpy.complex256",
"numpy.int256",
"numpy.float96",
"numpy.uint256",
"numpy.float128",
"numpy.complex512",
"numpy.float256",
"numpy.complex192",
"numpy.float80"
] | [((34, 46), 'numpy.uint128', 'np.uint128', ([], {}), '()\n', (44, 46), True, 'import numpy as np\n'), ((61, 73), 'numpy.uint256', 'np.uint256', ([], {}), '()\n', (71, 73), True, 'import numpy as np\n'), ((90, 101), 'numpy.int128', 'np.int128', ([], {}), '()\n', (99, 101), True, 'import numpy as np\n'), ((116, 127), 'numpy.int256', 'np.int256', ([], {}), '()\n', (125, 127), True, 'import numpy as np\n'), ((144, 156), 'numpy.float80', 'np.float80', ([], {}), '()\n', (154, 156), True, 'import numpy as np\n'), ((171, 183), 'numpy.float96', 'np.float96', ([], {}), '()\n', (181, 183), True, 'import numpy as np\n'), ((198, 211), 'numpy.float128', 'np.float128', ([], {}), '()\n', (209, 211), True, 'import numpy as np\n'), ((226, 239), 'numpy.float256', 'np.float256', ([], {}), '()\n', (237, 239), True, 'import numpy as np\n'), ((256, 271), 'numpy.complex160', 'np.complex160', ([], {}), '()\n', (269, 271), True, 'import numpy as np\n'), ((286, 301), 'numpy.complex192', 'np.complex192', ([], {}), '()\n', (299, 301), True, 'import numpy as np\n'), ((316, 331), 'numpy.complex256', 'np.complex256', ([], {}), '()\n', (329, 331), True, 'import numpy as np\n'), ((346, 361), 'numpy.complex512', 'np.complex512', ([], {}), '()\n', (359, 361), True, 'import numpy as np\n')] |
# Authors:
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# Source: https://git.ee.ethz.ch/baumgach/discriminative_learning_toolbox/blob/master/utils.py
import nibabel as nib
import numpy as np
import os
import logging
from skimage import measure, transform
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

try:
    import cv2
except ImportError:
    # opencv is optional; only the augmentation helpers below depend on it.
    # (Catching ImportError instead of a bare `except:` so that unrelated
    # errors are not silently swallowed.)
    logging.warning('Could not import opencv. Augmentation functions will be unavailable.')
else:
    def rotate_image(img, angle, interp=cv2.INTER_LINEAR):
        """Rotate `img` by `angle` degrees around its centre (border replicated)."""
        rows, cols = img.shape[:2]
        rotation_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
        out = cv2.warpAffine(img, rotation_matrix, (cols, rows), flags=interp, borderMode=cv2.BORDER_REPLICATE)
        # warpAffine may drop a trailing singleton channel axis; restore the
        # original shape.
        return np.reshape(out, img.shape)

    def rotate_image_as_onehot(img, angle, nlabels, interp=cv2.INTER_LINEAR):
        """Rotate an integer label map by interpolating its one-hot encoding."""
        onehot_output = rotate_image(convert_to_onehot(img, nlabels=nlabels), angle, interp)
        return np.argmax(onehot_output, axis=-1)

    def resize_image(im, size, interp=cv2.INTER_LINEAR):
        """Resize `im` to `size` given as (rows, cols)."""
        im_resized = cv2.resize(im, (size[1], size[0]), interpolation=interp)  # swap sizes to account for weird OCV API
        # add last dimension again if it was removed by resize
        if im.ndim > im_resized.ndim:
            im_resized = np.expand_dims(im_resized, im.ndim - 1)
        return im_resized

    def resize_image_as_onehot(im, size, nlabels, interp=cv2.INTER_LINEAR):
        """Resize an integer label map by interpolating its one-hot encoding."""
        onehot_output = resize_image(convert_to_onehot(im, nlabels), size, interp=interp)
        return np.argmax(onehot_output, axis=-1)

    def deformation_to_transformation(dx, dy):
        """Turn a dense displacement field (dx, dy) into cv2.remap sampling maps."""
        nx, ny = dx.shape
        grid_y, grid_x = np.meshgrid(np.arange(nx), np.arange(ny), indexing="ij")
        map_x = (grid_x + dx).astype(np.float32)
        map_y = (grid_y + dy).astype(np.float32)
        return map_x, map_y

    def dense_image_warp(im, dx, dy, interp=cv2.INTER_LINEAR):
        """Warp `im` with the dense displacement field (dx, dy)."""
        map_x, map_y = deformation_to_transformation(dx, dy)
        do_optimization = (interp == cv2.INTER_LINEAR)
        # The following command converts the maps to compact fixed point
        # representation; this gives a ~20% speed-up but could lose accuracy.
        if do_optimization:
            map_x, map_y = cv2.convertMaps(map_x, map_y, dstmap1type=cv2.CV_16SC2)
        remapped = cv2.remap(im, map_x, map_y, interpolation=interp, borderMode=cv2.BORDER_REFLECT)  # borderValue=float(np.min(im)))
        if im.ndim > remapped.ndim:
            remapped = np.expand_dims(remapped, im.ndim)
        return remapped

    def dense_image_warp_as_onehot(im, dx, dy, nlabels, interp=cv2.INTER_LINEAR, do_optimisation=True):
        """Warp an integer label map by warping its one-hot encoding.

        `do_optimisation` is kept for backward compatibility but is unused:
        dense_image_warp() decides internally based on `interp`.
        """
        # BUG FIX: the original forwarded do_optimisation=... to
        # dense_image_warp(), whose signature has no such parameter, so every
        # call raised TypeError.
        onehot_output = dense_image_warp(convert_to_onehot(im, nlabels), dx, dy, interp)
        return np.argmax(onehot_output, axis=-1)

    def convert_to_onehot(lblmap, nlabels):
        """One-hot encode an integer label map into shape (H, W, nlabels)."""
        output = np.zeros((lblmap.shape[0], lblmap.shape[1], nlabels))
        for ii in range(nlabels):
            output[:, :, ii] = (lblmap == ii).astype(np.uint8)
        return output
def ncc(a, v, zero_norm=True):
    """Normalised cross-correlation between two arrays (flattened).

    With zero_norm=True both inputs are mean-centred before scaling, so two
    identical arrays correlate to 1.0.
    """
    a_flat = np.ravel(a)
    v_flat = np.ravel(v)
    n = len(a_flat)
    if zero_norm:
        a_flat = (a_flat - np.mean(a_flat)) / (np.std(a_flat) * n)
        v_flat = (v_flat - np.mean(v_flat)) / np.std(v_flat)
    else:
        a_flat = a_flat / (np.std(a_flat) * n)
        v_flat = v_flat / np.std(v_flat)
    return np.correlate(a_flat, v_flat)
def norm_l2(a, v):
    """Mean pointwise L2 magnitude of the zero-normalised, flattened inputs."""
    a_n = np.ravel(a)
    v_n = np.ravel(v)
    a_n = (a_n - np.mean(a_n)) / (np.std(a_n) * len(a_n))
    v_n = (v_n - np.mean(v_n)) / np.std(v_n)
    return np.mean(np.sqrt(a_n ** 2 + v_n ** 2))
def all_argmax(arr, axis=None):
    """Return the indices of every occurrence of the maximum value in `arr`."""
    peak = np.amax(arr, axis=axis)
    return np.argwhere(arr == peak)
def makefolder(folder):
    """
    Create `folder` (and parents) if it does not already exist.
    :param folder: path of the directory to create
    :return: True if the folder was created, False if it already existed
    """
    if os.path.exists(folder):
        return False
    os.makedirs(folder)
    return True
def load_nii(img_path):
    '''
    Shortcut to load a nifti file
    :param img_path: path to a .nii / .nii.gz file
    :return: tuple (data array, affine matrix, nifti header)
    '''
    # NOTE(review): nibabel's get_data() is deprecated in favour of
    # get_fdata(); consider migrating (get_fdata always returns float data,
    # so check callers before switching).
    nimg = nib.load(img_path)
    return nimg.get_data(), nimg.affine, nimg.header
def save_nii(img_path, data, affine, header):
    '''
    Shortcut to save a nifti file
    :param img_path: destination path
    :param data: image array
    :param affine: 4x4 affine matrix
    :param header: nifti header to carry over
    '''
    image = nib.Nifti1Image(data, affine=affine, header=header)
    image.to_filename(img_path)
def create_and_save_nii(data, img_path):
    """Wrap `data` in a Nifti1Image with an identity affine and write it out."""
    identity_affine = np.eye(4)
    nib.save(nib.Nifti1Image(data, identity_affine), img_path)
class Bunch:
    """Lightweight struct-like container.

    Example:
        mystruct = Bunch(a=1, b=2)
        print(mystruct.a)
        >>> 1
    """
    def __init__(self, **kwds):
        for key, value in kwds.items():
            setattr(self, key, value)
def convert_to_uint8(image):
    """Rescale `image` to the full [0, 255] range and cast it to uint8.

    :param image: numeric ndarray
    :return: uint8 ndarray of the same shape
    """
    image = image - image.min()
    peak = image.max()
    # BUG FIX: a constant image made the original divide by zero (NaN/warning);
    # after the shift such an image is all zeros, so return it directly.
    if peak == 0:
        return image.astype(np.uint8)
    image = 255.0 * np.divide(image.astype(np.float32), peak)
    return image.astype(np.uint8)
def normalise_image(image):
    '''
    make image zero mean and unit standard deviation
    '''
    img = np.float32(image.copy())
    return np.divide(img - np.mean(img), np.std(img))
def map_image_to_intensity_range(image, min_o, max_o, percentiles=0):
    """Linearly map image intensities into [min_o, max_o].

    If percentiles == 0 the image min and max are used; percentiles > 0 makes
    the normalisation more robust to outliers (values outside the mapped
    range are clipped).
    """
    if image.dtype in [np.uint8, np.uint16, np.uint32]:
        assert min_o >= 0, 'Input image type is uintXX but you selected a negative min_o: %f' % min_o
    if image.dtype == np.uint8:
        assert max_o <= 255, 'Input image type is uint8 but you selected a max_o > 255: %f' % max_o
    min_i = np.percentile(image, 0 + percentiles)
    max_i = np.percentile(image, 100 - percentiles)
    # BUG FIX: a constant image gave max_i == min_i and the original divided
    # by zero (NaN output); map such an image to min_o everywhere.
    if max_i == min_i:
        return np.full(image.shape, float(min_o))
    image = (np.divide((image - min_i), max_i - min_i) * (max_o - min_o) + min_o).copy()
    image[image > max_o] = max_o
    image[image < min_o] = min_o
    return image
def map_images_to_intensity_range(X, min_o, max_o, percentiles=0):
    """Apply map_image_to_intensity_range() to every image in the batch X.

    The first axis of X indexes images; the result is float32.
    """
    X_mapped = np.zeros(X.shape, dtype=np.float32)
    for idx in range(X.shape[0]):
        X_mapped[idx, ...] = map_image_to_intensity_range(X[idx, ...], min_o,
                                                         max_o, percentiles)
    return X_mapped.astype(np.float32)
def normalise_images(X):
    '''
    Helper for making the images zero mean and unit standard deviation
    i.e. `white`. The first axis of X indexes images.
    '''
    X_white = np.zeros(X.shape, dtype=np.float32)
    for idx in range(X.shape[0]):
        X_white[idx, ...] = normalise_image(X[idx, ...])
    return X_white.astype(np.float32)
def keep_largest_connected_components(mask):
    '''
    Keeps only the largest connected components of each label for a segmentation mask.
    '''
    out_img = np.zeros(mask.shape, dtype=np.uint8)
    # NOTE(review): label ids 1-3 are hard-coded for a 3-structure
    # segmentation; background (0) is deliberately skipped — TODO confirm
    # against callers before reusing with other label sets.
    for struc_id in [1, 2, 3]:
        binary_img = mask == struc_id
        # connectivity=1: only face-adjacent pixels/voxels form one component
        blobs = measure.label(binary_img, connectivity=1)
        props = measure.regionprops(blobs)
        if not props:
            # this label does not occur in the mask at all
            continue
        area = [ele.area for ele in props]
        largest_blob_ind = np.argmax(area)
        largest_blob_label = props[largest_blob_ind].label
        out_img[blobs == largest_blob_label] = struc_id
    return out_img
| [
"numpy.sqrt",
"nibabel.load",
"cv2.remap",
"numpy.divide",
"numpy.arange",
"numpy.mean",
"os.path.exists",
"numpy.reshape",
"numpy.eye",
"cv2.warpAffine",
"nibabel.save",
"skimage.measure.regionprops",
"logging.warning",
"numpy.argmax",
"numpy.correlate",
"numpy.std",
"nibabel.Nifti1... | [((260, 333), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (279, 333), False, 'import logging\n'), ((3037, 3090), 'numpy.zeros', 'np.zeros', (['(lblmap.shape[0], lblmap.shape[1], nlabels)'], {}), '((lblmap.shape[0], lblmap.shape[1], nlabels))\n', (3045, 3090), True, 'import numpy as np\n'), ((3492, 3510), 'numpy.correlate', 'np.correlate', (['a', 'v'], {}), '(a, v)\n', (3504, 3510), True, 'import numpy as np\n'), ((4221, 4239), 'nibabel.load', 'nib.load', (['img_path'], {}), '(img_path)\n', (4229, 4239), True, 'import nibabel as nib\n'), ((4410, 4461), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data'], {'affine': 'affine', 'header': 'header'}), '(data, affine=affine, header=header)\n', (4425, 4461), True, 'import nibabel as nib\n'), ((4591, 4614), 'nibabel.save', 'nib.save', (['img', 'img_path'], {}), '(img, img_path)\n', (4599, 4614), True, 'import nibabel as nib\n'), ((5171, 5185), 'numpy.mean', 'np.mean', (['img_o'], {}), '(img_o)\n', (5178, 5185), True, 'import numpy as np\n'), ((5195, 5208), 'numpy.std', 'np.std', (['img_o'], {}), '(img_o)\n', (5201, 5208), True, 'import numpy as np\n'), ((5221, 5244), 'numpy.divide', 'np.divide', (['(img_o - m)', 's'], {}), '(img_o - m, s)\n', (5230, 5244), True, 'import numpy as np\n'), ((5739, 5776), 'numpy.percentile', 'np.percentile', (['image', '(0 + percentiles)'], {}), '(image, 0 + percentiles)\n', (5752, 5776), True, 'import numpy as np\n'), ((5790, 5829), 'numpy.percentile', 'np.percentile', (['image', '(100 - percentiles)'], {}), '(image, 100 - percentiles)\n', (5803, 5829), True, 'import numpy as np\n'), ((6102, 6137), 'numpy.zeros', 'np.zeros', (['X.shape'], {'dtype': 'np.float32'}), '(X.shape, dtype=np.float32)\n', (6110, 6137), True, 'import numpy as np\n'), ((6480, 6515), 'numpy.zeros', 'np.zeros', (['X.shape'], {'dtype': 'np.float32'}), '(X.shape, 
dtype=np.float32)\n', (6488, 6515), True, 'import numpy as np\n'), ((6839, 6875), 'numpy.zeros', 'np.zeros', (['mask.shape'], {'dtype': 'np.uint8'}), '(mask.shape, dtype=np.uint8)\n', (6847, 6875), True, 'import numpy as np\n'), ((372, 464), 'logging.warning', 'logging.warning', (['"""Could not import opencv. Augmentation functions will be unavailable."""'], {}), "(\n 'Could not import opencv. Augmentation functions will be unavailable.')\n", (387, 464), False, 'import logging\n'), ((592, 647), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', 'angle', '(1)'], {}), '((cols / 2, rows / 2), angle, 1)\n', (615, 647), False, 'import cv2\n'), ((663, 765), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rotation_matrix', '(cols, rows)'], {'flags': 'interp', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(img, rotation_matrix, (cols, rows), flags=interp, borderMode\n =cv2.BORDER_REPLICATE)\n', (677, 765), False, 'import cv2\n'), ((777, 803), 'numpy.reshape', 'np.reshape', (['out', 'img.shape'], {}), '(out, img.shape)\n', (787, 803), True, 'import numpy as np\n'), ((995, 1028), 'numpy.argmax', 'np.argmax', (['onehot_output'], {'axis': '(-1)'}), '(onehot_output, axis=-1)\n', (1004, 1028), True, 'import numpy as np\n'), ((1111, 1167), 'cv2.resize', 'cv2.resize', (['im', '(size[1], size[0])'], {'interpolation': 'interp'}), '(im, (size[1], size[0]), interpolation=interp)\n', (1121, 1167), False, 'import cv2\n'), ((1602, 1635), 'numpy.argmax', 'np.argmax', (['onehot_output'], {'axis': '(-1)'}), '(onehot_output, axis=-1)\n', (1611, 1635), True, 'import numpy as np\n'), ((2459, 2544), 'cv2.remap', 'cv2.remap', (['im', 'map_x', 'map_y'], {'interpolation': 'interp', 'borderMode': 'cv2.BORDER_REFLECT'}), '(im, map_x, map_y, interpolation=interp, borderMode=cv2.BORDER_REFLECT\n )\n', (2468, 2544), False, 'import cv2\n'), ((2942, 2975), 'numpy.argmax', 'np.argmax', (['onehot_output'], {'axis': '(-1)'}), '(onehot_output, axis=-1)\n', (2951, 2975), True, 'import 
numpy as np\n'), ((3656, 3665), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (3662, 3665), True, 'import numpy as np\n'), ((3688, 3712), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + v ** 2)'], {}), '(a ** 2 + v ** 2)\n', (3695, 3712), True, 'import numpy as np\n'), ((4033, 4055), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (4047, 4055), False, 'import os\n'), ((4066, 4085), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (4077, 4085), False, 'import os\n'), ((4575, 4584), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4581, 4584), True, 'import numpy as np\n'), ((6968, 7009), 'skimage.measure.label', 'measure.label', (['binary_img'], {'connectivity': '(1)'}), '(binary_img, connectivity=1)\n', (6981, 7009), False, 'from skimage import measure, transform\n'), ((7029, 7055), 'skimage.measure.regionprops', 'measure.regionprops', (['blobs'], {}), '(blobs)\n', (7048, 7055), False, 'from skimage import measure, transform\n'), ((7177, 7192), 'numpy.argmax', 'np.argmax', (['area'], {}), '(area)\n', (7186, 7192), True, 'import numpy as np\n'), ((1349, 1388), 'numpy.expand_dims', 'np.expand_dims', (['im_resized', '(im.ndim - 1)'], {}), '(im_resized, im.ndim - 1)\n', (1363, 1388), True, 'import numpy as np\n'), ((1755, 1768), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (1764, 1768), True, 'import numpy as np\n'), ((1770, 1783), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (1779, 1783), True, 'import numpy as np\n'), ((2381, 2436), 'cv2.convertMaps', 'cv2.convertMaps', (['map_x', 'map_y'], {'dstmap1type': 'cv2.CV_16SC2'}), '(map_x, map_y, dstmap1type=cv2.CV_16SC2)\n', (2396, 2436), False, 'import cv2\n'), ((2633, 2666), 'numpy.expand_dims', 'np.expand_dims', (['remapped', 'im.ndim'], {}), '(remapped, im.ndim)\n', (2647, 2666), True, 'import numpy as np\n'), ((3384, 3393), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (3390, 3393), True, 'import numpy as np\n'), ((3468, 3477), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (3474, 
3477), True, 'import numpy as np\n'), ((3593, 3603), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (3600, 3603), True, 'import numpy as np\n'), ((3608, 3617), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (3614, 3617), True, 'import numpy as np\n'), ((3642, 3652), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (3649, 3652), True, 'import numpy as np\n'), ((3782, 3805), 'numpy.amax', 'np.amax', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (3789, 3805), True, 'import numpy as np\n'), ((3317, 3327), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (3324, 3327), True, 'import numpy as np\n'), ((3332, 3341), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (3338, 3341), True, 'import numpy as np\n'), ((3370, 3380), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (3377, 3380), True, 'import numpy as np\n'), ((3429, 3438), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (3435, 3438), True, 'import numpy as np\n'), ((5846, 5885), 'numpy.divide', 'np.divide', (['(image - min_i)', '(max_i - min_i)'], {}), '(image - min_i, max_i - min_i)\n', (5855, 5885), True, 'import numpy as np\n')] |
import numpy as np
from dipy.data import get_gtab_taiwan_dsi
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal,
assert_equal,
run_module_suite,
assert_raises)
from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix
from dipy.reconst import dti, mapmri
from dipy.sims.voxel import (MultiTensor,
multi_tensor_pdf,
single_tensor,
cylinders_and_ball_soderman)
from scipy.special import gamma
from scipy.misc import factorial
from dipy.data import get_sphere
from dipy.sims.voxel import add_noise
import scipy.integrate as integrate
from dipy.core.sphere_stats import angular_similarity
from dipy.direction.peaks import peak_directions
from dipy.reconst.odf import gfa
from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.reconst.shm import sh_to_sf
import time
def int_func(n):
    """Analytic integral weight of the order-`n` 1D MAP-MRI basis function,
    used to check that the fitted PDF integrates to one."""
    # BUG FIX: `scipy.misc.factorial` (imported at the top of this file) was
    # deprecated in SciPy 1.0 and removed in 1.3; import it locally from
    # scipy.special, where it has always lived.
    from scipy.special import factorial
    f = np.sqrt(2) * factorial(n) / float(((gamma(1 + n / 2.0)) *
                                             np.sqrt(2**(n + 1) * factorial(n))))
    return f
def generate_signal_crossing(gtab, lambda1, lambda2, lambda3, angle2=60):
    """Simulate a noiseless 50/50 two-tensor crossing signal for `gtab`.

    Both tensors share the eigenvalues (lambda1, lambda2, lambda3); one is
    oriented at spherical angles (0, 0), the other at (angle2, 0).
    """
    eigenvalues = [lambda1, lambda2, lambda3]
    mevals = np.array([eigenvalues, eigenvalues])
    angl = [(0, 0), (angle2, 0)]
    S, sticks = MultiTensor(gtab, mevals, S0=100.0, angles=angl,
                            fractions=[50, 50], snr=None)
    return S, sticks
def test_orthogonality_basis_functions():
    """Numerically verify orthogonality of consecutive even-order 1D MAP-MRI
    basis functions, and of the isotropic radial signal basis for l = 0."""
    # numerical integration parameters
    diffusivity = 0.0015
    qmin = 0
    qmax = 1000
    # pairwise integrals of phi_n * phi_{n+2} over q; all should vanish
    int1 = integrate.quad(lambda x:
                          np.real(mapmri.mapmri_phi_1d(0, x, diffusivity)) *
                          np.real(mapmri.mapmri_phi_1d(2, x, diffusivity)),
                          qmin, qmax)[0]
    int2 = integrate.quad(lambda x:
                          np.real(mapmri.mapmri_phi_1d(2, x, diffusivity)) *
                          np.real(mapmri.mapmri_phi_1d(4, x, diffusivity)),
                          qmin, qmax)[0]
    int3 = integrate.quad(lambda x:
                          np.real(mapmri.mapmri_phi_1d(4, x, diffusivity)) *
                          np.real(mapmri.mapmri_phi_1d(6, x, diffusivity)),
                          qmin, qmax)[0]
    int4 = integrate.quad(lambda x:
                          np.real(mapmri.mapmri_phi_1d(6, x, diffusivity)) *
                          np.real(mapmri.mapmri_phi_1d(8, x, diffusivity)),
                          qmin, qmax)[0]
    # checking for first 5 basis functions if they are indeed orthogonal
    assert_almost_equal(int1, 0.)
    assert_almost_equal(int2, 0.)
    assert_almost_equal(int3, 0.)
    assert_almost_equal(int4, 0.)
    # do the same for the isotropic mapmri basis functions
    # we already know the spherical harmonics are orthonormal
    # only check j>0, l=0 basis functions
    # NOTE(review): C1..C5 below are computed but never used, and C5 repeats
    # C4's arguments — looks like dead code left over from an earlier check.
    C1 = mapmri.mapmri_isotropic_radial_pdf_basis(1, 0, diffusivity, 0)
    C2 = mapmri.mapmri_isotropic_radial_pdf_basis(2, 0, diffusivity, 0)
    C3 = mapmri.mapmri_isotropic_radial_pdf_basis(3, 0, diffusivity, 0)
    C4 = mapmri.mapmri_isotropic_radial_pdf_basis(4, 0, diffusivity, 0)
    C5 = mapmri.mapmri_isotropic_radial_pdf_basis(4, 0, diffusivity, 0)
    # radial integrals (weighted by q**2) of consecutive j for l = 0
    int1 = integrate.quad(lambda q:
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              1, 0, diffusivity, q) *
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              2, 0, diffusivity, q) * q ** 2, qmin, qmax)[0]
    int2 = integrate.quad(lambda q:
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              2, 0, diffusivity, q) *
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              3, 0, diffusivity, q) * q ** 2, qmin, qmax)[0]
    int3 = integrate.quad(lambda q:
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              3, 0, diffusivity, q) *
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              4, 0, diffusivity, q) * q ** 2, qmin, qmax)[0]
    int4 = integrate.quad(lambda q:
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              4, 0, diffusivity, q) *
                          mapmri.mapmri_isotropic_radial_signal_basis(
                              5, 0, diffusivity, q) * q ** 2, qmin, qmax)[0]
    # checking for first 5 basis functions if they are indeed orthogonal
    assert_almost_equal(int1, 0.)
    assert_almost_equal(int2, 0.)
    assert_almost_equal(int3, 0.)
    assert_almost_equal(int4, 0.)
def test_mapmri_number_of_coefficients(radial_order=6):
    """The index matrix must match the closed-form coefficient count."""
    index_matrix = mapmri_index_matrix(radial_order)
    F = radial_order / 2
    # analytic number of MAP-MRI basis functions for this radial order
    expected = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))
    assert_equal(index_matrix.shape[0], expected)
def test_mapmri_initialize_radial_error():
    """
    Test initialization conditions
    """
    gtab = get_gtab_taiwan_dsi()
    # Negative and odd radial orders must both be rejected.
    for bad_order in (-1, 3):
        assert_raises(ValueError, MapmriModel, gtab, radial_order=bad_order)
def test_mapmri_initialize_gcv():
    """
    Test initialization conditions
    """
    gtab = get_gtab_taiwan_dsi()
    # When a string is provided for laplacian_weighting it has to be "GCV".
    assert_raises(ValueError, MapmriModel, gtab,
                  laplacian_weighting="notGCV")
def test_mapmri_initialize_pos_radius():
    """
    Test initialization conditions
    """
    gtab = get_gtab_taiwan_dsi()
    # pos_radius must be the string "adaptive" or a positive number.
    for bad_radius in ("notadaptive", -1):
        assert_raises(ValueError, MapmriModel, gtab,
                      positivity_constraint=True, pos_radius=bad_radius)
def test_mapmri_signal_fitting(radial_order=6):
    """Fit MAP-MRI to a simulated crossing and check the normalised
    reconstruction error, across anisotropic/isotropic bases, with and
    without Laplacian regularisation and the positivity constraint."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3)
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.02)
    mapfit = mapm.fit(S)
    S_reconst = mapfit.predict(gtab, 1.0)
    # test the signal reconstruction (normalised RMS error)
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 3)
    # Test with multidimensional signals:
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.02)
    # Each voxel is identical:
    mapfit = mapm.fit(S[:, None, None].T * np.ones((3, 3, 3, S.shape[0])))
    # Predict back with an array of ones or a single value:
    for S0 in [S[0], np.ones((3, 3, 3, 203))]:
        S_reconst = mapfit.predict(gtab, S0=S0)
        # test the signal reconstruction for one voxel:
        nmse_signal = (np.sqrt(np.sum((S - S_reconst[0, 0, 0]) ** 2)) /
                       (S.sum()))
        assert_almost_equal(nmse_signal, 0.0, 3)
    # do the same for isotropic implementation
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.0001,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    S_reconst = mapfit.predict(gtab, 1.0)
    # test the signal reconstruction
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 3)
    # do the same without the positivity constraint:
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.0001,
                       positivity_constraint=False,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    S_reconst = mapfit.predict(gtab, 1.0)
    # test the signal reconstruction
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 3)
    # Repeat with a gtab with big_delta and small_delta:
    gtab.big_delta = 5
    gtab.small_delta = 3
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.0001,
                       positivity_constraint=False,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    S_reconst = mapfit.predict(gtab, 1.0)
    # test the signal reconstruction
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 3)
    # the positivity-constrained variants need the optional cvxpy dependency
    if mapmri.have_cvxpy:
        # Positivity constraint and anisotropic scaling:
        mapm = MapmriModel(gtab, radial_order=radial_order,
                           laplacian_weighting=0.0001,
                           positivity_constraint=True,
                           anisotropic_scaling=False,
                           pos_radius=2)
        mapfit = mapm.fit(S)
        S_reconst = mapfit.predict(gtab, 1.0)
        # test the signal reconstruction
        S = S / S[0]
        nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
        assert_almost_equal(nmse_signal, 0.0, 3)
        # Positivity constraint and anisotropic scaling:
        mapm = MapmriModel(gtab, radial_order=radial_order,
                           laplacian_weighting=None,
                           positivity_constraint=True,
                           anisotropic_scaling=False,
                           pos_radius=2)
        mapfit = mapm.fit(S)
        S_reconst = mapfit.predict(gtab, 1.0)
        # test the signal reconstruction
        S = S / S[0]
        nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
        assert_almost_equal(nmse_signal, 0.0, 2)
def test_mapmri_isotropic_static_scale_factor(radial_order=6):
    """For an isotropic tensor, fitting with a fixed (static) diffusivity
    must pin the scale factor mu, run faster than adaptive DTI-based scale
    estimation, and yield the same fitted signal."""
    gtab = get_gtab_taiwan_dsi()
    D = 0.7e-3
    tau = 1 / (4 * np.pi ** 2)
    # expected fixed scale factor for diffusivity D
    mu = np.sqrt(D * 2 * tau)
    l1, l2, l3 = [D, D, D]
    S = single_tensor(gtab, evals=np.r_[l1, l2, l3])
    S_array = np.tile(S, (5, 1))
    stat_weight = 0.1
    mapm_scale_stat_reg_stat = MapmriModel(gtab,
                                           radial_order=radial_order,
                                           anisotropic_scaling=False,
                                           dti_scale_estimation=False,
                                           static_diffusivity=D,
                                           laplacian_regularization=True,
                                           laplacian_weighting=stat_weight)
    mapm_scale_adapt_reg_stat = MapmriModel(gtab,
                                            radial_order=radial_order,
                                            anisotropic_scaling=False,
                                            dti_scale_estimation=True,
                                            laplacian_regularization=True,
                                            laplacian_weighting=stat_weight)
    start = time.time()
    mapf_scale_stat_reg_stat = mapm_scale_stat_reg_stat.fit(S_array)
    time_scale_stat_reg_stat = time.time() - start
    start = time.time()
    mapf_scale_adapt_reg_stat = mapm_scale_adapt_reg_stat.fit(S_array)
    time_scale_adapt_reg_stat = time.time() - start
    # test if indeed the scale factor is fixed now
    assert_equal(np.all(mapf_scale_stat_reg_stat.mu == mu),
                 True)
    # test if computation time is shorter
    # NOTE(review): this wall-clock comparison can be flaky on loaded
    # machines — consider dropping it or using a generous margin.
    assert_equal(time_scale_stat_reg_stat < time_scale_adapt_reg_stat,
                 True)
    # check if the fitted signal is the same
    assert_almost_equal(mapf_scale_stat_reg_stat.fitted_signal(),
                        mapf_scale_adapt_reg_stat.fitted_signal())
def test_mapmri_signal_fitting_over_radial_order(order_max=8):
    """The MAP-MRI fitting error must decrease as the radial order grows."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0012, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3, angle2=60)
    # take radial order 0, 4 and 8
    orders = [0, 4, 8]
    errors = []
    for order in orders:
        model = MapmriModel(gtab, radial_order=order,
                            laplacian_regularization=False)
        S_reconst = model.fit(S).predict(gtab, 100.0)
        errors.append(np.mean((S - S_reconst) ** 2))
    # check if the fitting error decreases as radial order increases
    assert_equal(np.diff(errors) < 0., True)
def test_mapmri_pdf_integral_unity(radial_order=6):
    """The fitted MAP-MRI propagator (PDF) and ODF must integrate to one,
    for both the anisotropic (analytic) and isotropic (numeric) bases."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3)
    sphere = get_sphere('symmetric724')
    # test MAPMRI fitting
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.02)
    mapfit = mapm.fit(S)
    c_map = mapfit.mapmri_coeff
    # test if the analytical integral of the pdf is equal to one
    indices = mapmri_index_matrix(radial_order)
    integral = 0
    for i in range(indices.shape[0]):
        n1, n2, n3 = indices[i]
        # int_func gives the analytic integral of each separable 1D basis term
        integral += c_map[i] * int_func(n1) * int_func(n2) * int_func(n3)
    assert_almost_equal(integral, 1.0, 3)
    # test if numerical integral of odf is equal to one
    odf = mapfit.odf(sphere, s=0)
    odf_sum = odf.sum() / sphere.vertices.shape[0] * (4 * np.pi)
    assert_almost_equal(odf_sum, 1.0, 2)
    # do the same for isotropic implementation
    radius_max = 0.04  # 40 microns
    gridsize = 17
    r_points = mapmri.create_rspace(gridsize, radius_max)
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.02,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    pdf = mapfit.pdf(r_points)
    pdf[r_points[:, 2] == 0.] /= 2  # for antipodal symmetry on z-plane
    # Riemann sum over the half-space grid, doubled for the full space
    point_volume = (radius_max / (gridsize // 2)) ** 3
    integral = pdf.sum() * point_volume * 2
    assert_almost_equal(integral, 1.0, 3)
    odf = mapfit.odf(sphere, s=0)
    odf_sum = odf.sum() / sphere.vertices.shape[0] * (4 * np.pi)
    assert_almost_equal(odf_sum, 1.0, 2)
def test_mapmri_compare_fitted_pdf_with_multi_tensor(radial_order=6):
    """The MAP-MRI propagator must match the analytic multi-tensor PDF."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3)
    # NOTE(review): the comment says 40 microns but the value here is 0.02,
    # half of the 0.04 used in test_mapmri_pdf_integral_unity — verify.
    radius_max = 0.02  # 40 microns
    gridsize = 10
    r_points = mapmri.create_rspace(gridsize, radius_max)
    # test MAPMRI fitting
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_weighting=0.0001)
    mapfit = mapm.fit(S)
    # compare the mapmri pdf with the ground truth multi_tensor pdf
    mevals = np.array(([l1, l2, l3],
                       [l1, l2, l3]))
    angl = [(0, 0), (60, 0)]
    pdf_mt = multi_tensor_pdf(r_points, mevals=mevals,
                              angles=angl, fractions=[50, 50])
    pdf_map = mapfit.pdf(r_points)
    # normalised RMS error between the two propagators
    nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_map) ** 2)) / (pdf_mt.sum())
    assert_almost_equal(nmse_pdf, 0.0, 2)
def test_mapmri_metrics_anisotropic(radial_order=6):
    """Check the MAP-MRI q-space indices against their analytic values for a
    single (anisotropic) Gaussian tensor, where closed forms exist."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    # angle2=0 makes both simulated tensors coincide -> single tensor signal
    S, _ = generate_signal_crossing(gtab, l1, l2, l3, angle2=0)
    # test MAPMRI q-space indices
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_regularization=False)
    mapfit = mapm.fit(S)
    tau = 1 / (4 * np.pi ** 2)
    # ground truth indices estimated from the DTI tensor
    rtpp_gt = 1. / (2 * np.sqrt(np.pi * l1 * tau))
    rtap_gt = (
        1. / (2 * np.sqrt(np.pi * l2 * tau)) * 1. /
        (2 * np.sqrt(np.pi * l3 * tau))
    )
    rtop_gt = rtpp_gt * rtap_gt
    msd_gt = 2 * (l1 + l2 + l3) * tau
    qiv_gt = (
        (64 * np.pi ** (7 / 2.) * (l1 * l2 * l3 * tau ** 3) ** (3 / 2.)) /
        ((l2 * l3 + l1 * (l2 + l3)) * tau ** 2)
    )
    assert_almost_equal(mapfit.rtap(), rtap_gt, 5)
    assert_almost_equal(mapfit.rtpp(), rtpp_gt, 5)
    assert_almost_equal(mapfit.rtop(), rtop_gt, 5)
    # a Gaussian signal is exactly non-Gaussianity-free
    assert_almost_equal(mapfit.ng(), 0., 5)
    assert_almost_equal(mapfit.ng_parallel(), 0., 5)
    assert_almost_equal(mapfit.ng_perpendicular(), 0., 5)
    assert_almost_equal(mapfit.msd(), msd_gt, 5)
    assert_almost_equal(mapfit.qiv(), qiv_gt, 5)
def test_mapmri_metrics_isotropic(radial_order=6):
    """Same analytic q-space index checks as the anisotropic test, but for
    the isotropic MAP-MRI basis on an isotropic tensor signal."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0003, 0.0003, 0.0003]  # isotropic diffusivities
    S = single_tensor(gtab, evals=np.r_[l1, l2, l3])
    # test MAPMRI q-space indices
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_regularization=False,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    tau = 1 / (4 * np.pi ** 2)
    # ground truth indices estimated from the DTI tensor
    rtpp_gt = 1. / (2 * np.sqrt(np.pi * l1 * tau))
    rtap_gt = (
        1. / (2 * np.sqrt(np.pi * l2 * tau)) * 1. /
        (2 * np.sqrt(np.pi * l3 * tau))
    )
    rtop_gt = rtpp_gt * rtap_gt
    msd_gt = 2 * (l1 + l2 + l3) * tau
    qiv_gt = (
        (64 * np.pi ** (7 / 2.) * (l1 * l2 * l3 * tau ** 3) ** (3 / 2.)) /
        ((l2 * l3 + l1 * (l2 + l3)) * tau ** 2)
    )
    assert_almost_equal(mapfit.rtap(), rtap_gt, 5)
    assert_almost_equal(mapfit.rtpp(), rtpp_gt, 5)
    assert_almost_equal(mapfit.rtop(), rtop_gt, 4)
    assert_almost_equal(mapfit.msd(), msd_gt, 5)
    assert_almost_equal(mapfit.qiv(), qiv_gt, 5)
def test_mapmri_laplacian_anisotropic(radial_order=6):
    """The quadratic form of the anisotropic Laplacian regularisation matrix
    must reproduce the analytic Laplacian norm of a Gaussian tensor."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S = single_tensor(gtab, evals=np.r_[l1, l2, l3])
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_regularization=False)
    mapfit = mapm.fit(S)
    tau = 1 / (4 * np.pi ** 2)
    # ground truth norm of laplacian of tensor
    norm_of_laplacian_gt = (
        (3 * (l1 ** 2 + l2 ** 2 + l3 ** 2) +
         2 * l2 * l3 + 2 * l1 * (l2 + l3)) * (np.pi ** (5 / 2.) * tau) /
        (np.sqrt(2 * l1 * l2 * l3 * tau))
    )
    # check if estimated laplacian corresponds with ground truth:
    # coef^T * L * coef is the squared Laplacian norm of the fitted signal
    laplacian_matrix = mapmri.mapmri_laplacian_reg_matrix(
        mapm.ind_mat, mapfit.mu, mapm.S_mat,
        mapm.T_mat, mapm.U_mat)
    coef = mapfit._mapmri_coef
    norm_of_laplacian = np.dot(np.dot(coef, laplacian_matrix), coef)
    assert_almost_equal(norm_of_laplacian, norm_of_laplacian_gt)
def test_mapmri_laplacian_isotropic(radial_order=6):
    """Same Laplacian-norm check as the anisotropic test, but using the
    isotropic basis and its regularisation matrix."""
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0003, 0.0003, 0.0003]  # isotropic diffusivities
    S = single_tensor(gtab, evals=np.r_[l1, l2, l3])
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_regularization=False,
                       anisotropic_scaling=False)
    mapfit = mapm.fit(S)
    tau = 1 / (4 * np.pi ** 2)
    # ground truth norm of laplacian of tensor
    norm_of_laplacian_gt = (
        (3 * (l1 ** 2 + l2 ** 2 + l3 ** 2) +
         2 * l2 * l3 + 2 * l1 * (l2 + l3)) * (np.pi ** (5 / 2.) * tau) /
        (np.sqrt(2 * l1 * l2 * l3 * tau))
    )
    # check if estimated laplacian corresponds with ground truth
    laplacian_matrix = mapmri.mapmri_isotropic_laplacian_reg_matrix(
        radial_order, mapfit.mu[0])
    coef = mapfit._mapmri_coef
    norm_of_laplacian = np.dot(np.dot(coef, laplacian_matrix), coef)
    assert_almost_equal(norm_of_laplacian, norm_of_laplacian_gt)
def test_signal_fitting_equality_anisotropic_isotropic(radial_order=6):
    """Anisotropic and isotropic MAPMRI agree when forced to the same scale.

    With identical isotropic scale factors, the two implementations must
    produce the same fitted signal, the same PDF, and the same laplacian
    norm of the fitted coefficients.
    """
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3, angle2=60)
    gridsize = 17
    radius_max = 0.02
    r_points = mapmri.create_rspace(gridsize, radius_max)
    # DTI fit supplies the eigenvalues used to derive a single scale factor.
    tenmodel = dti.TensorModel(gtab)
    evals = tenmodel.fit(S).evals
    tau = 1 / (4 * np.pi ** 2)
    # estimate isotropic scale factor
    u0 = mapmri.isotropic_scale_factor(evals * 2 * tau)
    # same scale along all three axes so the anisotropic basis is isotropic
    mu = np.array([u0, u0, u0])
    qvals = np.sqrt(gtab.bvals / tau) / (2 * np.pi)
    q = gtab.bvecs * qvals[:, None]
    # signal (phi) and pdf (psi) design matrices for both implementations
    M_aniso = mapmri.mapmri_phi_matrix(radial_order, mu, q)
    K_aniso = mapmri.mapmri_psi_matrix(radial_order, mu, r_points)
    M_iso = mapmri.mapmri_isotropic_phi_matrix(radial_order, u0, q)
    K_iso = mapmri.mapmri_isotropic_psi_matrix(radial_order, u0, r_points)
    # least-squares coefficients via pseudo-inverse (no regularization)
    coef_aniso = np.dot(np.linalg.pinv(M_aniso), S)
    coef_iso = np.dot(np.linalg.pinv(M_iso), S)
    # test if anisotropic and isotropic implementation produce equal results
    # if the same isotropic scale factors are used
    s_fitted_aniso = np.dot(M_aniso, coef_aniso)
    s_fitted_iso = np.dot(M_iso, coef_iso)
    assert_array_almost_equal(s_fitted_aniso, s_fitted_iso)
    # the same test for the PDF
    pdf_fitted_aniso = np.dot(K_aniso, coef_aniso)
    pdf_fitted_iso = np.dot(K_iso, coef_iso)
    assert_array_almost_equal(pdf_fitted_aniso / pdf_fitted_iso,
                              np.ones_like(pdf_fitted_aniso), 3)
    # test if the implemented version also produces the same result
    mapm = MapmriModel(gtab, radial_order=radial_order,
                       laplacian_regularization=False,
                       anisotropic_scaling=False)
    s_fitted_implemented_isotropic = mapm.fit(S).fitted_signal()
    # normalize non-implemented fitted signal with b0 value
    s_fitted_aniso_norm = s_fitted_aniso / s_fitted_aniso.max()
    assert_array_almost_equal(s_fitted_aniso_norm,
                              s_fitted_implemented_isotropic)
    # test if norm of signal laplacians are the same
    laplacian_matrix_iso = mapmri.mapmri_isotropic_laplacian_reg_matrix(
        radial_order, mu[0])
    ind_mat = mapmri.mapmri_index_matrix(radial_order)
    S_mat, T_mat, U_mat = mapmri.mapmri_STU_reg_matrices(radial_order)
    laplacian_matrix_aniso = mapmri.mapmri_laplacian_reg_matrix(
        ind_mat, mu, S_mat, T_mat, U_mat)
    norm_aniso = np.dot(coef_aniso, np.dot(coef_aniso, laplacian_matrix_aniso))
    norm_iso = np.dot(coef_iso, np.dot(coef_iso, laplacian_matrix_iso))
    assert_almost_equal(norm_iso, norm_aniso)
def test_mapmri_isotropic_design_matrix_separability(radial_order=6):
    """The isotropic design matrix factors into mu-dependent and mu-free parts."""
    gtab = get_gtab_taiwan_dsi()
    tau = 1 / (4 * np.pi ** 2)
    qvals = np.sqrt(gtab.bvals / tau) / (2 * np.pi)
    q = gtab.bvecs * qvals[:, None]
    mu = 0.0003  # arbitrary scale factor
    design_full = mapmri.mapmri_isotropic_phi_matrix(radial_order, mu, q)
    part_mu_free = mapmri.mapmri_isotropic_M_mu_independent(radial_order, q)
    part_mu_bound = mapmri.mapmri_isotropic_M_mu_dependent(radial_order, mu,
                                                           qvals)
    # The elementwise product of the two factors must rebuild the full matrix.
    assert_array_almost_equal(design_full, part_mu_free * part_mu_bound)
def test_estimate_radius_with_rtap(radius_gt=5e-3):
    """RTAP-based radius estimation recovers a known cylinder radius."""
    gtab = get_gtab_taiwan_dsi()
    tau = 1 / (4 * np.pi ** 2)
    # Infinite diffusion time signal of a perfectly reflecting cylinder
    # (Soderman/Callaghan model).
    E = cylinders_and_ball_soderman(gtab, tau, radii=[radius_gt], snr=None,
                                    angles=[(0, 90)], fractions=[100])[0]
    # Anisotropic MAP-MRI: the radius follows from RTAP as 1/sqrt(pi * RTAP).
    model = mapmri.MapmriModel(gtab, radial_order=6,
                               laplacian_regularization=True,
                               laplacian_weighting=0.01)
    fit = model.fit(E)
    assert_almost_equal(np.sqrt(1 / (np.pi * fit.rtap())), radius_gt, 5)
    # Isotropic MAP-MRI: needs a higher radial order and reaches lower
    # precision because its signal extrapolation is less accurate.
    model = mapmri.MapmriModel(gtab, radial_order=8,
                               laplacian_regularization=True,
                               laplacian_weighting=0.01,
                               anisotropic_scaling=False)
    fit = model.fit(E)
    assert_almost_equal(np.sqrt(1 / (np.pi * fit.rtap())), radius_gt, 4)
@np.testing.dec.skipif(not mapmri.have_cvxpy)
def test_positivity_constraint(radial_order=6):
    """The positivity constraint removes >90% of the negative PDF mass.

    Run twice: once with anisotropic scaling (default) and once with
    isotropic scaling. Requires cvxpy (skipped otherwise).
    """
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3, angle2=60)
    S_noise = add_noise(S, snr=20, S0=100.)
    gridsize = 20
    max_radius = 15e-3  # 15e-3 = 15 microns maximum radius, assuming mm
    # units as elsewhere in this file (original comment said "20 microns")
    r_grad = mapmri.create_rspace(gridsize, max_radius)
    # the posivitivity constraint does not make the pdf completely positive
    # but greatly decreases the amount of negativity in the constrained points.
    # we test if the amount of negative pdf has decreased more than 90%
    mapmod_no_constraint = MapmriModel(gtab, radial_order=radial_order,
                                       laplacian_regularization=False,
                                       positivity_constraint=False)
    mapfit_no_constraint = mapmod_no_constraint.fit(S_noise)
    pdf = mapfit_no_constraint.pdf(r_grad)
    pdf_negative_no_constraint = pdf[pdf < 0].sum()
    mapmod_constraint = MapmriModel(gtab, radial_order=radial_order,
                                    laplacian_regularization=False,
                                    positivity_constraint=True,
                                    pos_grid=gridsize,
                                    pos_radius='adaptive')
    mapfit_constraint = mapmod_constraint.fit(S_noise)
    pdf = mapfit_constraint.pdf(r_grad)
    pdf_negative_constraint = pdf[pdf < 0].sum()
    # negative mass must have shrunk by more than a factor of 10
    assert_equal((pdf_negative_constraint / pdf_negative_no_constraint) < 0.1,
                 True)
    # the same for isotropic scaling
    mapmod_no_constraint = MapmriModel(gtab, radial_order=radial_order,
                                       laplacian_regularization=False,
                                       positivity_constraint=False,
                                       anisotropic_scaling=False)
    mapfit_no_constraint = mapmod_no_constraint.fit(S_noise)
    pdf = mapfit_no_constraint.pdf(r_grad)
    pdf_negative_no_constraint = pdf[pdf < 0].sum()
    mapmod_constraint = MapmriModel(gtab, radial_order=radial_order,
                                    laplacian_regularization=False,
                                    positivity_constraint=True,
                                    anisotropic_scaling=False,
                                    pos_grid=gridsize,
                                    pos_radius='adaptive')
    mapfit_constraint = mapmod_constraint.fit(S_noise)
    pdf = mapfit_constraint.pdf(r_grad)
    pdf_negative_constraint = pdf[pdf < 0].sum()
    assert_equal((pdf_negative_constraint / pdf_negative_no_constraint) < 0.1,
                 True)
def test_laplacian_regularization(radial_order=6):
    """GCV picks a sensible laplacian weight and regularization reduces the norm.

    Checks, for both anisotropic and isotropic scaling, that: GCV yields a
    near-zero weight on noiseless data and a larger one on noisy data; the
    continuous GCV agrees with the array-based search; and the regularized
    fit has a smaller laplacian norm than the unregularized one.
    """
    gtab = get_gtab_taiwan_dsi()
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    S, _ = generate_signal_crossing(gtab, l1, l2, l3, angle2=60)
    S_noise = add_noise(S, snr=20, S0=100.)
    # candidate weights for the array-based GCV search
    weight_array = np.linspace(0, .3, 301)
    mapmod_unreg = MapmriModel(gtab, radial_order=radial_order,
                               laplacian_regularization=False,
                               laplacian_weighting=weight_array)
    mapmod_laplacian_array = MapmriModel(gtab, radial_order=radial_order,
                                         laplacian_regularization=True,
                                         laplacian_weighting=weight_array)
    mapmod_laplacian_gcv = MapmriModel(gtab, radial_order=radial_order,
                                       laplacian_regularization=True,
                                       laplacian_weighting="GCV")
    # test the Generalized Cross Validation
    # test if GCV gives very low if there is no noise
    mapfit_laplacian_array = mapmod_laplacian_array.fit(S)
    assert_equal(mapfit_laplacian_array.lopt < 0.01, True)
    # test if GCV gives higher values if there is noise
    mapfit_laplacian_array = mapmod_laplacian_array.fit(S_noise)
    lopt_array = mapfit_laplacian_array.lopt
    assert_equal(lopt_array > 0.01, True)
    # test if continuous GCV gives the same the one based on an array
    mapfit_laplacian_gcv = mapmod_laplacian_gcv.fit(S_noise)
    lopt_gcv = mapfit_laplacian_gcv.lopt
    assert_almost_equal(lopt_array, lopt_gcv, 2)
    # test if laplacian reduced the norm of the laplacian in the reconstruction
    mu = mapfit_laplacian_gcv.mu
    laplacian_matrix = mapmri.mapmri_laplacian_reg_matrix(
        mapmod_laplacian_gcv.ind_mat, mu, mapmod_laplacian_gcv.S_mat,
        mapmod_laplacian_gcv.T_mat, mapmod_laplacian_gcv.U_mat)
    coef_unreg = mapmod_unreg.fit(S_noise)._mapmri_coef
    coef_laplacian = mapfit_laplacian_gcv._mapmri_coef
    # laplacian norm is the quadratic form coef^T L coef
    laplacian_norm_unreg = np.dot(
        coef_unreg, np.dot(coef_unreg, laplacian_matrix))
    laplacian_norm_laplacian = np.dot(
        coef_laplacian, np.dot(coef_laplacian, laplacian_matrix))
    assert_equal(laplacian_norm_laplacian < laplacian_norm_unreg, True)
    # the same for isotropic scaling
    mapmod_unreg = MapmriModel(gtab, radial_order=radial_order,
                               laplacian_regularization=False,
                               laplacian_weighting=weight_array,
                               anisotropic_scaling=False)
    mapmod_laplacian_array = MapmriModel(gtab, radial_order=radial_order,
                                         laplacian_regularization=True,
                                         laplacian_weighting=weight_array,
                                         anisotropic_scaling=False)
    mapmod_laplacian_gcv = MapmriModel(gtab, radial_order=radial_order,
                                       laplacian_regularization=True,
                                       laplacian_weighting="GCV",
                                       anisotropic_scaling=False)
    # test the Generalized Cross Validation
    # test if GCV gives zero if there is no noise
    mapfit_laplacian_array = mapmod_laplacian_array.fit(S)
    assert_equal(mapfit_laplacian_array.lopt < 0.01, True)
    # test if GCV gives higher values if there is noise
    mapfit_laplacian_array = mapmod_laplacian_array.fit(S_noise)
    lopt_array = mapfit_laplacian_array.lopt
    assert_equal(lopt_array > 0.01, True)
    # test if continuous GCV gives the same the one based on an array
    mapfit_laplacian_gcv = mapmod_laplacian_gcv.fit(S_noise)
    lopt_gcv = mapfit_laplacian_gcv.lopt
    assert_almost_equal(lopt_array, lopt_gcv, 2)
    # test if laplacian reduced the norm of the laplacian in the reconstruction
    mu = mapfit_laplacian_gcv.mu
    laplacian_matrix = mapmri.mapmri_isotropic_laplacian_reg_matrix(
        radial_order, mu[0])
    coef_unreg = mapmod_unreg.fit(S_noise)._mapmri_coef
    coef_laplacian = mapfit_laplacian_gcv._mapmri_coef
    laplacian_norm_unreg = np.dot(
        coef_unreg, np.dot(coef_unreg, laplacian_matrix))
    laplacian_norm_laplacian = np.dot(
        coef_laplacian, np.dot(coef_laplacian, laplacian_matrix))
    assert_equal(laplacian_norm_laplacian < laplacian_norm_unreg, True)
def test_mapmri_odf(radial_order=6):
    """MAPMRI ODF peaks recover crossing directions; SH ODF matches discrete ODF.

    Checks peak extraction on a two-fiber crossing for two spheres, runs the
    sticks-and-ball dummy battery, and verifies that the isotropic
    implementation's spherical-harmonic ODF reproduces the sampled ODF.
    """
    gtab = get_gtab_taiwan_dsi()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    data, golden_directions = generate_signal_crossing(gtab, l1, l2, l3,
                                                       angle2=90)
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01)
    # symmetric724
    sphere2 = create_unit_sphere(5)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    # the two ground-truth crossing directions must be recovered
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)
    # 5 subdivisions
    odf = mapfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)
    sb_dummies = sticks_and_ball_dummies(gtab)
    for sbd in sb_dummies:
        data, golden_directions = sb_dummies[sbd]
        asmfit = mapmod.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            # too many peaks: accept only if the ODF is near-isotropic
            assert_equal(gfa(odf) < 0.1, True)
    # for the isotropic implementation check if the odf spherical harmonics
    # actually represent the discrete sphere function.
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01,
                         anisotropic_scaling=False)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    odf_sh = mapfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, radial_order, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)
# Run the whole test module with numpy's test runner when executed directly.
if __name__ == '__main__':
    run_module_suite()
| [
"dipy.sims.voxel.MultiTensor",
"numpy.sqrt",
"numpy.testing.assert_equal",
"dipy.reconst.dti.TensorModel",
"dipy.reconst.mapmri.mapmri_psi_matrix",
"dipy.sims.voxel.add_noise",
"numpy.linalg.pinv",
"scipy.misc.factorial",
"numpy.testing.assert_raises",
"dipy.direction.peaks.peak_directions",
"nu... | [((24180, 24224), 'numpy.testing.dec.skipif', 'np.testing.dec.skipif', (['(not mapmri.have_cvxpy)'], {}), '(not mapmri.have_cvxpy)\n', (24201, 24224), True, 'import numpy as np\n'), ((1334, 1402), 'numpy.array', 'np.array', (['([lambda1, lambda2, lambda3], [lambda1, lambda2, lambda3])'], {}), '(([lambda1, lambda2, lambda3], [lambda1, lambda2, lambda3]))\n', (1342, 1402), True, 'import numpy as np\n'), ((1475, 1553), 'dipy.sims.voxel.MultiTensor', 'MultiTensor', (['gtab', 'mevals'], {'S0': '(100.0)', 'angles': 'angl', 'fractions': '[50, 50]', 'snr': 'None'}), '(gtab, mevals, S0=100.0, angles=angl, fractions=[50, 50], snr=None)\n', (1486, 1553), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((2739, 2769), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int1', '(0.0)'], {}), '(int1, 0.0)\n', (2758, 2769), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((2773, 2803), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int2', '(0.0)'], {}), '(int2, 0.0)\n', (2792, 2803), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((2807, 2837), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int3', '(0.0)'], {}), '(int3, 0.0)\n', (2826, 2837), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((2841, 2871), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int4', '(0.0)'], {}), '(int4, 0.0)\n', (2860, 2871), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((3044, 3106), 'dipy.reconst.mapmri.mapmri_isotropic_radial_pdf_basis', 'mapmri.mapmri_isotropic_radial_pdf_basis', (['(1)', '(0)', 'diffusivity', '(0)'], {}), '(1, 0, 
diffusivity, 0)\n', (3084, 3106), False, 'from dipy.reconst import dti, mapmri\n'), ((3116, 3178), 'dipy.reconst.mapmri.mapmri_isotropic_radial_pdf_basis', 'mapmri.mapmri_isotropic_radial_pdf_basis', (['(2)', '(0)', 'diffusivity', '(0)'], {}), '(2, 0, diffusivity, 0)\n', (3156, 3178), False, 'from dipy.reconst import dti, mapmri\n'), ((3188, 3250), 'dipy.reconst.mapmri.mapmri_isotropic_radial_pdf_basis', 'mapmri.mapmri_isotropic_radial_pdf_basis', (['(3)', '(0)', 'diffusivity', '(0)'], {}), '(3, 0, diffusivity, 0)\n', (3228, 3250), False, 'from dipy.reconst import dti, mapmri\n'), ((3260, 3322), 'dipy.reconst.mapmri.mapmri_isotropic_radial_pdf_basis', 'mapmri.mapmri_isotropic_radial_pdf_basis', (['(4)', '(0)', 'diffusivity', '(0)'], {}), '(4, 0, diffusivity, 0)\n', (3300, 3322), False, 'from dipy.reconst import dti, mapmri\n'), ((3332, 3394), 'dipy.reconst.mapmri.mapmri_isotropic_radial_pdf_basis', 'mapmri.mapmri_isotropic_radial_pdf_basis', (['(4)', '(0)', 'diffusivity', '(0)'], {}), '(4, 0, diffusivity, 0)\n', (3372, 3394), False, 'from dipy.reconst import dti, mapmri\n'), ((4710, 4740), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int1', '(0.0)'], {}), '(int1, 0.0)\n', (4729, 4740), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((4744, 4774), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int2', '(0.0)'], {}), '(int2, 0.0)\n', (4763, 4774), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((4778, 4808), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int3', '(0.0)'], {}), '(int3, 0.0)\n', (4797, 4808), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((4812, 4842), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['int4', '(0.0)'], {}), '(int4, 
0.0)\n', (4831, 4842), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((4914, 4947), 'dipy.reconst.mapmri.mapmri_index_matrix', 'mapmri_index_matrix', (['radial_order'], {}), '(radial_order)\n', (4933, 4947), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((5011, 5062), 'numpy.round', 'np.round', (['(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))'], {}), '(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n', (5019, 5062), True, 'import numpy as np\n'), ((5067, 5090), 'numpy.testing.assert_equal', 'assert_equal', (['n_c', 'n_gt'], {}), '(n_c, n_gt)\n', (5079, 5090), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((5198, 5219), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (5217, 5219), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((5263, 5324), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'MapmriModel', 'gtab'], {'radial_order': '(-1)'}), '(ValueError, MapmriModel, gtab, radial_order=-1)\n', (5276, 5324), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((5364, 5424), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'MapmriModel', 'gtab'], {'radial_order': '(3)'}), '(ValueError, MapmriModel, gtab, radial_order=3)\n', (5377, 5424), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((5523, 5544), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (5542, 5544), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((5598, 5672), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'MapmriModel', 'gtab'], {'laplacian_weighting': '"""notGCV"""'}), "(ValueError, MapmriModel, gtab, laplacian_weighting='notGCV')\n", 
(5611, 5672), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((5778, 5799), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (5797, 5799), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((5858, 5960), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'MapmriModel', 'gtab'], {'positivity_constraint': '(True)', 'pos_radius': '"""notadaptive"""'}), "(ValueError, MapmriModel, gtab, positivity_constraint=True,\n pos_radius='notadaptive')\n", (5871, 5960), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((6033, 6124), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'MapmriModel', 'gtab'], {'positivity_constraint': '(True)', 'pos_radius': '(-1)'}), '(ValueError, MapmriModel, gtab, positivity_constraint=True,\n pos_radius=-1)\n', (6046, 6124), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((6200, 6221), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (6219, 6221), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((6330, 6400), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.02)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.02)\n', (6341, 6400), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((6618, 6658), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (6637, 6658), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((6713, 6783), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': 
'(0.02)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.02)\n', (6724, 6783), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((7339, 7442), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.0001)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.0001,\n anisotropic_scaling=False)\n', (7350, 7442), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((7679, 7719), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (7698, 7719), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((7785, 7917), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.0001)', 'positivity_constraint': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.0001,\n positivity_constraint=False, anisotropic_scaling=False)\n', (7796, 7917), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((8178, 8218), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (8197, 8218), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((8336, 8468), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.0001)', 'positivity_constraint': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.0001,\n positivity_constraint=False, anisotropic_scaling=False)\n', (8347, 8468), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((8729, 8769), 
'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (8748, 8769), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((10037, 10058), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (10056, 10058), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((10114, 10134), 'numpy.sqrt', 'np.sqrt', (['(D * 2 * tau)'], {}), '(D * 2 * tau)\n', (10121, 10134), True, 'import numpy as np\n'), ((10171, 10215), 'dipy.sims.voxel.single_tensor', 'single_tensor', (['gtab'], {'evals': 'np.r_[l1, l2, l3]'}), '(gtab, evals=np.r_[l1, l2, l3])\n', (10184, 10215), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((10230, 10248), 'numpy.tile', 'np.tile', (['S', '(5, 1)'], {}), '(S, (5, 1))\n', (10237, 10248), True, 'import numpy as np\n'), ((10303, 10496), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'anisotropic_scaling': '(False)', 'dti_scale_estimation': '(False)', 'static_diffusivity': 'D', 'laplacian_regularization': '(True)', 'laplacian_weighting': 'stat_weight'}), '(gtab, radial_order=radial_order, anisotropic_scaling=False,\n dti_scale_estimation=False, static_diffusivity=D,\n laplacian_regularization=True, laplacian_weighting=stat_weight)\n', (10314, 10496), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((10779, 10949), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'anisotropic_scaling': '(False)', 'dti_scale_estimation': '(True)', 'laplacian_regularization': '(True)', 'laplacian_weighting': 'stat_weight'}), '(gtab, radial_order=radial_order, anisotropic_scaling=False,\n dti_scale_estimation=True, laplacian_regularization=True,\n laplacian_weighting=stat_weight)\n', (10790, 10949), False, 'from 
dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((11175, 11186), 'time.time', 'time.time', ([], {}), '()\n', (11184, 11186), False, 'import time\n'), ((11320, 11331), 'time.time', 'time.time', ([], {}), '()\n', (11329, 11331), False, 'import time\n'), ((11636, 11708), 'numpy.testing.assert_equal', 'assert_equal', (['(time_scale_stat_reg_stat < time_scale_adapt_reg_stat)', '(True)'], {}), '(time_scale_stat_reg_stat < time_scale_adapt_reg_stat, True)\n', (11648, 11708), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((11980, 12001), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (11999, 12001), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((12676, 12697), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (12695, 12697), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((12807, 12833), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (12817, 12833), False, 'from dipy.data import get_sphere\n'), ((12872, 12942), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.02)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.02)\n', (12883, 12942), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((13103, 13136), 'dipy.reconst.mapmri.mapmri_index_matrix', 'mapmri_index_matrix', (['radial_order'], {}), '(radial_order)\n', (13122, 13136), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((13303, 13340), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['integral', '(1.0)', '(3)'], {}), '(integral, 1.0, 3)\n', (13322, 13340), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((13501, 13537), 'numpy.testing.assert_almost_equal', 
'assert_almost_equal', (['odf_sum', '(1.0)', '(2)'], {}), '(odf_sum, 1.0, 2)\n', (13520, 13537), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((13655, 13697), 'dipy.reconst.mapmri.create_rspace', 'mapmri.create_rspace', (['gridsize', 'radius_max'], {}), '(gridsize, radius_max)\n', (13675, 13697), False, 'from dipy.reconst import dti, mapmri\n'), ((13709, 13810), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.02)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.02,\n anisotropic_scaling=False)\n', (13720, 13810), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((14085, 14122), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['integral', '(1.0)', '(3)'], {}), '(integral, 1.0, 3)\n', (14104, 14122), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((14227, 14263), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['odf_sum', '(1.0)', '(2)'], {}), '(odf_sum, 1.0, 2)\n', (14246, 14263), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((14347, 14368), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (14366, 14368), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((14535, 14577), 'dipy.reconst.mapmri.create_rspace', 'mapmri.create_rspace', (['gridsize', 'radius_max'], {}), '(gridsize, radius_max)\n', (14555, 14577), False, 'from dipy.reconst import dti, mapmri\n'), ((14616, 14688), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.0001)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.0001)\n', (14627, 14688), False, 'from 
dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((14820, 14858), 'numpy.array', 'np.array', (['([l1, l2, l3], [l1, l2, l3])'], {}), '(([l1, l2, l3], [l1, l2, l3]))\n', (14828, 14858), True, 'import numpy as np\n'), ((14924, 14998), 'dipy.sims.voxel.multi_tensor_pdf', 'multi_tensor_pdf', (['r_points'], {'mevals': 'mevals', 'angles': 'angl', 'fractions': '[50, 50]'}), '(r_points, mevals=mevals, angles=angl, fractions=[50, 50])\n', (14940, 14998), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((15142, 15179), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_pdf', '(0.0)', '(2)'], {}), '(nmse_pdf, 0.0, 2)\n', (15161, 15179), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((15246, 15267), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (15265, 15267), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((15421, 15497), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False)\n', (15432, 15497), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((16485, 16506), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (16504, 16506), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((16584, 16628), 'dipy.sims.voxel.single_tensor', 'single_tensor', (['gtab'], {'evals': 'np.r_[l1, l2, l3]'}), '(gtab, evals=np.r_[l1, l2, l3])\n', (16597, 16628), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((16676, 16783), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, 
radial_order=radial_order, laplacian_regularization=False,\n anisotropic_scaling=False)\n', (16687, 16783), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((17640, 17661), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (17659, 17661), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((17712, 17756), 'dipy.sims.voxel.single_tensor', 'single_tensor', (['gtab'], {'evals': 'np.r_[l1, l2, l3]'}), '(gtab, evals=np.r_[l1, l2, l3])\n', (17725, 17756), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((17769, 17845), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False)\n', (17780, 17845), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((18261, 18360), 'dipy.reconst.mapmri.mapmri_laplacian_reg_matrix', 'mapmri.mapmri_laplacian_reg_matrix', (['mapm.ind_mat', 'mapfit.mu', 'mapm.S_mat', 'mapm.T_mat', 'mapm.U_mat'], {}), '(mapm.ind_mat, mapfit.mu, mapm.S_mat,\n mapm.T_mat, mapm.U_mat)\n', (18295, 18360), False, 'from dipy.reconst import dti, mapmri\n'), ((18480, 18540), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['norm_of_laplacian', 'norm_of_laplacian_gt'], {}), '(norm_of_laplacian, norm_of_laplacian_gt)\n', (18499, 18540), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((18607, 18628), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (18626, 18628), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((18706, 18750), 'dipy.sims.voxel.single_tensor', 'single_tensor', (['gtab'], {'evals': 'np.r_[l1, l2, l3]'}), '(gtab, evals=np.r_[l1, l2, l3])\n', (18719, 18750), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, 
cylinders_and_ball_soderman\n'), ((18763, 18870), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n anisotropic_scaling=False)\n', (18774, 18870), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((19305, 19377), 'dipy.reconst.mapmri.mapmri_isotropic_laplacian_reg_matrix', 'mapmri.mapmri_isotropic_laplacian_reg_matrix', (['radial_order', 'mapfit.mu[0]'], {}), '(radial_order, mapfit.mu[0])\n', (19349, 19377), False, 'from dipy.reconst import dti, mapmri\n'), ((19493, 19553), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['norm_of_laplacian', 'norm_of_laplacian_gt'], {}), '(norm_of_laplacian, norm_of_laplacian_gt)\n', (19512, 19553), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((19639, 19660), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (19658, 19660), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((19823, 19865), 'dipy.reconst.mapmri.create_rspace', 'mapmri.create_rspace', (['gridsize', 'radius_max'], {}), '(gridsize, radius_max)\n', (19843, 19865), False, 'from dipy.reconst import dti, mapmri\n'), ((19882, 19903), 'dipy.reconst.dti.TensorModel', 'dti.TensorModel', (['gtab'], {}), '(gtab)\n', (19897, 19903), False, 'from dipy.reconst import dti, mapmri\n'), ((20017, 20063), 'dipy.reconst.mapmri.isotropic_scale_factor', 'mapmri.isotropic_scale_factor', (['(evals * 2 * tau)'], {}), '(evals * 2 * tau)\n', (20046, 20063), False, 'from dipy.reconst import dti, mapmri\n'), ((20073, 20095), 'numpy.array', 'np.array', (['[u0, u0, u0]'], {}), '([u0, u0, u0])\n', (20081, 20095), True, 'import numpy as np\n'), ((20200, 20245), 'dipy.reconst.mapmri.mapmri_phi_matrix', 'mapmri.mapmri_phi_matrix', (['radial_order', 'mu', 'q'], 
{}), '(radial_order, mu, q)\n', (20224, 20245), False, 'from dipy.reconst import dti, mapmri\n'), ((20260, 20312), 'dipy.reconst.mapmri.mapmri_psi_matrix', 'mapmri.mapmri_psi_matrix', (['radial_order', 'mu', 'r_points'], {}), '(radial_order, mu, r_points)\n', (20284, 20312), False, 'from dipy.reconst import dti, mapmri\n'), ((20326, 20381), 'dipy.reconst.mapmri.mapmri_isotropic_phi_matrix', 'mapmri.mapmri_isotropic_phi_matrix', (['radial_order', 'u0', 'q'], {}), '(radial_order, u0, q)\n', (20360, 20381), False, 'from dipy.reconst import dti, mapmri\n'), ((20394, 20456), 'dipy.reconst.mapmri.mapmri_isotropic_psi_matrix', 'mapmri.mapmri_isotropic_psi_matrix', (['radial_order', 'u0', 'r_points'], {}), '(radial_order, u0, r_points)\n', (20428, 20456), False, 'from dipy.reconst import dti, mapmri\n'), ((20707, 20734), 'numpy.dot', 'np.dot', (['M_aniso', 'coef_aniso'], {}), '(M_aniso, coef_aniso)\n', (20713, 20734), True, 'import numpy as np\n'), ((20754, 20777), 'numpy.dot', 'np.dot', (['M_iso', 'coef_iso'], {}), '(M_iso, coef_iso)\n', (20760, 20777), True, 'import numpy as np\n'), ((20782, 20837), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['s_fitted_aniso', 's_fitted_iso'], {}), '(s_fitted_aniso, s_fitted_iso)\n', (20807, 20837), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((20894, 20921), 'numpy.dot', 'np.dot', (['K_aniso', 'coef_aniso'], {}), '(K_aniso, coef_aniso)\n', (20900, 20921), True, 'import numpy as np\n'), ((20943, 20966), 'numpy.dot', 'np.dot', (['K_iso', 'coef_iso'], {}), '(K_iso, coef_iso)\n', (20949, 20966), True, 'import numpy as np\n'), ((21178, 21285), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n anisotropic_scaling=False)\n', (21189, 21285), 
False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((21523, 21601), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['s_fitted_aniso_norm', 's_fitted_implemented_isotropic'], {}), '(s_fitted_aniso_norm, s_fitted_implemented_isotropic)\n', (21548, 21601), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((21713, 21778), 'dipy.reconst.mapmri.mapmri_isotropic_laplacian_reg_matrix', 'mapmri.mapmri_isotropic_laplacian_reg_matrix', (['radial_order', 'mu[0]'], {}), '(radial_order, mu[0])\n', (21757, 21778), False, 'from dipy.reconst import dti, mapmri\n'), ((21821, 21861), 'dipy.reconst.mapmri.mapmri_index_matrix', 'mapmri.mapmri_index_matrix', (['radial_order'], {}), '(radial_order)\n', (21847, 21861), False, 'from dipy.reconst import dti, mapmri\n'), ((21888, 21932), 'dipy.reconst.mapmri.mapmri_STU_reg_matrices', 'mapmri.mapmri_STU_reg_matrices', (['radial_order'], {}), '(radial_order)\n', (21918, 21932), False, 'from dipy.reconst import dti, mapmri\n'), ((21962, 22030), 'dipy.reconst.mapmri.mapmri_laplacian_reg_matrix', 'mapmri.mapmri_laplacian_reg_matrix', (['ind_mat', 'mu', 'S_mat', 'T_mat', 'U_mat'], {}), '(ind_mat, mu, S_mat, T_mat, U_mat)\n', (21996, 22030), False, 'from dipy.reconst import dti, mapmri\n'), ((22197, 22238), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['norm_iso', 'norm_aniso'], {}), '(norm_iso, norm_aniso)\n', (22216, 22238), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((22322, 22343), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (22341, 22343), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((22504, 22559), 'dipy.reconst.mapmri.mapmri_isotropic_phi_matrix', 'mapmri.mapmri_isotropic_phi_matrix', (['radial_order', 'mu', 'q'], {}), '(radial_order, mu, q)\n', (22538, 22559), 
False, 'from dipy.reconst import dti, mapmri\n'), ((22580, 22637), 'dipy.reconst.mapmri.mapmri_isotropic_M_mu_independent', 'mapmri.mapmri_isotropic_M_mu_independent', (['radial_order', 'q'], {}), '(radial_order, q)\n', (22620, 22637), False, 'from dipy.reconst import dti, mapmri\n'), ((22656, 22719), 'dipy.reconst.mapmri.mapmri_isotropic_M_mu_dependent', 'mapmri.mapmri_isotropic_M_mu_dependent', (['radial_order', 'mu', 'qvals'], {}), '(radial_order, mu, qvals)\n', (22694, 22719), False, 'from dipy.reconst import dti, mapmri\n'), ((22832, 22877), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['M', 'M_reconstructed'], {}), '(M, M_reconstructed)\n', (22857, 22877), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((22943, 22964), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (22962, 22964), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((23328, 23429), 'dipy.reconst.mapmri.MapmriModel', 'mapmri.MapmriModel', (['gtab'], {'radial_order': '(6)', 'laplacian_regularization': '(True)', 'laplacian_weighting': '(0.01)'}), '(gtab, radial_order=6, laplacian_regularization=True,\n laplacian_weighting=0.01)\n', (23346, 23429), False, 'from dipy.reconst import dti, mapmri\n'), ((23581, 23632), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['radius_estimated', 'radius_gt', '(5)'], {}), '(radius_estimated, radius_gt, 5)\n', (23600, 23632), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((23813, 23941), 'dipy.reconst.mapmri.MapmriModel', 'mapmri.MapmriModel', (['gtab'], {'radial_order': '(8)', 'laplacian_regularization': '(True)', 'laplacian_weighting': '(0.01)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=8, laplacian_regularization=True,\n laplacian_weighting=0.01, anisotropic_scaling=False)\n', (23831, 23941), False, 'from 
dipy.reconst import dti, mapmri\n'), ((24125, 24176), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['radius_estimated', 'radius_gt', '(4)'], {}), '(radius_estimated, radius_gt, 4)\n', (24144, 24176), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((24284, 24305), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (24303, 24305), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((24427, 24457), 'dipy.sims.voxel.add_noise', 'add_noise', (['S'], {'snr': '(20)', 'S0': '(100.0)'}), '(S, snr=20, S0=100.0)\n', (24436, 24457), False, 'from dipy.sims.voxel import add_noise\n'), ((24541, 24583), 'dipy.reconst.mapmri.create_rspace', 'mapmri.create_rspace', (['gridsize', 'max_radius'], {}), '(gridsize, max_radius)\n', (24561, 24583), False, 'from dipy.reconst import dti, mapmri\n'), ((24841, 24950), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'positivity_constraint': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n positivity_constraint=False)\n', (24852, 24950), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((25206, 25356), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'positivity_constraint': '(True)', 'pos_grid': 'gridsize', 'pos_radius': '"""adaptive"""'}), "(gtab, radial_order=radial_order, laplacian_regularization=False,\n positivity_constraint=True, pos_grid=gridsize, pos_radius='adaptive')\n", (25217, 25356), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((25646, 25724), 'numpy.testing.assert_equal', 'assert_equal', (['(pdf_negative_constraint / pdf_negative_no_constraint < 0.1)', '(True)'], {}), '(pdf_negative_constraint / pdf_negative_no_constraint < 0.1, True)\n', 
(25658, 25724), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((25809, 25945), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'positivity_constraint': '(False)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n positivity_constraint=False, anisotropic_scaling=False)\n', (25820, 25945), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((26240, 26422), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'positivity_constraint': '(True)', 'anisotropic_scaling': '(False)', 'pos_grid': 'gridsize', 'pos_radius': '"""adaptive"""'}), "(gtab, radial_order=radial_order, laplacian_regularization=False,\n positivity_constraint=True, anisotropic_scaling=False, pos_grid=\n gridsize, pos_radius='adaptive')\n", (26251, 26422), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((26743, 26821), 'numpy.testing.assert_equal', 'assert_equal', (['(pdf_negative_constraint / pdf_negative_no_constraint < 0.1)', '(True)'], {}), '(pdf_negative_constraint / pdf_negative_no_constraint < 0.1, True)\n', (26755, 26821), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((26905, 26926), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (26924, 26926), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((27048, 27078), 'dipy.sims.voxel.add_noise', 'add_noise', (['S'], {'snr': '(20)', 'S0': '(100.0)'}), '(S, snr=20, S0=100.0)\n', (27057, 27078), False, 'from dipy.sims.voxel import add_noise\n'), ((27098, 27122), 'numpy.linspace', 'np.linspace', (['(0)', '(0.3)', '(301)'], {}), '(0, 0.3, 301)\n', (27109, 27122), True, 'import numpy 
as np\n'), ((27141, 27255), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'laplacian_weighting': 'weight_array'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n laplacian_weighting=weight_array)\n', (27152, 27255), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((27343, 27456), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': 'weight_array'}), '(gtab, radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting=weight_array)\n', (27354, 27456), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((27562, 27668), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': '"""GCV"""'}), "(gtab, radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting='GCV')\n", (27573, 27668), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((27905, 27959), 'numpy.testing.assert_equal', 'assert_equal', (['(mapfit_laplacian_array.lopt < 0.01)', '(True)'], {}), '(mapfit_laplacian_array.lopt < 0.01, True)\n', (27917, 27959), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((28131, 28168), 'numpy.testing.assert_equal', 'assert_equal', (['(lopt_array > 0.01)', '(True)'], {}), '(lopt_array > 0.01, True)\n', (28143, 28168), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((28346, 28390), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lopt_array', 'lopt_gcv', '(2)'], {}), '(lopt_array, lopt_gcv, 2)\n', (28365, 28390), False, 'from numpy.testing import assert_almost_equal, 
assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((28528, 28688), 'dipy.reconst.mapmri.mapmri_laplacian_reg_matrix', 'mapmri.mapmri_laplacian_reg_matrix', (['mapmod_laplacian_gcv.ind_mat', 'mu', 'mapmod_laplacian_gcv.S_mat', 'mapmod_laplacian_gcv.T_mat', 'mapmod_laplacian_gcv.U_mat'], {}), '(mapmod_laplacian_gcv.ind_mat, mu,\n mapmod_laplacian_gcv.S_mat, mapmod_laplacian_gcv.T_mat,\n mapmod_laplacian_gcv.U_mat)\n', (28562, 28688), False, 'from dipy.reconst import dti, mapmri\n'), ((29014, 29081), 'numpy.testing.assert_equal', 'assert_equal', (['(laplacian_norm_laplacian < laplacian_norm_unreg)', '(True)'], {}), '(laplacian_norm_laplacian < laplacian_norm_unreg, True)\n', (29026, 29081), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((29139, 29280), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(False)', 'laplacian_weighting': 'weight_array', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=False,\n laplacian_weighting=weight_array, anisotropic_scaling=False)\n', (29150, 29280), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((29399, 29539), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': 'weight_array', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting=weight_array, anisotropic_scaling=False)\n', (29410, 29539), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((29686, 29819), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': '"""GCV"""', 'anisotropic_scaling': '(False)'}), "(gtab, 
radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting='GCV', anisotropic_scaling=False)\n", (29697, 29819), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((30091, 30145), 'numpy.testing.assert_equal', 'assert_equal', (['(mapfit_laplacian_array.lopt < 0.01)', '(True)'], {}), '(mapfit_laplacian_array.lopt < 0.01, True)\n', (30103, 30145), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((30317, 30354), 'numpy.testing.assert_equal', 'assert_equal', (['(lopt_array > 0.01)', '(True)'], {}), '(lopt_array > 0.01, True)\n', (30329, 30354), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((30532, 30576), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lopt_array', 'lopt_gcv', '(2)'], {}), '(lopt_array, lopt_gcv, 2)\n', (30551, 30576), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((30714, 30779), 'dipy.reconst.mapmri.mapmri_isotropic_laplacian_reg_matrix', 'mapmri.mapmri_isotropic_laplacian_reg_matrix', (['radial_order', 'mu[0]'], {}), '(radial_order, mu[0])\n', (30758, 30779), False, 'from dipy.reconst import dti, mapmri\n'), ((31105, 31172), 'numpy.testing.assert_equal', 'assert_equal', (['(laplacian_norm_laplacian < laplacian_norm_unreg)', '(True)'], {}), '(laplacian_norm_laplacian < laplacian_norm_unreg, True)\n', (31117, 31172), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((31223, 31244), 'dipy.data.get_gtab_taiwan_dsi', 'get_gtab_taiwan_dsi', ([], {}), '()\n', (31242, 31244), False, 'from dipy.data import get_gtab_taiwan_dsi\n'), ((31291, 31317), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (31301, 31317), False, 
'from dipy.data import get_sphere\n'), ((31543, 31648), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': '(0.01)'}), '(gtab, radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting=0.01)\n', (31554, 31648), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((31728, 31749), 'dipy.core.subdivide_octahedron.create_unit_sphere', 'create_unit_sphere', (['(5)'], {}), '(5)\n', (31746, 31749), False, 'from dipy.core.subdivide_octahedron import create_unit_sphere\n'), ((31833, 31871), 'dipy.direction.peaks.peak_directions', 'peak_directions', (['odf', 'sphere', '(0.35)', '(25)'], {}), '(odf, sphere, 0.35, 25)\n', (31848, 31871), False, 'from dipy.direction.peaks import peak_directions\n'), ((32073, 32112), 'dipy.direction.peaks.peak_directions', 'peak_directions', (['odf', 'sphere2', '(0.35)', '(25)'], {}), '(odf, sphere2, 0.35, 25)\n', (32088, 32112), False, 'from dipy.direction.peaks import peak_directions\n'), ((32257, 32286), 'dipy.reconst.tests.test_dsi.sticks_and_ball_dummies', 'sticks_and_ball_dummies', (['gtab'], {}), '(gtab)\n', (32280, 32286), False, 'from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies\n'), ((32821, 32953), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_regularization': '(True)', 'laplacian_weighting': '(0.01)', 'anisotropic_scaling': '(False)'}), '(gtab, radial_order=radial_order, laplacian_regularization=True,\n laplacian_weighting=0.01, anisotropic_scaling=False)\n', (32832, 32953), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((33131, 33186), 'dipy.reconst.shm.sh_to_sf', 'sh_to_sf', (['odf_sh', 'sphere', 'radial_order'], {'basis_type': 'None'}), '(odf_sh, sphere, radial_order, basis_type=None)\n', (33139, 33186), False, 'from dipy.reconst.shm import sh_to_sf\n'), ((33191, 33232), 
'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['odf', 'odf_from_sh', '(10)'], {}), '(odf, odf_from_sh, 10)\n', (33210, 33232), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((33266, 33284), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (33282, 33284), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((6995, 7018), 'numpy.ones', 'np.ones', (['(3, 3, 3, 203)'], {}), '((3, 3, 3, 203))\n', (7002, 7018), True, 'import numpy as np\n'), ((7239, 7279), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (7258, 7279), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((8869, 9014), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': '(0.0001)', 'positivity_constraint': '(True)', 'anisotropic_scaling': '(False)', 'pos_radius': '(2)'}), '(gtab, radial_order=radial_order, laplacian_weighting=0.0001,\n positivity_constraint=True, anisotropic_scaling=False, pos_radius=2)\n', (8880, 9014), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((9338, 9378), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(3)'], {}), '(nmse_signal, 0.0, 3)\n', (9357, 9378), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((9452, 9595), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'radial_order', 'laplacian_weighting': 'None', 'positivity_constraint': '(True)', 'anisotropic_scaling': '(False)', 'pos_radius': '(2)'}), '(gtab, radial_order=radial_order, laplacian_weighting=None,\n 
positivity_constraint=True, anisotropic_scaling=False, pos_radius=2)\n', (9463, 9595), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((9919, 9959), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nmse_signal', '(0.0)', '(2)'], {}), '(nmse_signal, 0.0, 2)\n', (9938, 9959), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_equal, run_module_suite, assert_raises\n'), ((11287, 11298), 'time.time', 'time.time', ([], {}), '()\n', (11296, 11298), False, 'import time\n'), ((11435, 11446), 'time.time', 'time.time', ([], {}), '()\n', (11444, 11446), False, 'import time\n'), ((11524, 11565), 'numpy.all', 'np.all', (['(mapf_scale_stat_reg_stat.mu == mu)'], {}), '(mapf_scale_stat_reg_stat.mu == mu)\n', (11530, 11565), True, 'import numpy as np\n'), ((12263, 12332), 'dipy.reconst.mapmri.MapmriModel', 'MapmriModel', (['gtab'], {'radial_order': 'order', 'laplacian_regularization': '(False)'}), '(gtab, radial_order=order, laplacian_regularization=False)\n', (12274, 12332), False, 'from dipy.reconst.mapmri import MapmriModel, mapmri_index_matrix\n'), ((12462, 12491), 'numpy.mean', 'np.mean', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (12469, 12491), True, 'import numpy as np\n'), ((18129, 18160), 'numpy.sqrt', 'np.sqrt', (['(2 * l1 * l2 * l3 * tau)'], {}), '(2 * l1 * l2 * l3 * tau)\n', (18136, 18160), True, 'import numpy as np\n'), ((18437, 18467), 'numpy.dot', 'np.dot', (['coef', 'laplacian_matrix'], {}), '(coef, laplacian_matrix)\n', (18443, 18467), True, 'import numpy as np\n'), ((19173, 19204), 'numpy.sqrt', 'np.sqrt', (['(2 * l1 * l2 * l3 * tau)'], {}), '(2 * l1 * l2 * l3 * tau)\n', (19180, 19204), True, 'import numpy as np\n'), ((19450, 19480), 'numpy.dot', 'np.dot', (['coef', 'laplacian_matrix'], {}), '(coef, laplacian_matrix)\n', (19456, 19480), True, 'import numpy as np\n'), ((20109, 20134), 'numpy.sqrt', 'np.sqrt', (['(gtab.bvals / tau)'], {}), '(gtab.bvals / tau)\n', 
(20116, 20134), True, 'import numpy as np\n'), ((20482, 20505), 'numpy.linalg.pinv', 'np.linalg.pinv', (['M_aniso'], {}), '(M_aniso)\n', (20496, 20505), True, 'import numpy as np\n'), ((20532, 20553), 'numpy.linalg.pinv', 'np.linalg.pinv', (['M_iso'], {}), '(M_iso)\n', (20546, 20553), True, 'import numpy as np\n'), ((21063, 21093), 'numpy.ones_like', 'np.ones_like', (['pdf_fitted_aniso'], {}), '(pdf_fitted_aniso)\n', (21075, 21093), True, 'import numpy as np\n'), ((22077, 22119), 'numpy.dot', 'np.dot', (['coef_aniso', 'laplacian_matrix_aniso'], {}), '(coef_aniso, laplacian_matrix_aniso)\n', (22083, 22119), True, 'import numpy as np\n'), ((22153, 22191), 'numpy.dot', 'np.dot', (['coef_iso', 'laplacian_matrix_iso'], {}), '(coef_iso, laplacian_matrix_iso)\n', (22159, 22191), True, 'import numpy as np\n'), ((22387, 22412), 'numpy.sqrt', 'np.sqrt', (['(gtab.bvals / tau)'], {}), '(gtab.bvals / tau)\n', (22394, 22412), True, 'import numpy as np\n'), ((23123, 23230), 'dipy.sims.voxel.cylinders_and_ball_soderman', 'cylinders_and_ball_soderman', (['gtab', 'tau'], {'radii': '[radius_gt]', 'snr': 'None', 'angles': '[(0, 90)]', 'fractions': '[100]'}), '(gtab, tau, radii=[radius_gt], snr=None, angles=\n [(0, 90)], fractions=[100])\n', (23150, 23230), False, 'from dipy.sims.voxel import MultiTensor, multi_tensor_pdf, single_tensor, cylinders_and_ball_soderman\n'), ((28866, 28902), 'numpy.dot', 'np.dot', (['coef_unreg', 'laplacian_matrix'], {}), '(coef_unreg, laplacian_matrix)\n', (28872, 28902), True, 'import numpy as np\n'), ((28967, 29007), 'numpy.dot', 'np.dot', (['coef_laplacian', 'laplacian_matrix'], {}), '(coef_laplacian, laplacian_matrix)\n', (28973, 29007), True, 'import numpy as np\n'), ((30957, 30993), 'numpy.dot', 'np.dot', (['coef_unreg', 'laplacian_matrix'], {}), '(coef_unreg, laplacian_matrix)\n', (30963, 30993), True, 'import numpy as np\n'), ((31058, 31098), 'numpy.dot', 'np.dot', (['coef_laplacian', 'laplacian_matrix'], {}), '(coef_laplacian, 
laplacian_matrix)\n', (31064, 31098), True, 'import numpy as np\n'), ((31941, 31990), 'dipy.core.sphere_stats.angular_similarity', 'angular_similarity', (['directions', 'golden_directions'], {}), '(directions, golden_directions)\n', (31959, 31990), False, 'from dipy.core.sphere_stats import angular_similarity\n'), ((32182, 32231), 'dipy.core.sphere_stats.angular_similarity', 'angular_similarity', (['directions', 'golden_directions'], {}), '(directions, golden_directions)\n', (32200, 32231), False, 'from dipy.core.sphere_stats import angular_similarity\n'), ((32459, 32498), 'dipy.direction.peaks.peak_directions', 'peak_directions', (['odf', 'sphere2', '(0.35)', '(25)'], {}), '(odf, sphere2, 0.35, 25)\n', (32474, 32498), False, 'from dipy.direction.peaks import peak_directions\n'), ((1095, 1105), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1102, 1105), True, 'import numpy as np\n'), ((1108, 1120), 'scipy.misc.factorial', 'factorial', (['n'], {}), '(n)\n', (1117, 1120), False, 'from scipy.misc import factorial\n'), ((6572, 6600), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (6578, 6600), True, 'import numpy as np\n'), ((6881, 6911), 'numpy.ones', 'np.ones', (['(3, 3, 3, S.shape[0])'], {}), '((3, 3, 3, S.shape[0]))\n', (6888, 6911), True, 'import numpy as np\n'), ((7633, 7661), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (7639, 7661), True, 'import numpy as np\n'), ((8132, 8160), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (8138, 8160), True, 'import numpy as np\n'), ((8683, 8711), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (8689, 8711), True, 'import numpy as np\n'), ((12578, 12598), 'numpy.diff', 'np.diff', (['error_array'], {}), '(error_array)\n', (12585, 12598), True, 'import numpy as np\n'), ((15088, 15119), 'numpy.sum', 'np.sum', (['((pdf_mt - pdf_map) ** 2)'], {}), '((pdf_mt - pdf_map) ** 2)\n', 
(15094, 15119), True, 'import numpy as np\n'), ((15659, 15684), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l1 * tau)'], {}), '(np.pi * l1 * tau)\n', (15666, 15684), True, 'import numpy as np\n'), ((15767, 15792), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l3 * tau)'], {}), '(np.pi * l3 * tau)\n', (15774, 15792), True, 'import numpy as np\n'), ((16965, 16990), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l1 * tau)'], {}), '(np.pi * l1 * tau)\n', (16972, 16990), True, 'import numpy as np\n'), ((17073, 17098), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l3 * tau)'], {}), '(np.pi * l3 * tau)\n', (17080, 17098), True, 'import numpy as np\n'), ((1131, 1149), 'scipy.special.gamma', 'gamma', (['(1 + n / 2.0)'], {}), '(1 + n / 2.0)\n', (1136, 1149), False, 'from scipy.special import gamma\n'), ((7156, 7193), 'numpy.sum', 'np.sum', (['((S - S_reconst[0, 0, 0]) ** 2)'], {}), '((S - S_reconst[0, 0, 0]) ** 2)\n', (7162, 7193), True, 'import numpy as np\n'), ((9288, 9316), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (9294, 9316), True, 'import numpy as np\n'), ((9869, 9897), 'numpy.sum', 'np.sum', (['((S - S_reconst) ** 2)'], {}), '((S - S_reconst) ** 2)\n', (9875, 9897), True, 'import numpy as np\n'), ((15720, 15745), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l2 * tau)'], {}), '(np.pi * l2 * tau)\n', (15727, 15745), True, 'import numpy as np\n'), ((17026, 17051), 'numpy.sqrt', 'np.sqrt', (['(np.pi * l2 * tau)'], {}), '(np.pi * l2 * tau)\n', (17033, 17051), True, 'import numpy as np\n'), ((32654, 32662), 'dipy.reconst.odf.gfa', 'gfa', (['odf'], {}), '(odf)\n', (32657, 32662), False, 'from dipy.reconst.odf import gfa\n'), ((1216, 1228), 'scipy.misc.factorial', 'factorial', (['n'], {}), '(n)\n', (1225, 1228), False, 'from scipy.misc import factorial\n'), ((1811, 1850), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(0)', 'x', 'diffusivity'], {}), '(0, x, diffusivity)\n', (1831, 1850), False, 'from dipy.reconst import dti, mapmri\n'), ((1888, 1927), 
'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(2)', 'x', 'diffusivity'], {}), '(2, x, diffusivity)\n', (1908, 1927), False, 'from dipy.reconst import dti, mapmri\n'), ((2041, 2080), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(2)', 'x', 'diffusivity'], {}), '(2, x, diffusivity)\n', (2061, 2080), False, 'from dipy.reconst import dti, mapmri\n'), ((2118, 2157), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(4)', 'x', 'diffusivity'], {}), '(4, x, diffusivity)\n', (2138, 2157), False, 'from dipy.reconst import dti, mapmri\n'), ((2271, 2310), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(4)', 'x', 'diffusivity'], {}), '(4, x, diffusivity)\n', (2291, 2310), False, 'from dipy.reconst import dti, mapmri\n'), ((2348, 2387), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(6)', 'x', 'diffusivity'], {}), '(6, x, diffusivity)\n', (2368, 2387), False, 'from dipy.reconst import dti, mapmri\n'), ((2501, 2540), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(6)', 'x', 'diffusivity'], {}), '(6, x, diffusivity)\n', (2521, 2540), False, 'from dipy.reconst import dti, mapmri\n'), ((2578, 2617), 'dipy.reconst.mapmri.mapmri_phi_1d', 'mapmri.mapmri_phi_1d', (['(8)', 'x', 'diffusivity'], {}), '(8, x, diffusivity)\n', (2598, 2617), False, 'from dipy.reconst import dti, mapmri\n'), ((3458, 3523), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(1)', '(0)', 'diffusivity', 'q'], {}), '(1, 0, diffusivity, q)\n', (3501, 3523), False, 'from dipy.reconst import dti, mapmri\n'), ((3583, 3648), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(2)', '(0)', 'diffusivity', 'q'], {}), '(2, 0, diffusivity, q)\n', (3626, 3648), False, 'from dipy.reconst import dti, mapmri\n'), ((3767, 3832), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 
'mapmri.mapmri_isotropic_radial_signal_basis', (['(2)', '(0)', 'diffusivity', 'q'], {}), '(2, 0, diffusivity, q)\n', (3810, 3832), False, 'from dipy.reconst import dti, mapmri\n'), ((3892, 3957), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(3)', '(0)', 'diffusivity', 'q'], {}), '(3, 0, diffusivity, q)\n', (3935, 3957), False, 'from dipy.reconst import dti, mapmri\n'), ((4076, 4141), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(3)', '(0)', 'diffusivity', 'q'], {}), '(3, 0, diffusivity, q)\n', (4119, 4141), False, 'from dipy.reconst import dti, mapmri\n'), ((4201, 4266), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(4)', '(0)', 'diffusivity', 'q'], {}), '(4, 0, diffusivity, q)\n', (4244, 4266), False, 'from dipy.reconst import dti, mapmri\n'), ((4385, 4450), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(4)', '(0)', 'diffusivity', 'q'], {}), '(4, 0, diffusivity, q)\n', (4428, 4450), False, 'from dipy.reconst import dti, mapmri\n'), ((4510, 4575), 'dipy.reconst.mapmri.mapmri_isotropic_radial_signal_basis', 'mapmri.mapmri_isotropic_radial_signal_basis', (['(5)', '(0)', 'diffusivity', 'q'], {}), '(5, 0, diffusivity, q)\n', (4553, 4575), False, 'from dipy.reconst import dti, mapmri\n')] |
import os
import json
import numpy as np
import torch
import torch.nn.functional as F
import pickle
from fairseq.data import FairseqDataset, data_utils
from torch.utils.data.dataloader import default_collate
import sys
class GlyDataset(FairseqDataset):
    """Fairseq dataset over a pickled dict mapping key -> (features, label).

    The pickle file contains a dict whose values are 2-tuples of numpy
    arrays: ``(features, label)`` where ``features`` has shape
    ``(num_objects, feat_dim)`` — presumably; confirm against the pickler.
    Features are returned as LongTensors, labels as-is.
    """

    def __init__(self, file, shuffle=False):
        """Load the whole pickle into memory.

        Args:
            file: path to the pickle file.
            shuffle: if True, ``ordered_indices`` returns a random
                permutation instead of sequential order.
        """
        self.file = file
        # Bug fix: the flag was previously dropped, making
        # ordered_indices() raise AttributeError.
        self.shuffle = shuffle
        with open(self.file, 'rb') as inFH:
            self.data = pickle.load(inFH)
        self.keys = list(self.data.keys())

    def __getitem__(self, index):
        """Return (features LongTensor, label tensor) for one sample."""
        features, label = self.data[self.keys[index]][0], self.data[self.keys[index]][1]
        return torch.from_numpy(features).long(), torch.from_numpy(label)

    def __len__(self):
        return len(self.data)

    def num_tokens(self, index):
        # Number of objects (rows) in the feature matrix of this sample.
        return self.data[self.keys[index]][0].shape[0]

    def size(self, index):
        # Bug fix: referenced bare `data` (NameError) instead of self.data.
        return self.data[self.keys[index]][0].shape

    def ordered_indices(self):
        """Indices in iteration order; random permutation when shuffling."""
        if self.shuffle:
            return np.random.permutation(len(self))
        return np.arange(len(self))

    def collater(self, samples):
        """Collate a list of (features, label) pairs into a padded batch.

        Feature tensors are zero-padded along dim 0 to the largest
        object count in the batch, then stacked with default_collate.
        """
        num_objects = [features.shape[0] for features, _ in samples]
        max_objects = max(num_objects)
        feature_samples_padded = []
        label_samples_padded = []
        for (features, label), n in zip(samples, num_objects):
            # pad=[0, 0, 0, max_objects - n] pads only the first
            # dimension (rows) on the bottom, leaving columns intact.
            features_padded = F.pad(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)
            feature_samples_padded.append(features_padded)
            label_samples_padded.append(label)
        return default_collate(feature_samples_padded), default_collate(label_samples_padded)
class GlyDataset_ml(FairseqDataset):
def __init__(self, file,shuffle=False):
self.file = file
with open(self.file, 'rb') as inFH:
self.data = pickle.load(inFH)
self.keys=list(self.data.keys())
def __getitem__(self, index):
tdata=torch.from_numpy(self.data[self.keys[index]][0]).long()
lilabel=torch.from_numpy(self.data[self.keys[index]][1])
lk0=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][0] if i>-1]).unsqueeze(0)
lk1=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][1] if i>-1]).unsqueeze(0)
lk2=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][2] if i>-1]).unsqueeze(0)
lk3=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][3] if i>-1]).unsqueeze(0)
lk4=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][4] if i>-1]).unsqueeze(0)
lk5=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][2][5] if i>-1]).unsqueeze(0)
tlk0 = torch.zeros(lk0.size(0), 4).scatter_(1, lk0.type(torch.int64), 1.)
tlk1 = torch.zeros(lk1.size(0), 6).scatter_(1, lk1.type(torch.int64), 1.)
tlk2 = torch.zeros(lk2.size(0), 28).scatter_(1, lk2.type(torch.int64), 1.)
tlk3 = torch.zeros(lk3.size(0), 84).scatter_(1, lk3.type(torch.int64), 1.)
tlk4 = torch.zeros(lk4.size(0), 206).scatter_(1, lk4.type(torch.int64), 1.)
tlk5 = torch.zeros(lk5.size(0), 408).scatter_(1, lk5.type(torch.int64), 1.)
binary_repr_v = np.vectorize(np.binary_repr)
cl=torch.from_numpy(np.array([list(b) for b in binary_repr_v(tdata[:,2],9)]).astype(np.int8))
return tdata,lilabel,tlk0,tlk1,tlk2,tlk3,tlk4,tlk5,cl
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return self.data[self.keys[index]][0].shape[0]
def size(self, index):
return data[self.keys[index]][0].shape
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
num_objects = [features.shape[0] for features,_,_,_,_,_,_,_,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
label_samples_padded = []
l0=[]
l1=[]
l2=[]
l3=[]
l4=[]
l5=[]
cl=[]
for (features,label,ll0,ll1,ll2,ll3,ll4,ll5,cll), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
label_samples_padded.append(label)
l0.append(ll0)
l1.append(ll1)
l2.append(ll2)
l3.append(ll3)
l4.append(ll4)
l5.append(ll5)
pcll= F.pad(cll, pad=[0,0,0, max_objects-n], mode='constant', value=0)
cl.append(pcll)
return default_collate(feature_samples_padded),default_collate(label_samples_padded),default_collate(l0),default_collate(l1),default_collate(l2),default_collate(l3),default_collate(l4),default_collate(l5),default_collate(cl)
class GlyDataset_cfg(FairseqDataset):
def __init__(self, file,shuffle=False):
self.file = file
with open('encoding_pickles/CFG_glycans_535_feb9.pickle', 'rb') as inFH:
self.data = pickle.load(inFH)
with open(self.file, 'rb') as inFH:
self.label = pickle.load(inFH)
self.keys=list(self.label.keys())
def __getitem__(self, index):
return torch.from_numpy(self.data[self.keys[index]][0]).long(),torch.from_numpy(self.label[self.keys[index]])
def __len__(self):
return len(self.label)
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
num_objects = [features.shape[0] for features,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
label_samples_padded = []
for (features,label), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
label_samples_padded.append(label)
return default_collate(feature_samples_padded),default_collate(label_samples_padded)
class GlyDataset_cfg_cv(FairseqDataset):
def __init__(self, file,shuffle=False):
self.file = file
# with open('encoding_pickles/CFG_glycans_535_feb9.pickle', 'rb') as inFH:
with open('encoding_pickles/CFG_glycans_535_apr12.pickle', 'rb') as inFH:
self.data = pickle.load(inFH)
with open(self.file, 'rb') as inFH:
self.label = pickle.load(inFH)
self.keys=list(self.label.keys())
def __getitem__(self, index):
return torch.from_numpy(self.data[self.keys[index]][0]).long(),torch.from_numpy(np.asarray(self.label[self.keys[index]]))
def __len__(self):
return len(self.label)
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
# num_objects = [features.shape[0] for features, _ in samples]
num_objects = [features.shape[0] for features,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
label_samples_padded = []
for (features,label), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
label_samples_padded.append(label)
return default_collate(feature_samples_padded),default_collate(label_samples_padded)
class GlyDataset_tax_tune(FairseqDataset):
def __init__(self, file,level=0,shuffle=False):
self.file = file
with open(self.file, 'rb') as inFH:
self.data = pickle.load(inFH)
self.keys=list(self.data.keys())
self.l=level
def __getitem__(self, index):
tdata=torch.from_numpy(self.data[self.keys[index]][0]).long()
lk0=torch.FloatTensor([int(i) for i in self.data[self.keys[index]][1] if i>-1]).unsqueeze(0)
ls=[4,3,28,84,206,408]
tlk0 = torch.zeros(lk0.size(0), ls[self.l]).scatter_(1, lk0.type(torch.int64), 1.)
return tdata,tlk0
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return self.data[self.keys[index]][0].shape[0]
def size(self, index):
return data[self.keys[index]][0].shape
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
num_objects = [features.shape[0] for features,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
l0=[]
for (features,ll0), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
l0.append(ll0)
return default_collate(feature_samples_padded),default_collate(l0)
class GlyDataset_tax_tune_single(FairseqDataset):
def __init__(self, file,level=0,shuffle=False):
self.file = file
with open(self.file, 'rb') as inFH:
self.data = pickle.load(inFH)
self.keys=list(self.data.keys())
self.l=level
def __getitem__(self, index):
tdata=torch.from_numpy(self.data[self.keys[index]][0]).long()
lk0=torch.FloatTensor([self.data[self.keys[index]][1][0]])
return tdata,lk0.long()
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return self.data[self.keys[index]][0].shape[0]
def size(self, index):
return data[self.keys[index]][0].shape
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
num_objects = [features.shape[0] for features,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
l0=[]
for (features,ll0), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
l0.append(ll0)
return default_collate(feature_samples_padded),default_collate(l0)
class GlyDataset_tax_tune_single_jcgg(FairseqDataset):
def __init__(self, file,level=0,shuffle=False):
self.file = file
with open(self.file, 'rb') as inFH:
self.data = pickle.load(inFH)
self.keys=list(self.data.keys())
self.l=level
def __getitem__(self, index):
tdata=torch.from_numpy(self.data[self.keys[index]][0]).long()
lk0=torch.FloatTensor([self.data[self.keys[index]][1]])
return tdata,lk0
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return self.data[self.keys[index]][0].shape[0]
def size(self, index):
return data[self.keys[index]][0].shape
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices
def collater(self, samples):
num_objects = [features.shape[0] for features,_ in samples]
max_objects = max(num_objects)
feature_samples_padded = []
l0=[]
for (features,ll0), n in zip(samples, num_objects):
features_padded = F.pad(features, pad=[0,0,0, max_objects-n], mode='constant', value=0)
feature_samples_padded.append(features_padded)
l0.append(ll0)
return default_collate(feature_samples_padded),default_collate(l0) | [
"torch.utils.data.dataloader.default_collate",
"pickle.load",
"numpy.asarray",
"torch.from_numpy",
"torch.nn.functional.pad",
"numpy.vectorize",
"torch.FloatTensor"
] | [((2063, 2111), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][1]'], {}), '(self.data[self.keys[index]][1])\n', (2079, 2111), False, 'import torch\n'), ((3276, 3304), 'numpy.vectorize', 'np.vectorize', (['np.binary_repr'], {}), '(np.binary_repr)\n', (3288, 3304), True, 'import numpy as np\n'), ((10061, 10115), 'torch.FloatTensor', 'torch.FloatTensor', (['[self.data[self.keys[index]][1][0]]'], {}), '([self.data[self.keys[index]][1][0]])\n', (10078, 10115), False, 'import torch\n'), ((11556, 11607), 'torch.FloatTensor', 'torch.FloatTensor', (['[self.data[self.keys[index]][1]]'], {}), '([self.data[self.keys[index]][1]])\n', (11573, 11607), False, 'import torch\n'), ((394, 411), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (405, 411), False, 'import pickle\n'), ((576, 624), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][1]'], {}), '(self.data[self.keys[index]][1])\n', (592, 624), False, 'import torch\n'), ((1428, 1501), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (1433, 1501), True, 'import torch.nn.functional as F\n'), ((1620, 1659), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (1635, 1659), False, 'from torch.utils.data.dataloader import default_collate\n'), ((1660, 1697), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['label_samples_padded'], {}), '(label_samples_padded)\n', (1675, 1697), False, 'from torch.utils.data.dataloader import default_collate\n'), ((1883, 1900), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (1894, 1900), False, 'import pickle\n'), ((4350, 4423), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, 
max_objects - n], mode='constant', value=0)\n", (4355, 4423), True, 'import torch.nn.functional as F\n'), ((4706, 4774), 'torch.nn.functional.pad', 'F.pad', (['cll'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(cll, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (4711, 4774), True, 'import torch.nn.functional as F\n'), ((4827, 4866), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (4842, 4866), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4867, 4904), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['label_samples_padded'], {}), '(label_samples_padded)\n', (4882, 4904), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4905, 4924), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l0'], {}), '(l0)\n', (4920, 4924), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4925, 4944), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l1'], {}), '(l1)\n', (4940, 4944), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4945, 4964), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l2'], {}), '(l2)\n', (4960, 4964), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4965, 4984), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l3'], {}), '(l3)\n', (4980, 4984), False, 'from torch.utils.data.dataloader import default_collate\n'), ((4985, 5004), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l4'], {}), '(l4)\n', (5000, 5004), False, 'from torch.utils.data.dataloader import default_collate\n'), ((5005, 5024), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l5'], {}), '(l5)\n', (5020, 5024), False, 'from torch.utils.data.dataloader import default_collate\n'), ((5025, 5044), 
'torch.utils.data.dataloader.default_collate', 'default_collate', (['cl'], {}), '(cl)\n', (5040, 5044), False, 'from torch.utils.data.dataloader import default_collate\n'), ((5276, 5293), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (5287, 5293), False, 'import pickle\n'), ((5376, 5393), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (5387, 5393), False, 'import pickle\n'), ((5570, 5616), 'torch.from_numpy', 'torch.from_numpy', (['self.label[self.keys[index]]'], {}), '(self.label[self.keys[index]])\n', (5586, 5616), False, 'import torch\n'), ((6182, 6255), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (6187, 6255), True, 'import torch.nn.functional as F\n'), ((6374, 6413), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (6389, 6413), False, 'from torch.utils.data.dataloader import default_collate\n'), ((6414, 6451), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['label_samples_padded'], {}), '(label_samples_padded)\n', (6429, 6451), False, 'from torch.utils.data.dataloader import default_collate\n'), ((6770, 6787), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (6781, 6787), False, 'import pickle\n'), ((6870, 6887), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (6881, 6887), False, 'import pickle\n'), ((7759, 7832), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (7764, 7832), True, 'import torch.nn.functional as F\n'), ((7951, 7990), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (7966, 7990), False, 'from 
torch.utils.data.dataloader import default_collate\n'), ((7991, 8028), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['label_samples_padded'], {}), '(label_samples_padded)\n', (8006, 8028), False, 'from torch.utils.data.dataloader import default_collate\n'), ((8222, 8239), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (8233, 8239), False, 'import pickle\n'), ((9402, 9475), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (9407, 9475), True, 'import torch.nn.functional as F\n'), ((9599, 9638), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (9614, 9638), False, 'from torch.utils.data.dataloader import default_collate\n'), ((9639, 9658), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l0'], {}), '(l0)\n', (9654, 9658), False, 'from torch.utils.data.dataloader import default_collate\n'), ((9864, 9881), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (9875, 9881), False, 'import pickle\n'), ((10892, 10965), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (10897, 10965), True, 'import torch.nn.functional as F\n'), ((11089, 11128), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (11104, 11128), False, 'from torch.utils.data.dataloader import default_collate\n'), ((11129, 11148), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l0'], {}), '(l0)\n', (11144, 11148), False, 'from torch.utils.data.dataloader import default_collate\n'), ((11359, 11376), 'pickle.load', 'pickle.load', (['inFH'], {}), '(inFH)\n', (11370, 
11376), False, 'import pickle\n'), ((12377, 12450), 'torch.nn.functional.pad', 'F.pad', (['features'], {'pad': '[0, 0, 0, max_objects - n]', 'mode': '"""constant"""', 'value': '(0)'}), "(features, pad=[0, 0, 0, max_objects - n], mode='constant', value=0)\n", (12382, 12450), True, 'import torch.nn.functional as F\n'), ((12574, 12613), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['feature_samples_padded'], {}), '(feature_samples_padded)\n', (12589, 12613), False, 'from torch.utils.data.dataloader import default_collate\n'), ((12614, 12633), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['l0'], {}), '(l0)\n', (12629, 12633), False, 'from torch.utils.data.dataloader import default_collate\n'), ((1991, 2039), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (2007, 2039), False, 'import torch\n'), ((7081, 7121), 'numpy.asarray', 'np.asarray', (['self.label[self.keys[index]]'], {}), '(self.label[self.keys[index]])\n', (7091, 7121), True, 'import numpy as np\n'), ((8351, 8399), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (8367, 8399), False, 'import torch\n'), ((9993, 10041), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (10009, 10041), False, 'import torch\n'), ((11488, 11536), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (11504, 11536), False, 'import torch\n'), ((520, 568), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (536, 568), False, 'import torch\n'), ((5514, 5562), 'torch.from_numpy', 'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (5530, 5562), False, 'import torch\n'), ((7008, 7056), 'torch.from_numpy', 
'torch.from_numpy', (['self.data[self.keys[index]][0]'], {}), '(self.data[self.keys[index]][0])\n', (7024, 7056), False, 'import torch\n')] |
from unittest import TestCase
import qelos as q
from torch.autograd import Variable
import torch
from torch import nn
import numpy as np
class TestWordEmb(TestCase):
def test_creation_simple(self):
dic = dict(zip(map(chr, range(97, 122)), range(122-97)))
m = q.WordEmb(10, worddic=dic)
embedding, _ = m(Variable(torch.LongTensor([0,1,2])))
self.assertEqual(embedding.size(), (3, 10))
trueemb = m.weight.cpu().detach().numpy()[0]
self.assertTrue(np.allclose(trueemb, embedding[0].detach().numpy()))
def test_creation_masked(self):
dic = dict(zip(map(chr, range(97, 122)), range(1, 122-97+1)))
dic[q.WordEmb.masktoken] = 0
m = q.WordEmb(10, worddic=dic)
embedding, mask = m(Variable(torch.LongTensor([0, 1, 2])))
self.assertEqual(embedding.size(), (3, 10))
trueemb = m.weight.cpu().detach().numpy()[1]
self.assertTrue(np.allclose(trueemb, embedding[1].detach().numpy()))
self.assertTrue(np.allclose(embedding[0].detach().numpy(), np.zeros((10,))))
print(mask)
self.assertTrue(np.allclose(mask.detach().numpy(), [0,1,1]))
class TestGlove(TestCase):
def setUp(self):
path = "../data/glove/miniglove.50d"
self.path = path
self.glove = q.WordEmb.load_pretrained_path(path)
self.thevector = np.asarray([ 4.18000013e-01, 2.49679998e-01, -4.12420005e-01,
1.21699996e-01, 3.45270008e-01, -4.44569997e-02,
-4.96879995e-01, -1.78619996e-01, -6.60229998e-04,
-6.56599998e-01, 2.78430015e-01, -1.47670001e-01,
-5.56770027e-01, 1.46579996e-01, -9.50950012e-03,
1.16579998e-02, 1.02040000e-01, -1.27920002e-01,
-8.44299972e-01, -1.21809997e-01, -1.68009996e-02,
-3.32789987e-01, -1.55200005e-01, -2.31309995e-01,
-1.91809997e-01, -1.88230002e+00, -7.67459989e-01,
9.90509987e-02, -4.21249986e-01, -1.95260003e-01,
4.00710011e+00, -1.85939997e-01, -5.22870004e-01,
-3.16810012e-01, 5.92130003e-04, 7.44489999e-03,
1.77780002e-01, -1.58969998e-01, 1.20409997e-02,
-5.42230010e-02, -2.98709989e-01, -1.57490000e-01,
-3.47579986e-01, -4.56370004e-02, -4.42510009e-01,
1.87849998e-01, 2.78489990e-03, -1.84110001e-01,
-1.15139998e-01, -7.85809994e-01])
def test_loaded(self):
thevector = self.glove.weight[self.glove.D["the"]].detach().numpy()
self.assertEqual(self.glove.D["the"], 0)
print(np.linalg.norm(thevector - self.thevector))
self.assertTrue(np.allclose(thevector, self.thevector))
self.assertEqual(self.glove.weight.size(), (4000, 50))
def test_loaded_selection(self):
D = "<MASK> <RARE> cat dog person earlgreytea the".split()
D = dict(zip(D, range(len(D))))
glove = q.WordEmb.load_pretrained_path(self.path, selectD=D)
thevector = glove.weight[glove.D["the"]].detach().numpy()
print(glove.D["the"])
print(np.linalg.norm(thevector - self.thevector))
self.assertTrue(np.allclose(thevector, self.thevector))
print(glove.weight[0][:10])
class TestSwitchedWordEmb(TestCase):
def test_it(self):
D = "<MASK> <RARE> cat dog person earlgreytea the".split()
D = dict(zip(D, range(len(D))))
base = q.WordEmb(50, worddic=D)
switched = q.SwitchedWordEmb(base)
words = "cat dog person".split()
over = q.WordEmb(50, worddic=D)
switched.override(over, selectwords=words)
x = torch.arange(0, len(D)).unsqueeze(0)
y, ymask = switched(x)
ybase, _ = base(x)
yover, _ = over(x)
ymix = torch.tensor([0,0,1,1,1,0,0]).float().unsqueeze(0).unsqueeze(-1)
y_ref = ybase * (1 - ymix) + yover * ymix
print((y - y_ref).norm())
self.assertTrue(np.allclose(y.detach().numpy(), y_ref.detach().numpy()))
print(y.size())
def test_it_with_glove(self):
path = "../data/glove/miniglove.50d"
D = "<MASK> <RARE> cat dog person earlgreytea the".split()
D = dict(zip(D, range(len(D))))
base = q.WordEmb(50, worddic=D)
switched = q.SwitchedWordEmb(base)
words = "cat dog person".split()
over = q.WordEmb.load_pretrained_path(path, selectD=D)
switched.override(over, selectwords=words)
x = torch.arange(0, len(D)).unsqueeze(0)
y, ymask = switched(x)
ybase, _ = base(x)
yover, _ = over(x)
ymix = torch.tensor([0, 0, 0, 1, 1, 0, 0]).float().unsqueeze(0).unsqueeze(-1)
y_ref = ybase * (1 - ymix) + yover * ymix
print((y - y_ref).norm())
self.assertTrue(np.allclose(y.detach().numpy(), y_ref.detach().numpy()))
print(y.size())
| [
"numpy.allclose",
"torch.LongTensor",
"qelos.SwitchedWordEmb",
"numpy.asarray",
"qelos.WordEmb.load_pretrained_path",
"torch.tensor",
"qelos.WordEmb",
"numpy.zeros",
"numpy.linalg.norm"
] | [((281, 307), 'qelos.WordEmb', 'q.WordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (290, 307), True, 'import qelos as q\n'), ((708, 734), 'qelos.WordEmb', 'q.WordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (717, 734), True, 'import qelos as q\n'), ((1299, 1335), 'qelos.WordEmb.load_pretrained_path', 'q.WordEmb.load_pretrained_path', (['path'], {}), '(path)\n', (1329, 1335), True, 'import qelos as q\n'), ((1361, 2112), 'numpy.asarray', 'np.asarray', (['[0.418000013, 0.249679998, -0.412420005, 0.121699996, 0.345270008, -\n 0.0444569997, -0.496879995, -0.178619996, -0.000660229998, -0.656599998,\n 0.278430015, -0.147670001, -0.556770027, 0.146579996, -0.00950950012, \n 0.0116579998, 0.10204, -0.127920002, -0.844299972, -0.121809997, -\n 0.0168009996, -0.332789987, -0.155200005, -0.231309995, -0.191809997, -\n 1.88230002, -0.767459989, 0.0990509987, -0.421249986, -0.195260003, \n 4.00710011, -0.185939997, -0.522870004, -0.316810012, 0.000592130003, \n 0.00744489999, 0.177780002, -0.158969998, 0.0120409997, -0.054223001, -\n 0.298709989, -0.15749, -0.347579986, -0.0456370004, -0.442510009, \n 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -0.785809994]'], {}), '([0.418000013, 0.249679998, -0.412420005, 0.121699996, \n 0.345270008, -0.0444569997, -0.496879995, -0.178619996, -0.000660229998,\n -0.656599998, 0.278430015, -0.147670001, -0.556770027, 0.146579996, -\n 0.00950950012, 0.0116579998, 0.10204, -0.127920002, -0.844299972, -\n 0.121809997, -0.0168009996, -0.332789987, -0.155200005, -0.231309995, -\n 0.191809997, -1.88230002, -0.767459989, 0.0990509987, -0.421249986, -\n 0.195260003, 4.00710011, -0.185939997, -0.522870004, -0.316810012, \n 0.000592130003, 0.00744489999, 0.177780002, -0.158969998, 0.0120409997,\n -0.054223001, -0.298709989, -0.15749, -0.347579986, -0.0456370004, -\n 0.442510009, 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -\n 0.785809994])\n', (1371, 2112), True, 'import numpy as np\n'), ((2885, 
2937), 'qelos.WordEmb.load_pretrained_path', 'q.WordEmb.load_pretrained_path', (['self.path'], {'selectD': 'D'}), '(self.path, selectD=D)\n', (2915, 2937), True, 'import qelos as q\n'), ((3376, 3400), 'qelos.WordEmb', 'q.WordEmb', (['(50)'], {'worddic': 'D'}), '(50, worddic=D)\n', (3385, 3400), True, 'import qelos as q\n'), ((3420, 3443), 'qelos.SwitchedWordEmb', 'q.SwitchedWordEmb', (['base'], {}), '(base)\n', (3437, 3443), True, 'import qelos as q\n'), ((3500, 3524), 'qelos.WordEmb', 'q.WordEmb', (['(50)'], {'worddic': 'D'}), '(50, worddic=D)\n', (3509, 3524), True, 'import qelos as q\n'), ((4183, 4207), 'qelos.WordEmb', 'q.WordEmb', (['(50)'], {'worddic': 'D'}), '(50, worddic=D)\n', (4192, 4207), True, 'import qelos as q\n'), ((4227, 4250), 'qelos.SwitchedWordEmb', 'q.SwitchedWordEmb', (['base'], {}), '(base)\n', (4244, 4250), True, 'import qelos as q\n'), ((4307, 4354), 'qelos.WordEmb.load_pretrained_path', 'q.WordEmb.load_pretrained_path', (['path'], {'selectD': 'D'}), '(path, selectD=D)\n', (4337, 4354), True, 'import qelos as q\n'), ((2553, 2595), 'numpy.linalg.norm', 'np.linalg.norm', (['(thevector - self.thevector)'], {}), '(thevector - self.thevector)\n', (2567, 2595), True, 'import numpy as np\n'), ((2621, 2659), 'numpy.allclose', 'np.allclose', (['thevector', 'self.thevector'], {}), '(thevector, self.thevector)\n', (2632, 2659), True, 'import numpy as np\n'), ((3048, 3090), 'numpy.linalg.norm', 'np.linalg.norm', (['(thevector - self.thevector)'], {}), '(thevector - self.thevector)\n', (3062, 3090), True, 'import numpy as np\n'), ((3116, 3154), 'numpy.allclose', 'np.allclose', (['thevector', 'self.thevector'], {}), '(thevector, self.thevector)\n', (3127, 3154), True, 'import numpy as np\n'), ((342, 369), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (358, 369), False, 'import torch\n'), ((772, 799), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (788, 799), False, 'import torch\n'), ((1051, 
1066), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (1059, 1066), True, 'import numpy as np\n'), ((3727, 3762), 'torch.tensor', 'torch.tensor', (['[0, 0, 1, 1, 1, 0, 0]'], {}), '([0, 0, 1, 1, 1, 0, 0])\n', (3739, 3762), False, 'import torch\n'), ((4557, 4592), 'torch.tensor', 'torch.tensor', (['[0, 0, 0, 1, 1, 0, 0]'], {}), '([0, 0, 0, 1, 1, 0, 0])\n', (4569, 4592), False, 'import torch\n')] |
# -*- coding:utf-8 -*-
# &Author AnFany
from Heart_Data import model_data as H_Data
import numpy as np
#计算混淆矩阵
from prettytable import PrettyTable
def confusion(realy, outy):
mix = PrettyTable()
type = sorted(list(set(realy.T[0])), reverse=True)
mix.field_names = [' '] + ['预测:%d类'%si for si in type]
# 字典形式存储混淆矩阵数据
cmdict = {}
for jkj in type:
cmdict[jkj] = []
for hh in type:
hu = len(['0' for jj in range(len(realy)) if realy[jj][0] == jkj and outy[jj][0] == hh])
cmdict[jkj].append(hu)
# 输出表格
for fu in type:
mix.add_row(['真实:%d类'%fu] + cmdict[fu])
return mix
# 返回混淆矩阵用到的数据TP,TN,FP,FN
def getmatrix(realy, outy, possclass=1): # 默认类1 为正类
TP = len(['0' for jj in range(len(realy)) if realy[jj][0] == possclass and outy[jj][0] == possclass]) # 实际正预测正
TN = len(['0' for jj in range(len(realy)) if realy[jj][0] == 1 - possclass and outy[jj][0] == 1 - possclass]) # 实际负预测负
FP = len(['0' for jj in range(len(realy)) if realy[jj][0] == 1- possclass and outy[jj][0] == possclass]) # 实际负预测正
FN = len(['0' for jj in range(len(realy)) if realy[jj][0] == possclass and outy[jj][0] == 1 - possclass]) # 实际正预测负
# 假正率
FPR = FP / (FP + TN)
# 真正率
TPR = TP / (TP + FN)
return [FPR, TPR]
class LRReg:
def __init__(self, learn_rate=0.5, iter_times=40000, error=1e-9, cpn='L2'):
self.learn_rate = learn_rate
self.iter_times = iter_times
self.error = error
self.cpn = cpn
# w和b合为一个参数,也就是x最后加上一列全为1的数据。
def trans(self, xdata):
one1 = np.ones(len(xdata))
xta = np.append(xdata, one1.reshape(-1, 1), axis=1)
return xta
# 梯度下降法
def Gradient(self, xdata, ydata, func=trans):
xdata = func(self, xdata)
# 系数w,b的初始化
self.weights = np.zeros((len(xdata[0]), 1))
# 存储成本函数的值
cost_function = []
for i in range(self.iter_times):
# 得到回归的值
y_predict = np.dot(xdata, self.weights)
# Sigmoid函数的值
s_y_pre = 1/ (1 + np.exp(-y_predict))
# 计算最大似然的值
like = np.sum(np.dot(ydata.T, np.log(s_y_pre)) + np.dot((1 - ydata).T, np.log(1- s_y_pre)))
# 正则化
if self.cpn == 'L2':
# 成本函数中添加系数的L2范数
l2norm = np.sum(0.5 * np.dot(self.weights.T, self.weights) / len(xdata))
cost = -like / len(xdata) + l2norm
grad_W = np.dot(xdata.T, (s_y_pre - ydata)) / len(xdata) + 0.9 * self.weights / len(xdata)
else:
cost = -like / (len(xdata))
grad_W = np.dot(xdata.T, (s_y_pre - ydata)) / len(xdata)
cost_function.append(cost)
print(cost, like)
# 训练提前结束
if len(cost_function) > 2:
if 0 <= cost_function[-1] - cost_function[-2] <= self.error:
break
#更新
self.weights = self.weights - self.learn_rate * grad_W
return self.weights, cost_function
# 预测
def predict(self, xdata, func=trans, yuzhi=0.5):
pnum = np.dot(func(self, xdata), self.weights)
s_pnum = 1/ (1 + np.exp(-pnum))
latnum = [[1] if jj[0] >= yuzhi else [0] for jj in s_pnum]
return latnum
# 主函数
if __name__ == "__main__":
lr_re = LRReg()
lf = lr_re.Gradient(H_Data[0], H_Data[1])
print('系数为:\n', lr_re.weights)
# 绘制ROC曲线
# 从0到1定义不同的阈值
yuzi = np.linspace(0, 1, 101)
# ROC 曲线数据
roc = []
# 开始遍历不同的阈值
for yy in yuzi:
fdatd = lr_re.predict(H_Data[0], yuzhi=yy)
if yy == 0.5:
print('阈值为%s时的混淆矩阵:\n' % yy, confusion(H_Data[1], fdatd))
roc.append(getmatrix(H_Data[1], fdatd))
# 绘制ROC曲线图
# 首线是FPR按着从小到大排列
fu = np.array(sorted(roc, key=lambda x: x[0]))
import matplotlib.pyplot as plt
from pylab import mpl # 作图显示中文
mpl.rcParams['font.sans-serif'] = ['Microsoft Yahei']
# 开始绘制ROC曲线图
fig, ax1 = plt.subplots()
ax1.plot(list(fu[:, 0]), list(fu[:, 1]), '.', linewidth=4, color='r')
ax1.plot([0, 1], '--', linewidth=4)
ax1.grid('on')
ax1.legend(['分类器模型', '随机判断模型'], loc='lower right', shadow=True, fontsize='medium')
ax1.annotate('完美分类器', xy=(0, 1), xytext=(0.2, 0.7), color='#FF4589', arrowprops=dict(facecolor='#FF67FF'))
ax1.set_title('ROC曲线', color='#123456')
ax1.set_xlabel('False Positive Rate(FPR,假正率)', color='#123456')
ax1.set_ylabel('True Positive Rate(TPR,真正率)', color='#123456')
# 绘制成本函数图
fig, ax2 = plt.subplots()
ax2.plot(list(range(len(lf[1]))), lf[1], '-', linewidth=5)
ax2.set_title('成本函数图')
ax2.set_ylabel('Cost 值')
ax2.set_xlabel('迭代次数')
plt.show()
| [
"prettytable.PrettyTable",
"numpy.log",
"numpy.exp",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((200, 213), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (211, 213), False, 'from prettytable import PrettyTable\n'), ((3616, 3638), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (3627, 3638), True, 'import numpy as np\n'), ((4171, 4185), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4183, 4185), True, 'import matplotlib.pyplot as plt\n'), ((4739, 4753), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4751, 4753), True, 'import matplotlib.pyplot as plt\n'), ((4909, 4919), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4917, 4919), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2097), 'numpy.dot', 'np.dot', (['xdata', 'self.weights'], {}), '(xdata, self.weights)\n', (2076, 2097), True, 'import numpy as np\n'), ((3313, 3326), 'numpy.exp', 'np.exp', (['(-pnum)'], {}), '(-pnum)\n', (3319, 3326), True, 'import numpy as np\n'), ((2158, 2176), 'numpy.exp', 'np.exp', (['(-y_predict)'], {}), '(-y_predict)\n', (2164, 2176), True, 'import numpy as np\n'), ((2742, 2774), 'numpy.dot', 'np.dot', (['xdata.T', '(s_y_pre - ydata)'], {}), '(xdata.T, s_y_pre - ydata)\n', (2748, 2774), True, 'import numpy as np\n'), ((2247, 2262), 'numpy.log', 'np.log', (['s_y_pre'], {}), '(s_y_pre)\n', (2253, 2262), True, 'import numpy as np\n'), ((2288, 2307), 'numpy.log', 'np.log', (['(1 - s_y_pre)'], {}), '(1 - s_y_pre)\n', (2294, 2307), True, 'import numpy as np\n'), ((2568, 2600), 'numpy.dot', 'np.dot', (['xdata.T', '(s_y_pre - ydata)'], {}), '(xdata.T, s_y_pre - ydata)\n', (2574, 2600), True, 'import numpy as np\n'), ((2437, 2473), 'numpy.dot', 'np.dot', (['self.weights.T', 'self.weights'], {}), '(self.weights.T, self.weights)\n', (2443, 2473), True, 'import numpy as np\n')] |
import os
import warnings
import numpy as np
from scipy.spatial import distance
from pycosmosac.molecule.cavity import Cavity
from pycosmosac.param import data
from pycosmosac.utils import elements
BOND_SCALING = 1.2
def get_connectivity(mol, geometry=None):
#TODO improve accuracy
if geometry is None:
geometry = mol.geometry
if not geometry:
raise RuntimeError("molecule not initialized.")
atoms = geometry["atom"]
xyz = geometry["xyz"]
d = distance.cdist(xyz, xyz)
natom = len(atoms)
connectivity = [[] for _ in range(natom)]
for i, atom_i in enumerate(atoms):
for j, atom_j in enumerate(atoms):
if i==j:
continue
l = BOND_SCALING * elements.covalent_bond(atom_i, atom_j)
if d[i,j] <= l:
connectivity[i].append(j)
if not connectivity[i]:
warnings.warn("atom (%s, %s) has no bonds." % (i+1, atom_i))
return connectivity
def _dfs(connectivity, iatm, color, traversalOrder, res, parent=None):
'''
depth-first search
'''
color[iatm] = 1
traversalOrder.append(iatm)
for jatm in connectivity[iatm]:
if color[jatm] == 0:
if len(connectivity[jatm]) < 2:
color[jatm] = 2
else:
_dfs(connectivity, jatm, color, traversalOrder, res, parent=iatm)
elif color[jatm] == 1:
if parent and parent != jatm:
cycle = []
lastatm_index = traversalOrder.index(iatm)
for index in range(lastatm_index, -1, -1):
katm = traversalOrder[index]
if katm == jatm:
break
else:
cycle.append(katm)
cycle.append(jatm)
res.append(cycle)
color[iatm] = 2
traversalOrder.pop()
def find_rings(mol, connectivity=None):
if connectivity is None: connectivity = mol.connectivity
natom = mol.natom
color = np.zeros((natom), dtype=int)
res = []
for i in range(natom):
if color[i] > 0:
continue
if len(connectivity[i]) < 2:
color[i] = 2
continue
traversalOrder = []
_dfs(connectivity, i, color, traversalOrder, res)
return res
def find_ring_atoms(mol, connectivity=None):
if connectivity is None: connectivity = mol.connectivity
res = find_rings(mol, connectivity)
return list(set().union(*res))
def classify_hydrogen_bonds(mol, geometry=None, connectivity=None):
if geometry is None: geometry = mol.geometry
if connectivity is None: connectivity = mol.connectivity
if not geometry or not connectivity:
raise RuntimeError("molecule not initialized.")
atoms = geometry["atom"]
hb_class = []
for i, atom_i in enumerate(atoms):
if atom_i in ['N', 'F']:
hb_class.append("OT")
elif atom_i in ['O', 'H']:
bond_type = 'NHB'
for j in connectivity[i]:
atom_j = atoms[j]
atom_ij = atom_i + atom_j
if atom_ij in ['OH', 'HO']:
bond_type = 'OH'
break
if atom_i == 'O':
bond_type = 'OT'
break
if atom_i == 'H' and atom_j in ['N', 'F']:
bond_type = 'OT'
break
hb_class.append(bond_type)
else:
hb_class.append('NHB')
return hb_class
def get_dispersion_type(mol, geometry=None, connectivity=None):
if geometry is None: geometry = mol.geometry
if connectivity is None: connectivity = mol.connectivity
atoms = geometry["atom"]
if len(atoms) == 3 and atoms.count("O") == 1 and atoms.count("H") == 2:
disp_tot = (data.disp["H(H2O)"] * 2 + data.disp["-O-"]) / 3.0
return disp_tot, "H2O"
disp_type = "NHB"
disp_tot = 0.0
natom = 0
nCOOH = 0
for i, atom_i in enumerate(atoms):
n = len(connectivity[i])
if atom_i == "C":
natom += 1
if n == 4:
disp_tot += data.disp["C(sp3)"]
elif n == 3:
disp_tot += data.disp["C(sp2)"]
atom_js = []
js = []
for j in connectivity[i]:
atom_js.append(atoms[j])
js.append(j)
if atom_js.count("O") == 2:
for j, atom_j in zip(js,atom_js):
if atom_j != "O":
continue
if len(connectivity[j]) == 2:
for k in connectivity[j]:
if atoms[k] == "H":
nCOOH += 1
disp_tot += data.disp["H(COOH)"]
disp_type = "COOH"
elif n == 2:
disp_tot += data.disp["C(sp)"]
elif atom_i == "N":
natom += 1
if n == 3:
disp_tot += data.disp["N(sp3)"]
elif n == 2:
disp_tot += data.disp["N(sp2)"]
elif n == 1:
disp_tot += data.disp["N(sp)"]
elif atom_i == "O":
natom += 1
if n == 2:
disp_tot += data.disp["-O-"]
elif n == 1:
disp_tot += data.disp["=O"]
elif atom_i == "F":
natom += 1
disp_tot += data.disp["F"]
elif atom_i == "Cl":
natom += 1
disp_tot += data.disp["Cl"]
elif atom_i == "Br":
natom += 1
disp_tot += data.disp["Br"]
elif atom_i == "I":
natom += 1
disp_tot += data.disp["I"]
elif atom_i == "P":
natom += 1
disp_tot += data.disp["P"]
elif atom_i == "S":
natom += 1
disp_tot += data.disp["S"]
elif atom_i == "H":
j = connectivity[i][0]
atom_j = atoms[j]
if atom_j == "O":
natom += 1
disp_tot += data.disp["H(OH)"]
elif atom_j == "N":
natom += 1
disp_tot += data.disp["H(NH)"]
else:
warnings.warn("dispersion parameter not available for %s" % atom_i)
disp_tot -= nCOOH * data.disp["H(OH)"]
disp_tot /= natom
return disp_tot, disp_type
def fromstring(string, format='xyz'):
format = format.lower()
if format == 'xyz':
dat = string.splitlines()
natm = int(dat[0])
return '\n'.join(dat[2:natm+2])
elif format == 'raw':
return string
else:
raise NotImplementedError
def fromfile(filename, format=None):
if format is None: # Guess format based on filename
format = os.path.splitext(filename)[1][1:].lower()
if format not in ('xyz', 'zmat', 'sdf', 'mol2'):
format = 'raw'
with open(filename, 'r') as f:
return fromstring(f.read(), format)
def read_geometry(xyz):
if os.path.isfile(xyz):
try:
xyz_raw = fromfile(xyz)
return raw_to_geometry(xyz_raw)
except:
raise ValueError('Failed to parse geometry file %s' % xyz)
else:
return raw_to_geometry(xyz)
def raw_to_geometry(xyz):
geometry = {}
geometry["atom"] = []
geometry["xyz"] = []
def str2atm(line):
dat = line.split()
assert(len(dat) == 4)
geometry["atom"].append(dat[0])
geometry["xyz"].append([float(x) for x in dat[1:4]])
if isinstance(xyz, str):
xyz = str(xyz.replace(';','\n').replace(',',' ').replace('\t',' '))
fmt_atoms = []
for dat in xyz.split('\n'):
dat = dat.strip()
if dat and dat[0] != '#':
fmt_atoms.append(dat)
for line in fmt_atoms:
str2atm(line)
geometry["xyz"] = np.asarray(geometry["xyz"])
else:
raise NotImplementedError
return geometry
def geometry_to_xyz(geometry, name="unknown"):
symb = geometry["atom"]
coord = geometry["xyz"]
natom = len(symb)
xyz = str(natom) + "\n"
xyz += name + "\n"
for i in range(natom):
xyz += symb[i] + " "
xyz += str(coord[i, 0]) + " "
xyz += str(coord[i, 1]) + " "
xyz += str(coord[i, 2]) + " "
xyz += "\n"
return xyz.strip()
class Mole():
'''
Class for molecular information
Attributes:
geometry : dict
Geometry information.
"xyz" : ndarray
"atom" : list
cavity : Cavity
Cavity information
connectivity : list
Connectivity information.
hb_class : list
Hydrogen bond classification.
'''
def __init__(self):
#{"atom" : [], "xyz" : ndarray(natom, 3)}
self.geometry = None
self.cavity = None
self.connectivity = None
self.hb_class = None
@property
def natom(self):
if self.geometry is None:
raise RuntimeError("molecule not initialized")
return len(self.geometry["atom"])
def build(self, geometry=None, cavity=None):
if geometry is not None:
if isinstance(geometry, str):
self.geometry = read_geometry(geometry)
elif isinstance(geometry, dict):
self.geometry = geometry
else:
raise ValueError("unsupported geometry input")
if cavity is not None: self.cavity = cavity
self.connectivity = self.get_connectivity()
self.hb_class = self.classify_hydrogen_bonds()
return self
get_connectivity = get_connectivity
classify_hydrogen_bonds = classify_hydrogen_bonds
get_dispersion_type = get_dispersion_type
find_rings = find_rings
find_ring_atoms = find_ring_atoms
if __name__ == "__main__":
from pycosmosac.utils.misc import fingerprint
geometry = {}
geometry["atom"] = ['O', 'H', 'H']
geometry["xyz"] = np.asarray([[ 0., 0., -0.405655705],
[ 0.770106178, 0., 0.202827852],
[-0.770106178, 0., 0.202827852]])
mol = Mole().build(geometry)
print(mol.connectivity == [[1, 2], [0], [0]])
print(mol.hb_class == ['OH','OH','OH'])
print(mol.get_dispersion_type()[0] - 70.75953333333332)
print(mol.get_dispersion_type()[1] == "H2O")
xyz = '''
N -2.86237 0.53549 -0.00680
C -1.59157 1.12789 -0.00460
C -0.65647 0.06499 -0.00640
N -1.36327 -1.15231 -0.01640
C -2.67117 -0.86471 -0.01510
C 0.72143 0.40079 -0.00170
N 1.06103 1.72159 -0.02520
C 0.08733 2.69019 -0.01940
N -1.23787 2.45869 -0.00720
H 0.42943 3.73339 -0.02780
N 1.75063 -0.53271 0.12520
H -3.73057 1.00639 -0.00350
H -3.47277 -1.60891 -0.01910
H 1.51683 -1.44251 -0.19520
H 2.65133 -0.22311 -0.15800
'''
mol = Mole().build(xyz)
print(mol.geometry["atom"] == ['N', 'C', 'C', 'N', 'C', 'C', 'N', 'C', 'N', 'H', 'N', 'H', 'H', 'H', 'H'])
print(fingerprint(mol.geometry["xyz"]) - -7.705571225872962)
print(mol.connectivity == [[1, 4, 11], [0, 2, 8], [1, 3, 5], [2, 4], [0, 3, 12], [2, 6, 10], [5, 7], [6, 8, 9], [1, 7], [7], [5, 13, 14], [0], [4], [10], [10]])
print(mol.find_ring_atoms() == [0, 1, 2, 3, 4, 5, 6, 7, 8])
print(mol.find_rings() == [[4, 3, 2, 1, 0], [8, 7, 6, 5, 2, 1]])
xyz = '''
C 0.78526 0.09180 -0.07290
C 0.46366 -1.44010 0.02370
C -0.58284 -1.14900 1.15480
C -0.26134 0.38280 1.05820
C -0.33764 0.25930 -1.15480
C -1.38414 0.55030 -0.02370
C -1.70564 -0.98160 0.07290
C -0.65914 -1.27260 -1.05820
H 1.78286 0.52170 -0.13130
H 1.20356 -2.23730 0.04260
H -0.68114 -1.71320 2.07970
H -0.10194 1.04580 1.90590
H -0.23944 0.82320 -2.07990
H -2.12424 1.34730 -0.04270
H -2.70324 -1.41140 0.13130
H -0.81854 -1.93550 -1.90580
'''
mol = Mole().build(xyz)
print(mol.find_ring_atoms() == [0, 1, 2, 3, 4, 5, 6, 7])
| [
"pycosmosac.utils.misc.fingerprint",
"scipy.spatial.distance.cdist",
"pycosmosac.utils.elements.covalent_bond",
"numpy.asarray",
"os.path.splitext",
"os.path.isfile",
"numpy.zeros",
"warnings.warn"
] | [((486, 510), 'scipy.spatial.distance.cdist', 'distance.cdist', (['xyz', 'xyz'], {}), '(xyz, xyz)\n', (500, 510), False, 'from scipy.spatial import distance\n'), ((2035, 2061), 'numpy.zeros', 'np.zeros', (['natom'], {'dtype': 'int'}), '(natom, dtype=int)\n', (2043, 2061), True, 'import numpy as np\n'), ((7158, 7177), 'os.path.isfile', 'os.path.isfile', (['xyz'], {}), '(xyz)\n', (7172, 7177), False, 'import os\n'), ((10178, 10288), 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, -0.405655705], [0.770106178, 0.0, 0.202827852], [-0.770106178, \n 0.0, 0.202827852]]'], {}), '([[0.0, 0.0, -0.405655705], [0.770106178, 0.0, 0.202827852], [-\n 0.770106178, 0.0, 0.202827852]])\n', (10188, 10288), True, 'import numpy as np\n'), ((8038, 8065), 'numpy.asarray', 'np.asarray', (["geometry['xyz']"], {}), "(geometry['xyz'])\n", (8048, 8065), True, 'import numpy as np\n'), ((893, 955), 'warnings.warn', 'warnings.warn', (["('atom (%s, %s) has no bonds.' % (i + 1, atom_i))"], {}), "('atom (%s, %s) has no bonds.' % (i + 1, atom_i))\n", (906, 955), False, 'import warnings\n'), ((11613, 11645), 'pycosmosac.utils.misc.fingerprint', 'fingerprint', (["mol.geometry['xyz']"], {}), "(mol.geometry['xyz'])\n", (11624, 11645), False, 'from pycosmosac.utils.misc import fingerprint\n'), ((740, 778), 'pycosmosac.utils.elements.covalent_bond', 'elements.covalent_bond', (['atom_i', 'atom_j'], {}), '(atom_i, atom_j)\n', (762, 778), False, 'from pycosmosac.utils import elements\n'), ((6921, 6947), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6937, 6947), False, 'import os\n'), ((6359, 6426), 'warnings.warn', 'warnings.warn', (["('dispersion parameter not available for %s' % atom_i)"], {}), "('dispersion parameter not available for %s' % atom_i)\n", (6372, 6426), False, 'import warnings\n')] |
"""
Mask R-CNN
Configurations and data loading code for Davis 2017.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained DAVIS weights
python davis2017.py train --dataset=/path/to/davis/ --model=davis
# Train a new model starting from ImageNet weights
python davis2017.py train --dataset=/path/to/davis/ --model=imagenet
# Continue training a model that you had trained earlier
python davis2017.py train --dataset=/path/to/davis/ --model=/path/to/weights.h5
# Continue training the last model you trained
python davis2017.py train --dataset=/path/to/davis/ --model=last
# Run DAVIS evaluatoin on the last model you trained
python davis2017.py evaluate --dataset=/path/to/davis/ --model=last
"""
import os
import time
import numpy as np
import zipfile
# import urllib.request
from urllib2 import urlopen
import shutil
import random
from .config import Config
from . import utils
from . import model as modellib
import torch
import skimage
############################################################
# Argument
############################################################
# Root directory of the project
ROOT_DIR = os.getcwd()
# Path to trained weights file
DAVIS_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_davis.pth")
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.pth")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "out", "logs")
DEFAULT_DATASET_YEAR = "2017"
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=False,
default='/data1/shuangjiexu/data/DAVIS_2017',
metavar="/path/to/davis/",
help='Directory of the DAVIS dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
parser.add_argument('--model', required=False,
default="coco",
metavar="/path/to/weights/",
help="Path to weights .pth file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=100,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--seq', required=False,
default='bike-packing',
metavar="<sequence class name>",
help='Sequence class name in DAVIS (default=bike-packing)')
parser.add_argument('--augment_method', required=False,
default='xu_val_augment_2500',
metavar="<augment methods name>",
help='Augment methods name in Matlab code (default=xu_val_augment_2500)')
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
print("Class: ", args.seq)
print("Limit: ", args.limit)
print("Augment method: ", args.augment_method)
############################################################
# Dataset
############################################################
# NEED DAVIS TOOL in https://github.com/fperazzi/davis-2017
from davis import cfg, phase, io, DAVISLoader, Annotation
an = Annotation(args.seq, single_object=0)
OBJ_NUMBER = an.n_objects
# Load dataset
db = DAVISLoader(year=args.year, phase=phase.TESTDEV)
AugmentImgPath = os.path.join(args.dataset, 'Augmentations', args.augment_method, 'JPEGImages', '480p', args.seq)
AugmentAnnoPath = os.path.join(args.dataset, 'Augmentations', args.augment_method, 'Annotations', '480p', args.seq)
# read all the file list
file_names = next(os.walk(AugmentImgPath))[2]
random.shuffle (file_names)
test_files = [x[:-4] for x in file_names[:args.limit]]
train_files = [x[:-4] for x in file_names[args.limit:]]
class DavisDataset(utils.Dataset):
def load_davis(self, subset):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, test)
year: What dataset year to load (2014, 2017) as a string, not an integer
"""
# Add classes
for i in range(1, OBJ_NUMBER):
self.add_class("davis", i, 'obj_'+str(i))
if subset == 'train':
image_list = train_files
else:
image_list = test_files
# Add images
# annotations is a mask of w*h with value [0,1,2,...]
for i in range(len(image_list)):
self.add_image(
"davis", image_id=i,
path=os.path.join(AugmentImgPath, image_list[i]+'.jpg'),
annotations=skimage.io.imread(os.path.join(AugmentAnnoPath, image_list[i]+'.png')))
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "davis":
return super(DavisDataset, self).load_mask(image_id)
img = skimage.io.imread(image_info["path"])
h,w = img.shape[:2]
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for class_id in np.unique(annotations):
if class_id == 0:
continue
mask = annotations.copy()
mask[mask!=class_id] = 0
mask[mask==class_id] = 1
# and end up rounded out. Skip those objects.
if mask.max() < 1:
continue
instance_masks.append(mask)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(DavisDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a path to the image in the DAVIS 2017 Augmentations."""
info = self.image_info[image_id]
if info["source"] == "coco":
return info["path"]
else:
super(DavisDataset, self).image_reference(image_id)
############################################################
# Configurations
############################################################
class DavisConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "davis"
# We use one GPU with 8GB memory, which can fit one image.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
GPU_COUNT = 1
# Number of classes (including background)
NUM_CLASSES = 1 + OBJ_NUMBER # COCO has 80 classes
STEPS_PER_EPOCH = 100
############################################################
# DAVIS Evaluation
############################################################
############################################################
# Training
############################################################
if __name__ == '__main__':
# Configurations
if args.command == "train":
config = DavisConfig()
else:
class InferenceConfig(DavisConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(config=config,
model_dir=args.logs)
if config.GPU_COUNT:
model = model.cuda()
# TODO: add model load methods for the different class
# Select weights file to load
if args.model:
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()[1]
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = config.IMAGENET_MODEL_PATH
else:
model_path = args.model
else:
model_path = ""
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, utils.state_modifier)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
# validation set, as as in the Mask RCNN paper.
dataset_train = DavisDataset()
dataset_train.load_davis("train")
dataset_train.prepare()
# Validation dataset
dataset_val = DavisDataset()
dataset_val.load_davis("val")
dataset_val.prepare()
# TODO: change epoches because we fine tune it
# lr and epoch number decrese 10
config.LEARNING_RATE = config.LEARNING_RATE / 10
# Training - Stage 1
print("Training network heads")
model.train_model(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=20,
layers='heads')
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train_model(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=60,
layers='4+')
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train_model(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=80,
layers='all')
elif args.command == "evaluate":
# Validation dataset
dataset_val = DavisDataset()
# TODO: how to load the test dev image
davis = dataset_val.load_davis("minival")
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.limit))
print('Undo')
# evaluate_davis(model, dataset_val, davis, "bbox", limit=int(args.limit))
# evaluate_davis(model, dataset_val, davis, "segm", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command)) | [
"davis.Annotation",
"random.shuffle",
"argparse.ArgumentParser",
"numpy.unique",
"os.path.join",
"os.getcwd",
"davis.DAVISLoader",
"numpy.stack",
"skimage.io.imread",
"numpy.array",
"os.walk"
] | [((1406, 1417), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1415, 1417), False, 'import os\n'), ((1469, 1514), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_davis.pth"""'], {}), "(ROOT_DIR, 'mask_rcnn_davis.pth')\n", (1481, 1514), False, 'import os\n'), ((1533, 1577), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.pth"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.pth')\n", (1545, 1577), False, 'import os\n'), ((1705, 1742), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""out"""', '"""logs"""'], {}), "(ROOT_DIR, 'out', 'logs')\n", (1717, 1742), False, 'import os\n'), ((1832, 1899), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN on MS COCO."""'}), "(description='Train Mask R-CNN on MS COCO.')\n", (1855, 1899), False, 'import argparse\n'), ((4100, 4137), 'davis.Annotation', 'Annotation', (['args.seq'], {'single_object': '(0)'}), '(args.seq, single_object=0)\n', (4110, 4137), False, 'from davis import cfg, phase, io, DAVISLoader, Annotation\n'), ((4185, 4233), 'davis.DAVISLoader', 'DAVISLoader', ([], {'year': 'args.year', 'phase': 'phase.TESTDEV'}), '(year=args.year, phase=phase.TESTDEV)\n', (4196, 4233), False, 'from davis import cfg, phase, io, DAVISLoader, Annotation\n'), ((4252, 4352), 'os.path.join', 'os.path.join', (['args.dataset', '"""Augmentations"""', 'args.augment_method', '"""JPEGImages"""', '"""480p"""', 'args.seq'], {}), "(args.dataset, 'Augmentations', args.augment_method,\n 'JPEGImages', '480p', args.seq)\n", (4264, 4352), False, 'import os\n'), ((4367, 4468), 'os.path.join', 'os.path.join', (['args.dataset', '"""Augmentations"""', 'args.augment_method', '"""Annotations"""', '"""480p"""', 'args.seq'], {}), "(args.dataset, 'Augmentations', args.augment_method,\n 'Annotations', '480p', args.seq)\n", (4379, 4468), False, 'import os\n'), ((4536, 4562), 'random.shuffle', 'random.shuffle', (['file_names'], {}), '(file_names)\n', (4550, 4562), False, 'import random\n'), ((4508, 
4531), 'os.walk', 'os.walk', (['AugmentImgPath'], {}), '(AugmentImgPath)\n', (4515, 4531), False, 'import os\n'), ((6298, 6335), 'skimage.io.imread', 'skimage.io.imread', (["image_info['path']"], {}), "(image_info['path'])\n", (6315, 6335), False, 'import skimage\n'), ((6642, 6664), 'numpy.unique', 'np.unique', (['annotations'], {}), '(annotations)\n', (6651, 6664), True, 'import numpy as np\n'), ((7114, 7146), 'numpy.stack', 'np.stack', (['instance_masks'], {'axis': '(2)'}), '(instance_masks, axis=2)\n', (7122, 7146), True, 'import numpy as np\n'), ((7171, 7206), 'numpy.array', 'np.array', (['class_ids'], {'dtype': 'np.int32'}), '(class_ids, dtype=np.int32)\n', (7179, 7206), True, 'import numpy as np\n'), ((5432, 5484), 'os.path.join', 'os.path.join', (['AugmentImgPath', "(image_list[i] + '.jpg')"], {}), "(AugmentImgPath, image_list[i] + '.jpg')\n", (5444, 5484), False, 'import os\n'), ((5530, 5583), 'os.path.join', 'os.path.join', (['AugmentAnnoPath', "(image_list[i] + '.png')"], {}), "(AugmentAnnoPath, image_list[i] + '.png')\n", (5542, 5583), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
#
# 用于最中执行batch normalization的函数
# tf.nn.batch_normalization(
# x,
# mean,
# variance,
# offset,
# scale,
# variance_epsilon,
# name=None
# )
#
# 参数:
# x是input输入样本
# mean是样本均值
# variance是样本方差
# offset是样本偏移(相加一个转化值)
# scale是缩放(默认为1)
# variance_epsilon是为了避免分母为0,添加的一个极小值
# 输出的计算公式为:
# y = scale * (x - mean) / sqrt(variance + variance_epsilon) + offset
#
# -------------------------------------------------------
# def moments(
# x,
# axes,
# shift=None, # pylint: disable=unused-argument
# name=None,
# keep_dims=False):
#
# 参数:
# x:一个tensor张量,即我们的输入数据
# axes:一个int型数组,它用来指定我们计算均值和方差的轴(这里不好理解,可以结合下面的例子)
# shift:当前实现中并没有用到
# name:用作计算moment操作的名称
# keep_dims:输出和输入是否保持相同的维度
#
# 返回:
# 两个tensor张量:均值和方差
def mean_var2tensor(input_variable):
    """Return (mean, variance) of *input_variable* over its last axis.

    Both outputs keep the reduced dimension (``keep_dims=True``) so they
    broadcast directly against the input tensor.
    """
    reduce_axes = [len(input_variable.get_shape()) - 1]
    return tf.nn.moments(input_variable, axes=reduce_axes, keep_dims=True)
def mean_var2numpy(input_variable):
    """Return (mean, variance) of *input_variable* over its last axis.

    NOTE(review): despite the name, this is identical to
    ``mean_var2tensor`` and also returns tensors, not numpy arrays.
    """
    shape = input_variable.get_shape()
    last_axis = [len(shape) - 1]
    moments = tf.nn.moments(input_variable, axes=last_axis, keep_dims=True)
    return moments
def my_batch_normalization(input_x, is_training=True, name='BatchNorm', moving_decay=0.9):
    """Batch-normalize ``input_x`` over its last axis, tracking moments with an EMA.

    When ``is_training`` is True the current batch moments are used and their
    exponential moving averages are updated; otherwise the stored moving
    averages are used instead.

    Args:
        input_x: input tensor; normalized over its last dimension.
        is_training: selects the ``tf.cond`` branch below.
        name: variable-scope name for the created ops.
        moving_decay: decay rate of the ExponentialMovingAverage.

    Returns:
        The normalized tensor, same shape as ``input_x``.
    """
    # Normalize over the last axis of the input.
    x_shape = input_x.get_shape()
    axis = [len(x_shape) - 1]
    with tf.variable_scope(name):
        x_mean, x_var = tf.nn.moments(input_x, axes=axis, name='moments', keep_dims=True)
        scale = tf.constant(0.1)   # one shared scale factor for every batch
        shift = tf.constant(0.001)   # one shared shift term for every batch
        # NOTE(review): scale/shift are constants, not trainable variables —
        # confirm that non-learnable gamma/beta is intentional.
        epsilon = 0.0001
        # Track the mean and variance with an exponential moving average.
        ema = tf.train.ExponentialMovingAverage(moving_decay)

        def mean_var_with_update():
            # Apply the EMA update, then return the batch moments.
            ema_apply_op = ema.apply([x_mean, x_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(x_mean), tf.identity(x_var)

        # Training: update and use batch moments; testing: use the last stored EMA values.
        x_mean, x_var = tf.cond(tf.equal(is_training, True), mean_var_with_update,
                                lambda: (ema.average(x_mean), ema.average(x_var)))

        out_x = tf.nn.batch_normalization(input_x, x_mean, x_var, shift, scale, epsilon)
    return out_x
def my_bn(input_x, is_training=True, name='BatchNorm', moving_decay=0.9):
    """Batch-normalize ``input_x`` using its own per-batch moments.

    Scale and shift are fixed constants shared by every batch; the
    ``is_training`` and ``moving_decay`` arguments are accepted for
    interface parity with ``my_batch_normalization`` but are unused here.
    """
    feature_axis = [len(input_x.get_shape()) - 1]
    with tf.variable_scope(name):
        mean, var = tf.nn.moments(input_x, axes=feature_axis, name='moments', keep_dims=True)
        gamma = tf.constant(0.1)    # shared scale factor for every batch
        beta = tf.constant(0.001)   # shared shift term for every batch
        eps = 0.0001
        return tf.nn.batch_normalization(input_x, mean, var, beta, gamma, eps)
# ---------------------------------------------- my activations -----------------------------------------------
def mysin(x):
    """Sine activation with unit period: sin(2*pi*x)."""
    return tf.sin(2.0 * np.pi * x)
def srelu(x):
    """Product-of-ReLUs window: relu(x) * relu(1 - x), nonzero only on (0, 1)."""
    return tf.nn.relu(x) * tf.nn.relu(1 - x)
def s2relu(x):
    """srelu window modulated by sin(2*pi*x)."""
    window = tf.nn.relu(1 - x) * tf.nn.relu(x)
    return window * tf.sin(2 * np.pi * x)
def powsin_srelu(x):
    """srelu window times sin^2(2*pi*x)."""
    window = tf.nn.relu(1 - x) * tf.nn.relu(x)
    modulation = tf.sin(2 * np.pi * x)
    return window * modulation * modulation
def sin2_srelu(x):
    """2 * srelu window, modulated by sin(4*pi*x) * sin(2*pi*x)."""
    window = 2.0 * tf.nn.relu(1 - x) * tf.nn.relu(x)
    return window * tf.sin(4 * np.pi * x) * tf.sin(2 * np.pi * x)
def slrelu(x):
    """Leaky-ReLU analogue of srelu: leaky_relu(1 - x) * leaky_relu(x)."""
    left = tf.nn.leaky_relu(1 - x)
    right = tf.nn.leaky_relu(x)
    return left * right
def pow2relu(x):
    """relu(1 - x) * relu(x)^2."""
    r = tf.nn.relu(x)
    return tf.nn.relu(1 - x) * r * r
def selu(x):
    """elu(1 - x) * elu(x).

    NOTE(review): unrelated to the standard SELU activation despite the name.
    """
    return tf.nn.elu(1 - x) * tf.nn.elu(x)
def wave(x):
    """Piecewise-linear wave built from shifted ReLUs.

    NOTE(review): under Python 2, ``1/4`` and ``3/4`` are integer divisions
    evaluating to 0 — confirm this file targets Python 3.
    """
    return (tf.nn.relu(x)
            - 2 * tf.nn.relu(x - 1 / 4)
            + 2 * tf.nn.relu(x - 3 / 4)
            - tf.nn.relu(x - 1))
def phi(x):
    """Piecewise-polynomial bump built from powers of shifted ReLUs.

    NOTE(review): the first three terms are squared ReLUs while the last is
    cubed — confirm this asymmetry is intentional.
    """
    r0 = tf.nn.relu(x)
    r1 = tf.nn.relu(x - 1)
    r2 = tf.nn.relu(x - 2)
    r3 = tf.nn.relu(x - 3)
    return r0 * r0 - 3 * r1 * r1 + 3 * r2 * r2 - r3 * r3 * r3
# ------------------------------------------------ 初始化权重和偏置 --------------------------------------------
# 生成DNN的权重和偏置
# tf.random_normal(): 用于从服从指定正太分布的数值中取出随机数
# tf.random_normal(shape,mean=0.0,stddev=1.0,dtype=tf.float32,seed=None,name=None)
# hape: 输出张量的形状,必选.--- mean: 正态分布的均值,默认为0.----stddev: 正态分布的标准差,默认为1.0
# dtype: 输出的类型,默认为tf.float32 ----seed: 随机数种子,是一个整数,当设置之后,每次生成的随机数都一样---name: 操作的名称
def Initial_DNN2different_hidden(in_size, out_size, hidden_layers, Flag):
    """Create the weight/bias variables of a fully connected network.

    Args:
        in_size: input dimension.
        out_size: output dimension.
        hidden_layers: sequence of hidden-layer widths.
        Flag: tag appended to every variable name.

    Returns:
        (Weights, Biases): lists of ``tf.Variable``, one entry per layer —
        input transform, hidden transforms, output transform.
    """
    weights = []  # weight matrices, one per layer
    biases = []   # bias row-vectors, one per layer

    def _append_pair(fan_in, fan_out, w_name, b_name):
        # 0.1-scaled normal weights and 0.1-scaled uniform biases.
        w = tf.Variable(0.1 * tf.random.normal([fan_in, fan_out]), dtype='float32', name=w_name)
        b = tf.Variable(0.1 * tf.random.uniform([1, fan_out]), dtype='float32', name=b_name)
        weights.append(w)
        biases.append(b)

    # First hidden layer: transforms the network input.
    _append_pair(in_size, hidden_layers[0],
                 'W_transInput' + str(Flag), 'B_transInput' + str(Flag))
    # Remaining hidden layers.
    for k in range(len(hidden_layers) - 1):
        _append_pair(hidden_layers[k], hidden_layers[k + 1],
                     'W_hidden' + str(k + 1) + str(Flag), 'B_hidden' + str(k + 1) + str(Flag))
    # Output layer: maps the last hidden width to the output dimension.
    _append_pair(hidden_layers[-1], out_size,
                 'W_outTrans' + str(Flag), 'B_outTrans' + str(Flag))
    return weights, biases
# tf.truncated_normal(shape, mean, stddev) :shape表示生成张量的维度,mean是均值,stddev是标准差。这个函数产生正太分布,
# 均值和标准差自己设定。这是一个截断的产生正太分布的函数,就是说产生正太分布的值如果与均值的差值大于两倍的标准差,
# 那就重新生成。和一般的正太分布的产生随机数据比起来,这个函数产生的随机数与均值的差距不会超过两倍的标准差,但是一般的别的函数是可能的。
# truncated_normal(
# shape,
# mean=0.0,
# stddev=1.0,
# dtype=tf.float32,
# seed=None,
# name=None)
def truncated_normal_init(in_dim, out_dim, scale_coef=1.0, weight_name='weight'):
    """Xavier-style truncated-normal variable of shape [in_dim, out_dim]."""
    # Xavier stddev keeps the activation scale roughly constant across layers;
    # scale_coef guards against values that are too small or too large.
    stddev = np.sqrt(2 / (in_dim + out_dim))
    init = scale_coef * tf.truncated_normal([in_dim, out_dim], stddev=stddev)
    return tf.Variable(init, dtype=tf.float32, name=weight_name)
# tf.random_uniform()
# By default draws values in [0, 1); the bounds can be changed via minval and maxval.
def uniform_init(in_dim, out_dim, weight_name='weight'):
    """Variable of shape [in_dim, out_dim] initialized U(0, 1)."""
    init = tf.random_uniform([in_dim, out_dim], dtype=tf.float32)
    return tf.Variable(init, dtype=tf.float32, name=weight_name)
# tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
# Outputs random values from a normal distribution.
# Parameters:
#     shape: a 1-D tensor giving the shape of the output tensor.
#     mean: mean of the normal distribution.
#     stddev: standard deviation of the normal distribution.
#     dtype: output dtype.
#     seed: integer random seed; once set, the same values are generated every run.
#     name: name of the op.
def normal_init(in_dim, out_dim, scale_coef=1.0, weight_name='weight'):
    """Xavier-scaled normal variable of shape [in_dim, out_dim]."""
    # Xavier stddev; scale_coef guards against too-small/too-large values.
    stddev = np.sqrt(2.0 / (in_dim + out_dim))
    init = scale_coef * tf.random_normal([in_dim, out_dim], mean=0, stddev=stddev, dtype=tf.float32)
    return tf.Variable(init, dtype=tf.float32, name=weight_name)
# tf.zeros(
#     shape,
#     dtype=tf.float32,
#     name=None
# )
# shape is the output shape, i.e. whether the array is 1-D, 2-D, ... n-D.
def zeros_init(in_dim, out_dim, weight_name='weight'):
    """All-zero variable of shape [in_dim, out_dim]."""
    zero_block = tf.zeros([in_dim, out_dim], dtype=tf.float32)
    return tf.Variable(zero_block, dtype=tf.float32, name=weight_name)
def initialize_NN_xavier(in_size, out_size, hidden_layers, Flag):
    """Build (Weights, Biases) lists: truncated-normal weights, uniform biases.

    Variables live in the reusable 'WB_scope' variable scope; Flag is appended
    to every variable name so several networks can coexist.
    """
    with tf.variable_scope('WB_scope', reuse=tf.AUTO_REUSE):
        scale = 5.0
        tag = str(Flag)
        # Layer widths from input through hidden layers to output.
        sizes = [in_size] + list(hidden_layers) + [out_size]
        # Per-layer name suffix: input transform, hidden1..hiddenN-1, output transform.
        suffixes = ['-transInput' + tag]
        suffixes += ['-hidden' + str(i + 1) + tag for i in range(len(hidden_layers) - 1)]
        suffixes.append('-outTrans' + tag)
        Weights = []
        Biases = []
        for k, sfx in enumerate(suffixes):
            Weights.append(truncated_normal_init(sizes[k], sizes[k + 1], scale_coef=scale,
                                                  weight_name='W' + sfx))
            Biases.append(uniform_init(1, sizes[k + 1], weight_name='B' + sfx))
        return Weights, Biases
def initialize_NN_random_normal(in_size, out_size, hidden_layers, Flag, varcoe=0.5):
    """Build (Weights, Biases) via tf.get_variable with fan-scaled normal init.

    Each layer uses stddev = (2 / (fan_in + fan_out)) ** varcoe; biases have
    shape (1, fan_out). Variables live in the reusable 'WB_scope' scope.
    """
    with tf.variable_scope('WB_scope', reuse=tf.AUTO_REUSE):
        tag = str(Flag)
        # Layer widths from input through hidden layers to output.
        sizes = [in_size] + list(hidden_layers) + [out_size]
        # Per-layer name suffix (middle layers use a bare index, as in the original).
        names = ['-transInput' + tag]
        names += [str(i + 1) + tag for i in range(len(hidden_layers) - 1)]
        names.append('-outTrans' + tag)
        Weights = []
        Biases = []
        for k, nm in enumerate(names):
            fan_in, fan_out = sizes[k], sizes[k + 1]
            std = (2.0 / (fan_in + fan_out)) ** varcoe
            Weights.append(tf.get_variable(
                name='W' + nm, shape=(fan_in, fan_out),
                initializer=tf.random_normal_initializer(stddev=std), dtype=tf.float32))
            Biases.append(tf.get_variable(
                name='B' + nm, shape=(1, fan_out),
                initializer=tf.random_normal_initializer(stddev=std), dtype=tf.float32))
        return Weights, Biases
def initialize_NN_random_normal2(in_size, out_size, hidden_layers, Flag, varcoe=0.5):
    """Same as initialize_NN_random_normal, but biases are rank-1 (shape (n,)).

    Each layer uses stddev = (2 / (fan_in + fan_out)) ** varcoe. Variables
    live in the reusable 'WB_scope' scope.
    """
    with tf.variable_scope('WB_scope', reuse=tf.AUTO_REUSE):
        tag = str(Flag)
        # Layer widths from input through hidden layers to output.
        sizes = [in_size] + list(hidden_layers) + [out_size]
        # Per-layer name suffix (middle layers use a bare index, as in the original).
        names = ['-transInput' + tag]
        names += [str(i + 1) + tag for i in range(len(hidden_layers) - 1)]
        names.append('-outTrans' + tag)
        Weights = []
        Biases = []
        for k, nm in enumerate(names):
            fan_in, fan_out = sizes[k], sizes[k + 1]
            std = (2.0 / (fan_in + fan_out)) ** varcoe
            Weights.append(tf.get_variable(
                name='W' + nm, shape=(fan_in, fan_out),
                initializer=tf.random_normal_initializer(stddev=std), dtype=tf.float32))
            # Rank-1 bias (broadcast over the batch dimension at apply time).
            Biases.append(tf.get_variable(
                name='B' + nm, shape=(fan_out,),
                initializer=tf.random_normal_initializer(stddev=std), dtype=tf.float32))
        return Weights, Biases
# ----------------------------------- Regularization -----------------------------------------------
def regular_weights_biases_L1(weights, biases):
    """L1 penalty: sum of absolute values of every weight and bias entry."""
    w_total = 0
    b_total = 0
    for layer_w, layer_b in zip(weights, biases):
        w_total = w_total + tf.reduce_sum(tf.abs(layer_w), keep_dims=False)
        b_total = b_total + tf.reduce_sum(tf.abs(layer_b), keep_dims=False)
    return w_total + b_total
# L2-regularize the weights and biases
def regular_weights_biases_L2(weights, biases):
    """L2 penalty: sum of squares of every weight and bias entry."""
    w_total = 0
    b_total = 0
    for layer_w, layer_b in zip(weights, biases):
        w_total = w_total + tf.reduce_sum(tf.square(layer_w), keep_dims=False)
        b_total = b_total + tf.reduce_sum(tf.square(layer_b), keep_dims=False)
    return w_total + b_total
# -------------------------------------------- Network models ------------------------------------------------------
def PDE_DNN(variable_input, Weights, Biases, hiddens, activate_name=None):
    """Fully-connected DNN forward pass with residual shortcuts.

    Args:
        variable_input: input tensor (batch, in_dim).
        Weights, Biases: per-layer parameter lists, input -> hidden... -> output.
        hiddens: hidden-layer widths; a shortcut H = H + H_pre is added whenever
            two consecutive hidden layers share the same width.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).

    Returns:
        Output tensor of the final layer (purely linear; no output activation).

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    layers = len(Weights)   # number of affine layers (hidden layers + output)
    H = variable_input      # current activations; starts as the input layer
    hidden_record = 0
    for k in range(layers - 1):
        H_pre = H
        W = Weights[k]
        B = Biases[k]
        H = DNN_activation(tf.add(tf.matmul(H, W), B))
        # Residual shortcut when two consecutive layers share a width.
        if hiddens[k] == hidden_record:
            H = H + H_pre
        hidden_record = hiddens[k]
    # Output layer: linear transform only, no activation.
    W_out = Weights[-1]
    B_out = Biases[-1]
    output = tf.add(tf.matmul(H, W_out), B_out)
    return output
def PDE_DNN_BN(variable_input, Weights, Biases, hiddens, activate_name=None, is_training=None):
    """DNN forward pass with residual shortcuts and batch norm per hidden layer.

    Same as PDE_DNN, except every hidden layer's output (after the optional
    residual shortcut) is passed through my_bn(H, is_training).

    Args:
        variable_input: input tensor (batch, in_dim).
        Weights, Biases: per-layer parameter lists, input -> hidden... -> output.
        hiddens: hidden-layer widths; a shortcut is added when two consecutive
            hidden layers share the same width.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).
        is_training: flag forwarded to my_bn (train vs. inference statistics).

    Returns:
        Output tensor of the final linear layer.

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    layers = len(Weights)   # number of affine layers (hidden layers + output)
    H = variable_input      # current activations; starts as the input layer
    hidden_record = 0
    for k in range(layers - 1):
        H_pre = H
        W = Weights[k]
        B = Biases[k]
        H = DNN_activation(tf.add(tf.matmul(H, W), B))
        # Residual shortcut when two consecutive layers share a width.
        if hiddens[k] == hidden_record:
            H = H + H_pre
        # Batch-normalize every hidden layer's output.
        H = my_bn(H, is_training)
        hidden_record = hiddens[k]
    # Output layer: linear transform only, no activation.
    W_out = Weights[-1]
    B_out = Biases[-1]
    output = tf.add(tf.matmul(H, W_out), B_out)
    return output
def PDE_DNN_scale(variable_input, Weights, Biases, hiddens, freq_frag, activate_name=None):
    """Multi-scale DNN: first-layer pre-activations are modulated by frequencies.

    The first hidden layer's units are partitioned among the entries of
    freq_frag; each unit's pre-activation is multiplied by its frequency
    before the activation is applied (skipped when len(freq_frag) == 1).
    Later layers behave exactly like PDE_DNN, including residual shortcuts.

    Args:
        variable_input: input tensor (batch, in_dim).
        Weights, Biases: per-layer parameter lists, input -> hidden... -> output.
        hiddens: hidden-layer widths; hiddens[0] units are split across freq_frag.
        freq_frag: 1-D array/list of scale (frequency) factors.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).

    Returns:
        Output tensor of the final linear layer.

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    # Number of first-layer units assigned to each frequency.
    Unit_num = int(hiddens[0] / len(freq_frag))
    # Tile each frequency across its group of units, then truncate to the layer width.
    mixcoe = np.repeat(freq_frag, Unit_num)
    mixcoe = mixcoe[: hiddens[0]]
    # Pad the remainder (when hiddens[0] is not divisible) with the highest frequency.
    mixcoe = np.concatenate((mixcoe, np.ones([hiddens[0] - Unit_num * len(freq_frag)]) * freq_frag[-1]))
    # Fix: cast AFTER concatenation -- np.ones() is float64, so concatenating it
    # with a float32 array silently promoted mixcoe back to float64.
    mixcoe = mixcoe.astype(np.float32)
    layers = len(Weights)   # number of affine layers (hidden layers + output)
    H = variable_input      # current activations; starts as the input layer
    W_in = Weights[0]
    B_in = Biases[0]
    if len(freq_frag) == 1:
        # A single scale factor adds nothing; skip the modulation.
        H = tf.add(tf.matmul(H, W_in), B_in)
    else:
        H = tf.add(tf.matmul(H, W_in) * mixcoe, B_in)
    H = DNN_activation(H)
    hidden_record = hiddens[0]
    for k in range(layers - 2):
        H_pre = H
        W = Weights[k + 1]
        B = Biases[k + 1]
        H = DNN_activation(tf.add(tf.matmul(H, W), B))
        # Residual shortcut when two consecutive layers share a width.
        if hiddens[k + 1] == hidden_record:
            H = H + H_pre
        hidden_record = hiddens[k + 1]
    # Output layer: linear transform only, no activation.
    W_out = Weights[-1]
    B_out = Biases[-1]
    output = tf.add(tf.matmul(H, W_out), B_out)
    return output
def PDE_subDNNs_scale(variable_input, Wlists, Blists, hiddens, freq_frag, activate_name=None):
    """Ensemble of scale-modulated sub-networks; returns the mean subnet output.

    freq_frag is split into len(Wlists) chunks (the last chunk absorbs the
    remainder); each sub-network runs PDE_DNN_scale-style forward passes with
    its own chunk of frequencies, and the outputs are averaged.

    Args:
        variable_input: input tensor (batch, in_dim), shared by all subnets.
        Wlists, Blists: per-subnet lists of (Weights, Biases) parameter lists.
        hiddens: hidden-layer widths (shared by all subnets).
        freq_frag: 1-D array/list of scale (frequency) factors.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).

    Returns:
        Mean of the subnet output tensors (tf.reduce_mean over subnets).

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    output = []
    freqs_parts = []
    N2subnets = len(Wlists)
    len2parts = int(len(freq_frag) / N2subnets)
    # Split freq_frag into N2subnets chunks; the last chunk takes the remainder.
    for isubnet in range(N2subnets - 1):
        freqs_parts.append(freq_frag[isubnet * len2parts: len2parts * (isubnet + 1)])
    # Fix: the original reused the loop variable after the loop, which raises
    # NameError when N2subnets == 1 (the loop body never runs). Index explicitly.
    freqs_parts.append(freq_frag[len2parts * (N2subnets - 1):])
    for isubnet in range(N2subnets):
        # Number of first-layer units assigned to each frequency of this subnet.
        len2unit = int(hiddens[0] / len(freqs_parts[isubnet]))
        # Tile each frequency across its group of units, truncate to the layer width.
        mixcoe = np.repeat(freqs_parts[isubnet], len2unit)
        mixcoe = mixcoe[: hiddens[0]]
        # Pad the remainder with this subnet's highest frequency.
        mixcoe = np.concatenate((mixcoe, np.ones([hiddens[0] - len2unit * len(freqs_parts[isubnet])]) *
                                 (freqs_parts[isubnet])[-1]))
        # Fix: cast AFTER concatenation -- np.ones() is float64, so concatenating
        # it with a float32 array silently promoted mixcoe back to float64.
        mixcoe = mixcoe.astype(np.float32)
        Weights = Wlists[isubnet]
        Biases = Blists[isubnet]
        layers = len(Weights)   # number of affine layers in this subnet
        H = variable_input      # every subnet starts from the same input
        W_in = Weights[0]
        B_in = Biases[0]
        if len(freq_frag) == 1:
            H = tf.add(tf.matmul(H, W_in), B_in)
        else:
            H = tf.add(tf.matmul(H, W_in) * mixcoe, B_in)
        H = DNN_activation(H)
        hidden_record = hiddens[0]
        for k in range(layers - 2):
            H_pre = H
            W = Weights[k + 1]
            B = Biases[k + 1]
            H = DNN_activation(tf.add(tf.matmul(H, W), B))
            # Residual shortcut when two consecutive layers share a width.
            if hiddens[k + 1] == hidden_record:
                H = H + H_pre
            hidden_record = hiddens[k + 1]
        W_out = Weights[-1]
        B_out = Biases[-1]
        output2subnet = tf.add(tf.matmul(H, W_out), B_out)
        output.append(output2subnet)
    # Average the subnet predictions.
    out = tf.reduce_mean(output, axis=0)
    return out
def PDE_DNN_adapt_scale(variable_input, Weights, Biases, hiddens, freq_frag, activate_name=None):
    """Multi-scale DNN whose frequency factors are TRAINABLE.

    Same as PDE_DNN_scale, except the per-unit scale factors are stored in a
    trainable variable 'M0' (initialized from freq_frag) instead of being a
    fixed constant, so the network can adapt the scales during training.

    Args:
        variable_input: input tensor (batch, in_dim).
        Weights, Biases: per-layer parameter lists, input -> hidden... -> output.
        hiddens: hidden-layer widths; hiddens[0] units are split across freq_frag.
        freq_frag: 1-D array/list of initial scale (frequency) factors.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).

    Returns:
        Output tensor of the final linear layer.

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    # Number of first-layer units assigned to each frequency.
    Unit_num = int(hiddens[0] / len(freq_frag))
    # Tile each frequency across its group of units.
    init_mixcoe = np.repeat(freq_frag, Unit_num)
    # Pad the remainder (when hiddens[0] is not divisible) with the highest frequency.
    init_mixcoe = np.concatenate((init_mixcoe, np.ones([hiddens[0] - Unit_num * len(freq_frag)]) * freq_frag[-1]))
    # get_variable infers its dtype from the initializer, so cast to float32.
    init_mixcoe = init_mixcoe.astype(np.float32)
    layers = len(Weights)   # number of affine layers (hidden layers + output)
    H = variable_input      # current activations; starts as the input layer
    W_in = Weights[0]
    B_in = Biases[0]
    # Trainable scale factors, shared via the surrounding variable scope.
    mixcoe = tf.get_variable(name='M0', initializer=init_mixcoe)
    if len(freq_frag) == 1:
        # A single scale factor adds nothing; skip the modulation.
        H = tf.add(tf.matmul(H, W_in), B_in)
    else:
        H = tf.add(tf.matmul(H, W_in) * mixcoe, B_in)
    H = DNN_activation(H)
    hidden_record = hiddens[0]
    for k in range(layers - 2):
        H_pre = H
        W = Weights[k + 1]
        B = Biases[k + 1]
        H = DNN_activation(tf.add(tf.matmul(H, W), B))
        # Residual shortcut when two consecutive layers share a width.
        if hiddens[k + 1] == hidden_record:
            H = H + H_pre
        hidden_record = hiddens[k + 1]
    # Output layer: linear transform only, no activation.
    W_out = Weights[-1]
    B_out = Biases[-1]
    output = tf.add(tf.matmul(H, W_out), B_out)
    return output
def PDE_subDNNs_adapt_scale(variable_input, Wlists, Blists, hiddens, freq_frag, activate_name=None):
    """Ensemble of sub-networks with TRAINABLE scale factors; returns the mean.

    Like PDE_subDNNs_scale, but each subnet's frequency factors are stored in
    a trainable variable 'M<i>' initialized from its chunk of freq_frag.

    Args:
        variable_input: input tensor (batch, in_dim), shared by all subnets.
        Wlists, Blists: per-subnet lists of (Weights, Biases) parameter lists.
        hiddens: hidden-layer widths (shared by all subnets).
        freq_frag: 1-D array/list of initial scale (frequency) factors.
        activate_name: hidden-activation name ('relu', 'tanh', 'sin', ...).

    Returns:
        Mean of the subnet output tensors (tf.reduce_mean over subnets).

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    output = []
    freqs_parts = []
    N2subnets = len(Wlists)
    len2parts = int(len(freq_frag) / N2subnets)
    # Split freq_frag into N2subnets chunks; the last chunk takes the remainder.
    for isubnet in range(N2subnets - 1):
        freqs_parts.append(freq_frag[isubnet * len2parts: len2parts * (isubnet + 1)])
    # Fix: the original reused the loop variable after the loop, which raises
    # NameError when N2subnets == 1 (the loop body never runs). Index explicitly.
    freqs_parts.append(freq_frag[len2parts * (N2subnets - 1):])
    for isubnet in range(N2subnets):
        # Number of first-layer units assigned to each frequency of this subnet.
        len2unit = int(hiddens[0] / len(freqs_parts[isubnet]))
        # Tile each frequency across its group of units, truncate to the layer width.
        init_mixcoe = np.repeat(freqs_parts[isubnet], len2unit)
        init_mixcoe = init_mixcoe[: hiddens[0]]
        # Pad the remainder with this subnet's highest frequency.
        init_mixcoe = np.concatenate((init_mixcoe,
                                      np.ones([hiddens[0] - len2unit * len(freqs_parts[isubnet])]) *
                                      (freqs_parts[isubnet])[-1]))
        # Fix: cast AFTER concatenation. The original cast before concatenating
        # with float64 np.ones(), so the initializer ended up float64 and
        # tf.get_variable created a float64 variable that cannot multiply the
        # float32 activations.
        init_mixcoe = init_mixcoe.astype(np.float32)
        # Trainable scale factors for this subnet.
        mixcoe = tf.get_variable(name='M' + str(isubnet), initializer=init_mixcoe)
        Weights = Wlists[isubnet]
        Biases = Blists[isubnet]
        layers = len(Weights)   # number of affine layers in this subnet
        H = variable_input      # every subnet starts from the same input
        W_in = Weights[0]
        B_in = Biases[0]
        if len(freq_frag) == 1:
            H = tf.add(tf.matmul(H, W_in), B_in)
        else:
            H = tf.add(tf.matmul(H, W_in) * mixcoe, B_in)
        H = DNN_activation(H)
        hidden_record = hiddens[0]
        for k in range(layers - 2):
            H_pre = H
            W = Weights[k + 1]
            B = Biases[k + 1]
            H = DNN_activation(tf.add(tf.matmul(H, W), B))
            # Residual shortcut when two consecutive layers share a width.
            if hiddens[k + 1] == hidden_record:
                H = H + H_pre
            hidden_record = hiddens[k + 1]
        W_out = Weights[-1]
        B_out = Biases[-1]
        output2subnet = tf.add(tf.matmul(H, W_out), B_out)
        output.append(output2subnet)
    # Average the subnet predictions.
    out = tf.reduce_mean(output, axis=0)
    return out
def PDE_DNN_FourierBase(variable_input, Weights, Biases, hiddens, freq_frag, activate_name=None):
    """Fourier-basis DNN: the first layer applies sin() to frequency-scaled inputs.

    The first hidden layer's units are partitioned among the entries of
    freq_frag; each unit's pre-activation is multiplied by its frequency and
    then passed through sin() (regardless of activate_name), forming a
    Fourier-feature layer. Later layers use the named activation and residual
    shortcuts like PDE_DNN.

    Args:
        variable_input: input tensor (batch, in_dim).
        Weights, Biases: per-layer parameter lists, input -> hidden... -> output.
        hiddens: hidden-layer widths; hiddens[0] units are split across freq_frag.
        freq_frag: 1-D array/list of frequency factors.
        activate_name: activation for the layers after the Fourier layer.

    Returns:
        Output tensor of the final linear layer.

    Raises:
        ValueError: if activate_name is not one of the supported names.
    """
    if activate_name == 'relu':
        DNN_activation = tf.nn.relu
    elif activate_name == 'leaky_relu':
        # Fix: the original assigned tf.nn.leaky_relu(0.2), which *calls* the
        # activation on the constant 0.2 and yields a tensor, not a callable.
        DNN_activation = lambda x_in: tf.nn.leaky_relu(x_in, alpha=0.2)
    elif activate_name == 'elu':
        DNN_activation = tf.nn.elu
    elif activate_name == 'tanh':
        DNN_activation = tf.nn.tanh
    elif activate_name == 'sin':
        DNN_activation = mysin
    elif activate_name == 'srelu':
        DNN_activation = srelu
    elif activate_name == 'powsin_srelu':
        DNN_activation = powsin_srelu
    elif activate_name == 's2relu':
        DNN_activation = s2relu
    elif activate_name == 'sin2_srelu':
        DNN_activation = sin2_srelu
    elif activate_name == 'slrelu':
        DNN_activation = slrelu
    elif activate_name == 'selu':
        DNN_activation = selu
    elif activate_name == 'phi':
        DNN_activation = phi
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError('unsupported activate_name: %s' % activate_name)
    layers = len(Weights)   # number of affine layers (hidden layers + output)
    H = variable_input      # current activations; starts as the input layer
    # Number of first-layer units assigned to each frequency.
    Unit_num = int(hiddens[0] / len(freq_frag))
    # Tile each frequency across its group of units, then truncate to the layer width.
    mixcoe = np.repeat(freq_frag, Unit_num)
    mixcoe = mixcoe[: hiddens[0]]
    # Pad the remainder (when hiddens[0] is not divisible) with the highest frequency.
    mixcoe = np.concatenate((mixcoe, np.ones([hiddens[0] - Unit_num * len(freq_frag)]) * freq_frag[-1]))
    mixcoe = mixcoe.astype(np.float32)
    W_in = Weights[0]
    B_in = Biases[0]
    if len(freq_frag) == 1:
        # A single scale factor adds nothing; skip the modulation.
        H = tf.add(tf.matmul(H, W_in), B_in)
    else:
        H = tf.add(tf.matmul(H, W_in) * mixcoe, B_in)
    # Fourier feature layer: sine is applied regardless of activate_name.
    H = tf.sin(H)
    hiddens_record = hiddens[0]
    for k in range(layers - 2):
        H_pre = H
        W = Weights[k + 1]
        B = Biases[k + 1]
        H = DNN_activation(tf.add(tf.matmul(H, W), B))
        # Residual shortcut when two consecutive layers share a width.
        if hiddens[k + 1] == hiddens_record:
            H = H + H_pre
        hiddens_record = hiddens[k + 1]
    # Output layer: linear transform only, no activation.
    W_out = Weights[-1]
    B_out = Biases[-1]
    output = tf.add(tf.matmul(H, W_out), B_out)
    return output
"tensorflow.equal",
"numpy.sqrt",
"tensorflow.get_variable",
"tensorflow.nn.elu",
"tensorflow.nn.moments",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.sin",
"tensorflow.random.normal",
"numpy.repeat",
"tensorflow.random_normal",
"tensorflow.random_normal_initialize... | [((1072, 1128), 'tensorflow.nn.moments', 'tf.nn.moments', (['input_variable'], {'axes': 'axis', 'keep_dims': '(True)'}), '(input_variable, axes=axis, keep_dims=True)\n', (1085, 1128), True, 'import tensorflow as tf\n'), ((1290, 1346), 'tensorflow.nn.moments', 'tf.nn.moments', (['input_variable'], {'axes': 'axis', 'keep_dims': '(True)'}), '(input_variable, axes=axis, keep_dims=True)\n', (1303, 1346), True, 'import tensorflow as tf\n'), ((3158, 3179), 'tensorflow.sin', 'tf.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (3164, 3179), True, 'import tensorflow as tf\n'), ((6337, 6368), 'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), '(2 / (in_dim + out_dim))\n', (6344, 6368), True, 'import numpy as np\n'), ((7163, 7196), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (in_dim + out_dim))'], {}), '(2.0 / (in_dim + out_dim))\n', (7170, 7196), True, 'import numpy as np\n'), ((18626, 18656), 'numpy.repeat', 'np.repeat', (['freq_frag', 'Unit_num'], {}), '(freq_frag, Unit_num)\n', (18635, 18656), True, 'import numpy as np\n'), ((22785, 22815), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (22799, 22815), True, 'import tensorflow as tf\n'), ((24041, 24071), 'numpy.repeat', 'np.repeat', (['freq_frag', 'Unit_num'], {}), '(freq_frag, Unit_num)\n', (24050, 24071), True, 'import numpy as np\n'), ((24519, 24570), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""M0"""', 'initializer': 'init_mixcoe'}), "(name='M0', initializer=init_mixcoe)\n", (24534, 24570), True, 'import tensorflow as tf\n'), ((28377, 28407), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (28391, 28407), True, 'import tensorflow as tf\n'), ((29801, 29831), 'numpy.repeat', 'np.repeat', (['freq_frag', 'Unit_num'], {}), '(freq_frag, Unit_num)\n', (29810, 29831), True, 'import numpy as np\n'), ((30403, 30412), 'tensorflow.sin', 'tf.sin', (['H'], {}), 
'(H)\n', (30409, 30412), True, 'import tensorflow as tf\n'), ((1568, 1591), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1585, 1591), True, 'import tensorflow as tf\n'), ((1618, 1683), 'tensorflow.nn.moments', 'tf.nn.moments', (['input_x'], {'axes': 'axis', 'name': '"""moments"""', 'keep_dims': '(True)'}), "(input_x, axes=axis, name='moments', keep_dims=True)\n", (1631, 1683), True, 'import tensorflow as tf\n'), ((1701, 1717), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (1712, 1717), True, 'import tensorflow as tf\n'), ((1760, 1778), 'tensorflow.constant', 'tf.constant', (['(0.001)'], {}), '(0.001)\n', (1771, 1778), True, 'import tensorflow as tf\n'), ((1871, 1918), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['moving_decay'], {}), '(moving_decay)\n', (1904, 1918), True, 'import tensorflow as tf\n'), ((2368, 2440), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['input_x', 'x_mean', 'x_var', 'shift', 'scale', 'epsilon'], {}), '(input_x, x_mean, x_var, shift, scale, epsilon)\n', (2393, 2440), True, 'import tensorflow as tf\n'), ((2641, 2664), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2658, 2664), True, 'import tensorflow as tf\n'), ((2691, 2756), 'tensorflow.nn.moments', 'tf.nn.moments', (['input_x'], {'axes': 'axis', 'name': '"""moments"""', 'keep_dims': '(True)'}), "(input_x, axes=axis, name='moments', keep_dims=True)\n", (2704, 2756), True, 'import tensorflow as tf\n'), ((2774, 2790), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (2785, 2790), True, 'import tensorflow as tf\n'), ((2833, 2851), 'tensorflow.constant', 'tf.constant', (['(0.001)'], {}), '(0.001)\n', (2844, 2851), True, 'import tensorflow as tf\n'), ((2919, 2991), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['input_x', 'x_mean', 'x_var', 'shift', 'scale', 'epsilon'], {}), '(input_x, x_mean, x_var, shift, 
scale, epsilon)\n', (2944, 2991), True, 'import tensorflow as tf\n'), ((3207, 3224), 'tensorflow.nn.relu', 'tf.nn.relu', (['(1 - x)'], {}), '(1 - x)\n', (3217, 3224), True, 'import tensorflow as tf\n'), ((3223, 3236), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3233, 3236), True, 'import tensorflow as tf\n'), ((3299, 3320), 'tensorflow.sin', 'tf.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (3305, 3320), True, 'import tensorflow as tf\n'), ((3403, 3424), 'tensorflow.sin', 'tf.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (3409, 3424), True, 'import tensorflow as tf\n'), ((3509, 3530), 'tensorflow.sin', 'tf.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (3515, 3530), True, 'import tensorflow as tf\n'), ((3559, 3582), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(1 - x)'], {}), '(1 - x)\n', (3575, 3582), True, 'import tensorflow as tf\n'), ((3581, 3600), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['x'], {}), '(x)\n', (3597, 3600), True, 'import tensorflow as tf\n'), ((3665, 3678), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3675, 3678), True, 'import tensorflow as tf\n'), ((3709, 3725), 'tensorflow.nn.elu', 'tf.nn.elu', (['(1 - x)'], {}), '(1 - x)\n', (3718, 3725), True, 'import tensorflow as tf\n'), ((3724, 3736), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (3733, 3736), True, 'import tensorflow as tf\n'), ((3841, 3858), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 1)'], {}), '(x - 1)\n', (3851, 3858), True, 'import tensorflow as tf\n'), ((6693, 6747), 'tensorflow.random_uniform', 'tf.random_uniform', (['[in_dim, out_dim]'], {'dtype': 'tf.float32'}), '([in_dim, out_dim], dtype=tf.float32)\n', (6710, 6747), True, 'import tensorflow as tf\n'), ((7596, 7641), 'tensorflow.zeros', 'tf.zeros', (['[in_dim, out_dim]'], {'dtype': 'tf.float32'}), '([in_dim, out_dim], dtype=tf.float32)\n', (7604, 7641), True, 'import tensorflow as tf\n'), ((7774, 7824), 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""WB_scope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('WB_scope', reuse=tf.AUTO_REUSE)\n", (7791, 7824), True, 'import tensorflow as tf\n'), ((9150, 9200), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""WB_scope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('WB_scope', reuse=tf.AUTO_REUSE)\n", (9167, 9200), True, 'import tensorflow as tf\n'), ((11306, 11356), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""WB_scope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('WB_scope', reuse=tf.AUTO_REUSE)\n", (11323, 11356), True, 'import tensorflow as tf\n'), ((15755, 15774), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (15764, 15774), True, 'import tensorflow as tf\n'), ((17330, 17349), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (17339, 17349), True, 'import tensorflow as tf\n'), ((19623, 19642), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (19632, 19642), True, 'import tensorflow as tf\n'), ((21417, 21458), 'numpy.repeat', 'np.repeat', (['freqs_parts[isubnet]', 'len2unit'], {}), '(freqs_parts[isubnet], len2unit)\n', (21426, 21458), True, 'import numpy as np\n'), ((25143, 25162), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (25152, 25162), True, 'import tensorflow as tf\n'), ((26948, 26989), 'numpy.repeat', 'np.repeat', (['freqs_parts[isubnet]', 'len2unit'], {}), '(freqs_parts[isubnet], len2unit)\n', (26957, 26989), True, 'import numpy as np\n'), ((30785, 30804), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (30794, 30804), True, 'import tensorflow as tf\n'), ((2214, 2241), 'tensorflow.equal', 'tf.equal', (['is_training', '(True)'], {}), '(is_training, True)\n', (2222, 2241), True, 'import tensorflow as tf\n'), ((3269, 3286), 'tensorflow.nn.relu', 'tf.nn.relu', (['(1 - x)'], {}), '(1 - x)\n', (3279, 3286), True, 'import tensorflow as tf\n'), ((3285, 3298), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3295, 3298), True, 'import 
tensorflow as tf\n'), ((3385, 3406), 'tensorflow.sin', 'tf.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (3391, 3406), True, 'import tensorflow as tf\n'), ((3491, 3512), 'tensorflow.sin', 'tf.sin', (['(4 * np.pi * x)'], {}), '(4 * np.pi * x)\n', (3497, 3512), True, 'import tensorflow as tf\n'), ((3635, 3652), 'tensorflow.nn.relu', 'tf.nn.relu', (['(1 - x)'], {}), '(1 - x)\n', (3645, 3652), True, 'import tensorflow as tf\n'), ((3651, 3664), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3661, 3664), True, 'import tensorflow as tf\n'), ((4034, 4051), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 3)'], {}), '(x - 3)\n', (4044, 4051), True, 'import tensorflow as tf\n'), ((4701, 4746), 'tensorflow.random.normal', 'tf.random.normal', (['[in_size, hidden_layers[0]]'], {}), '([in_size, hidden_layers[0]])\n', (4717, 4746), True, 'import tensorflow as tf\n'), ((4846, 4886), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1, hidden_layers[0]]'], {}), '([1, hidden_layers[0]])\n', (4863, 4886), True, 'import tensorflow as tf\n'), ((5552, 5599), 'tensorflow.random.normal', 'tf.random.normal', (['[hidden_layers[-1], out_size]'], {}), '([hidden_layers[-1], out_size])\n', (5568, 5599), True, 'import tensorflow as tf\n'), ((5697, 5729), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1, out_size]'], {}), '([1, out_size])\n', (5714, 5729), True, 'import tensorflow as tf\n'), ((6425, 6485), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), '([in_dim, out_dim], stddev=xavier_stddev)\n', (6444, 6485), True, 'import tensorflow as tf\n'), ((7253, 7341), 'tensorflow.random_normal', 'tf.random_normal', (['[in_dim, out_dim]'], {'mean': '(0)', 'stddev': 'stddev2normal', 'dtype': 'tf.float32'}), '([in_dim, out_dim], mean=0, stddev=stddev2normal, dtype=tf.\n float32)\n', (7269, 7341), True, 'import tensorflow as tf\n'), ((14551, 14572), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), 
'(0.2)\n', (14567, 14572), True, 'import tensorflow as tf\n'), ((16091, 16112), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (16107, 16112), True, 'import tensorflow as tf\n'), ((17662, 17683), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (17678, 17683), True, 'import tensorflow as tf\n'), ((19133, 19151), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (19142, 19151), True, 'import tensorflow as tf\n'), ((19958, 19979), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (19974, 19979), True, 'import tensorflow as tf\n'), ((22663, 22682), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (22672, 22682), True, 'import tensorflow as tf\n'), ((23072, 23093), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (23088, 23093), True, 'import tensorflow as tf\n'), ((24653, 24671), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (24662, 24671), True, 'import tensorflow as tf\n'), ((25484, 25505), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (25500, 25505), True, 'import tensorflow as tf\n'), ((28300, 28319), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_out'], {}), '(H, W_out)\n', (28309, 28319), True, 'import tensorflow as tf\n'), ((28664, 28685), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(0.2)'], {}), '(0.2)\n', (28680, 28685), True, 'import tensorflow as tf\n'), ((30304, 30322), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (30313, 30322), True, 'import tensorflow as tf\n'), ((2031, 2070), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_apply_op]'], {}), '([ema_apply_op])\n', (2054, 2070), True, 'import tensorflow as tf\n'), ((3355, 3372), 'tensorflow.nn.relu', 'tf.nn.relu', (['(1 - x)'], {}), '(1 - x)\n', (3365, 3372), True, 'import tensorflow as tf\n'), ((3371, 3384), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], 
{}), '(x)\n', (3381, 3384), True, 'import tensorflow as tf\n'), ((3477, 3490), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3487, 3490), True, 'import tensorflow as tf\n'), ((3767, 3780), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3777, 3780), True, 'import tensorflow as tf\n'), ((3821, 3842), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 3 / 4)'], {}), '(x - 3 / 4)\n', (3831, 3842), True, 'import tensorflow as tf\n'), ((3970, 3987), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 2)'], {}), '(x - 2)\n', (3980, 3987), True, 'import tensorflow as tf\n'), ((4002, 4019), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 3)'], {}), '(x - 3)\n', (4012, 4019), True, 'import tensorflow as tf\n'), ((4018, 4035), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 3)'], {}), '(x - 3)\n', (4028, 4035), True, 'import tensorflow as tf\n'), ((5103, 5173), 'tensorflow.random.normal', 'tf.random.normal', (['[hidden_layers[i_layer], hidden_layers[i_layer + 1]]'], {}), '([hidden_layers[i_layer], hidden_layers[i_layer + 1]])\n', (5119, 5173), True, 'import tensorflow as tf\n'), ((5294, 5344), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1, hidden_layers[i_layer + 1]]'], {}), '([1, hidden_layers[i_layer + 1]])\n', (5311, 5344), True, 'import tensorflow as tf\n'), ((9563, 9609), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (9591, 9609), True, 'import tensorflow as tf\n'), ((9790, 9836), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (9818, 9836), True, 'import tensorflow as tf\n'), ((10866, 10912), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (10894, 10912), True, 'import tensorflow as tf\n'), ((11052, 11098), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), 
'(stddev=stddev_WB)\n', (11080, 11098), True, 'import tensorflow as tf\n'), ((11719, 11765), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (11747, 11765), True, 'import tensorflow as tf\n'), ((11944, 11990), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (11972, 11990), True, 'import tensorflow as tf\n'), ((13018, 13064), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (13046, 13064), True, 'import tensorflow as tf\n'), ((13204, 13250), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (13232, 13250), True, 'import tensorflow as tf\n'), ((13667, 13692), 'tensorflow.abs', 'tf.abs', (['weights[i_layer1]'], {}), '(weights[i_layer1])\n', (13673, 13692), True, 'import tensorflow as tf\n'), ((13758, 13782), 'tensorflow.abs', 'tf.abs', (['biases[i_layer1]'], {}), '(biases[i_layer1])\n', (13764, 13782), True, 'import tensorflow as tf\n'), ((14050, 14078), 'tensorflow.square', 'tf.square', (['weights[i_layer1]'], {}), '(weights[i_layer1])\n', (14059, 14078), True, 'import tensorflow as tf\n'), ((14144, 14171), 'tensorflow.square', 'tf.square', (['biases[i_layer1]'], {}), '(biases[i_layer1])\n', (14153, 14171), True, 'import tensorflow as tf\n'), ((15560, 15575), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (15569, 15575), True, 'import tensorflow as tf\n'), ((17100, 17115), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (17109, 17115), True, 'import tensorflow as tf\n'), ((19190, 19208), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (19199, 19208), True, 'import tensorflow as tf\n'), ((19422, 19437), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (19431, 19437), True, 'import tensorflow 
as tf\n'), ((22106, 22124), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (22115, 22124), True, 'import tensorflow as tf\n'), ((24710, 24728), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (24719, 24728), True, 'import tensorflow as tf\n'), ((24942, 24957), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (24951, 24957), True, 'import tensorflow as tf\n'), ((27743, 27761), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (27752, 27761), True, 'import tensorflow as tf\n'), ((30361, 30379), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (30370, 30379), True, 'import tensorflow as tf\n'), ((30584, 30599), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (30593, 30599), True, 'import tensorflow as tf\n'), ((2096, 2115), 'tensorflow.identity', 'tf.identity', (['x_mean'], {}), '(x_mean)\n', (2107, 2115), True, 'import tensorflow as tf\n'), ((2117, 2135), 'tensorflow.identity', 'tf.identity', (['x_var'], {}), '(x_var)\n', (2128, 2135), True, 'import tensorflow as tf\n'), ((3461, 3478), 'tensorflow.nn.relu', 'tf.nn.relu', (['(1 - x)'], {}), '(1 - x)\n', (3471, 3478), True, 'import tensorflow as tf\n'), ((3785, 3806), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 1 / 4)'], {}), '(x - 1 / 4)\n', (3795, 3806), True, 'import tensorflow as tf\n'), ((3886, 3899), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3896, 3899), True, 'import tensorflow as tf\n'), ((3902, 3915), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3912, 3915), True, 'import tensorflow as tf\n'), ((3934, 3951), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 1)'], {}), '(x - 1)\n', (3944, 3951), True, 'import tensorflow as tf\n'), ((3954, 3971), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 2)'], {}), '(x - 2)\n', (3964, 3971), True, 'import tensorflow as tf\n'), ((10265, 10311), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 
'stddev_WB'}), '(stddev=stddev_WB)\n', (10293, 10311), True, 'import tensorflow as tf\n'), ((10491, 10537), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (10519, 10537), True, 'import tensorflow as tf\n'), ((12419, 12465), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (12447, 12465), True, 'import tensorflow as tf\n'), ((12641, 12687), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev_WB'}), '(stddev=stddev_WB)\n', (12669, 12687), True, 'import tensorflow as tf\n'), ((22171, 22189), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (22180, 22189), True, 'import tensorflow as tf\n'), ((22431, 22446), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (22440, 22446), True, 'import tensorflow as tf\n'), ((27808, 27826), 'tensorflow.matmul', 'tf.matmul', (['H', 'W_in'], {}), '(H, W_in)\n', (27817, 27826), True, 'import tensorflow as tf\n'), ((28068, 28083), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (28077, 28083), True, 'import tensorflow as tf\n'), ((3918, 3935), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x - 1)'], {}), '(x - 1)\n', (3928, 3935), True, 'import tensorflow as tf\n')] |
# coding=utf8
# BézierBuilder
#
# Copyright (c) 2013, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""BézierBuilder, an interactive Bézier curve explorer.
Just run it with
$ python bezier_builder.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from math import factorial
from scipy import signal
from matplotlib.lines import Line2D
from matplotlib.backends.qt_compat import QtGui, QtCore
from .minimvc import Trigger
class ControlPointModel(object):
    """Mutable list of spline control points with change notification.

    ``fixed`` is either None (nothing pinned) or the index of one point
    that must not be moved or removed; insertions and removals in front
    of it keep the stored index pointing at the same point.  Every
    successful mutation fires ``self.trigger``.
    """

    def __init__(self, xp, yp, fixed=None):
        self._xp = list(xp)
        self._yp = list(yp)
        self._fixed = fixed
        self.trigger = Trigger()

    def get_control_points(self):
        # Hand out copies so callers cannot mutate internal state directly.
        return list(self._xp), list(self._yp), self._fixed

    def add_point(self, i, new_x, new_y):
        self._xp[i:i] = [new_x]
        self._yp[i:i] = [new_y]
        # Inserting at or before the pinned point shifts it to the right.
        if self._fixed is not None and i <= self._fixed:
            self._fixed += 1
        self.trigger.fire()

    def remove_point(self, i):
        # The pinned point cannot be deleted (and no change is announced).
        if i == self._fixed:
            return
        del self._xp[i]
        del self._yp[i]
        # Deleting in front of the pinned point shifts it to the left.
        if self._fixed is not None and i < self._fixed:
            self._fixed -= 1
        self.trigger.fire()

    def move_point(self, i, new_x, new_y):
        # The pinned point cannot be moved (and no change is announced).
        if i == self._fixed:
            return
        self._xp[i] = new_x
        self._yp[i] = new_y
        self.trigger.fire()

    def set_control_points(self, xp, yp, fixed=None):
        self._xp = list(xp)
        self._yp = list(yp)
        self._fixed = fixed
        self.trigger.fire()
class ControlPointBuilder(object):
    """Wire mouse events on *ax* to edits of a ControlPointModel.

    Draws the control polygon as a dashed line and supports dragging
    (plain click), deleting (control-click or "remove" mode) and
    inserting (shift-click or "add" mode) of control points.
    """

    def __init__(self, ax, control_point_model):
        self.ax = ax
        self.control_point_model = control_point_model
        self.canvas = self.ax.figure.canvas

        xp, yp, _ = self.control_point_model.get_control_points()
        self.control_polygon = Line2D(xp, yp,
                                    ls="--", c="#666666", marker="x",
                                    mew=2, mec="#204a87")
        self.ax.add_line(self.control_polygon)

        # Route the three relevant mouse events to our handlers.
        for event_name, handler in (
                ('button_press_event', self.on_button_press),
                ('button_release_event', self.on_button_release),
                ('motion_notify_event', self.on_motion_notify)):
            self.canvas.mpl_connect(event_name, handler)

        self._index = None  # index of the vertex currently being dragged
        self.control_point_model.trigger.add_callback(self._refresh)
        self.mode = "move"
        self._refresh()

    def on_button_press(self, event):
        modkey = event.guiEvent.modifiers()
        # Clicks outside our axes are none of our business.
        if event.inaxes != self.ax:
            return
        hit, info = self.control_polygon.contains(event)
        if hit and modkey == QtCore.Qt.NoModifier:
            # Plain click on a vertex starts a drag.
            self._index = info["ind"][0]
        if hit and (modkey == QtCore.Qt.ControlModifier
                    or self.mode == "remove"):
            # Control-click (or remove mode) deletes the clicked vertex.
            self.control_point_model.remove_point(info["ind"][0])
        if modkey == QtCore.Qt.ShiftModifier or self.mode == "add":
            self._insert_near(event)

    def _insert_near(self, event):
        # Insert a new vertex between the pair of consecutive control
        # points whose summed squared distance to the click is smallest.
        xp, yp, _ = self.control_point_model.get_control_points()
        scores = []
        for i in range(len(xp) - 1):
            score = ((event.xdata - xp[i]) ** 2
                     + (event.ydata - yp[i]) ** 2
                     + (event.xdata - xp[i + 1]) ** 2
                     + (event.ydata - yp[i + 1]) ** 2)
            scores.append(score)
        best = np.argmin(scores)
        self.control_point_model.add_point(best + 1,
                                           event.xdata,
                                           event.ydata)

    def on_button_release(self, event):
        # Only a left-button release ends a drag.
        if event.button != 1:
            return
        self._index = None

    def on_motion_notify(self, event):
        if event.inaxes != self.ax:
            return
        if self._index is None:
            return
        self.control_point_model.move_point(self._index,
                                            event.xdata, event.ydata)

    def _refresh(self):
        xp, yp, _ = self.control_point_model.get_control_points()
        self.control_polygon.set_data(xp, yp)
        self.canvas.draw()
################################################################
def compute_bezier_points(xp, yp, at, method, grid=256):
    """Evaluate the spline built by *method* from (xp, yp) at the
    normalized-arclength fractions *at*; returns a (2, len(at)) array.

    The native curve parameter t in [0, 1] is not proportional to
    arclength, so arclength(t) is tabulated on *grid* samples,
    normalized to [0, 1], and the table inverted to recover the t
    values matching the requested fractions.
    """
    at = np.asarray(at)
    # Tabulate the t -> normalized-arclength mapping.
    ts = np.linspace(0, 1, grid)
    arc = compute_arc_length(xp, yp, method, t=ts)
    arc /= arc[-1]
    # Invert the (ts, arc) lookup table: arclength fraction -> t.
    t_for_at = np.interp(at, arc, ts)
    # Finally evaluate the curve at the recovered parameter values.
    return method(list(zip(xp, yp)), t_for_at).T
def compute_arc_length(xp, yp, method, t=None, grid=256):
    """Cumulative arclength of the curve *method* builds from (xp, yp).

    The curve is sampled at parameter values *t* (default: *grid*
    evenly spaced values in [0, 1]) and the length is accumulated over
    the resulting polyline, so element i is the distance travelled up
    to t[i].
    """
    if t is None:
        t = np.linspace(0, 1, grid)
    x, y = method(list(zip(xp, yp)), t).T
    # Length of every polyline segment between consecutive samples.
    segment_lengths = np.hypot(np.diff(x), np.diff(y))
    if t.size == 0:
        # Degenerate request: a single zero keeps callers' indexing happy.
        return np.asarray([0])
    # Cumulative length, starting from zero at the first sample.
    return np.concatenate(([0.0], np.cumsum(segment_lengths)))
class SingleBezierCurveModel(object):
    """Model of a single spline through every control point.

    ``method`` names a module-level curve constructor ("Bezier" or
    "CatmulClark"); the name is resolved with eval(), which is only
    acceptable because the argument is trusted and hard-coded.
    """

    def __init__(self, control_point_model, method="CatmulClark"):
        self.method = eval(method)  # resolve constructor name in module scope
        self.control_point_model = control_point_model
        xs, ys = self.get_bezier_points()
        self.bezier_curve = Line2D(xs, ys)
        # Recompute our curve whenever the control points change.
        self.trigger = self.control_point_model.trigger
        self.trigger.add_callback(self._refresh)

    def get_bezier_points(self, num=200):
        # Sample the curve at *num* evenly spaced arclength fractions.
        return self.get_bezier_points_at(np.linspace(0, 1, num))

    def get_bezier_points_at(self, at, grid=1000):
        xp, yp, _ = self.control_point_model.get_control_points()
        return compute_bezier_points(xp, yp, at, self.method, grid=grid)

    def _refresh(self):
        xs, ys = self.get_bezier_points()
        self.bezier_curve.set_data(xs, ys)
class TwoBezierCurveModel(object):
    """Model of a curve made of two splines joined at a fixed control point.

    The control point model must carry a non-None ``fixed`` index: the
    points up to and including it define the first spline, the points
    from it onward define the second.  The arclength parameter is split
    so that at = 0.5 falls on the join.
    """
    def __init__(self, control_point_model, method="CatmulClark"):
        # NOTE(review): eval() resolves the method name in module scope;
        # only safe because the argument is trusted and hard-coded.
        self.method = eval(method)
        self.control_point_model = control_point_model
        x, y = self.get_bezier_points()
        self.bezier_curve = Line2D(x, y)
        # Redraw whenever the underlying control points change.
        self.trigger = self.control_point_model.trigger
        self.trigger.add_callback(self._refresh)
    def get_bezier_points(self, num=200):
        # Sample the full two-piece curve at num arclength fractions.
        return self.get_bezier_points_at(np.linspace(0, 1, num))
    def get_bezier_points_at(self, at, grid=256):
        at = np.asarray(at)
        if at.ndim == 0:
            # Accept a scalar fraction as well.
            at = np.array([at])
        # Split the requested fractions at the join (at = 0.5).
        low_mask = (at < 0.5)
        high_mask = (at >= 0.5)
        xp, yp, fixed = self.control_point_model.get_control_points()
        assert fixed is not None
        # The fixed point belongs to both halves.
        low_xp = xp[:fixed + 1]
        low_yp = yp[:fixed + 1]
        high_xp = xp[fixed:]
        high_yp = yp[fixed:]
        # sf = (shorter half's length) / (longer half's length).
        low_al = compute_arc_length(low_xp, low_yp, self.method).max()
        high_al = compute_arc_length(high_xp, high_yp, self.method).max()
        sf = min(low_al, high_al) / max(low_al, high_al)
        high_at = at[high_mask]
        low_at = at[low_mask]
        # Remap each half's fractions into its own [0, 1] range, scaling
        # the longer half by sf -- NOTE(review): presumably to equalize
        # traversal speed across the join; confirm intent before changing.
        if high_al < low_al:
            high_at = high_at * 2 - 1
            low_at = (0.5 - (0.5 - low_at) * sf) * 2
        else:
            high_at = (0.5 + (high_at - 0.5) * sf) * 2 - 1
            low_at = low_at * 2
        low_points = compute_bezier_points(low_xp, low_yp,
                                    low_at, self.method, grid=grid)
        high_points = compute_bezier_points(high_xp, high_yp,
                                    high_at, self.method, grid=grid)
        # Each piece is a (2, n) array; concatenate along the samples axis.
        out = np.concatenate([low_points,high_points], 1)
        return out
    def _refresh(self):
        x, y = self.get_bezier_points()
        self.bezier_curve.set_data(x, y)
class BezierCurveView(object):
    """View that draws a bezier-curve model's points on *ax* and redraws
    whenever the model's trigger fires."""

    def __init__(self, ax, bezier_curve_model):
        self.ax = ax
        self.bezier_curve_model = bezier_curve_model
        self.canvas = self.ax.figure.canvas
        # BUG FIX: this line used to read `self.bezier_model`, an attribute
        # that is never assigned (the constructor stores
        # `self.bezier_curve_model`), so constructing a view raised
        # AttributeError before anything was drawn.
        x, y = self.bezier_curve_model.get_bezier_points()
        self.bezier_curve = Line2D(x, y)
        self.ax.add_line(self.bezier_curve)
        self.bezier_curve_model.trigger.add_callback(self._refresh)
        self._refresh()

    def _refresh(self):
        # Pull fresh curve samples from the model and repaint.
        x, y = self.bezier_curve_model.get_bezier_points()
        self.bezier_curve.set_data(x, y)
        self.canvas.draw()
# We used to use scipy.special.binom here,
# but reimplementing it ourself lets us avoid pulling in a dependency
# scipy just for that one function.
def binom(n, k):
    """Binomial coefficient C(n, k), returned as a float."""
    numerator = factorial(n)
    denominator = factorial(k) * factorial(n - k)
    return 1.0 * numerator / denominator
def Bernstein(n, k):
    """Return the k-th Bernstein basis polynomial of degree n as a callable.

    The returned function accepts a scalar or ndarray x and evaluates
    C(n, k) * x**k * (1 - x)**(n - k).
    """
    coeff = binom(n, k)
    return lambda x: coeff * x ** k * (1 - x) ** (n - k)
def Bezier(points, at):
    """Build Bézier curve from points.

    Deprecated. CatmulClark builds nicer splines
    """
    at = np.asarray(at)
    flat = at.ravel()
    n_pts = len(points)
    # Weighted sum of the control points using Bernstein basis weights.
    curve = np.zeros((flat.shape[0], 2))
    for k in range(n_pts):
        curve += np.outer(Bernstein(n_pts - 1, k)(flat), points[k])
    # Restore the caller's parameter shape, with an (x, y) pair per sample.
    return curve.reshape(at.shape + (2,))
def CatmulClark(points, at):
    """Sample a corner-cutting subdivision spline through *points*.

    The control polygon is repeatedly refined (endpoints kept, interior
    points replaced by 1/4-3/4 blends of neighbours) until it has at
    least len(at) vertices, then sampled at parameter values *at* by
    linear interpolation of each coordinate.
    """
    pts = np.asarray(points)
    # Subdivide until the polyline is at least as dense as requested.
    while len(pts) < len(at):
        refined = np.zeros((2 * len(pts), 2))
        refined[0] = pts[0]
        refined[-1] = pts[-1]
        refined[1:-2:2] = 3 / 4. * pts[:-1] + 1 / 4. * pts[1:]
        refined[2:-1:2] = 1 / 4. * pts[:-1] + 3 / 4. * pts[1:]
        pts = refined
    xs, ys = zip(*pts)
    xs = np.interp(at, np.linspace(0, 1, len(xs)), xs)
    ys = np.interp(at, np.linspace(0, 1, len(ys)), ys)
    return np.asarray(list(zip(xs, ys)))
| [
"math.factorial",
"numpy.asarray",
"numpy.diff",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.cumsum",
"numpy.interp",
"numpy.concatenate",
"numpy.argmin",
"numpy.hypot",
"matplotlib.lines.Line2D"
] | [((5878, 5892), 'numpy.asarray', 'np.asarray', (['at'], {}), '(at)\n', (5888, 5892), True, 'import numpy as np\n'), ((6229, 6252), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'grid'], {}), '(0, 1, grid)\n', (6240, 6252), True, 'import numpy as np\n'), ((6468, 6495), 'numpy.interp', 'np.interp', (['at', 'arclength', 't'], {}), '(at, arclength, t)\n', (6477, 6495), True, 'import numpy as np\n'), ((6859, 6869), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (6866, 6869), True, 'import numpy as np\n'), ((6885, 6895), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (6892, 6895), True, 'import numpy as np\n'), ((7019, 7073), 'numpy.hypot', 'np.hypot', (['x_deltas', 'y_deltas'], {'out': 'arclength_deltas[1:]'}), '(x_deltas, y_deltas, out=arclength_deltas[1:])\n', (7027, 7073), True, 'import numpy as np\n'), ((7085, 7112), 'numpy.cumsum', 'np.cumsum', (['arclength_deltas'], {}), '(arclength_deltas)\n', (7094, 7112), True, 'import numpy as np\n'), ((10933, 10947), 'numpy.asarray', 'np.asarray', (['at'], {}), '(at)\n', (10943, 10947), True, 'import numpy as np\n'), ((11005, 11036), 'numpy.zeros', 'np.zeros', (['(at_flat.shape[0], 2)'], {}), '((at_flat.shape[0], 2))\n', (11013, 11036), True, 'import numpy as np\n'), ((11215, 11233), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (11225, 11233), True, 'import numpy as np\n'), ((3251, 3321), 'matplotlib.lines.Line2D', 'Line2D', (['xp', 'yp'], {'ls': '"""--"""', 'c': '"""#666666"""', 'marker': '"""x"""', 'mew': '(2)', 'mec': '"""#204a87"""'}), "(xp, yp, ls='--', c='#666666', marker='x', mew=2, mec='#204a87')\n", (3257, 3321), False, 'from matplotlib.lines import Line2D\n'), ((6778, 6801), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'grid'], {}), '(0, 1, grid)\n', (6789, 6801), True, 'import numpy as np\n'), ((6971, 6986), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (6981, 6986), True, 'import numpy as np\n'), ((7377, 7389), 'matplotlib.lines.Line2D', 'Line2D', (['x', 'y'], {}), '(x, 
y)\n', (7383, 7389), False, 'from matplotlib.lines import Line2D\n'), ((8191, 8203), 'matplotlib.lines.Line2D', 'Line2D', (['x', 'y'], {}), '(x, y)\n', (8197, 8203), False, 'from matplotlib.lines import Line2D\n'), ((8486, 8500), 'numpy.asarray', 'np.asarray', (['at'], {}), '(at)\n', (8496, 8500), True, 'import numpy as np\n'), ((9641, 9685), 'numpy.concatenate', 'np.concatenate', (['[low_points, high_points]', '(1)'], {}), '([low_points, high_points], 1)\n', (9655, 9685), True, 'import numpy as np\n'), ((10091, 10103), 'matplotlib.lines.Line2D', 'Line2D', (['x', 'y'], {}), '(x, y)\n', (10097, 10103), False, 'from matplotlib.lines import Line2D\n'), ((5001, 5031), 'numpy.argmin', 'np.argmin', (['total_squared_dists'], {}), '(total_squared_dists)\n', (5010, 5031), True, 'import numpy as np\n'), ((7579, 7601), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num'], {}), '(0, 1, num)\n', (7590, 7601), True, 'import numpy as np\n'), ((8398, 8420), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num'], {}), '(0, 1, num)\n', (8409, 8420), True, 'import numpy as np\n'), ((8543, 8557), 'numpy.array', 'np.array', (['[at]'], {}), '([at])\n', (8551, 8557), True, 'import numpy as np\n'), ((10573, 10585), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (10582, 10585), False, 'from math import factorial\n'), ((10595, 10607), 'math.factorial', 'factorial', (['k'], {}), '(k)\n', (10604, 10607), False, 'from math import factorial\n'), ((10610, 10626), 'math.factorial', 'factorial', (['(n - k)'], {}), '(n - k)\n', (10619, 10626), False, 'from math import factorial\n')] |
import sys, os, glob, random
import time
import parser
import torch
import torch.nn as nn
# from AdaAdam import AdaAdam
import torch.optim as OPT
import numpy as np
from copy import deepcopy
from tqdm import tqdm, trange
import logging
from torchtext import data
import DataProcessing
from DataProcessing.MLTField import MTLField
from DataProcessing.NlcDatasetSingleFile import NlcDatasetSingleFile
from CNNModel import CNNModel
# Module-level logger; basicConfig here makes the script's INFO logs
# visible when it is run directly.
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)

batch_size = 10
seed = 12345678
Train = False  # set True to run the meta-training loop before evaluation

# Seed every RNG source once for reproducibility.
# (The original called torch.manual_seed(seed) twice; once is enough.)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
if n_gpu > 0:
    torch.cuda.manual_seed_all(seed)
def load_train_test_files(listfilename, test_suffix='.test'):
    """Expand a workspace list file into per-task (train, dev, test) triples.

    Each line of *listfilename* is tab-separated and only the first
    field (a path prefix) is used.  For every prefix, one triple is
    produced per task suffix in ('.t2', '.t4', '.t5').

    Args:
        listfilename: path of the list file to read.
        test_suffix: suffix appended for the test split (default '.test').

    Returns:
        list of (trainfile, devfile, testfile) name tuples.
    """
    task_classes = ('.t2', '.t4', '.t5')
    file_tuples = []
    # `with` guarantees the file is closed even if an exception is
    # raised mid-read (the original leaked the handle in that case).
    with open(listfilename, 'r') as filein:
        for line in filein:
            prefix = line.strip().split('\t')[0]
            for t_class in task_classes:
                base = prefix + t_class
                file_tuples.append((base + '.train',
                                    base + '.dev',
                                    base + test_suffix))
    return file_tuples
# --- Source-domain data -------------------------------------------------
# Task-list files and embedding settings; the embedding name matches the
# torchtext convention for 300-d GloVe vectors.
filelist = 'data/Amazon_few_shot/workspace.filtered.list'
targetlist = 'data/Amazon_few_shot/workspace.target.list'
workingdir = 'data/Amazon_few_shot'
emfilename = 'glove.6B.300d'
emfiledir = '..'
datasets = []
list_datasets = []
file_tuples = load_train_test_files(filelist)
print(file_tuples)
# One shared text field so every task uses the same vocabulary.
TEXT = MTLField(lower=True)
for (trainfile, devfile, testfile) in file_tuples:
    print(trainfile, devfile, testfile)
    # Each task gets its own (non-sequential) label field.
    LABEL1 = data.Field(sequential=False)
    train1, dev1, test1 = NlcDatasetSingleFile.splits(
        TEXT, LABEL1, path=workingdir, train=trainfile,
        validation=devfile, test=testfile)
    datasets.append((TEXT, LABEL1, train1, dev1, test1))
    # Collect every split so the shared vocabulary covers all of them.
    list_datasets.append(train1)
    list_datasets.append(dev1)
    list_datasets.append(test1)
# --- Target-domain (few-shot) data --------------------------------------
target_datasets = []
target_file = load_train_test_files(targetlist)
print(target_file)
for (trainfile, devfile, testfile) in target_file:
    print(trainfile, devfile, testfile)
    LABEL2 = data.Field(sequential=False)
    train2, dev2, test2 = NlcDatasetSingleFile.splits(TEXT, LABEL2, path=workingdir,
            train=trainfile,validation=devfile, test=testfile)
    target_datasets.append((TEXT, LABEL2, train2, dev2, test2))
# Bucketed iterators for the source tasks (shuffled) ...
datasets_iters = []
for (TEXT, LABEL, train, dev, test) in datasets:
    train_iter, dev_iter, test_iter = data.BucketIterator.splits(
        (train, dev, test), batch_size=batch_size, device=device,shuffle=True)
    train_iter.repeat = False
    datasets_iters.append((train_iter, dev_iter, test_iter))
# ... and for the few-shot target tasks (no shuffle requested here).
fsl_ds_iters = []
for (TEXT, LABEL, train, dev, test) in target_datasets:
    train_iter, dev_iter, test_iter = data.BucketIterator.splits(
        (train,dev, test), batch_size=batch_size, device=device)
    train_iter.repeat = False
    fsl_ds_iters.append((train_iter, dev_iter, test_iter))
# Count how many training batches the source tasks contain in total; the
# meta-training loop length is derived from this below.
num_batch_total = 0
for i, (TEXT, LABEL, train, dev, test) in enumerate(datasets):
    # print('DATASET%d'%(i+1))
    # print('train.fields', train.fields)
    # print('len(train)', len(train))
    # print('len(dev)', len(dev))
    # print('len(test)', len(test))
    # print('vars(train[0])', vars(train[0]))
    num_batch_total += len(train) / batch_size
# Shared text vocabulary over every split, with pretrained vectors.
TEXT.build_vocab(list_datasets, vectors = emfilename, vectors_cache = emfiledir)
# TEXT.build_vocab(list_dataset)
# build the vocabulary
for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):
    LABEL.build_vocab(train, dev, test)
    # Drop the first vocab entry and shift every index down by one --
    # presumably removing torchtext's <unk> placeholder so labels start
    # at 0; verify against the torchtext version in use.
    LABEL.vocab.itos = LABEL.vocab.itos[1:]
    for k, v in LABEL.vocab.stoi.items():
        LABEL.vocab.stoi[k] = v - 1
    # print vocab information
    # print('len(TEXT.vocab)', len(TEXT.vocab))
    # print('TEXT.vocab.vectors.size()', TEXT.vocab.vectors.size())
    # print(LABEL.vocab.itos)
    # print(len(LABEL.vocab.itos))
    # print(len(LABEL.vocab.stoi))
# Same label-index shift for the few-shot target tasks.
fsl_num_tasks = 0
for taskid, (TEXT, LABEL, train, dev, test) in enumerate(target_datasets):
    fsl_num_tasks += 1
    LABEL.build_vocab(train, dev, test)
    LABEL.vocab.itos = LABEL.vocab.itos[1:]
    for k, v in LABEL.vocab.stoi.items():
        LABEL.vocab.stoi[k] = v - 1
# Model hyper-parameters and learning rates.
nums_embed = len(TEXT.vocab)
dim_embed = 100
dim_w_hid = 200
dim_h_hid = 100
Inner_lr = 2e-6
Outer_lr = 1e-5
n_labels = []
for (TEXT, LABEL, train, dev, test) in datasets:
    n_labels.append(len(LABEL.vocab))
print(n_labels)
num_tasks = len(n_labels)
print("num_tasks", num_tasks)
winsize = 3
# NOTE(review): num_labels is taken from whichever LABEL the loop above
# left bound last; this assumes every task has the same label count.
num_labels = len(LABEL.vocab.itos)
# Build the CNN classifier and move it to the selected device.
model = CNNModel(nums_embed, num_labels, dim_embed, dim_w_hid, dim_h_hid, winsize, batch_size)
print("GPU Device: ", device)
model.to(device)
print(model)
criterion = nn.CrossEntropyLoss()
# Inner-loop (per-task) optimizer used during meta-training.
opt = OPT.Adam(model.parameters(), lr=Inner_lr)
Inner_epochs = 4
epochs = 2
N_task = 5
task_list = np.arange(num_tasks)
print("Total Batch: ", num_batch_total)
output_model_file = '/tmp/CNN_MAML_output'
if Train:
    # Outer meta-training loop: each iteration samples N_task tasks,
    # adapts a copy of the model on each, then moves the shared weights
    # toward the average of the adapted weights (Reptile-style update --
    # NOTE(review): despite the MAML naming, no second-order terms appear).
    for t in trange(int(num_batch_total*epochs/Inner_epochs), desc="Iterations"):
        selected_task = np.random.choice(task_list, N_task,replace=False)
        weight_before = deepcopy(model.state_dict())
        update_vars = []
        fomaml_vars = []  # NOTE(review): collected nowhere -- dead variable
        for task_id in selected_task:
            # print(task_id)
            (train_iter, dev_iter, test_iter) = datasets_iters[task_id]
            train_iter.init_epoch()
            model.train()
            n_correct = 0
            n_step = 0
            # Inner loop: a few SGD steps on this task, one batch each.
            for inner_iter in range(Inner_epochs):
                batch = next(iter(train_iter))
                # print(batch.text)
                # print(batch.label)
                logits = model(batch.text)
                loss = criterion(logits.view(-1, num_labels), batch.label.data.view(-1))
                n_correct = (torch.max(logits, 1)[1].view(batch.label.size()).data == batch.label.data).sum()
                n_step = batch.batch_size
                loss.backward()
                opt.step()
                opt.zero_grad()
            # Accuracy of the last inner batch only.
            task_acc = 100.*n_correct/n_step
            if t%10 == 0:
                logger.info("Iter: %d, task id: %d, train acc: %f", t, task_id, task_acc)
            # Remember the adapted weights, then restore the shared ones
            # before adapting on the next task.
            weight_after = deepcopy(model.state_dict())
            update_vars.append(weight_after)
            model.load_state_dict(weight_before)
        # Meta-update: average the adapted weights per parameter and step
        # from weight_before toward that average, rescaled by the ratio
        # of outer to inner learning rate.
        new_weight_dict = {}
        for name in weight_before:
            weight_list = [tmp_weight_dict[name] for tmp_weight_dict in update_vars]
            weight_shape = list(weight_list[0].size())
            stack_shape = [len(weight_list)] + weight_shape
            stack_weight = torch.empty(stack_shape)
            for i in range(len(weight_list)):
                stack_weight[i,:] = weight_list[i]
            new_weight_dict[name] = torch.mean(stack_weight, dim=0).cuda()
            new_weight_dict[name] = weight_before[name]+(new_weight_dict[name]-weight_before[name])/Inner_lr*Outer_lr
        model.load_state_dict(new_weight_dict)
    torch.save(model.state_dict(), output_model_file)
# --- Few-shot evaluation -------------------------------------------------
# Load the meta-trained weights, then for every target task fine-tune on
# a single support batch and measure accuracy on the full test split.
model.load_state_dict(torch.load(output_model_file))
logger.info("***** Running evaluation *****")
fsl_task_list = np.arange(fsl_num_tasks)
# Snapshot the meta-trained weights so each task starts from them.
weight_before = deepcopy(model.state_dict())
fsl_epochs = 3
Total_acc = 0
opt = OPT.Adam(model.parameters(), lr=3e-4)
for task_id in fsl_task_list:
    model.train()
    (train_iter, dev_iter, test_iter) = fsl_ds_iters[task_id]
    train_iter.init_epoch()
    # Few-shot adaptation uses only this one support batch, repeated.
    batch = next(iter(train_iter))
    for i in range(fsl_epochs):
        logits = model(batch.text)
        loss = criterion(logits.view(-1, num_labels), batch.label.data.view(-1))
        n_correct = (torch.max(logits, 1)[1].view(batch.label.size()).data == batch.label.data).sum()
        n_size = batch.batch_size
        train_acc = 100. * n_correct / n_size
        # NOTE(review): loss is recomputed here with identical inputs --
        # the second computation is redundant.
        loss = criterion(logits.view(-1, num_labels), batch.label.data.view(-1))
        loss.backward()
        opt.step()
        opt.zero_grad()
        logger.info(" Task id: %d, fsl epoch: %d, Acc: %f, loss: %f", task_id, i, train_acc, loss)
    # Evaluate the adapted model on the task's test split.
    model.eval()
    test_iter.init_epoch()
    n_correct = 0
    n_size = 0
    for test_batch_idx, test_batch in enumerate(test_iter):
        with torch.no_grad():
            logits = model(test_batch.text)
            loss = criterion(logits.view(-1, num_labels), test_batch.label.data.view(-1))
            n_correct += (torch.max(logits, 1)[1].view(test_batch.label.size()).data == test_batch.label.data).sum()
            n_size += test_batch.batch_size
    test_acc = 100.* n_correct/n_size
    logger.info("FSL test Number: %d, Accuracy: %f",n_size, test_acc)
    Total_acc += test_acc
    # Restore the meta-trained weights before the next task.
    model.load_state_dict(weight_before)
print("Mean Accuracy is : ", float(Total_acc)/fsl_num_tasks)
| [
"logging.getLogger",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.cuda.device_count",
"torch.cuda.is_available",
"DataProcessing.NlcDatasetSingleFile.NlcDatasetSingleFile.splits",
"numpy.arange",
"torch.mean",
"torchtext.data.BucketIterator.splits",
"numpy.random.seed",
"torchtext.data.Field... | [((441, 468), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (458, 468), False, 'import logging\n'), ((470, 613), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (489, 613), False, 'import logging\n'), ((683, 706), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (700, 706), False, 'import torch\n'), ((802, 827), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (825, 827), False, 'import torch\n'), ((828, 845), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (839, 845), False, 'import sys, os, glob, random\n'), ((846, 866), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (860, 866), True, 'import numpy as np\n'), ((867, 890), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (884, 890), False, 'import torch\n'), ((1791, 1811), 'DataProcessing.MLTField.MTLField', 'MTLField', ([], {'lower': '(True)'}), '(lower=True)\n', (1799, 1811), False, 'from DataProcessing.MLTField import MTLField\n'), ((4880, 4970), 'CNNModel.CNNModel', 'CNNModel', (['nums_embed', 'num_labels', 'dim_embed', 'dim_w_hid', 'dim_h_hid', 'winsize', 'batch_size'], {}), '(nums_embed, num_labels, dim_embed, dim_w_hid, dim_h_hid, winsize,\n batch_size)\n', (4888, 4970), False, 'from CNNModel import CNNModel\n'), ((5041, 5062), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5060, 5062), True, 'import torch.nn as nn\n'), ((5164, 5184), 'numpy.arange', 'np.arange', (['num_tasks'], {}), '(num_tasks)\n', (5173, 5184), True, 'import numpy as np\n'), ((7490, 7514), 'numpy.arange', 'np.arange', (['fsl_num_tasks'], {}), '(fsl_num_tasks)\n', (7499, 7514), True, 'import numpy as 
np\n'), ((909, 941), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (935, 941), False, 'import torch\n'), ((1916, 1944), 'torchtext.data.Field', 'data.Field', ([], {'sequential': '(False)'}), '(sequential=False)\n', (1926, 1944), False, 'from torchtext import data\n'), ((1971, 2085), 'DataProcessing.NlcDatasetSingleFile.NlcDatasetSingleFile.splits', 'NlcDatasetSingleFile.splits', (['TEXT', 'LABEL1'], {'path': 'workingdir', 'train': 'trainfile', 'validation': 'devfile', 'test': 'testfile'}), '(TEXT, LABEL1, path=workingdir, train=trainfile,\n validation=devfile, test=testfile)\n', (1998, 2085), False, 'from DataProcessing.NlcDatasetSingleFile import NlcDatasetSingleFile\n'), ((2446, 2474), 'torchtext.data.Field', 'data.Field', ([], {'sequential': '(False)'}), '(sequential=False)\n', (2456, 2474), False, 'from torchtext import data\n'), ((2501, 2615), 'DataProcessing.NlcDatasetSingleFile.NlcDatasetSingleFile.splits', 'NlcDatasetSingleFile.splits', (['TEXT', 'LABEL2'], {'path': 'workingdir', 'train': 'trainfile', 'validation': 'devfile', 'test': 'testfile'}), '(TEXT, LABEL2, path=workingdir, train=trainfile,\n validation=devfile, test=testfile)\n', (2528, 2615), False, 'from DataProcessing.NlcDatasetSingleFile import NlcDatasetSingleFile\n'), ((2794, 2896), 'torchtext.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train, dev, test)'], {'batch_size': 'batch_size', 'device': 'device', 'shuffle': '(True)'}), '((train, dev, test), batch_size=batch_size,\n device=device, shuffle=True)\n', (2820, 2896), False, 'from torchtext import data\n'), ((3105, 3193), 'torchtext.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train, dev, test)'], {'batch_size': 'batch_size', 'device': 'device'}), '((train, dev, test), batch_size=batch_size,\n device=device)\n', (3131, 3193), False, 'from torchtext import data\n'), ((7397, 7426), 'torch.load', 'torch.load', (['output_model_file'], {}), '(output_model_file)\n', 
(7407, 7426), False, 'import torch\n'), ((755, 780), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (778, 780), False, 'import torch\n'), ((5384, 5434), 'numpy.random.choice', 'np.random.choice', (['task_list', 'N_task'], {'replace': '(False)'}), '(task_list, N_task, replace=False)\n', (5400, 5434), True, 'import numpy as np\n'), ((6955, 6979), 'torch.empty', 'torch.empty', (['stack_shape'], {}), '(stack_shape)\n', (6966, 6979), False, 'import torch\n'), ((8536, 8551), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8549, 8551), False, 'import torch\n'), ((7114, 7145), 'torch.mean', 'torch.mean', (['stack_weight'], {'dim': '(0)'}), '(stack_weight, dim=0)\n', (7124, 7145), False, 'import torch\n'), ((7976, 7996), 'torch.max', 'torch.max', (['logits', '(1)'], {}), '(logits, 1)\n', (7985, 7996), False, 'import torch\n'), ((8705, 8725), 'torch.max', 'torch.max', (['logits', '(1)'], {}), '(logits, 1)\n', (8714, 8725), False, 'import torch\n'), ((6138, 6158), 'torch.max', 'torch.max', (['logits', '(1)'], {}), '(logits, 1)\n', (6147, 6158), False, 'import torch\n')] |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyscf.nao.m_xjl import xjl
#
#
#
class sbt_c():
  '''
  Fast Spherical Bessel Transform on a logarithmic mesh (Talman-style algorithm).
  Functions are given on logarithmic mesh. See m_log_mesh.
  The equation numbers in the inline comments refer to the paper this
  implementation follows (NOTE(review): presumably Talman's NumSBT paper -- confirm).
  Args:
    rr : array of points in coordinate space (strictly positive, logarithmic)
    kk : array of points in momentum space (strictly positive, logarithmic)
    lmax : integer, maximal angular momentum necessary
    with_sqrt_pi_2 : if True, then transforms will be multiplied by sqrt(pi/2)
    fft_flags : unused in this implementation, kept for interface compatibility
  Returns:
    a class preinitialized to perform the spherical Bessel Transform
  Examples:
    label = 'siesta'
    sv = system_vars_c(label)
    sbt = sbt_c(sv.ao_log.rr, sv.ao_log.pp)
    print(sbt.exe(sv.ao_log.psi_log[0,0,:], 0))
  '''
  def __init__(self, rr, kk, lmax=12, with_sqrt_pi_2=True, fft_flags=None):
    # Both meshes must be numpy arrays on strictly positive (logarithmic) grids.
    assert(type(rr)==np.ndarray)
    assert(rr[0]>0.0)
    assert(type(kk)==np.ndarray)
    assert(kk[0]>0.0)
    self.nr = len(rr)
    n = self.nr
    assert(self.nr>1)
    assert(lmax>-1)
    self.rr,self.kk = rr,kk
    # nr2: doubled mesh size used for the FFT work arrays; rr3/kk3 cached cubes.
    nr2, self.rr3, self.kk3 = self.nr*2, rr**3, kk**3
    self.rmin,self.kmin = rr[0],kk[0]
    # Logarithms of the mesh origins (rho/kappa variables of the log mesh).
    self.rhomin,self.kapmin= np.log(self.rmin),np.log(self.kmin)
    # Logarithmic step of the radial mesh (assumed uniform in log space).
    self.dr_jt = np.log(rr[1]/rr[0])
    dr = self.dr_jt
    dt = 2.0*np.pi/(nr2*dr)
    # _smallr extrapolates the mesh below rmin for the doubled-mesh extension.
    self._smallr = self.rmin*np.array([np.exp(-dr*(n-i)) for i in range(n)], dtype='float64')
    # Pre-/post-multiplication factors r**1.5 implied by the log-mesh transform.
    self._premult = np.array([np.exp(1.5*dr*(i-n)) for i in range(2*n)], dtype='float64')
    coeff = 1.0/np.sqrt(np.pi/2.0) if with_sqrt_pi_2 else 1.0
    self._postdiv = np.array([coeff*np.exp(-1.5*dr*i) for i in range(n)], dtype='float64')
    # Sanity check of the FFT machinery: FFT of a delta must sum to nr2.
    temp1 = np.zeros((nr2), dtype='complex128')
    temp2 = np.zeros((nr2), dtype='complex128')
    temp1[0] = 1.0
    temp2 = np.fft.fft(temp1)
    xx = sum(np.real(temp2))
    if abs(nr2-xx)>1e-10 : raise SystemError('err: sbt_plan: problem with fftw sum(temp2):')
    # _mult_table1: complex phase factors M_l(t) used for the LARGE-k branch.
    self._mult_table1 = np.zeros((lmax+1, self.nr), dtype='complex128')
    for it in range(n):
      tt = it*dt                    # Define a t value
      phi3 = (self.kapmin+self.rhomin)*tt # See Eq. (33)
      # Asymptotic expansion of the phase of Gamma(10.5 + i*t) (Stirling-like).
      rad,phi = np.sqrt(10.5**2+tt**2),np.arctan((2.0*tt)/21.0)
      phi1 = -10.0*phi-np.log(rad)*tt+tt+np.sin(phi)/(12.0*rad) \
             -np.sin(3.0*phi)/(360.0*rad**3)+np.sin(5.0*phi)/(1260.0*rad**5) \
             -np.sin(7.0*phi)/(1680.0*rad**7)
      for ix in range(1,11): phi1=phi1+np.arctan((2.0*tt)/(2.0*ix-1)) # see Eqs. (27) and (28)
      # tanh(pi*t/2) written as sinh/cosh; saturated to 1 for large t.
      phi2 = -np.arctan(1.0) if tt>200.0 else -np.arctan(np.sinh(np.pi*tt/2)/np.cosh(np.pi*tt/2)) # see Eq. (20)
      phi = phi1+phi2+phi3
      self._mult_table1[0,it] = np.sqrt(np.pi/2)*np.exp(1j*phi)/n  # Eq. (18)
      if it==0 : self._mult_table1[0,it] = 0.5*self._mult_table1[0,it]
      phi = -phi2 - np.arctan(2.0*tt)
      if lmax>0 : self._mult_table1[1,it] = np.exp(2.0*1j*phi)*self._mult_table1[0,it] # See Eq. (21)
      # Apply Eq. (24): upward recursion in angular momentum.
      for lk in range(1,lmax):
        phi = -np.arctan(2*tt/(2*lk+1))
        self._mult_table1[lk+1,it] = np.exp(2.0*1j*phi)*self._mult_table1[lk-1,it]
    # END of it in range(n):
    # make the initialization for the calculation at small k values for 2N mesh values
    # _mult_table2: Fourier images of the spherical Bessel functions j_l.
    self._mult_table2 = np.zeros((lmax+1, self.nr+1), dtype='complex128')
    j_ltable = np.zeros((lmax+1,nr2), dtype='float64')
    # Tabulate j_l at the product mesh exp(rhomin + kapmin + i*dr).
    for i in range(nr2): j_ltable[0:lmax+1,i] = xjl( np.exp(self.rhomin+self.kapmin+i*dr), lmax )
    for ll in range(lmax+1):
      self._mult_table2[ll,:] = np.fft.rfft(j_ltable[ll,:]) /nr2
    if with_sqrt_pi_2 : self._mult_table2 = self._mult_table2/np.sqrt(np.pi/2)
  #
  # The calculation of the Spherical Bessel Transform for a given data...
  #
  def sbt(self, ff, am, direction=1, npow=0) :
    """
    Args:
      ff : numpy array containing radial orbital (values of radial orbital on logarithmic grid) to be transformed. The data must be on self.rr grid or self.kk grid provided during initialization.
      am : angular momentum of the radial orbital ff[:]
      direction : 1 -- real-space --> momentum space transform; -1 -- momentum space --> real-space transform.
      npow : additional power for the shape of the orbital
        f(xyz) = rr[i]**npow * ff[i] * Y_lm( xyz )
    Result:
      gg : numpy array containing the result of the Spherical Bessel Transform
        gg(k) = int_0^infty ff(r) j_{am}(k*r) r**2 dr   ( direction == 1 )
        gg(r) = int_0^infty ff(k) j_{am}(k*r) k**2 dk   ( direction == -1 )
    Raises:
      SystemError : if direction is neither 1 nor -1.
    """
    assert(type(ff)==np.ndarray)
    assert(len(ff)==self.nr)
    assert(am > -1)
    assert(am < self._mult_table1.shape[0])
    # Select source/destination meshes depending on the transform direction.
    if direction==1 :
      rmin, kmin, ptr_rr3 = self.rmin, self.kmin, self.rr3
      dr = np.log(self.rr[1]/self.rr[0])
      # C: coefficient of the small-r extrapolation ff(r) ~ C * r**(npow+am).
      C = ff[0]/self.rr[0]**(npow+am)
    elif direction==-1 :
      rmin, kmin, ptr_rr3 = self.kmin, self.rmin, self.kk3
      dr = np.log(self.kk[1]/self.kk[0])
      C = ff[0]/self.kk[0]**(npow+am)
    else:
      raise SystemError('!direction=+/-1')
    gg = np.zeros((self.nr), dtype='float64') # Allocate the result
    # make the calculation for LARGE k values extend the input to the doubled mesh, extrapolating the input as C r**(np+li)
    nr2 = self.nr*2
    r2c_in = np.zeros((nr2), dtype='float64')
    # Lower half: extrapolated small-r tail; upper half: the actual data.
    r2c_in[0:self.nr] = C*self._premult[0:self.nr]*self._smallr[0:self.nr]**(npow+am)
    r2c_in[self.nr:nr2] = self._premult[self.nr:nr2]*ff[0:self.nr]
    r2c_out = np.fft.rfft(r2c_in)
    temp1 = np.zeros((nr2), dtype='complex128')
    temp1[0:self.nr] = np.conj(r2c_out[0:self.nr])*self._mult_table1[am,0:self.nr]
    temp2 = np.fft.ifft(temp1)*nr2
    gg[0:self.nr] = (rmin/kmin)**1.5 * (temp2[self.nr:nr2]).real * self._postdiv[0:self.nr]
    # obtain the SMALL k results in the array c2r_out
    r2c_in[0:self.nr] = ptr_rr3[0:self.nr] * ff[0:self.nr]
    r2c_in[self.nr:nr2] = 0.0
    r2c_out = np.fft.rfft(r2c_in)
    c2r_in = np.conj(r2c_out[0:self.nr+1]) * self._mult_table2[am,0:self.nr+1]
    c2r_out = np.fft.irfft(c2r_in)*dr*nr2
    # Stitch the small-k and large-k branches at the point of best agreement.
    r2c_in[0:self.nr] = abs(gg[0:self.nr]-c2r_out[0:self.nr])
    kdiv = np.argmin(r2c_in[0:self.nr])
    gg[0:kdiv] = c2r_out[0:kdiv]
    return gg
| [
"numpy.sqrt",
"numpy.conj",
"numpy.sin",
"numpy.log",
"numpy.fft.fft",
"numpy.fft.irfft",
"numpy.sinh",
"numpy.fft.rfft",
"numpy.real",
"numpy.zeros",
"numpy.exp",
"numpy.cosh",
"numpy.argmin",
"numpy.fft.ifft",
"numpy.arctan"
] | [((1799, 1820), 'numpy.log', 'np.log', (['(rr[1] / rr[0])'], {}), '(rr[1] / rr[0])\n', (1805, 1820), True, 'import numpy as np\n'), ((2226, 2259), 'numpy.zeros', 'np.zeros', (['nr2'], {'dtype': '"""complex128"""'}), "(nr2, dtype='complex128')\n", (2234, 2259), True, 'import numpy as np\n'), ((2274, 2307), 'numpy.zeros', 'np.zeros', (['nr2'], {'dtype': '"""complex128"""'}), "(nr2, dtype='complex128')\n", (2282, 2307), True, 'import numpy as np\n'), ((2341, 2358), 'numpy.fft.fft', 'np.fft.fft', (['temp1'], {}), '(temp1)\n', (2351, 2358), True, 'import numpy as np\n'), ((2507, 2556), 'numpy.zeros', 'np.zeros', (['(lmax + 1, self.nr)'], {'dtype': '"""complex128"""'}), "((lmax + 1, self.nr), dtype='complex128')\n", (2515, 2556), True, 'import numpy as np\n'), ((3803, 3856), 'numpy.zeros', 'np.zeros', (['(lmax + 1, self.nr + 1)'], {'dtype': '"""complex128"""'}), "((lmax + 1, self.nr + 1), dtype='complex128')\n", (3811, 3856), True, 'import numpy as np\n'), ((3868, 3910), 'numpy.zeros', 'np.zeros', (['(lmax + 1, nr2)'], {'dtype': '"""float64"""'}), "((lmax + 1, nr2), dtype='float64')\n", (3876, 3910), True, 'import numpy as np\n'), ((5550, 5584), 'numpy.zeros', 'np.zeros', (['self.nr'], {'dtype': '"""float64"""'}), "(self.nr, dtype='float64')\n", (5558, 5584), True, 'import numpy as np\n'), ((5775, 5805), 'numpy.zeros', 'np.zeros', (['nr2'], {'dtype': '"""float64"""'}), "(nr2, dtype='float64')\n", (5783, 5805), True, 'import numpy as np\n'), ((5975, 5994), 'numpy.fft.rfft', 'np.fft.rfft', (['r2c_in'], {}), '(r2c_in)\n', (5986, 5994), True, 'import numpy as np\n'), ((6012, 6045), 'numpy.zeros', 'np.zeros', (['nr2'], {'dtype': '"""complex128"""'}), "(nr2, dtype='complex128')\n", (6020, 6045), True, 'import numpy as np\n'), ((6424, 6443), 'numpy.fft.rfft', 'np.fft.rfft', (['r2c_in'], {}), '(r2c_in)\n', (6435, 6443), True, 'import numpy as np\n'), ((6640, 6668), 'numpy.argmin', 'np.argmin', (['r2c_in[0:self.nr]'], {}), '(r2c_in[0:self.nr])\n', (6649, 6668), True, 'import 
numpy as np\n'), ((1745, 1762), 'numpy.log', 'np.log', (['self.rmin'], {}), '(self.rmin)\n', (1751, 1762), True, 'import numpy as np\n'), ((1763, 1780), 'numpy.log', 'np.log', (['self.kmin'], {}), '(self.kmin)\n', (1769, 1780), True, 'import numpy as np\n'), ((2372, 2386), 'numpy.real', 'np.real', (['temp2'], {}), '(temp2)\n', (2379, 2386), True, 'import numpy as np\n'), ((5256, 5287), 'numpy.log', 'np.log', (['(self.rr[1] / self.rr[0])'], {}), '(self.rr[1] / self.rr[0])\n', (5262, 5287), True, 'import numpy as np\n'), ((6071, 6098), 'numpy.conj', 'np.conj', (['r2c_out[0:self.nr]'], {}), '(r2c_out[0:self.nr])\n', (6078, 6098), True, 'import numpy as np\n'), ((6143, 6161), 'numpy.fft.ifft', 'np.fft.ifft', (['temp1'], {}), '(temp1)\n', (6154, 6161), True, 'import numpy as np\n'), ((6458, 6489), 'numpy.conj', 'np.conj', (['r2c_out[0:self.nr + 1]'], {}), '(r2c_out[0:self.nr + 1])\n', (6465, 6489), True, 'import numpy as np\n'), ((1996, 2022), 'numpy.exp', 'np.exp', (['(1.5 * dr * (i - n))'], {}), '(1.5 * dr * (i - n))\n', (2002, 2022), True, 'import numpy as np\n'), ((2073, 2093), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (2080, 2093), True, 'import numpy as np\n'), ((2715, 2743), 'numpy.sqrt', 'np.sqrt', (['(10.5 ** 2 + tt ** 2)'], {}), '(10.5 ** 2 + tt ** 2)\n', (2722, 2743), True, 'import numpy as np\n'), ((2738, 2764), 'numpy.arctan', 'np.arctan', (['(2.0 * tt / 21.0)'], {}), '(2.0 * tt / 21.0)\n', (2747, 2764), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.arctan', 'np.arctan', (['(2.0 * tt)'], {}), '(2.0 * tt)\n', (3370, 3380), True, 'import numpy as np\n'), ((3962, 4004), 'numpy.exp', 'np.exp', (['(self.rhomin + self.kapmin + i * dr)'], {}), '(self.rhomin + self.kapmin + i * dr)\n', (3968, 4004), True, 'import numpy as np\n'), ((4069, 4097), 'numpy.fft.rfft', 'np.fft.rfft', (['j_ltable[ll, :]'], {}), '(j_ltable[ll, :])\n', (4080, 4097), True, 'import numpy as np\n'), ((4164, 4182), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), 
'(np.pi / 2)\n', (4171, 4182), True, 'import numpy as np\n'), ((5419, 5450), 'numpy.log', 'np.log', (['(self.kk[1] / self.kk[0])'], {}), '(self.kk[1] / self.kk[0])\n', (5425, 5450), True, 'import numpy as np\n'), ((6538, 6558), 'numpy.fft.irfft', 'np.fft.irfft', (['c2r_in'], {}), '(c2r_in)\n', (6550, 6558), True, 'import numpy as np\n'), ((1911, 1932), 'numpy.exp', 'np.exp', (['(-dr * (n - i))'], {}), '(-dr * (n - i))\n', (1917, 1932), True, 'import numpy as np\n'), ((2156, 2177), 'numpy.exp', 'np.exp', (['(-1.5 * dr * i)'], {}), '(-1.5 * dr * i)\n', (2162, 2177), True, 'import numpy as np\n'), ((2912, 2929), 'numpy.sin', 'np.sin', (['(7.0 * phi)'], {}), '(7.0 * phi)\n', (2918, 2929), True, 'import numpy as np\n'), ((2992, 3028), 'numpy.arctan', 'np.arctan', (['(2.0 * tt / (2.0 * ix - 1))'], {}), '(2.0 * tt / (2.0 * ix - 1))\n', (3001, 3028), True, 'import numpy as np\n'), ((3064, 3078), 'numpy.arctan', 'np.arctan', (['(1.0)'], {}), '(1.0)\n', (3073, 3078), True, 'import numpy as np\n'), ((3224, 3242), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (3231, 3242), True, 'import numpy as np\n'), ((3241, 3259), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (3247, 3259), True, 'import numpy as np\n'), ((3423, 3447), 'numpy.exp', 'np.exp', (['(2.0 * 1.0j * phi)'], {}), '(2.0 * 1.0j * phi)\n', (3429, 3447), True, 'import numpy as np\n'), ((3554, 3586), 'numpy.arctan', 'np.arctan', (['(2 * tt / (2 * lk + 1))'], {}), '(2 * tt / (2 * lk + 1))\n', (3563, 3586), True, 'import numpy as np\n'), ((3616, 3640), 'numpy.exp', 'np.exp', (['(2.0 * 1.0j * phi)'], {}), '(2.0 * 1.0j * phi)\n', (3622, 3640), True, 'import numpy as np\n'), ((2869, 2886), 'numpy.sin', 'np.sin', (['(5.0 * phi)'], {}), '(5.0 * phi)\n', (2875, 2886), True, 'import numpy as np\n'), ((2838, 2855), 'numpy.sin', 'np.sin', (['(3.0 * phi)'], {}), '(3.0 * phi)\n', (2844, 2855), True, 'import numpy as np\n'), ((3107, 3130), 'numpy.sinh', 'np.sinh', (['(np.pi * tt / 2)'], {}), 
'(np.pi * tt / 2)\n', (3114, 3130), True, 'import numpy as np\n'), ((3127, 3150), 'numpy.cosh', 'np.cosh', (['(np.pi * tt / 2)'], {}), '(np.pi * tt / 2)\n', (3134, 3150), True, 'import numpy as np\n'), ((2804, 2815), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2810, 2815), True, 'import numpy as np\n'), ((2786, 2797), 'numpy.log', 'np.log', (['rad'], {}), '(rad)\n', (2792, 2797), True, 'import numpy as np\n')] |
"""A generalization of the classification accuracy with cross-class scores.
Soften the accuracy score by giving scores through certain misclassifications
defined by the score matrix. For example, in ordinal regression we may want
not to penalize too much misclassifications to neighbor classes. The score also
generalizes RMSE-like regression scores for ordinal regression (when true and
predicted output levels are coming from a fixed set) by allowing to define
arbitrary misclassification scores.
"""
from __future__ import division
import numpy as np
from .base import BaseScoreType
class SoftAccuracy(BaseScoreType):
    """Soft accuracy: credit partial matches via a cross-class score matrix."""

    is_lower_the_better = False
    minimum = 0.0

    def __init__(self, score_matrix, name='soft precision', precision=2):
        self.name = name
        self.precision = precision
        self.maximum = np.max(score_matrix)
        self.score_matrix = score_matrix

    def __call__(self, y_true_proba, y_proba):
        """Return the mean soft-accuracy score over all samples."""
        # Negative predicted probabilities are treated as zero (clip to [0, 1]).
        clipped = np.clip(y_proba, 0, 1)
        # Renormalize each sample's predicted distribution to sum to one.
        row_totals = np.sum(clipped, axis=1, keepdims=True)
        normalized = clipped / row_totals
        # Spread the true labels over neighboring classes via the score matrix;
        # the rows need not sum to 1 anymore.
        smoothed_truth = y_true_proba.dot(self.score_matrix)
        # Per-sample score: dot product of prediction and smoothed truth.
        # nan_to_num picks up all-zero probability rows (0/0 -> nan -> 0).
        per_sample = np.nan_to_num(np.sum(normalized * smoothed_truth, axis=1))
        return np.nan_to_num(np.mean(per_sample))
| [
"numpy.clip",
"numpy.mean",
"numpy.max",
"numpy.sum",
"numpy.nan_to_num"
] | [((834, 854), 'numpy.max', 'np.max', (['score_matrix'], {}), '(score_matrix)\n', (840, 854), True, 'import numpy as np\n'), ((1002, 1024), 'numpy.clip', 'np.clip', (['y_proba', '(0)', '(1)'], {}), '(y_proba, 0, 1)\n', (1009, 1024), True, 'import numpy as np\n'), ((1439, 1491), 'numpy.sum', 'np.sum', (['(y_proba_normalized * y_true_smoothed)'], {'axis': '(1)'}), '(y_proba_normalized * y_true_smoothed, axis=1)\n', (1445, 1491), True, 'import numpy as np\n'), ((1509, 1530), 'numpy.nan_to_num', 'np.nan_to_num', (['scores'], {}), '(scores)\n', (1522, 1530), True, 'import numpy as np\n'), ((1547, 1562), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1554, 1562), True, 'import numpy as np\n'), ((1623, 1643), 'numpy.nan_to_num', 'np.nan_to_num', (['score'], {}), '(score)\n', (1636, 1643), True, 'import numpy as np\n'), ((1098, 1145), 'numpy.sum', 'np.sum', (['y_proba_positive'], {'axis': '(1)', 'keepdims': '(True)'}), '(y_proba_positive, axis=1, keepdims=True)\n', (1104, 1145), True, 'import numpy as np\n')] |
from multiprocessing import Pool
import time
import numpy as np
import torch
import gym
from lagom import Logger
from lagom import EpisodeRunner
from lagom.transform import describe
from lagom.utils import CloudpickleWrapper
from lagom.utils import pickle_dump
from lagom.utils import tensorify
from lagom.utils import set_global_seeds
from lagom.experiment import Config
from lagom.experiment import Grid
from lagom.experiment import run_experiment
from lagom.envs import TimeStepEnv
from lagom import CMAES
from baselines.cmaes.agent import Agent
# Experiment hyperparameters. Grid(...) expands into one configuration per
# listed environment when run_experiment enumerates the search space.
config = Config(
    {'log.freq': 10,  # dump logs every `log.freq` generations (plus generation 0)
     'checkpoint.num': 3,  # number of agent checkpoints spread over training
     'env.id': Grid(['Acrobot-v1', 'BipedalWalker-v2', 'Pendulum-v0', 'LunarLanderContinuous-v2']),
     'nn.sizes': [64, 64],  # hidden layer sizes of the policy network
     # only for continuous control
     'env.clip_action': True,  # clip action within valid bound before step()
     'agent.std0': 0.6,  # initial std
     'train.generations': 500,  # total number of ES generations
     'train.popsize': 32,
     'train.worker_chunksize': 4,  # must be divisible by popsize
     'train.mu0': 0.0,  # initial mean of the CMA-ES search distribution
     'train.std0': 1.0,  # initial std of the CMA-ES search distribution
     })
def make_env(config, seed, mode):
    """Create and fully seed the configured gym environment.

    `mode` must be 'train' or 'eval'; it is validated but otherwise unused
    here. Continuous-action (Box) environments are optionally wrapped with
    ClipAction, and the result is wrapped in TimeStepEnv for lagom runners.
    """
    assert mode in ['train', 'eval']
    env = gym.make(config['env.id'])
    # Seed the env and both of its spaces for full reproducibility.
    for seedable in (env, env.observation_space, env.action_space):
        seedable.seed(seed)
    has_box_actions = isinstance(env.action_space, gym.spaces.Box)
    if has_box_actions and config['env.clip_action']:
        # TODO: use tanh to squash policy output when RescaleAction wrapper merged in gym
        env = gym.wrappers.ClipAction(env)
    return TimeStepEnv(env)
def fitness(data):
    """Evaluate one CMA-ES candidate in a worker process.

    `data` is a (config, seed, device, param) tuple, where `param` is the
    flat parameter vector to load into the agent. Runs 10 episodes and
    returns (mean episodic return, mean episode horizon).
    """
    torch.set_num_threads(1)  # VERY IMPORTANT TO AVOID GETTING STUCK
    config, seed, device, param = data
    env = make_env(config, seed, 'train')
    agent = Agent(config, env, device)
    agent.from_vec(tensorify(param, 'cpu'))
    runner = EpisodeRunner()
    with torch.no_grad():
        trajectories = runner(agent, env, 10)
    returns = [sum(traj.rewards) for traj in trajectories]
    horizons = [traj.T for traj in trajectories]
    return np.mean(returns), np.mean(horizons)
def run(config, seed, device, logdir):
    """Train an agent with CMA-ES for one (config, seed) experiment.

    Candidate evaluation is fanned out to a process pool via `fitness`;
    training logs are pickled to `logdir` and the best-so-far agent is
    checkpointed `config['checkpoint.num']` times over training.
    """
    set_global_seeds(seed)
    torch.set_num_threads(1)  # VERY IMPORTANT TO AVOID GETTING STUCK
    print('Initializing...')
    agent = Agent(config, make_env(config, seed, 'eval'), device)
    # CMA-ES over the agent's flat parameter vector.
    es = CMAES([config['train.mu0']]*agent.num_params, config['train.std0'],
          {'popsize': config['train.popsize'],
           'seed': seed})
    train_logs = []
    checkpoint_count = 0
    with Pool(processes=config['train.popsize']//config['train.worker_chunksize']) as pool:
        print('Finish initialization. Training starts...')
        for generation in range(config['train.generations']):
            t0 = time.perf_counter()
            solutions = es.ask()
            data = [(config, seed, device, solution) for solution in solutions]
            out = pool.map(CloudpickleWrapper(fitness), data, chunksize=config['train.worker_chunksize'])
            Rs, Hs = zip(*out)
            # CMA-ES minimizes, so feed it the negated returns.
            es.tell(solutions, [-R for R in Rs])
            logger = Logger()
            logger('generation', generation+1)
            logger('num_seconds', round(time.perf_counter() - t0, 1))
            logger('Returns', describe(Rs, axis=-1, repr_indent=1, repr_prefix='\n'))
            logger('Horizons', describe(Hs, axis=-1, repr_indent=1, repr_prefix='\n'))
            logger('fbest', es.result.fbest)
            train_logs.append(logger.logs)
            if generation == 0 or (generation+1) % config['log.freq'] == 0:
                logger.dump(keys=None, index=0, indent=0, border='-'*50)
            # Checkpoint the best-so-far parameters at evenly spaced generations.
            if (generation+1) >= int(config['train.generations']*(checkpoint_count/(config['checkpoint.num'] - 1))):
                agent.from_vec(tensorify(es.result.xbest, 'cpu'))
                agent.checkpoint(logdir, generation+1)
                checkpoint_count += 1
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    return None
if __name__ == '__main__':
    # Launch one run per (config point, seed) combination; `run` above is the
    # experiment body executed by each worker.
    run_experiment(run=run,
                   config=config,
                   seeds=[1770966829, 1500925526, 2054191100],
                   log_dir='logs/default',
                   max_workers=12,  # tune to fulfill computation power
                   chunksize=1,
                   use_gpu=False,
                   gpu_ids=None)
| [
"lagom.envs.TimeStepEnv",
"gym.wrappers.ClipAction",
"lagom.transform.describe",
"lagom.experiment.run_experiment",
"gym.make",
"baselines.cmaes.agent.Agent",
"lagom.EpisodeRunner",
"numpy.mean",
"lagom.CMAES",
"lagom.utils.set_global_seeds",
"time.perf_counter",
"torch.set_num_threads",
"la... | [((1216, 1242), 'gym.make', 'gym.make', (["config['env.id']"], {}), "(config['env.id'])\n", (1224, 1242), False, 'import gym\n'), ((1550, 1566), 'lagom.envs.TimeStepEnv', 'TimeStepEnv', (['env'], {}), '(env)\n', (1561, 1566), False, 'from lagom.envs import TimeStepEnv\n'), ((1607, 1631), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1628, 1631), False, 'import torch\n'), ((1766, 1792), 'baselines.cmaes.agent.Agent', 'Agent', (['config', 'env', 'device'], {}), '(config, env, device)\n', (1771, 1792), False, 'from baselines.cmaes.agent import Agent\n'), ((1850, 1865), 'lagom.EpisodeRunner', 'EpisodeRunner', ([], {}), '()\n', (1863, 1865), False, 'from lagom import EpisodeRunner\n'), ((1986, 2017), 'numpy.mean', 'np.mean', (['[traj.T for traj in D]'], {}), '([traj.T for traj in D])\n', (1993, 2017), True, 'import numpy as np\n'), ((2079, 2101), 'lagom.utils.set_global_seeds', 'set_global_seeds', (['seed'], {}), '(seed)\n', (2095, 2101), False, 'from lagom.utils import set_global_seeds\n'), ((2106, 2130), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (2127, 2130), False, 'import torch\n'), ((2281, 2407), 'lagom.CMAES', 'CMAES', (["([config['train.mu0']] * agent.num_params)", "config['train.std0']", "{'popsize': config['train.popsize'], 'seed': seed}"], {}), "([config['train.mu0']] * agent.num_params, config['train.std0'], {\n 'popsize': config['train.popsize'], 'seed': seed})\n", (2286, 2407), False, 'from lagom import CMAES\n'), ((3865, 3929), 'lagom.utils.pickle_dump', 'pickle_dump', ([], {'obj': 'train_logs', 'f': "(logdir / 'train_logs')", 'ext': '""".pkl"""'}), "(obj=train_logs, f=logdir / 'train_logs', ext='.pkl')\n", (3876, 3929), False, 'from lagom.utils import pickle_dump\n'), ((3977, 4150), 'lagom.experiment.run_experiment', 'run_experiment', ([], {'run': 'run', 'config': 'config', 'seeds': '[1770966829, 1500925526, 2054191100]', 'log_dir': '"""logs/default"""', 'max_workers': '(12)', 
'chunksize': '(1)', 'use_gpu': '(False)', 'gpu_ids': 'None'}), "(run=run, config=config, seeds=[1770966829, 1500925526, \n 2054191100], log_dir='logs/default', max_workers=12, chunksize=1,\n use_gpu=False, gpu_ids=None)\n", (3991, 4150), False, 'from lagom.experiment import run_experiment\n'), ((639, 726), 'lagom.experiment.Grid', 'Grid', (["['Acrobot-v1', 'BipedalWalker-v2', 'Pendulum-v0', 'LunarLanderContinuous-v2']"], {}), "(['Acrobot-v1', 'BipedalWalker-v2', 'Pendulum-v0',\n 'LunarLanderContinuous-v2'])\n", (643, 726), False, 'from lagom.experiment import Grid\n'), ((1428, 1456), 'gym.wrappers.ClipAction', 'gym.wrappers.ClipAction', (['env'], {}), '(env)\n', (1451, 1456), False, 'import gym\n'), ((1812, 1835), 'lagom.utils.tensorify', 'tensorify', (['param', '"""cpu"""'], {}), "(param, 'cpu')\n", (1821, 1835), False, 'from lagom.utils import tensorify\n'), ((1875, 1890), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1888, 1890), False, 'import torch\n'), ((2488, 2563), 'multiprocessing.Pool', 'Pool', ([], {'processes': "(config['train.popsize'] // config['train.worker_chunksize'])"}), "(processes=config['train.popsize'] // config['train.worker_chunksize'])\n", (2492, 2563), False, 'from multiprocessing import Pool\n'), ((2709, 2728), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2726, 2728), False, 'import time\n'), ((3049, 3057), 'lagom.Logger', 'Logger', ([], {}), '()\n', (3055, 3057), False, 'from lagom import Logger\n'), ((2869, 2896), 'lagom.utils.CloudpickleWrapper', 'CloudpickleWrapper', (['fitness'], {}), '(fitness)\n', (2887, 2896), False, 'from lagom.utils import CloudpickleWrapper\n'), ((3205, 3259), 'lagom.transform.describe', 'describe', (['Rs'], {'axis': '(-1)', 'repr_indent': '(1)', 'repr_prefix': '"""\n"""'}), "(Rs, axis=-1, repr_indent=1, repr_prefix='\\n')\n", (3213, 3259), False, 'from lagom.transform import describe\n'), ((3292, 3346), 'lagom.transform.describe', 'describe', (['Hs'], {'axis': '(-1)', 'repr_indent': 
'(1)', 'repr_prefix': '"""\n"""'}), "(Hs, axis=-1, repr_indent=1, repr_prefix='\\n')\n", (3300, 3346), False, 'from lagom.transform import describe\n'), ((3733, 3766), 'lagom.utils.tensorify', 'tensorify', (['es.result.xbest', '"""cpu"""'], {}), "(es.result.xbest, 'cpu')\n", (3742, 3766), False, 'from lagom.utils import tensorify\n'), ((3145, 3164), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3162, 3164), False, 'import time\n')] |
import numpy as np
def _merge_intervals_inplace(merge_target, merger, sum_weighted_y, sum_weighted_y_sq, sum_weights, level_set):
sum_weighted_y[merge_target] += sum_weighted_y[merger]
sum_weighted_y_sq[merge_target] += sum_weighted_y_sq[merger]
sum_weights[merge_target] += sum_weights[merger]
# Update the level set
level_set[merge_target] = sum_weighted_y[merge_target] / sum_weights[merge_target]
def prefix_isotonic_regression(y, weights=None, non_negativity=False):
    """Isotonic (non-decreasing) regression of every prefix of y, via PAVA.

    Returns ((level_set, index_range), error) where level_set[i] is the fitted
    level of the pool ending at i, index_range[i] is that pool's first index,
    and error[i] is the squared error of the isotonic fit to y[:i]
    (error[0] == 0 for the empty prefix). With non_negativity=True, pools with
    a negative level are clamped to 0.
    """
    if weights is None:
        weights = np.ones_like(y)
    # Per-pool accumulators: sum(w*y), sum(w*y^2), sum(w).
    sumwy = weights * y
    sumwy2 = weights * y * y
    sumw = weights.copy()
    level_set = np.zeros_like(y)
    index_range = np.zeros_like(y, dtype=np.int32)
    error = np.zeros(y.shape[0] + 1)  # +1 since error[0] is error of empty set
    level_set[0] = y[0]
    index_range[0] = 0
    num_samples = y.shape[0]
    if non_negativity:
        # cumsumwy2[i] is the error of fitting zeros to y[:i+1] (sum of w*y^2).
        cumsumwy2 = np.cumsum(sumwy2)
        threshold = np.zeros(level_set.shape)
        if level_set[0] < 0:
            threshold[0] = True
            error[1] = cumsumwy2[0]
    for i in range(1, num_samples):
        # Start a new singleton pool at i, then merge backwards while the
        # monotonicity constraint is violated (pool-adjacent-violators step).
        level_set[i] = y[i]
        index_range[i] = i
        while level_set[i] <= level_set[index_range[i]-1] and index_range[i] != 0:
            _merge_intervals_inplace(i, index_range[i]-1, sumwy, sumwy2, sumw, level_set)
            index_range[i] = index_range[index_range[i] - 1]
        # Within-pool squared error: sum(w*y^2) - (sum(w*y))^2 / sum(w).
        levelerror = sumwy2[i] - (sumwy[i]**2 / sumw[i])
        if non_negativity and level_set[i] < 0:
            threshold[i] = True
            error[i + 1] = cumsumwy2[i]
        else:
            error[i + 1] = levelerror + error[index_range[i]]
    if non_negativity:
        # Clamp negative-level pools to zero.
        for i in range(len(level_set)):
            if threshold[i]:
                level_set[i] = 0
    # NOTE(review): the incrementally computed `error` above is discarded and
    # recomputed from scratch below (O(n^2) overall) -- presumably to get the
    # exact SSE of the (possibly clamped) fit; confirm before simplifying.
    error = np.zeros(y.shape[0] + 1)  # +1 since error[0] is error of empty set
    for i in range(1, y.shape[0]+1):
        yhat = compute_isotonic_from_index(i, level_set, index_range)
        error[i] = np.sum((yhat - y[:i])**2)
    return (level_set, index_range), error
return (level_set, index_range), error
def compute_isotonic_from_index(end_index, level_set, index_range):
    """Expand the pooled (level_set, index_range) form into fitted values.

    `end_index` is one past the last sample to expand, or None for the whole
    array. Position p belongs to the pool [index_range[p], p]; walking pools
    backwards from the end fills every position with its pool's level.
    """
    last = level_set.shape[0] - 1 if end_index is None else end_index - 1
    fitted = np.full_like(level_set[:last + 1], np.nan)
    pos = last
    while pos >= 0:
        pool_start = index_range[pos]
        fitted[pool_start:pos + 1] = level_set[pos]
        pos = pool_start - 1
    # Every position must have been covered by exactly one pool.
    assert not np.any(np.isnan(fitted))
    return fitted
def _get_best_unimodality_index(error_left, error_right):
best_error = error_right[-1]
best_idx = 0
for i in range(error_left.shape[0]):
error = error_left[i] + error_right[len(error_left) - i - 1]
if error < best_error:
best_error = error
best_idx = i
return best_idx, best_error
def _unimodal_regression(y, non_negativity):
    """Fit an increasing-then-decreasing sequence to y; return (fit, error)."""
    # Isotonic fits to all prefixes (rising part) and, on the reversed signal,
    # to all suffixes (falling part).
    rising, rising_err = prefix_isotonic_regression(y, non_negativity=non_negativity)
    falling, falling_err = prefix_isotonic_regression(y[::-1], non_negativity=non_negativity)
    n = y.shape[0]
    # Pick the mode position with minimal combined error.
    split, error = _get_best_unimodality_index(rising_err, falling_err)
    left_fit = compute_isotonic_from_index(split, rising[0], rising[1])
    right_fit = compute_isotonic_from_index(n - split, falling[0], falling[1])
    return np.concatenate([left_fit, right_fit[::-1]]), error
def unimodal_regression(y, non_negativity=False):
    """Unimodal regression of a vector, or column-wise for a 2-D matrix."""
    y = np.asarray(y)
    if y.ndim == 1:
        fit, _ = _unimodal_regression(y, non_negativity=non_negativity)
        return fit
    if y.ndim == 2:
        # Fit each column independently and reassemble as a matrix.
        columns = [
            _unimodal_regression(y[:, col], non_negativity=non_negativity)[0]
            for col in range(y.shape[1])
        ]
        return np.stack(columns, axis=1)
    raise ValueError(f"y must be a vector or matrix, has {y.ndim} dimensions.")
| [
"numpy.ones_like",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"numpy.empty_like",
"numpy.isnan",
"numpy.concatenate",
"numpy.cumsum",
"numpy.zeros_like"
] | [((652, 668), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (665, 668), True, 'import numpy as np\n'), ((687, 719), 'numpy.zeros_like', 'np.zeros_like', (['y'], {'dtype': 'np.int32'}), '(y, dtype=np.int32)\n', (700, 719), True, 'import numpy as np\n'), ((732, 756), 'numpy.zeros', 'np.zeros', (['(y.shape[0] + 1)'], {}), '(y.shape[0] + 1)\n', (740, 756), True, 'import numpy as np\n'), ((1806, 1830), 'numpy.zeros', 'np.zeros', (['(y.shape[0] + 1)'], {}), '(y.shape[0] + 1)\n', (1814, 1830), True, 'import numpy as np\n'), ((3444, 3457), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (3454, 3457), True, 'import numpy as np\n'), ((539, 554), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (551, 554), True, 'import numpy as np\n'), ((921, 938), 'numpy.cumsum', 'np.cumsum', (['sumwy2'], {}), '(sumwy2)\n', (930, 938), True, 'import numpy as np\n'), ((959, 984), 'numpy.zeros', 'np.zeros', (['level_set.shape'], {}), '(level_set.shape)\n', (967, 984), True, 'import numpy as np\n'), ((2000, 2027), 'numpy.sum', 'np.sum', (['((yhat - y[:i]) ** 2)'], {}), '((yhat - y[:i]) ** 2)\n', (2006, 2027), True, 'import numpy as np\n'), ((2254, 2288), 'numpy.empty_like', 'np.empty_like', (['level_set[:idx + 1]'], {}), '(level_set[:idx + 1])\n', (2267, 2288), True, 'import numpy as np\n'), ((3329, 3376), 'numpy.concatenate', 'np.concatenate', (['[y_iso_left, y_iso_right[::-1]]'], {}), '([y_iso_left, y_iso_right[::-1]])\n', (3343, 3376), True, 'import numpy as np\n'), ((2429, 2444), 'numpy.isnan', 'np.isnan', (['y_iso'], {}), '(y_iso)\n', (2437, 2444), True, 'import numpy as np\n')] |
""" Question 2: Cell by cell Parallelization """
import numpy as np
import numba
from numba import cuda
import sys
import time
import math
OUTPUT_TIMING_DATA = False  # when True, append wall-clock time to a CSV at exit

#define constants
grid_size = 4    # simulation grid is grid_size x grid_size cells
eta = 0.0002     # appears as (1-eta)/(1+eta) in the update -- presumably damping; confirm
rho = 0.5        # factor applied to the neighbour-sum term in the update
G = 0.75         # attenuation factor applied to edge/corner boundary cells
num_threads = 8  # number of CUDA threads launched per kernel invocation
#cuda kernel that processes one x,y coordinate of the grid's interior
@cuda.jit
def process_interior(grid, output, width, offset):
    # grid[r][c] holds three time slices: [0]=new, [1]=current, [2]=previous
    # (see propogate, which shifts 1 -> 2 and 0 -> 1 after each iteration).
    # Map this thread (plus the host-side launch offset) to a flat interior index.
    index = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x + offset
    if index >= (width - 2) * (width - 2): return
    row = (index // (width-2)) + 1
    col = (index % (width-2)) + 1
    # Sum the four neighbours' current values...
    grid[row][col][0] = grid[row-1][col][1]
    grid[row][col][0] += grid[row+1][col][1]
    grid[row][col][0] += grid[row][col-1][1]
    grid[row][col][0] += grid[row][col+1][1]
    # ...minus 4x the centre, scaled by rho, plus the time-stepping terms.
    grid[row][col][0] -= 4 * grid[row][col][1]
    grid[row][col][0] *= rho
    grid[row][col][0] += 2 * grid[row][col][1]
    grid[row][col][0] -= (1 - eta) * grid[row][col][2]
    grid[row][col][0] /= (1 + eta)
    # NOTE(review): every interior thread writes the centre cell's value here;
    # a thread may read it before the centre's own update completes -- the host
    # only consumes output after the kernel, but confirm the ordering is intended.
    output[0] = grid[width // 2][width // 2][0]
#cuda kernel that processes one x,y coordinate of the grid's edge
@cuda.jit
def process_edges(grid, width, offset):
    # Flat edge index: [0, w-2) top row, then bottom row, left column, right column.
    index = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x + offset
    if index >= (width - 2) * 4: return
    if index < (width - 2):
        # Top edge: copy from the adjacent interior cell, attenuated by G.
        grid[0][index+1][0] = G * grid[1][index+1][0]
    elif index < 2 * (width - 2):
        # Bottom edge.
        grid[width-1][(index % (width-2))+1][0] = G * grid[width -2][(index % (width-2))+1][0]
    elif index < 3 * (width - 2):
        # Left edge.
        grid[(index % (width-2))+1][0][0] = G * grid[(index % (width-2))+1][1][0]
    else:
        # Right edge.
        grid[(index % (width-2))+1][width-1][0] = G * grid[(index % (width-2))+1][width-2][0]
#cuda kernel that processes one corner of the grid
@cuda.jit
def process_corners(grid, width, offset):
    index = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x + offset
    if index >= 4: return
    # Each corner copies an attenuated value from one adjacent boundary cell.
    # NOTE: branch order is 0, 1, 3, with index == 2 falling to the else.
    if index == 0:
        # Top-left, from the cell below.
        grid[0][0][0] = G * grid[1][0][0]
    elif index == 1:
        # Bottom-left, from the cell above.
        grid[width-1][0][0] = G * grid[width-2][0][0]
    elif index == 3:
        # Top-right, from the cell to its left.
        grid[0][width-1][0] = G * grid[0][width-2][0]
    else:
        # index == 2: bottom-right, from the cell to its left.
        grid[width-1][width-1][0] = G * grid[width-1][width-2][0]
#cuda kernel that copies u1 to u2 and u0 to u1 for one x,y coordinate
@cuda.jit
def propogate(grid, width, offset):
    index = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x + offset
    if index >= width * width: return
    row = index // width
    col = index % width
    # Advance the time slices: previous <- current, current <- newly computed.
    grid[row][col][2] = grid[row][col][1]
    grid[row][col][1] = grid[row][col][0]
""" MAIN """
#decide how to allocate threads/blocks
num_blocks = 1
threads_per_block = num_threads
max_threads_per_block = 32
while threads_per_block > max_threads_per_block:
num_blocks += 1
threads_per_block = math.ceil(float(num_threads) / float(num_blocks))
#check if we're using too many blocks
if(num_blocks > 65535):
num_blocks = 1
threads_per_block = num_threads
max_threads_per_block *= 2
#make the grid
grid = np.zeros((grid_size, grid_size, 3), dtype=np.float)
grid[grid_size//2][grid_size//2][1] = np.float(1.0)
grid_d = cuda.to_device(grid)
#get the required number of iterations
num_iterations = 20
if len(sys.argv) >= 2:
num_iterations = int(sys.argv[1])
start = time.time()
#make the cuda output buffer
tmp = np.array([0.0], dtype=np.float)
output = cuda.device_array_like(tmp)
#process the grid the required number of times
for i in range(num_iterations):
for offset in range(0, (grid_size-2)*(grid_size-2), num_threads):
process_interior[num_blocks,threads_per_block](grid_d, output, grid_size, offset)
for offset in range(0, 4 * (grid_size-2), num_threads):
process_edges[num_blocks,threads_per_block](grid_d, grid_size, offset)
for offset in range(0, grid_size, num_threads):
process_corners[num_blocks,threads_per_block](grid_d, grid_size, offset)
for offset in range(0, grid_size*grid_size, num_threads):
propogate[num_blocks,threads_per_block](grid_d, grid_size, offset)
#print(grid_d.copy_to_host())
result = output.copy_to_host()
#format to match sample
u = result[0]
u_string = '{:.6f}'.format(round(u, 6))
if u >= 0:
u_string = ' ' + u_string
if i < 9:
u_string = ' ' + u_string
print(str(i+1) + ': ' + u_string)
end = time.time()
if OUTPUT_TIMING_DATA:
with open('2048.csv', 'a+') as f:
f.write(str(end-start) + ',') | [
"numpy.float",
"numpy.array",
"numpy.zeros",
"numba.cuda.to_device",
"numba.cuda.device_array_like",
"time.time"
] | [((2906, 2957), 'numpy.zeros', 'np.zeros', (['(grid_size, grid_size, 3)'], {'dtype': 'np.float'}), '((grid_size, grid_size, 3), dtype=np.float)\n', (2914, 2957), True, 'import numpy as np\n'), ((2996, 3009), 'numpy.float', 'np.float', (['(1.0)'], {}), '(1.0)\n', (3004, 3009), True, 'import numpy as np\n'), ((3019, 3039), 'numba.cuda.to_device', 'cuda.to_device', (['grid'], {}), '(grid)\n', (3033, 3039), False, 'from numba import cuda\n'), ((3170, 3181), 'time.time', 'time.time', ([], {}), '()\n', (3179, 3181), False, 'import time\n'), ((3218, 3249), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'np.float'}), '([0.0], dtype=np.float)\n', (3226, 3249), True, 'import numpy as np\n'), ((3259, 3286), 'numba.cuda.device_array_like', 'cuda.device_array_like', (['tmp'], {}), '(tmp)\n', (3281, 3286), False, 'from numba import cuda\n'), ((4208, 4219), 'time.time', 'time.time', ([], {}), '()\n', (4217, 4219), False, 'import time\n')] |
import numpy as np
from rnn_utils import *
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
    """
    Run one forward step of an LSTM cell.

    Arguments:
    xt -- input data at timestep "t", numpy array of shape (n_x, m)
    a_prev -- hidden state at timestep "t-1", numpy array of shape (n_a, m)
    c_prev -- memory (cell) state at timestep "t-1", numpy array of shape (n_a, m)
    parameters -- python dictionary containing:
                        Wf, bf -- forget gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                        Wi, bi -- update gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                        Wc, bc -- candidate ("tanh") weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                        Wo, bo -- output gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                        Wy, by -- hidden-state-to-output weight/bias, shapes (n_y, n_a) and (n_y, 1)

    Returns:
    a_next -- next hidden state, of shape (n_a, m)
    c_next -- next memory state, of shape (n_a, m)
    yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
    cache -- tuple of values needed for the backward pass:
             (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    Note: ft/it/ot are the forget/update/output gates, cct is the candidate
    value (c tilde), and c is the cell state (memory).
    """
    # Unpack the gate weights and biases.
    Wf, bf = parameters["Wf"], parameters["bf"]  # forget gate
    Wi, bi = parameters["Wi"], parameters["bi"]  # update gate
    Wc, bc = parameters["Wc"], parameters["bc"]  # candidate value
    Wo, bo = parameters["Wo"], parameters["bo"]  # output gate
    Wy, by = parameters["Wy"], parameters["by"]  # prediction

    # Dimensions follow from the input and the prediction weights.
    n_x, m = xt.shape
    n_y, n_a = Wy.shape

    # Stack the previous hidden state on top of the current input so every
    # gate can be computed with a single matrix multiply.
    stacked = np.concatenate((a_prev, xt), axis=0)

    ft = sigmoid(np.dot(Wf, stacked) + bf)    # forget gate
    it = sigmoid(np.dot(Wi, stacked) + bi)    # update gate
    cct = np.tanh(np.dot(Wc, stacked) + bc)   # candidate value
    c_next = it * cct + ft * c_prev           # new cell state
    ot = sigmoid(np.dot(Wo, stacked) + bo)    # output gate
    a_next = ot * np.tanh(c_next)             # new hidden state

    # Prediction of the LSTM cell at this timestep.
    yt_pred = softmax(np.dot(Wy, a_next) + by)

    # Store values needed for backward propagation.
    cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
    return a_next, c_next, yt_pred, cache
def lstm_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (4).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
c -- The value of the cell state, numpy array of shape (n_a, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
"""
# Initialize "caches", which will track the list of all the caches
caches = []
### START CODE HERE ###
Wy = parameters['Wy'] # saving parameters['Wy'] in a local variable in case students use Wy instead of parameters['Wy']
# Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)
n_x, m, T_x = x.shape
n_y, n_a = Wy.shape
# initialize "a", "c" and "y" with zeros (≈3 lines)
a = np.zeros([n_a,m,T_x])
c = np.zeros([n_a,m,T_x])
y = np.zeros([n_y,m,T_x])
# Initialize a_next and c_next (≈2 lines)
a_next = a0
c_next = np.zeros([n_a,m])
# loop over all time-steps
for t in range(T_x):
# Get the 2D slice 'xt' from the 3D input 'x' at time step 't'
xt = x[:,:,t]
# Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_next, c_next, parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the next cell state (≈1 line)
c[:,:,t] = c_next
# Save the value of the prediction in y (≈1 line)
y[:,:,t] = yt
# Append the cache into caches (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y, c, caches | [
"numpy.multiply",
"numpy.tanh",
"numpy.dot",
"numpy.zeros",
"numpy.concatenate"
] | [((2605, 2641), 'numpy.concatenate', 'np.concatenate', (['(a_prev, xt)'], {'axis': '(0)'}), '((a_prev, xt), axis=0)\n', (2619, 2641), True, 'import numpy as np\n'), ((5627, 5650), 'numpy.zeros', 'np.zeros', (['[n_a, m, T_x]'], {}), '([n_a, m, T_x])\n', (5635, 5650), True, 'import numpy as np\n'), ((5657, 5680), 'numpy.zeros', 'np.zeros', (['[n_a, m, T_x]'], {}), '([n_a, m, T_x])\n', (5665, 5680), True, 'import numpy as np\n'), ((5687, 5710), 'numpy.zeros', 'np.zeros', (['[n_y, m, T_x]'], {}), '([n_y, m, T_x])\n', (5695, 5710), True, 'import numpy as np\n'), ((5789, 5807), 'numpy.zeros', 'np.zeros', (['[n_a, m]'], {}), '([n_a, m])\n', (5797, 5807), True, 'import numpy as np\n'), ((3006, 3026), 'numpy.multiply', 'np.multiply', (['it', 'cct'], {}), '(it, cct)\n', (3017, 3026), True, 'import numpy as np\n'), ((3029, 3052), 'numpy.multiply', 'np.multiply', (['ft', 'c_prev'], {}), '(ft, c_prev)\n', (3040, 3052), True, 'import numpy as np\n'), ((3159, 3174), 'numpy.tanh', 'np.tanh', (['c_next'], {}), '(c_next)\n', (3166, 3174), True, 'import numpy as np\n'), ((2829, 2847), 'numpy.dot', 'np.dot', (['Wf', 'concat'], {}), '(Wf, concat)\n', (2835, 2847), True, 'import numpy as np\n'), ((2890, 2908), 'numpy.dot', 'np.dot', (['Wi', 'concat'], {}), '(Wi, concat)\n', (2896, 2908), True, 'import numpy as np\n'), ((2952, 2970), 'numpy.dot', 'np.dot', (['Wc', 'concat'], {}), '(Wc, concat)\n', (2958, 2970), True, 'import numpy as np\n'), ((3086, 3104), 'numpy.dot', 'np.dot', (['Wo', 'concat'], {}), '(Wo, concat)\n', (3092, 3104), True, 'import numpy as np\n'), ((3273, 3291), 'numpy.dot', 'np.dot', (['Wy', 'a_next'], {}), '(Wy, a_next)\n', (3279, 3291), True, 'import numpy as np\n')] |
# Produce a boxplot of the %age of commits during a particular Python.
# Note this doesn't run any front-ends here, it just looks at the repos
# and counts the commits corresponding to Python release dates.
# Violin plot code from here:
#https://matplotlib.org/examples/statistics/customized_violin_demo.html
import os
import matplotlib.pyplot as plt
import numpy as np
import qualitas
import python_versions
from data_dir import DATA_DIR
# Where the output boxplots will go:
BOXPLOT_PDF = 'commit-boxplots.pdf'
VIOLINPLOT_PDF = 'commit-violinplots.pdf'
DATA_FILE = os.path.join(DATA_DIR,'commit-data.csv')
######################################################################
def print_quartile_numbers(pyvers, bp_dict):
    '''Print the median, first and third quartile for each version of a boxplot'''
    rows = [[version] for version in pyvers]
    for row, med_line in zip(rows, bp_dict['medians']):
        # The second vertex of the median artist carries its y value.
        _, y_med = med_line.get_xydata()[1]
        row.append('%d' % y_med)
    for row, box_line in zip(rows, bp_dict['boxes']):
        _, y_bot = box_line.get_xydata()[0]  # bottom edge of the box (Q1)
        _, y_top = box_line.get_xydata()[2]  # top edge of the box (Q3)
        row.append('%d' % y_bot)
        row.append('%d' % y_top)
    for row in rows:
        print(' '.join(row))
def show_box_plot(pyvers, percs, save_as=None):
    '''Draw a boxplot of commit percentages, one box per Python version.

    Writes the figure to save_as when given, otherwise shows it interactively.
    '''
    fig = plt.figure()
    axes = fig.add_subplot(111)
    box_artists = axes.boxplot(percs)
    # Echo the quartile numbers to screen as a cross-check.
    print_quartile_numbers(pyvers, box_artists)
    axes.set_xlabel('On or after this Python release')
    axes.set_ylabel('Percentage of commits')
    _x_lo, _x_hi = axes.get_xlim()
    axes.set_xticklabels(pyvers)
    if save_as:
        plt.savefig(save_as, bbox_inches='tight')
    else:
        plt.show()
######################################################################
def adjacent_values(vals, q1, q3):
    '''Return the (lower, upper) whisker endpoints for a sorted sample.

    Whiskers extend 1.5 IQRs beyond the quartiles, clipped to the data range.
    '''
    iqr = q3 - q1
    upper = np.clip(q3 + 1.5 * iqr, q3, vals[-1])
    lower = np.clip(q1 - 1.5 * iqr, vals[0], q1)
    return lower, upper
def set_axis_style(ax, labels):
    '''Apply shared categorical x-axis styling for the violin plot.'''
    n = len(labels)
    ax.get_xaxis().set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    # Violin positions are 1-based, so ticks run 1..n.
    ax.set_xticks(np.arange(1, n + 1))
    ax.set_xticklabels(labels)
    ax.set_xlim(0.25, n + 0.75)
    ax.set_xlabel('Sample name')
def show_violin_plot(pyvers, percs, save_as=None):
    '''Draw a violin plot of commit percentages, one violin per Python version.

    Prints the quartile values to screen as a cross-check; writes the figure
    to save_as when given, otherwise shows it interactively.
    '''
    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(12, 6), sharey=True)
    violins = axes.violinplot(
        percs, showmeans=False, showmedians=False,
        showextrema=False)
    for body in violins['bodies']:
        body.set_facecolor('#4f90d9')
        body.set_edgecolor('black')
        body.set_alpha(1)
    q1s, meds, q3s = np.percentile(percs, [25, 50, 75], axis=1)
    # Print quartile data to screen, just for confirmation.
    for ver, q1, q2, q3 in zip(pyvers, q1s, meds, q3s):
        print('\t{} Q1={:3.0f}, Q2={:3.0f}, Q3={:3.0f}'.format(ver, q1, q2, q3))
    # Whisker endpoints: 1.5 IQR past the quartiles, clipped to the data.
    whisker_ends = np.array([
        adjacent_values(sample, q1, q3)
        for sample, q1, q3 in zip(percs, q1s, q3s)])
    lo_ends, hi_ends = whisker_ends[:, 0], whisker_ends[:, 1]
    positions = np.arange(1, len(meds) + 1)
    # Overlay median dots, the interquartile bar and the whiskers.
    axes.scatter(positions, meds, marker='o', color='white', s=30, zorder=3)
    axes.vlines(positions, q1s, q3s, color='k', linestyle='-', lw=5)
    axes.vlines(positions, lo_ends, hi_ends, color='k', linestyle='-', lw=1)
    # Set style for the axes.
    set_axis_style(axes, pyvers)
    axes.set_xlabel('On or after this Python release')
    axes.set_ylabel('Percentage of commits')
    axes.set_ylim(0, 110)
    axes.set_xticklabels(pyvers)
    axes.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
    plt.subplots_adjust(bottom=0.15, wspace=0.05)
    if save_as:
        plt.savefig(save_as, bbox_inches='tight')
    else:
        plt.show()
######################################################################
def read_date_counts(pyvers, qualapps, data_file):
    ''' Read the commit counts from the given file, one line per app.
    Return a list of percentages, one list for each Python version.
    i.e. percentages are transposed (and reversed) from order in file.
    '''
    percentages = [[] for _ in pyvers]
    with open(data_file, 'r') as handle:
        for raw in handle:
            # '#'-prefixed lines are comments.
            if raw.startswith('#'):
                continue
            fields = raw.split()
            # First entry is the app name, last is total, rest are per-version counts.
            assert fields[0] in qualapps, 'Unknown app %s' % fields[0]
            total_commits = int(fields[-1])
            per_version = fields[-2:0:-1]  # reverse, dropping the first & last fields
            assert len(per_version) == len(pyvers), 'Wrong line length %d' % len(fields)
            for bucket, count in zip(percentages, per_version):
                bucket.append(100 * int(count) / total_commits)
    return percentages
def print_date_percs(pyvers, qualapps, percs):
    ''' Print a LaTeX-style table of the commit percentages,
    one row per application and one column per Python version.
    '''
    print('{:12s} & {}\\\\'.format('Application', ' & '.join(pyvers)))
    for col, app in enumerate(qualapps):
        # Column `col` of each per-version list is this app's percentages.
        cells = ['{:3.0f}'.format(p[col]) for p in percs]
        print('{:12s} & {}\\\\'.format(app, ' & '.join(cells)))
def plot_date_counts(pyvers, qualapps):
    '''Main driver: load the commit data and plot it. An intermediate
    data file is used so the data need not be re-produced while
    fiddling with the plot style.
    '''
    percs = read_date_counts(pyvers, qualapps, DATA_FILE)
    print_date_percs(pyvers, qualapps, percs)
    #show_box_plot(pyvers, percs, BOXPLOT_PDF)
    show_violin_plot(pyvers, percs, VIOLINPLOT_PDF)
if __name__ == '__main__':
    apps = qualitas.get_dirnames()
    releases = python_versions.get_releases('3')  # Only look at 3.x releases
    #releases = releases[:-1]  # Delete the last one (Python 3.6)
    plot_date_counts(releases, apps)
| [
"numpy.clip",
"matplotlib.pyplot.savefig",
"python_versions.get_releases",
"os.path.join",
"matplotlib.pyplot.figure",
"qualitas.get_dirnames",
"numpy.percentile",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((574, 615), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""commit-data.csv"""'], {}), "(DATA_DIR, 'commit-data.csv')\n", (586, 615), False, 'import os\n'), ((1372, 1384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1382, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1974), 'numpy.clip', 'np.clip', (['upper_adjacent_value', 'q3', 'vals[-1]'], {}), '(upper_adjacent_value, q3, vals[-1])\n', (1938, 1974), True, 'import numpy as np\n'), ((2051, 2093), 'numpy.clip', 'np.clip', (['lower_adjacent_value', 'vals[0]', 'q1'], {}), '(lower_adjacent_value, vals[0], q1)\n', (2058, 2093), True, 'import numpy as np\n'), ((2497, 2557), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(12, 6)', 'sharey': '(True)'}), '(nrows=1, ncols=1, figsize=(12, 6), sharey=True)\n', (2509, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2877), 'numpy.percentile', 'np.percentile', (['percs', '[25, 50, 75]'], {'axis': '(1)'}), '(percs, [25, 50, 75], axis=1)\n', (2848, 2877), True, 'import numpy as np\n'), ((3851, 3896), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.15)', 'wspace': '(0.05)'}), '(bottom=0.15, wspace=0.05)\n', (3870, 3896), True, 'import matplotlib.pyplot as plt\n'), ((5945, 5968), 'qualitas.get_dirnames', 'qualitas.get_dirnames', ([], {}), '()\n', (5966, 5968), False, 'import qualitas\n'), ((5982, 6015), 'python_versions.get_releases', 'python_versions.get_releases', (['"""3"""'], {}), "('3')\n", (6010, 6015), False, 'import python_versions\n'), ((1677, 1718), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_as'], {'bbox_inches': '"""tight"""'}), "(save_as, bbox_inches='tight')\n", (1688, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pyplot as plt\n'), ((3921, 3962), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_as'], {'bbox_inches': 
'"""tight"""'}), "(save_as, bbox_inches='tight')\n", (3932, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3981, 3991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3989, 3991), True, 'import matplotlib.pyplot as plt\n')] |
import os, sys, pickle
import requests
import random
import time
import operator
import math
import progressbar
import numpy as np
import pandas as pd
import multiprocessing as mp
import difflib
import matplotlib.pyplot as plt
import inspect
from decimal import Decimal
from rdkit import Chem, DataStructs, RDConfig
from rdkit.Chem import AllChem, rdmolops
from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from scipy.spatial.distance import squareform, cdist
from scipy import stats
class Protein(object):
    """!
    An object to represent a protein
    """

    def __init__(self, id_, sig):
        ## @var id_
        # PDB or UniProt identifier for this protein
        self.id_ = id_
        ## @var alt_id
        # str: secondary identifier when a second mapping (e.g. SIFTs) is available
        self.alt_id = ''
        ## @var sig
        # list: score of each drug interaction with this protein
        self.sig = sig
        ## @var pathways
        # list: Pathway objects this protein takes part in
        self.pathways = []
        ## @var indications
        # list: Indication objects associated with this protein
        self.indications = []
        ## @var name
        # str: common name of the protein (not currently used)
        self.name = ''
        ## @var gene
        # str: name of the gene that produces this protein
        self.gene = ''
class Compound(object):
    """!
    An object to represent a compound/drug
    """

    def __init__(self, name, id_, index, status='N/A'):
        ## @var name
        # str: common name of the Compound (e.g., 'caffeine')
        self.name = name
        ## @var id_
        # int: CANDO id from the mapping file (e.g., 1, 10, 100, ...)
        self.id_ = id_
        ## @var index
        # int: position of the Compound within the mapping file (e.g., 1, 2, 3, ...)
        self.index = index
        ## @var status
        # str: clinical trial status from DrugBank ('approved' or 'other')
        self.status = status
        ## @var sig
        # list: interaction signature; essentially one column of the Matrix
        self.sig = []
        ## @var aux_sig
        # list: scratch signature (e.g. for pathways) so that sig itself is preserved
        self.aux_sig = []
        ## @var indications
        # list: every Indication associated with this Compound in the mapping file
        self.indications = []
        ## @var similar
        # list: Compounds ranked by similarity of their interaction signatures
        self.similar = []
        ## @var similar_computed
        # bool: whether distances to all other Compounds have been computed
        self.similar_computed = False
        ## @var similar_sorted
        # bool: whether the most similar Compounds have been sorted
        self.similar_sorted = False
        ## @var cluster_id
        # list: cluster id(s) assigned to this Compound by a clustering method
        self.cluster_id = []
        ## @var adrs
        # list: ADRs associated with this Compound
        self.adrs = []
        ## @var alt_ids
        # dict: alternative identifiers supplied with the compound mapping
        self.alt_ids = {}
        ## @var metabolites
        # list: all metabolites of this compound
        self.metabolites = []
        ## @var is_metabolite
        # bool: whether this drug is itself a metabolite
        self.is_metabolite = False
        ## @var parent
        # Compound: compound to which this compound is a metabolite
        self.parent = None
        ## @var compounds
        # list: Compound objects associated with this compound
        self.compounds = []

    def add_indication(self, ind):
        """!
        Associate an Indication with this Compound.
        @param ind object: Indication object to add
        """
        self.indications.append(ind)
class Compound_pair(object):
    """!
    An object to represent a compound/drug-pair
    """

    def __init__(self, name, id_, index):
        ## @var name
        # tuple: names of the two Compounds in the pair
        self.name = name
        ## @var id_
        # tuple: CANDO ids of the two Compounds from the mapping file
        self.id_ = id_
        ## @var index
        # tuple: positions of the Compounds within the mapping file
        self.index = index
        ## @var sig
        # list: interaction signature; essentially one column of the Matrix
        self.sig = []
        ## @var aux_sig
        # list: scratch signature (e.g. for pathways) so that sig itself is preserved
        self.aux_sig = []
        ## @var similar
        # list: compound pairs ranked by similarity of their interaction signatures
        self.similar = []
        ## @var similar_computed
        # bool: whether distances to all other pairs have been computed
        self.similar_computed = False
        ## @var similar_sorted
        # bool: whether the most similar pairs have been sorted
        self.similar_sorted = False
        ## @var adrs
        # list: ADRs associated with this compound pair
        self.adrs = []

    def add_adr(self, adr):
        """!
        Associate an ADR with this compound pair.
        @param adr object: ADR object to add
        """
        self.adrs.append(adr)
class Indication(object):
    """!
    An object to represent an indication (disease)
    """

    def __init__(self, ind_id, name):
        ## @var id_
        # str: MeSH or OMIM identifier from the mapping file
        self.id_ = ind_id
        ## @var name
        # str: human-readable name from the mapping file
        self.name = name
        ## @var compounds
        # list: every Compound associated with this indication
        self.compounds = []
        ## @var pathways
        # list: every Pathway associated with this indication
        self.pathways = []
        ## @var proteins
        # list: every Protein associated with this indication
        self.proteins = []
        ## @var pathogen
        # bool or None: whether this indication is caused by a pathogen
        self.pathogen = None
class Pathway(object):
    """!
    An object to represent a pathway
    """

    def __init__(self, id_):
        ## @var proteins
        # list: Protein objects belonging to this Pathway
        self.proteins = []
        ## @var id_
        # str: identifier for this Pathway
        self.id_ = id_
        ## @var indications
        # list: Indication objects associated with this Pathway
        self.indications = []
class ADR(object):
    """!
    An object to represent an adverse reaction
    """

    def __init__(self, id_, name):
        ## @var id_
        # str: identifier for this ADR
        self.id_ = id_
        ## @var name
        # str: name of this ADR
        self.name = name
        ## @var compounds
        # list: Compound objects associated with this ADR
        self.compounds = []
        ## @var compound_pairs
        # list: Compound_pair objects associated with this ADR
        self.compound_pairs = []
class CANDO(object):
"""!
An object to represent all aspects of CANDO (compounds, indications, matrix, etc.)
To instantiate you need the compound mapping (c_map), an
indication mapping file (i_map), and typically and a compound-protein matrix (matrix=) or
or precomputed compound-compound distance matrix (read_rmsds=), but those are optional.
"""
def __init__(self, c_map, i_map, matrix='', compound_set='all', compute_distance=False, save_dists='',
read_dists='', pathways='', pathway_quantifier='max', indication_pathways='', indication_proteins='',
similarity=False, dist_metric='rmsd', protein_set='', rm_zeros=False, rm_compounds='',
ddi_compounds='', ddi_adrs='', adr_map='', protein_distance=False, protein_map='', ncpus=1):
## @var c_map
# str: File path to the compound mapping file (relative or absolute)
self.c_map = c_map
## @var i_map
# str: File path to the indication mapping file (relative or absolute)
self.i_map = i_map
## @var matrix
# str: File path to the cando matrix file (relative or absolute)
self.matrix = matrix
## @var compound_set
# str or List str: what compounds to use, such as all, approved, experimental, etc
self.compound_set = compound_set
## @var protein_set
# str: File path to protein subset file (relative or absolute)
self.protein_set = protein_set
## @var pathways
# str: File path to pathway file
self.pathways = []
self.accuracies = {}
## @var compute_distance
# bool: Calculate the distance for each Compound against all other Compounds using chosen distance metric
self.compute_distance = compute_distance
## @var protein_distance
# bool: Calculate the distance for each Protein against all other Proteins using chosen distance metric
self.protein_distance = protein_distance
self.clusters = {}
## @var rm_zeros
# bool: Remove Compounds with all-zero signatures from CANDO object
self.rm_zeros = rm_zeros
## @var rm_compounds
# list: Compounds to remove from the CANDO object
self.rm_compounds = rm_compounds
self.rm_cmpds = []
## @var save_dists
# bool: Write the calculated distances to file after computation (set compute_distances=True)
self.save_dists = save_dists
## @var read_dists
# str: File path to pre-computed distance matrix
self.read_dists = read_dists
## @var similarity
# bool: Use similarity instead of distance
self.similarity = similarity
## @var dist_metric
# str: Distance metric to be used for computing Compound-Compound distances
self.dist_metric = dist_metric
## @var ncpus
# int: Number of CPUs used for parallelization
self.ncpus = int(ncpus)
## @var pathway_quantifier
# str: Method used to quantify a all Pathways
self.pathway_quantifier = pathway_quantifier
## @var indication_pathways
# str: File path to Indication-Pathway association file
self.indication_pathways = indication_pathways
## @var indication_proteins
# str: File path to Indication-Protein association file
self.indication_proteins = indication_proteins
## @var adr_map
# str: File path to ADR mapping file
self.adr_map = adr_map
## @var protein_map
# str: File path to Protein metadata mapping file
self.protein_map = protein_map
## @var ddi_compounds
# str: File path to Drug--drug mapping file
self.ddi_compounds = ddi_compounds
## @var ddi_compounds
# str: File path to Drug--Drug--ADE mapping file
self.ddi_adrs = ddi_adrs
## @var proteins
# List: Protein objects in the platform
self.proteins = []
self.protein_id_to_index = {}
## @var compounds
# List: Compound objects in the platform
self.compounds = []
self.compound_ids = []
## @var compound_pairs
# List: Compound_pair objects in the platform
self.compound_pairs = []
self.compound_pair_ids = []
## @var indications
# List: Indication objects in the platform
self.indications = []
self.indication_ids = []
## @var adrs
# List: ADR objects in the platform
self.adrs = []
self.adr_ids = []
self.short_matrix_path = self.matrix.split('/')[-1]
self.short_read_dists = read_dists.split('/')[-1]
self.short_protein_set = protein_set.split('/')[-1]
self.cmpd_set = rm_compounds.split('/')[-1]
self.data_name = ''
if self.matrix:
if self.protein_set:
self.data_name = self.short_protein_set + '.' + self.short_matrix_path
elif rm_compounds:
self.data_name = self.cmpd_set + '.' + self.short_matrix_path
if self.short_read_dists:
self.data_name = self.short_read_dists
ignored_set = []
# create all of the compound objects from the compound map
with open(c_map, 'r') as c_f:
lines = c_f.readlines()
header = lines[0]
h2i = {}
for i, h in enumerate(header.strip().split('\t')):
h2i[h] = i
for l in lines[1:]:
ls = l.strip().split('\t')
name = ls[h2i['GENERIC_NAME']]
id_ = int(ls[h2i['CANDO_ID']])
db_id = ls[h2i['DRUGBANK_ID']]
index = id_
cm = Compound(name, id_, index)
include_cmpd = False
if self.compound_set == 'all':
include_cmpd = True
tags = None
elif isinstance(self.compound_set, str):
tags = [self.compound_set]
elif isinstance(self.compound_set, list):
tags = self.compound_set
else:
tags = None
print('compound_set flag has wrong input type, please input a string compound category ("all", '
'"approved", etc) or a list of categories (["approved", "experimental"])')
quit()
if 'DRUG_GROUPS' in h2i:
stati = ls[h2i['DRUG_GROUPS']].split(';')
if tags is not None:
if len(list(set(tags) & set(stati))) > 0:
include_cmpd = True
else:
ignored_set.append(id_)
continue
if 'approved' in stati:
cm.status = 'approved'
elif 'metabolite' in stati:
cm.status = 'other'
cm.is_metabolite = True
else:
cm.status = 'other'
else:
if self.compound_set != 'all':
print('This mapping does not have drug groups/approval status - '
'please re-run with compound_set="all".')
sys.exit()
cm.status = 'N/A'
if include_cmpd:
self.compounds.append(cm)
self.compound_ids.append(id_)
if self.compound_set and len(self.compounds) == 0:
print('No compounds passed filtering, please check input parameters.')
quit()
# create the indication objects and add indications to the
# already created compound objects from previous loop
# NOTE: if a compound is in the indication mapping file that
# isn't in the compound mapping file, an error will occur. I
# had to remove those compounds from the indication mapping in
# order for it to work
with open(i_map, 'r') as i_f:
lines = i_f.readlines()
header = lines[0]
h2i = {}
for i, h in enumerate(header.strip().split('\t')):
h2i[h] = i
for l in lines[1:]:
ls = l.strip().split('\t')
c_id = int(ls[h2i['CANDO_ID']])
if c_id in ignored_set:
continue
i_name = ls[h2i['INDICATION_NAME']]
ind_id = ls[h2i['MESH_ID']]
cm = self.get_compound(c_id, quiet=True)
if cm:
if ind_id in self.indication_ids:
ind = self.get_indication(ind_id)
ind.compounds.append(cm)
else:
ind = Indication(ind_id, i_name)
ind.compounds.append(cm)
self.indications.append(ind)
self.indication_ids.append(ind.id_)
cm.add_indication(ind)
# add proteins, add signatures and such to compounds
if self.protein_set:
uniprots = []
with open(self.protein_set, 'r') as psf:
lines = psf.readlines()
for line in lines:
uni = line.strip()
uniprots.append(uni)
if matrix:
if matrix[-4:] == '.fpt':
print('The matrix file {} is in the old fpt format -- please '
'convert to tsv with the following line of code:'.format(matrix))
print('>> Matrix({}, convert_to_tsv=True)'.format(matrix))
quit()
print('Reading signatures from matrix...')
with open(matrix, 'r') as m_f:
m_lines = m_f.readlines()
if self.protein_set:
print('Editing signatures according to proteins in {}...'.format(self.protein_set))
targets, pdct_rev = self.uniprot_set_index(self.protein_set)
new_i = 0
matches = [0, 0]
for l_i in range(len(m_lines)):
vec = m_lines[l_i].strip().split('\t')
name = vec[0]
if name in targets:
scores = list(map(float, vec[1:]))
if len(scores) != len(self.compounds):
print('The number of compounds in {} does not match the '
'number of values in {} -- quitting.'.format(self.c_map, self.matrix))
quit()
p = Protein(name, scores)
try:
alt = pdct_rev[name]
p.alt_id = alt
matches[0] += 1
except KeyError:
matches[1] += 1
self.proteins.append(p)
self.protein_id_to_index[name] = new_i
for i in range(len(scores)):
s = scores[i]
self.compounds[i].sig.append(s)
new_i += 1
else:
continue
print('\tDirect UniProt matches:\t{}\n\tDirect PDB matches: \t{}'
'\n\tNew signature length: \t{}'.format(matches[1], matches[0], sum(matches)))
if not sum(matches):
print('Sorry, the input proteins did not match any proteins in the input matrix -- quitting.')
quit()
else:
for l_i in range(len(m_lines)):
vec = m_lines[l_i].strip().split('\t')
name = vec[0]
scores = list(map(float, vec[1:]))
if len(scores) != len(self.compounds):
print('The number of compounds in {} does not match the '
'number of values in {} -- quitting.'.format(self.c_map, self.matrix))
quit()
p = Protein(name, scores)
self.proteins.append(p)
self.protein_id_to_index[name] = l_i
for i in range(len(scores)):
s = scores[i]
self.compounds[i].sig.append(s)
print('Done reading signatures.\n')
if pathways:
print('Reading pathways...')
if self.indication_pathways:
print('Reading indication-pathway associations...')
path_ind = {}
with open(indication_pathways, 'r') as ipf:
for l in ipf:
ls = l.strip().split('\t')
pw = ls[0]
ind_ids = ls[1:]
path_ind[pw] = ind_ids
with open(pathways, 'r') as pf:
for l in pf:
ls = l.strip().split('\t')
pw = ls[0]
ps = ls[1:]
if not ps:
continue
PW = Pathway(pw)
self.pathways.append(PW)
for p in ps:
try:
pi = self.protein_id_to_index[p]
pro = self.proteins[pi]
pro.pathways.append(PW)
PW.proteins.append(pro)
except KeyError:
pass
if self.indication_pathways:
try:
ind_ids = path_ind[pw]
for ind_id in ind_ids:
try:
ind = self.get_indication(ind_id)
except LookupError:
continue
PW.indications.append(ind)
ind.pathways.append(PW)
except KeyError:
continue
if not indication_pathways:
self.quantify_pathways()
print('Done reading pathways.')
if self.ddi_compounds:
print("Reading compound-compound associations...")
ddi = pd.read_csv(ddi_compounds, sep='\t')
for x in ddi.index:
c1 = self.get_compound(int(ddi.loc[x,'CANDO_ID-1']))
c2 = self.get_compound(int(ddi.loc[x,'CANDO_ID-2']))
if c2 not in c1.compounds:
c1.compounds.append(c2)
if c1 not in c2.compounds:
c2.compounds.append(c1)
print('Done reading compound-compound associations.\n')
if self.ddi_adrs:
print("Reading compound pair-adverse events associations...")
ddi = pd.read_csv(ddi_adrs,sep='\t')
# Create a unique set of tuples using CANDO IDs for compound pairs
idss = list(zip(ddi.loc[:,'CANDO_ID-1'].values.tolist(),ddi.loc[:,'CANDO_ID-2'].values.tolist()))
print(" {} compound pair-adverse event associations.".format(len(idss)))
idss = list(set(idss))
# Iterate through list of CANDO ID tuples
for ids in idss:
if ids in self.compound_pair_ids:
cm_p = self.get_compound_pair(ids)
elif (ids[1],ids[0]) in self.compound_pair_ids:
cm_p = self.get_compound_pair((ids[1],ids[0]))
else:
names = (self.get_compound(ids[0]).name,self.get_compound(ids[1]).name)
cm_p = Compound_pair(names, ids, ids)
self.compound_pairs.append(cm_p)
self.compound_pair_ids.append(ids)
# Pull list of ADRs for this compound pair
adrs = ddi.loc[(ddi['CANDO_ID-1']==ids[0]) & (ddi['CANDO_ID-2']==ids[1])]
# Iterate through ADRs for this compound pair
for x in adrs.index:
#ADRs
adr_name = ddi.loc[x,'CONDITION_MESH_NAME']
adr_id = ddi.loc[x,'CONDITION_MESH_ID']
if adr_id in self.adr_ids:
adr = self.get_adr(adr_id)
else:
adr = ADR(adr_id,adr_name)
self.adrs.append(adr)
self.adr_ids.append(adr.id_)
# Add comppund pair to ADR and vice versa
cm_p.add_adr(adr)
adr.compound_pairs.append(cm_p)
print(" {} compound pairs.".format(len(self.compound_pairs)))
print(" {} adverse events.".format(len(self.adrs)))
print('Done reading compound pair-adverse event associations.\n')
'''
for x in ddi.itertuples():
#ADRs
#adr_name = ddi.loc[x,'EVENT_NAME']
adr_name = x[6]
#adr_id = ddi.loc[x,'EVENT_UMLS_ID']
adr_id = x[5]
if adr_id in self.adr_ids:
adr = self.get_adr(adr_id)
else:
adr = ADR(adr_id,adr_name)
self.adrs.append(adr)
self.adr_ids.append(adr.id_)
# Compound pair
ids = (int(x[1]),int(x[3]))
#ids = (int(ddi.loc[x,'CANDO_ID-1']),int(ddi.loc[x,'CANDO_ID-2']))
if ids in self.compound_pair_ids:
cm_p = self.get_compound_pair(ids)
elif (ids[1],ids[0]) in self.compound_pair_ids:
cm_p = self.get_compound_pair((ids[1],ids[0]))
else:
#names = (x[1],x[3])
names = (self.get_compound(ids[0]).name,self.get_compound(ids[1]).name)
cm_p = Compound_pair(names, ids, ids)
self.compound_pairs.append(cm_p)
self.compound_pair_ids.append(ids)
# Add comppund pair to ADR and vice versa
cm_p.add_adr(adr)
adr.compound_pairs.append(cm_p)
print('Done reading compound-compound adverse event associations.\n')
'''
'''
print("Generating compound pairs...")
for i in range(len(self.compounds)):
c1 = self.compounds[i]
for j in range(i,len(self.compounds)):
if i == j:
continue
c2 = self.compounds[j]
names = (c1.name,c2.name)
ids = (c1.id_,c2.id_)
idxs = (c1.id_,c2.id_)
cm_p = Compound_pair(names,ids,idxs)
self.compound_pairs.append(cm_p)
self.compound_pair_ids.append(ids)
print("Done generating compound pairs.\n")
'''
print("Generating compound-compound signatures...")
for cm_p in self.compound_pairs:
c1 = self.get_compound(cm_p.id_[0])
c2 = self.get_compound(cm_p.id_[1])
# Add signatures??
cm_p.sig = [i+j for i,j in zip(c1.sig,c2.sig)]
# max, min, mult?
print("Done generating compound-compound signatures.\n")
if self.indication_proteins:
print('Reading indication-gene associations...')
with open(indication_proteins, 'r') as igf:
for l in igf:
ls = l.strip().split('\t')
ind_id = ls[0]
genes = ls[1].split(";")
for p in genes:
try:
pi = self.protein_id_to_index[p]
pro = self.proteins[pi]
ind = self.get_indication(ind_id)
ind.proteins.append(pro)
pro.indications.append(ind)
except KeyError:
pass
except LookupError:
pass
print('Done reading indication-gene associations.')
if read_dists:
print('Reading {} distances...'.format(self.dist_metric))
with open(read_dists, 'r') as rrs:
lines = rrs.readlines()
for i in range(len(lines)):
c1 = self.compounds[i]
scores = lines[i].strip().split('\t')
if len(scores) != len(self.compounds):
print('The number of compounds in {} does not match the '
'number of values in {} -- quitting.'.format(self.c_map, self.matrix))
quit()
for j in range(len(scores)):
if i == j:
continue
else:
s = float(scores[j])
if similarity:
s = 1 - s
c1.similar.append((self.compounds[j], s))
for c in self.compounds:
sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
c.similar = sorted_scores
c.similar_computed = True
c.similar_sorted = True
print('Done reading {} distances.\n'.format(self.dist_metric))
# if compute distance is true, generate similar compounds for each
if compute_distance and not read_dists:
if self.pathways and not self.indication_pathways and not ddi_adrs:
print('Computing distances using global pathway signatures...')
for c in self.compounds:
self.generate_similar_sigs(c, aux=True)
# Still cleaning this code up.
# Memory issues with full Twosides is a huge limitation
elif ddi_adrs:
print('Computing {} distances for compound pairs...'.format(self.dist_metric))
# put all compound_pair signatures into 2D-array
snp = [self.compound_pairs[i].sig for i in range(0, len(self.compound_pairs))]
snp = np.array(snp) # convert to numpy form
# call pairwise_distances, speed up with custom RMSD function and parallelism
if self.dist_metric == "rmsd":
distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(((u - v) ** 2).mean()), n_jobs=self.ncpus)
distance_matrix = squareform(distance_matrix)
#elif self.dist_metric in ['cosine']:
# distance_matrix = cosine_dist(snp)
# distance_matrix = squareform(distance_matrix, checks=False)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
for i in range(len(self.compound_pairs)):
print("{} of {}".format(i+1,len(self.compound_pairs)))
dists = cdist([snp[i]], snp, dist_metric)[0]
self.compound_pairs[i].similar = dict(zip(self.compound_pairs, dists))
self.compound_pairs[i].similar.pop(i)
self.compound_pairs[i].similar_computed = True
'''
distance_matrix = pairwise_distances_chunked(snp, metric=self.dist_metric,
force_all_finite=False,
n_jobs=self.ncpus)
print("pairwise is done.")
'''
#distance_matrix = np.concatenate(list(distance_matrix), axis=0)
#print("concat is done.")
#distance_matrix = pairwise_distances(snp, metric=self.dist_metric,
# force_all_finite=False,
# n_jobs=self.ncpus)
# Removed checks in case the diagonal is very small (close to zero) but not zero.
#distance_matrix = squareform(distance_matrix, checks=False)
#print("squareform is done.")
#i = 0
#cp_ids = [i.id_ for i in self.compound_pairs]
#for cp in self.compound_pairs:
#for i in range(len(self.compound_pairs)):
'''
for x in distance_matrix:
for y in x:
cp = self.compound_pairs[i]
print("{} of {}".format(i+1,len(self.compound_pairs)))
cp.similar = dict(zip(self.compound_pairs, y))
# Remove self similar
del cp.similar[cp]
# Completed simialr calc
cp.similar_computed = True
'''
#print(distance_matrix[i])
#dists = cdist([snp[i]], snp, dist_metric)[0]
# Let us try dicts instead of list of tuples
#self.compound_pairs[i].similar = dict(zip(self.compound_pairs, dists))
#del self.compound_pairs[i].similar[self.compound_pairs[i]]
#self.compound_pairs[i].similar = list(zip(self.compound_pairs, dists))
#self.compound_pairs[i].similar = list(zip(self.compound_pairs, distance_matrix[i]))
#self.compound_pairs[i].similar.pop(i)
#distance_matrix = np.delete(distance_matrix, 0, 0)
#cp.similar = dict(zip(cp_ids, distance_matrix[i]))
# Sort similar
#cp.similar = {k: v for k,v in sorted(cp.similar.items(), key=operator.itemgetter(1))}
#cp.similar_sorted = True
#i+=1
#del distance_matrix
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
exit()
'''
# step through the condensed matrix - add RMSDs to Compound.similar lists
nc = len(self.compound_pairs)
print(nc)
n = 0
for i in range(nc):
for j in range(i, nc):
c1 = self.compound_pairs[i]
c2 = self.compound_pairs[j]
if i == j:
continue
print("got both pairs")
r = distance_matrix[n]
print(r)
c1.similar.append((c2, r))
c2.similar.append((c1, r))
n += 1
'''
print('Done computing {} distances.\n'.format(self.dist_metric))
# sort the dists after saving (if desired)
print('Sorting {} distances...'.format(self.dist_metric))
i = 1
for cp in self.compound_pairs:
print("{} of {}".format(i,len(self.compound_pairs)))
cp.similar = {k: v for k,v in sorted(cp.similar.items(), key=operator.itemgetter(1))}
#cp.similar = {k: v for k, v in sorted(cp.similar.items(), key=lambda item: item[1])}
cp.similar_sorted = True
i+=1
print('Done sorting {} distances.\n'.format(self.dist_metric))
'''
for c in self.compound_pairs:
sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
c.similar = sorted_scores
c.similar_computed = True
c.similar_sorted = True
'''
else:
print('Computing {} distances...'.format(self.dist_metric))
# put all compound signatures into 2D-array
signatures = []
for i in range(0, len(self.compounds)):
signatures.append(self.compounds[i].sig)
snp = np.array(signatures) # convert to numpy form
# call pairwise_distances, speed up with custom RMSD function and parallelism
if self.dist_metric == "rmsd":
distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(np.mean((u - v)**2)), n_jobs=self.ncpus)
distance_matrix = squareform(distance_matrix)
elif self.dist_metric in ['correlation', 'euclidean', 'cityblock', 'cosine']:
distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False, n_jobs=self.ncpus)
#distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False, n_jobs=self.ncpus)
# Removed checks in case the diagonal is very small (close to zero) but not zero.
distance_matrix = squareform(distance_matrix, checks=False)
#elif self.dist_metric in ['cosine']:
# distance_matrix = cosine_dist(snp)
# distance_matrix = squareform(distance_matrix, checks=False)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
exit()
# step through the condensed matrix - add RMSDs to Compound.similar lists
nc = len(self.compounds)
n = 0
for i in range(nc):
for j in range(i, nc):
c1 = self.compounds[i]
c2 = self.compounds[j]
if i == j:
continue
r = distance_matrix[n]
c1.similar.append((c2, r))
c2.similar.append((c1, r))
n += 1
print('Done computing {} distances.\n'.format(self.dist_metric))
if self.save_dists:
def dists_to_str(cmpd, ci):
o = []
for si in range(len(cmpd.similar)):
if ci == si:
if self.similarity:
o.append('1.0')
else:
o.append('0.0')
s = cmpd.similar[si]
o.append(str(s[1]))
if len(o) < len(self.compounds):
o.append('0.0')
o = "\t".join(o)
o = o + '\n'
return o
print('Saving {} distances...'.format(self.dist_metric))
'''
if adr_ddi:
with open(self.save_dists, 'w') as srf:
for ci in range(len(self.compound_pairs)):
c = self.compound_pairs[ci]
srf.write(dists_to_str(c, ci))
else:
with open(self.save_dists, 'w') as srf:
for ci in range(len(self.compounds)):
c = self.compounds[ci]
srf.write(dists_to_str(c, ci))
'''
with open(self.save_dists, 'w') as srf:
for ci in range(len(self.compounds)):
c = self.compounds[ci]
srf.write(dists_to_str(c, ci))
print('Done saving {} distances.\n'.format(self.dist_metric))
if rm_compounds:
print('Removing undesired compounds in {}...'.format(rm_compounds))
with open(rm_compounds, 'r') as rcf:
self.rm_cmpds = [int(line.strip().split('\t')[0]) for line in rcf]
self.compounds = [c for c in self.compounds if c.id_ not in self.rm_cmpds]
for c in self.compounds:
c.similar = [s for s in c.similar if s[0].id_ not in self.rm_cmpds]
c.compounds = [s for s in c.compounds if s.id_ not in self.rm_cmpds]
if self.matrix:
for p in self.proteins:
p.sig = [y for x, y in enumerate(p.sig) if x not in self.rm_cmpds]
print('Done removing undesired compounds.\n')
# sort the RMSDs after saving (if desired)
for c in self.compounds:
sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
c.similar = sorted_scores
c.similar_computed = True
c.similar_sorted = True
if self.rm_zeros:
print('Removing compounds with all-zero signatures...')
def check_sig(sig):
for s in sig:
if s != 0.0:
return True
return False
non_zero_compounds = []
for c in self.compounds:
if check_sig(c.sig):
non_zero_compounds.append(c)
self.compounds = non_zero_compounds
print('Done removing compounds with all-zero signatures.\n')
if self.rm_zeros or self.rm_compounds:
print('Filtering indication mapping...')
for ind in self.indications:
ind.compounds = [cmpd for cmpd in ind.compounds if cmpd.id_ not in self.rm_cmpds]
print('Done filtering indication mapping.\n')
if compute_distance and not read_dists and (rm_compounds or rm_zeros):
if self.pathways and not self.indication_pathways:
print('Recomputing distances using global pathway signatures...')
for c in self.compounds:
self.generate_similar_sigs(c, aux=True)
else:
print('Recomputing {} distances...'.format(self.dist_metric))
# put all compound signatures into 2D-array
signatures = []
for i in range(0, len(self.compounds)):
signatures.append(self.compounds[i].sig)
snp = np.array(signatures) # convert to numpy form
# call pairwise_distances, speed up with custom RMSD function and parallelism
if self.dist_metric == "rmsd":
distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(np.mean((u - v)**2)),
n_jobs=self.ncpus)
distance_matrix = squareform(distance_matrix)
elif self.dist_metric in ['correlation', 'euclidean', 'cityblock', 'cosine']:
distance_matrix = pairwise_distances_chunked(snp, metric=self.dist_metric,
force_all_finite=False,
n_jobs=self.ncpus)
distance_matrix = np.concatenate(list(distance_matrix), axis=0)
#distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False,
# n_jobs=self.ncpus)
# Removed checks in case the diagonal is very small (close to zero) but not zero.
distance_matrix = squareform(distance_matrix, checks=False)
elif self.dist_metric in ['cosine']:
distance_matrix = cosine_dist(snp)
distance_matrix = squareform(distance_matrix, checks=False)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
exit()
# step through the condensed matrix - add RMSDs to Compound.similar lists
nc = len(self.compounds)
n = 0
for i in range(nc):
for j in range(i, nc):
c1 = self.compounds[i]
c2 = self.compounds[j]
if i == j:
continue
r = distance_matrix[n]
c1.similar.append((c2, r))
c2.similar.append((c1, r))
n += 1
print('Done recomputing {} distances.\n'.format(self.dist_metric))
if adr_map:
print('Reading ADR mapping file...')
with open(adr_map, 'r') as amf:
lines = amf.readlines()
header = lines[0]
h2i = {}
for i, h in enumerate(header.strip().split('\t')):
h2i[h] = i
prev_id = -1
lcount = 0
for l in lines[1:]:
ls = l.strip().split('\t')
adr_name = ls[h2i['CONDITION_MESH_NAME']]
adr_id = ls[h2i['CONDITION_MESH_ID']]
c_id = int(ls[h2i['CANDO_ID']])
#adr_name = ls[h2i['condition_concept_name']]
#c_id = int(ls[h2i['drug_cando_id']])
#adr_id = ls[h2i['condition_meddra_id']]
if c_id == -1:
continue
if prev_id == c_id:
pass
else:
cmpd = self.get_compound(c_id, quiet=True)
if cmpd is not None:
prev_id = c_id
else:
# cmpd is not in CANDO - prevents from crashing
continue
try:
adr = self.get_adr(adr_id)
adr.compounds.append(cmpd)
cmpd.adrs.append(adr)
except LookupError:
adr = ADR(adr_id, adr_name)
adr.compounds.append(cmpd)
cmpd.adrs.append(adr)
self.adrs.append(adr)
print('Read {} ADRs.'.format(len(self.adrs)))
if protein_map:
print('Reading Protein mapping file...')
prot_df = pd.read_csv(protein_map,sep='\t',index_col=0)
for i in prot_df.index:
p = self.get_protein(i)
p.name = prot_df['uniprotRecommendedName'][i]
p.gene = prot_df['geneName'][i]
def search_compound(self, name, n=5):
"""!
Print closest Compound names/IDs for input search str
@param name str: Compound name
@param n int: Number of outputted compounds
@return Returns None
"""
id_d = {}
def return_names(x):
id_d[x.name] = x.id_
return x.name
name = name.strip().lower().replace(' ', '_')
cando_drugs = list(map(return_names, self.compounds))
nms = difflib.get_close_matches(name, cando_drugs, n=n, cutoff=0.5)
print('id\tname')
for nm in nms:
print("{}\t{}".format(id_d[nm], nm))
def get_compound(self, cmpd_id, quiet=False):
"""!
Get Compound object from Compound id or fuzzy match to Compound name
@param cmpd_id int or str: Compound id or Compound name
@return Returns object: Compound object or None if no exact match is found
"""
if type(cmpd_id) is int:
for c in self.compounds:
if c.id_ == cmpd_id:
return c
if not quiet:
print("{0} not in {1}".format(cmpd_id, self.c_map))
return None
elif type(cmpd_id) is str:
id_d = {}
def return_names(x):
id_d[x.name] = x.id_
return x.name
cando_drugs = list(map(return_names, self.compounds))
name = cmpd_id.strip().lower().replace(' ', '_')
if name not in cando_drugs:
print('"{}" is not in our mapping, here are the 5 closest results:'.format(name))
self.search_compound(name, n=5)
return None
else:
return self.get_compound(id_d[name])
def get_compound_pair(self, ids):
"""!
Get Compound_pair object from Compound_pair id
@param id_ int: Compound_pair id
@return Returns object: Compound_pair object
"""
for c in self.compound_pairs:
if c.id_ == ids:
return c
elif c.id_ == (ids[1],ids[0]):
return c
print("{0} not in {1}".format(ids, self.ddi_adrs))
return None
def get_protein(self, protein_id):
"""!
Get Protein object from Protein id
@param protein_id str: Protein name
@return Returns object: Protein object
"""
if len(self.proteins) == 0 or not self.matrix:
print('No matrix/proteins loaded -- quitting.')
quit()
for p in self.proteins:
if p.id_ == protein_id:
return p
def get_indication(self, ind_id):
"""!
Get Indication object from Indication id
@param ind_id str: Indication id
@return Returns object: Indication object
"""
for i in self.indications:
if i.id_ == ind_id:
return i
print('{} not in {}'.format(ind_id, self.i_map))
raise LookupError
def get_pathway(self, id_):
"""!
Get Pathway object from Pathway id
@param id_ str: Pathway id
@return Returns object: Pathway object
"""
for p in self.pathways:
if p.id_ == id_:
return p
raise LookupError
def get_adr(self, id_):
"""!
Get ADR (adverse drug reaction) from ADR id
@param id_ str: ADR id
@return Returns object: ADR object
"""
for a in self.adrs:
if a.id_ == id_:
return a
raise LookupError
def search_indication(self, name, n=5):
"""!
Print closest MeSH IDs for Indication name
@param name str: Indication name
@param n int: Number of outputted indications
@return Returns None
"""
id_d = {}
def return_names(x):
id_d[x.name] = x.id_
return x.name
name = name.strip()
cando_inds = list(map(return_names, self.indications))
exact_matches = []
for ci in cando_inds:
if name in ci:
exact_matches.append(ci)
if exact_matches:
print('Matches exactly containing {}:'.format(name))
print('id \tname')
for em in exact_matches:
print("{}\t{}".format(id_d[em], em))
print()
nms = difflib.get_close_matches(name, cando_inds, n=n, cutoff=0.3)
print('Matches using string distance:')
print('id \tname')
for nm in nms:
print("{}\t{}".format(id_d[nm], nm))
def top_targets(self, cmpd, n=10, negative=False, save_file=''):
"""!
Get the top scoring protein targets for a given compound
@param cmpd Compound or int: Compound object or int id_ for which to print targets
@param n int: number of top targets to print/return
@param negative int: if the interaction scores are negative (stronger) energies
@param save_file str: output file for results
@return Returns list: list of tuples (protein id_, score)
"""
# print the list of the top targets
if type(cmpd) is Compound:
pass
elif type(cmpd) is int:
cmpd = self.get_compound(cmpd)
else:
print('Please enter a Compound object or integer id_ for a compound -- quitting.')
quit()
all_interactions = []
sig = cmpd.sig
for i in range(len(sig)):
s = sig[i]
p = self.proteins[i]
all_interactions.append((p, s))
if negative:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])
else:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]
if save_file:
o = open(save_file,'w')
o.write('rank\tscore\tindex\tid\tgene\tname\n')
print('Compound is {}'.format(cmpd.name))
print('rank\tscore\tindex\tid\tgene\tname')
for si in range(n):
pr = interactions_sorted[si][0]
print('{}\t{}\t{}\t{}\t{}\t{}'.format(si+1, round(interactions_sorted[si][1], 3),
self.proteins.index(pr), pr.id_, pr.gene, pr.name))
if save_file:
o.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(si+1, round(interactions_sorted[si][1], 3),
self.proteins.index(pr), pr.id_, pr.gene, pr.name))
print()
if save_file:
o.close()
return interactions_sorted[0:n]
def common_targets(self, cmpds_file, n=10, negative=False, save_file=''):
"""!
Get the consensus top scoring protein targets for a set of compounds
@param cmpds_file str: File containing a list of Compound IDs for which to search common targets
@param n int: number of top targets to print/return
@param negative int: if the interaction scores are negative (stronger) energies
@param save_file str: save results to file name
@return Returns list: list of tuples (protein id_, score)
"""
cs_df = pd.read_csv(cmpds_file,sep='\t',header=None)
sum_sig = [0]*len(self.get_compound(0).sig)
for ci in cs_df.itertuples(index=False):
try:
s = self.get_compound(int(ci[0])).sig
except:
print("{} does not exist in the current drug library.\n".format(ci[0]))
continue
sum_sig = [i+j for i,j in zip(sum_sig,s)]
# print the list of the top targets
all_interactions = []
for i in range(len(sum_sig)):
s = sum_sig[i]
p = self.proteins[i]
all_interactions.append((p, s))
if negative:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])
else:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]
if save_file:
o = open(save_file,'w')
o.write('rank\tscore\tindex\tid\tgene\tname\n')
print('rank\tscore\tindex\tid\tgene\tname')
for si in range(n):
pr = interactions_sorted[si][0]
print('{}\t{}\t{}\t{}\t{}\t{}'.format(si+1, round(interactions_sorted[si][1], 3),
self.proteins.index(pr), pr.id_, pr.gene, pr.name))
if save_file:
o.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(si+1, round(interactions_sorted[si][1], 3),
self.proteins.index(pr), pr.id_, pr.gene, pr.name))
print()
if save_file:
o.close()
return interactions_sorted[0:n]
def virtual_screen(self, protein, n=10, negative=False, compound_set='all', save_file=''):
"""!
Get the top scoring compounds for a given protein
@param protein Protein int or str: Protein (object, int index, or str id_) of which to screen for top scores
@param n int: number of top compounds to print/return
@param negative int: if the interaction scores are negative (stronger) energies
@param compound_set str: use all Compounds ('all') or only approved Compounds ('approved')
@param save_file str: save results to file name
@return Returns None
"""
if type(protein) is Protein:
prot = protein
elif type(protein) is int:
prot = self.proteins[protein]
elif type(protein) is str:
for p in self.proteins:
if p.id_ == protein:
prot = p
# print the list of the top targets
all_interactions = []
sig = prot.sig
for i in range(len(sig)):
s = sig[i]
c_id = self.compounds[i].id_
#if c_id in self.rm_cmpds:
# continue
all_interactions.append((c_id, s))
if negative:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])
else:
interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]
print('Protein is {}'.format(prot.id_))
if save_file:
o = open(save_file,'w')
o.write('rank\tscore\tid\tapproved\tname\n')
print('rank\tscore\tid\tapproved\tname')
printed = 0
si = 0
while printed < n:
c = self.get_compound(interactions_sorted[si][0])
#c = self.compounds[interactions_sorted[si][0]]
if compound_set == 'approved':
if c.status == 'approved':
print('{}\t{}\t{}\t{} \t{}'.format(printed+1, round(interactions_sorted[si][1], 3), c.id_,
'true', c.name))
if save_file:
o.write('{}\t{}\t{}\t{}\t{}\n'.format(printed+1, round(interactions_sorted[si][1], 3), c.id_,
'true', c.name))
printed += 1
else:
print('{}\t{}\t{}\t{} \t{}'.format(printed+1, round(interactions_sorted[si][1], 3),
c.id_, str(c.status == 'approved').lower(),
c.name))
if save_file:
o.write('{}\t{}\t{}\t{}\t{}\n'.format(printed+1, round(interactions_sorted[si][1], 3),
c.id_, str(c.status == 'approved').lower(),
c.name))
printed += 1
si += 1
print()
if save_file:
o.close()
return
def uniprot_set_index(self, prots):
"""!
Gather proteins from input matrix that map to UniProt IDs from 'protein_set=' param
@param prots list: UniProt IDs (str)
@return Returns list: Protein chains (str) matching input UniProt IDs
"""
pre = os.path.dirname(__file__) + "/data/v2.2+/"
if not os.path.exists('{}/mappings/pdb_2_uniprot.csv'.format(pre)):
print('Downloading UniProt to PDB mapping file...')
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/pdb_2_uniprot.csv'
dl_file(url, '{}/mappings/pdb_2_uniprot.csv'.format(pre))
pdct = {}
pdct_rev = {}
with open('{}/mappings/pdb_2_uniprot.csv'.format(pre), 'r') as u2p:
for l in u2p.readlines()[1:]:
spl = l.strip().split(',')
pdb = spl[0] + spl[1]
uni = spl[2]
try:
if pdb not in pdct[uni]:
pdct[uni].append(pdb)
except KeyError:
pdct[uni] = [pdb]
pdct_rev[pdb] = uni
targets = []
with open(prots, 'r') as unisf:
for lp in unisf:
prot = lp.strip()
targets.append(prot)
#pdct_rev[prot] = lp.strip().upper()
try:
targets += pdct[lp.strip().upper()]
except KeyError:
pass
return targets, pdct_rev
def generate_similar_sigs(self, cmpd, sort=False, proteins=[], aux=False):
"""!
For a given compound, generate the similar compounds using distance of sigs.
@param cmpd object: Compound object
@param sort bool: Sort the list of similar compounds
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
# find index of query compound, collect signatures for both
q = 0
c_sig = []
if proteins is None:
c_sig = cmpd.sig
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
c_sig.append(cmpd.sig[index])
else:
if aux:
c_sig = cmpd.aux_sig
else:
c_sig = cmpd.sig
ca = np.array([c_sig])
other_sigs = []
for ci in range(len(self.compounds)):
c = self.compounds[ci]
if cmpd.id_ == c.id_:
q = ci
other = []
if proteins is None:
other_sigs.append(c.sig)
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
other.append(c.sig[index])
other_sigs.append(other)
else:
if aux:
other_sigs.append(c.aux_sig)
else:
other_sigs.append(c.sig)
oa = np.array(other_sigs)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
cmpd.similar = []
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compounds)
for i in range(n):
c2 = self.compounds[i]
if i == q:
continue
d = distances[0][i]
cmpd.similar.append((c2, d))
n += 1
if sort:
sorted_scores = sorted(cmpd.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpd.similar = sorted_scores
cmpd.similar_computed = True
cmpd.similar_sorted = True
return sorted_scores
else:
cmpd.similar_computed = True
return cmpd.similar
def generate_similar_sigs_cp(self, cmpd_pair, sort=False, proteins=[], aux=False):
"""!
For a given compound pair, generate the similar compound pairs using distance of sigs.
@param cmpd_pair object: Compound_pair object
@param sort bool: Sort the list of similar compounds
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
# find index of query compound, collect signatures for both
q = 0
cp_sig = []
if proteins is None:
cp_sig = cmpd_pair.sig
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
cp_sig.append(cmpd_pair.sig[index])
else:
if aux:
cp_sig = cmpd_pair.aux_sig
else:
cp_sig = cmpd_pair.sig
ca = np.array([cp_sig])
other_sigs = []
for ci in range(len(self.compound_pairs)):
cp = self.compound_pairs[ci]
if cmpd_pair.id_ == cp.id_:
q = ci
other = []
if proteins is None:
other_sigs.append(cp.sig)
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
other.append(cp.sig[index])
other_sigs.append(other)
else:
if aux:
other_sigs.append(cp.aux_sig)
else:
other_sigs.append(cp.sig)
oa = np.array(other_sigs)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
cmpd_pair.similar = []
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compound_pairs)
for i in range(n):
c2 = self.compound_pairs[i]
if i == q:
continue
d = distances[0][i]
cmpd_pair.similar.append((c2, d))
n += 1
if sort:
sorted_scores = sorted(cmpd_pair.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpd_pair.similar = sorted_scores
cmpd_pair.similar_computed = True
cmpd_pair.similar_sorted = True
return sorted_scores
else:
cmpd_pair.similar_computed = True
return cmpd_pair.similar
def generate_some_similar_sigs(self, cmpds, sort=False, proteins=[], aux=False):
"""!
For a given list of compounds, generate the similar compounds based on dist of sigs
This is pathways/genes for all intents and purposes
@param cmpds list: Compound objects
@param sort bool: Sort similar compounds for each Compound
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
q = [cmpd.id_ for cmpd in cmpds]
if proteins is None:
ca = [cmpd.sig for cmpd in cmpds]
oa = [cmpd.sig for cmpd in self.compounds]
elif proteins:
index = [self.protein_id_to_index[pro.id_] for pro in proteins]
ca = [[cmpd.sig[i] for i in index] for cmpd in cmpds]
oa = [[cmpd.sig[i] for i in index] for cmpd in self.compounds]
else:
if aux:
ca = [cmpd.aux_sig for cmpd in cmpds]
oa = [cmpd.aux_sig for cmpd in self.compounds]
else:
ca = [cmpd.sig for cmpd in cmpds]
oa = [cmpd.sig for cmpd in self.compounds]
ca = np.asarray(ca)
oa = np.asarray(oa)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compounds)
for j in range(len(cmpds)):
cmpds[j].similar = []
for i in range(n):
c2 = self.compounds[i]
id2 = c2.id_
if id2 == q[j]:
continue
d = distances[j][i]
cmpds[j].similar.append((c2, d))
if sort:
sorted_scores = sorted(cmpds[j].similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpds[j].similar = sorted_scores
cmpds[j].similar_computed = True
cmpds[j].similar_sorted = True
else:
cmpds[j].similar_computed = True
def quantify_pathways(self, indication=None):
"""!
Uses the pathway quantifier defined in the CANDO instantiation to make a
pathway signature for all pathways in the input file (NOTE: does not compute distances)
@param indication object: Indication object
@return Returns None
"""
pq = self.pathway_quantifier
if pq == 'max':
func = max
elif pq == 'sum':
func = sum
elif pq == 'avg':
func = np.average
elif pq == 'proteins':
if not self.indication_pathways:
print('Pathway quantifier "proteins" should only be used in combination with a '
'pathway-disease mapping (indication_pathways), quitting.')
quit()
func = None
else:
print('Please enter a proper pathway quantify method, quitting.')
func = None
quit()
# this is a recursive function for checking if the pathways have proteins
def check_proteins(paths):
pl = [] # list of pathways with >1 protein
n = 0
for path in paths:
if len(path.proteins) > 0:
pl.append(path)
n += 1
if n > 0:
return pl
else:
print('The associated pathways for this indication ({}) do not have enough proteins, '
'using all pathways'.format(indication.id_))
return check_proteins(self.pathways)
if indication:
if len(indication.pathways) == 0:
print('Warning: {} does not have any associated pathways - using all pathways'.format(indication.name))
pws = self.pathways
else:
pws = check_proteins(indication.pathways)
else:
pws = check_proteins(self.pathways)
for ci in range(len(self.compounds)):
pw_sig_all = []
c = self.compounds[ci]
for pw in pws:
if len(pw.proteins) == 0:
print('No associated proteins for pathway {}, skipping'.format(pw.id_))
continue
pw_sig = []
for p in pw.proteins:
ch = p.id_
ch_i = self.protein_id_to_index[ch]
pw_sig.append(c.sig[ch_i])
if pq == 'proteins':
pw_sig_all += pw_sig
else:
pw_sig_all.append(pw_sig)
if pq != 'proteins':
c.aux_sig = list(map(func, pw_sig_all))
else:
c.aux_sig = pw_sig_all
def results_analysed(self, f, metrics, effect_type):
"""!
Creates the results analysed named file for the benchmarking and
computes final avg indication accuracies
@param f str: File path for results analysed named
@param metrics list: Cutoffs used for the benchmarking protocol
@param effect_type str: Defines the effect as either an Indication (disease) or ADR (adverse reaction)
@return Returns dct: dict of accuracies at each cutoff
"""
fo = open(f, 'w')
effects = list(self.accuracies.keys())
# Write header
fo.write("{0}_id\tcmpds_per_{0}\ttop10\ttop25\ttop50\ttop100\ttopAll\ttop1%\t"
"top5%\ttop10%\ttop50%\ttop100%\t{0}_name\n".format(effect_type))
effects_sorted = sorted(effects, key=lambda x: (len(x[0].compounds), x[0].id_))[::-1]
l = len(effects)
final_accs = {}
for m in metrics:
final_accs[m] = 0.0
for effect, c in effects_sorted:
fo.write("{0}\t{1}\t".format(effect.id_, c))
accs = self.accuracies[(effect, c)]
for m in metrics:
n = accs[m]
y = str(n / c * 100)[0:4]
fo.write("{}\t".format(y))
final_accs[m] += n / c / l
fo.write("{}\n".format(effect.name))
fo.close()
return final_accs
    def canbenchmark(self, file_name, indications=[], continuous=False, bottom=False,
                     ranking='standard', adrs=False):
        """!
        Benchmarks the platform based on compound similarity of those approved for the same diseases
        @param file_name str: Name to be used for the various results files (e.g. file_name=test --> summary_test.tsv)
        @param indications list or str: List of Indication ids to be benchmarked, otherwise all will be used.
        @param continuous bool: Use the percentile of distances from the similarity matrix as the benchmarking cutoffs
        @param bottom bool: Reverse the ranking (descending) for the benchmark
        @param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,
        modified, and ordinal)
        @param adrs bool: ADRs are used as the Compounds' phenotypic effects instead of Indications
        @return Returns None
        """
        # NOTE(review): mutable default indications=[] is shared across calls; it
        # is never mutated here, but a None sentinel would be safer if revised.
        if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):
            print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')
            exit()
        # Lazily sort every compound's similar list ascending by distance, once;
        # NaN distances are pushed to the end via the 100000 sentinel.
        if not self.indication_proteins and not self.indication_pathways:
            if not self.compounds[0].similar_sorted:
                for c in self.compounds:
                    sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
                    c.similar = sorted_scores
                    c.similar_sorted = True
        # Make sure the output directories exist.
        if not os.path.exists('./results_analysed_named'):
            print("Directory 'results_analysed_named' does not exist, creating directory")
            os.system('mkdir results_analysed_named')
        if not os.path.exists('./raw_results'):
            print("Directory 'raw_results' does not exist, creating directory")
            os.system('mkdir raw_results')
        ra_named = 'results_analysed_named/results_analysed_named-' + file_name + '.tsv'
        ra = 'raw_results/raw_results-' + file_name + '.csv'
        summ = 'summary-' + file_name + '.tsv'
        ra_out = open(ra, 'w')
        # Label used in output headers: ADR vs disease benchmark.
        def effect_type():
            if adrs:
                return 'ADR'
            else:
                return 'disease'
        # Competitive standard rank for descending (bottom) order: entries with
        # distance strictly greater than r precede r.
        def competitive_standard_bottom(sims, r):
            rank = 0
            for sim in sims:
                if sim[1] > r:
                    rank += 1.0
                else:
                    return rank
            return len(sims)
        # Competitive modified rank for descending order: ties count toward rank.
        def competitive_modified_bottom(sims, r):
            rank = 0
            for sim in sims:
                if sim[1] >= r:
                    rank += 1.0
                else:
                    return rank
            return len(sims)
        # Competitive modified ranking code
        def competitive_modified(sims, r):
            rank = 0
            for sim in sims:
                if sim[1] <= r:
                    rank += 1.0
                else:
                    return rank
            return len(sims)
        # Competitive standard ranking code
        def competitive_standard(sims, r):
            rank = 0
            for sim in sims:
                if sim[1] < r:
                    rank += 1.0
                else:
                    return rank
            return len(sims)
        # Restrict indications to a named subset ('pathogen' or 'human') using the
        # top-level disease grouping file (downloaded on first use).
        def filter_indications(ind_set):
            if not os.path.exists('v2.0/mappings/group_disease-top_level.tsv'):
                url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/mappings/group_disease-top_level.tsv'
                dl_file(url, 'v2.0/mappings/group_disease-top_level.tsv')
            path_ids = ['C01', 'C02', 'C03']
            with open('v2.0/mappings/group_disease-top_level.tsv', 'r') as fgd:
                for l in fgd:
                    ls = l.strip().split('\t')
                    if ls[1] in path_ids:
                        ind = self.get_indication(ls[0])
                        ind.pathogen = True
            if ind_set == 'pathogen':
                return [indx for indx in self.indications if indx.pathogen]
            elif ind_set == 'human':
                return [indx for indx in self.indications if not indx.pathogen]
            else:
                print('Please enter proper indication set, options include "pathogen", "human", or "all".')
                quit()
        effect_dct = {}
        ss = []
        c_per_effect = 0
        # Resolve the set of effects (indications or ADRs) to benchmark.
        # NOTE(review): if 'indications' is neither a list nor a str, 'effects'
        # is never assigned and the loop below raises NameError — confirm callers.
        if isinstance(indications, list) and len(indications) >= 1:
            effects = list(map(self.get_indication, indications))
        elif isinstance(indications, list) and len(indications) == 0 and not adrs:
            effects = self.indications
        elif adrs:
            effects = self.adrs
        else:
            if isinstance(indications, str):
                if indications == 'all':
                    effects = self.indications
                else:
                    effects = filter_indications(indications)
        # Distance-percentile cutoffs used for continuous benchmarking.
        def cont_metrics():
            all_v = []
            for c in self.compounds:
                for s in c.similar:
                    if s[1] != 0.0:
                        all_v.append(s[1])
            avl = len(all_v)
            all_v_sort = sorted(all_v)
            # for tuple 10, have to add the '-1' for index out of range reasons
            metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),
                       (4, all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),
                       (7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),
                       (10, all_v_sort[int(avl/1.0)-1])]
            return metrics
        x = (len(self.compounds)) / 100.0 # changed this...no reason to use similar instead of compounds
        # had to change from 100.0 to 100.0001 because the int function
        # would chop off an additional value of 1 for some reason...
        if continuous:
            metrics = cont_metrics()
        else:
            metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),
                       (6, int(x*1.0001)), (7, int(x*5.0001)), (8, int(x*10.0001)),
                       (9, int(x*50.0001)), (10, int(x*100.0001))]
        if continuous:
            ra_out.write("compound_id,{}_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),"
                         "1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),"
                         "50%({:.3f}),100%({:.3f}),value\n".format(effect_type(), metrics[0][1], metrics[1][1],
                                                                   metrics[2][1], metrics[3][1], metrics[4][1],
                                                                   metrics[5][1], metrics[6][1], metrics[7][1],
                                                                   metrics[8][1], metrics[9][1]))
        else:
            ra_out.write("compound_id,{}_id,top10,top25,top50,top100,"
                         "topAll,top1%,top5%,top10%,top50%,top100%,rank\n".format(effect_type()))
        for effect in effects:
            count = len(effect.compounds)
            # Need at least two approved compounds for leave-one-out recovery.
            if count < 2:
                continue
            if not adrs:
                if self.indication_pathways:
                    if len(effect.pathways) == 0:
                        print('No associated pathways for {}, skipping'.format(effect.id_))
                        continue
                    elif len(effect.pathways) < 1:
                        # NOTE(review): unreachable — len()==0 is caught above.
                        #print('Less than 5 associated pathways for {}, skipping'.format(effect.id_))
                        continue
            c_per_effect += count
            effect_dct[(effect, count)] = {}
            for m in metrics:
                effect_dct[(effect, count)][m] = 0.0
            # retrieve the appropriate proteins/pathway indices here, should be
            # incorporated as part of the ind object during file reading
            vs = []
            if self.pathways:
                if self.indication_pathways:
                    if self.pathway_quantifier == 'proteins':
                        for pw in effect.pathways:
                            for p in pw.proteins:
                                if p not in vs:
                                    vs.append(p)
                    else:
                        self.quantify_pathways(indication=effect)
            # Retrieve the appropriate protein indices here, should be
            # incorporated as part of the ind object during file reading
            if self.indication_proteins:
                dg = []
                for p in effect.proteins:
                    if p not in dg:
                        dg.append(p)
            cmpds = effect.compounds
            # Recompute pairwise distances restricted to the effect-specific
            # protein/pathway subspace, when such a mapping is in use.
            if self.pathways:
                if self.indication_pathways:
                    if self.pathway_quantifier == 'proteins':
                        if not vs:
                            print('Warning: protein list empty for {}, using all proteins'.format(effect.id_))
                            self.generate_some_similar_sigs(cmpds, sort=True, proteins=None, aux=True)
                        else:
                            self.generate_some_similar_sigs(cmpds, sort=True, proteins=vs, aux=True)
                else:
                    self.generate_some_similar_sigs(cmpds, sort=True, aux=True)
            elif self.indication_proteins:
                if len(dg) < 2:
                    self.generate_some_similar_sigs(cmpds, sort=True, proteins=None)
                else:
                    self.generate_some_similar_sigs(cmpds, sort=True, proteins=dg)
            # call c.generate_similar_sigs()
            # use the proteins/pathways specified above
            for c in effect.compounds:
                # Walk this compound's neighbors (nearest first) and stop at the
                # first one sharing the effect; record its rank/distance per cutoff.
                for cs in c.similar:
                    if adrs:
                        if effect in cs[0].adrs:
                            cs_dist = cs[1]
                        else:
                            continue
                    else:
                        if effect in cs[0].indications:
                            cs_dist = cs[1]
                        else:
                            continue
                    value = 0.0
                    if continuous:
                        value = cs_dist
                    elif bottom:
                        if ranking == 'modified':
                            value = competitive_modified_bottom(c.similar, cs_dist)
                        elif ranking == 'standard':
                            value = competitive_standard_bottom(c.similar, cs_dist)
                        elif ranking == 'ordinal':
                            value = c.similar.index(cs)
                        else:
                            print("Ranking function {} is incorrect.".format(ranking))
                            exit()
                    elif ranking == 'modified':
                        value = competitive_modified(c.similar, cs_dist)
                    elif ranking == 'standard':
                        value = competitive_standard(c.similar, cs_dist)
                    elif ranking == 'ordinal':
                        value = c.similar.index(cs)
                    else:
                        print("Ranking function {} is incorrect.".format(ranking))
                        exit()
                    if adrs:
                        s = [str(c.index), effect.name]
                    else:
                        s = [str(c.index), effect.id_]
                    for x in metrics:
                        if value <= x[1]:
                            effect_dct[(effect, count)][x] += 1.0
                            s.append('1')
                        else:
                            s.append('0')
                    if continuous:
                        s.append(str(value))
                    else:
                        s.append(str(int(value)))
                    ss.append(s)
                    break
        self.accuracies = effect_dct
        final_accs = self.results_analysed(ra_named, metrics, effect_type())
        ss = sorted(ss, key=lambda xx: int(xx[0]))
        # Pairwise accuracy: count compound-effect pairs recovered per cutoff
        # while writing the raw per-pair rows.
        top_pairwise = [0.0] * 10
        for s in ss:
            if s[2] == '1':
                top_pairwise[0] += 1.0
            if s[3] == '1':
                top_pairwise[1] += 1.0
            if s[4] == '1':
                top_pairwise[2] += 1.0
            if s[5] == '1':
                top_pairwise[3] += 1.0
            if s[6] == '1':
                top_pairwise[4] += 1.0
            if s[7] == '1':
                top_pairwise[5] += 1.0
            if s[8] == '1':
                top_pairwise[6] += 1.0
            if s[9] == '1':
                top_pairwise[7] += 1.0
            if s[10] == '1':
                top_pairwise[8] += 1.0
            if s[11] == '1':
                top_pairwise[9] += 1.0
            sj = ','.join(s)
            sj += '\n'
            ra_out.write(sj)
        ra_out.close()
        # Effect coverage: number of effects with at least one hit per cutoff.
        cov = [0] * 10
        for effect, c in list(self.accuracies.keys()):
            accs = self.accuracies[effect, c]
            for m_i in range(len(metrics)):
                v = accs[metrics[m_i]]
                if v > 0.0:
                    cov[m_i] += 1
        if continuous:
            headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',
                       '10%ile', '20%ile', '33%ile', '50%ile', '100%ile']
        else:
            headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compounds)),
                       'top1%', 'top5%', 'top10%', 'top50%', 'top100%']
        # Create average indication accuracy list in percent
        ia = []
        for m in metrics:
            ia.append(final_accs[m] * 100.0)
        # Create average pairwise accuracy list in percent
        pa = [(x * 100.0 / len(ss)) for x in top_pairwise]
        # Indication coverage
        cov = map(int, cov)
        # Append 3 lists to df and write to file
        with open(summ, 'w') as sf:
            sf.write("\t" + '\t'.join(headers) + '\n')
            ast = "\t".join(map(str, [format(x, ".3f") for x in ia]))
            pst = "\t".join(map(str, [format(x, ".3f") for x in pa]))
            cst = "\t".join(map(str, cov)) + '\n'
            sf.write('aia\t{}\napa\t{}\nic\t{}\n'.format(ast, pst, cst))
        # pretty print the average indication accuracies
        cut = 0
        print("\taia")
        for m in metrics:
            print("{}\t{:.3f}".format(headers[cut], final_accs[m] * 100.0))
            cut += 1
        print('\n')
    def canbenchmark_associated(self, file_name, indications=[], continuous=False, ranking='standard'):
        """!
        Benchmark only the compounds in the indication mapping, aka get rid of "noisy" compounds.
        This function returns the filtered CANDO object in the event that you want to explore further.
        @param file_name str: Name to be used for the various results files (e.g. file_name=test --> summary_test.tsv)
        @param indications list: List of Indication ids to be used for this benchmark, otherwise all will be used.
        @param continuous bool: Use the percentile of distances from the similarity matrix as the benchmarking cutoffs
        @param ranking str: What ranking method to use for the compounds. This really only affects ties.
        (standard, modified, and ordinal)
        @return Returns None
        """
        print("Making CANDO copy with only benchmarking-associated compounds")
        cp = CANDO(self.c_map, self.i_map, self.matrix, compound_set=self.compound_set)
        good_cs = []
        good_ids = []
        # Keep only compounds approved for at least one indication that has two
        # or more approved compounds (the only ones that can score).
        for ind in cp.indications:
            if len(ind.compounds) >= 2:
                for c in ind.compounds:
                    if c.id_ not in good_ids:
                        good_cs.append(c)
                        good_ids.append(c.id_)
        cp.compounds = good_cs
        print('Computing {} distances...'.format(self.dist_metric))
        for c in cp.compounds:
            cp.generate_similar_sigs(c, sort=True)
            # Drop neighbors that were filtered out of the compound set.
            good_sims = []
            for s in c.similar:
                if s[0].id_ not in good_ids:
                    pass
                else:
                    good_sims.append(s)
            c.similar = good_sims
            # Re-sort ascending by distance (NaNs last via the 100000 sentinel).
            sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
            c.similar = sorted_scores
            c.similar_computed = True
            c.similar_sorted = True
        print('Done computing {} distances.\n'.format(self.dist_metric))
        cp.canbenchmark(file_name=file_name, indications=indications, continuous=continuous, ranking=ranking)
def canbenchmark_bottom(self, file_name, indications=[], ranking='standard'):
"""!
Benchmark the reverse ranking of similar compounds as a control.
@param file_name str: Name to be used for the variosu results files (e.g. file_name=test --> summary_test.tsv)
@param indications list: List of Indication ids to be used for this benchmark, otherwise all will be used.
@param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,
modified, and ordinal)
@return Returns None
"""
print("Making CANDO copy with reversed compound ordering")
cp = CANDO(self.c_map, self.i_map, self.matrix)
print('Computing {} distances...'.format(self.dist_metric))
for ic in range(len(cp.compounds)):
cp.generate_similar_sigs(cp.compounds[ic], sort=True)
sorted_scores = sorted(cp.compounds[ic].similar, key=lambda x: x[1])[::-1]
cp.compounds[ic].similar = sorted_scores
cp.compounds[ic].similar_computed = True
cp.similar_sorted = True
print('Done computing {} distances.\n'.format(self.dist_metric))
cp.canbenchmark(file_name=file_name, indications=indications, ranking=ranking, bottom=True)
    def canbenchmark_ndcg(self, file_name):
        """!
        Benchmark using the normalized discounted cumulative gain metric
        @param file_name str: Name to be used for the results files (file_name=test --> summary_ndcg-test.tsv)
        @return Returns None
        """
        # Discounted cumulative gain of the first k entries of relevance list l.
        def dcg(l,k):
            dcg = [((2**x)-1)/(math.log2(i+1)) for i,x in enumerate(l[:k],1)]
            return np.sum(dcg)
        # Cutoffs mirror the standard benchmark: absolute top-N plus fractions
        # of the compound library size (cast to int where used).
        k_s = [10,25,50,100,len(self.compounds),0.01*len(self.compounds),0.05*len(self.compounds),0.10*len(self.compounds),0.50*len(self.compounds),len(self.compounds)]
        i_accs = {}     # per cutoff index: {indication id -> (indication, mean NDCG)}
        c_accs = {}     # per cutoff index: list of (compound id, indication id, NDCG)
        nz_counts = {}  # per cutoff index: count of indications with non-zero NDCG
        for k in range(len(k_s)):
            i_accs[k] = {}
            c_accs[k] = []
            nz_counts[k] = 0
        for ind in self.indications:
            if len(ind.compounds) < 2:
                continue
            approved_ids = [i.id_ for i in ind.compounds]
            acc = {}
            for k in range(len(k_s)):
                acc[k] = []
            for c in ind.compounds:
                # Ensure the similar list is sorted ascending (NaNs last).
                if not c.similar_sorted:
                    sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
                    c.similar = sorted_scores
                    c.similar_sorted = True
                # Ideal ranking: all approved compounds at the very front.
                c_ideal = [0]*len(c.similar)
                for x in range(len(approved_ids)):
                    c_ideal[x] = 1
                # Observed ranking: 1 wherever a neighbor is approved for ind.
                c_rank = []
                for x in c.similar:
                    if x[0].id_ in approved_ids:
                        c_rank.append(1)
                    else:
                        c_rank.append(0)
                # NDCG = DCG(observed) / DCG(ideal) at each cutoff.
                for k in range(len(k_s)):
                    acc[k].append(dcg(c_rank,int(k_s[k]))/dcg(c_ideal,int(k_s[k])))
                    c_accs[k].append((c.id_,ind.id_,dcg(c_rank,int(k_s[k]))/dcg(c_ideal,int(k_s[k]))))
            for k in range(len(k_s)):
                i_accs[k][ind.id_] = (ind,np.mean(acc[k]))
        for k in range(len(k_s)):
            # Non-zero ndcg
            i_accs_nz = [i_accs[k][x][1] for x in i_accs[k] if i_accs[k][x][1] > 0.0]
            nz_counts[k] = len(i_accs_nz)
        # Write NDCG results per indication in results_analysed_named
        if not os.path.exists('./results_analysed_named/'):
            os.system('mkdir results_analysed_named')
        with open("results_analysed_named/results_analysed_named_ndcg-{}.tsv".format(file_name), 'w') as o:
            o.write("disease_id\tcmpds_per_disease\ttop10\ttop25\ttop50\ttop100\ttop{}\ttop1%\ttop5%\ttop10%\ttop50%\ttop100%\tdisease_name\n".format(len(self.compounds)))
            for x in i_accs[0]:
                o.write("{}\t{}".format(i_accs[0][x][0].id_,len(i_accs[0][x][0].compounds)))
                for k in range(len(k_s)):
                    o.write("\t{:.3f}".format(i_accs[k][x][1]))
                o.write("\t{}\n".format(i_accs[0][x][0].name))
        # Write NDCG results per compound-indication pair in raw_results
        if not os.path.exists('./raw_results/'):
            os.system('mkdir raw_results')
        with open("raw_results/raw_results_ndcg-{}.csv".format(file_name), 'w') as o:
            o.write("compound_id,disease_id,top10,top25,top50,top100,top{},top1%,top5%,top10%,top50%,top100%\n".format(len(self.compounds)))
            for x in range(len(c_accs[0])):
                o.write("{},{}".format(c_accs[0][x][0],c_accs[0][x][1]))
                for k in range(len(k_s)):
                    o.write(",{:.3f}".format(c_accs[k][x][2]))
                o.write("\n")
        # Write a summary file for NDCG
        with open("summary_ndcg-{}.tsv".format(file_name), 'w') as o:
            o.write("\ttop10\ttop25\ttop50\ttop100\ttop{}\ttop1%\ttop5%\ttop10%\ttop50%\ttop100%\n".format(len(self.compounds)))
            o.write("ai-ndcg")
            for k in range(len(k_s)):
                o.write("\t{:.3f}".format(np.mean(list(zip(*i_accs[k].values()))[1])))
            o.write("\n")
            o.write("ap-ndcg")
            for k in range(len(k_s)):
                o.write("\t{:.3f}".format(np.mean(list(zip(*c_accs[k]))[2])))
            o.write("\n")
            o.write("ic-ndcg")
            for k in range(len(k_s)):
                o.write("\t{}".format(int(nz_counts[k])))
            o.write("\n")
        #print("NDCG averaged over {} indications = {}".format(len(i_accs),np.mean(list(zip(*i_accs.values()))[1])))
        #print("Pairwise NDCG averaged over {} compound-indication pairs = {}".format(len(c_accs),np.mean(list(zip(*c_accs))[3])))
    def canbenchmark_cluster(self, n_clusters=5):
        """!
        Benchmark using k-means clustering
        @param n_clusters int: Number of clusters for k-means
        @return Returns None
        """
        # Cluster compound signatures (PCA-reduced to 10 components) with k-means
        # on an 80/20 train/test split; returns the fit plus the split pieces.
        def cluster_kmeans(cmpds):
            def f(x):
                return x.sig
            def g(x):
                return x.indications
            def h(x):
                return x.id_
            sigs = np.array(list(map(f, cmpds)))
            pca = PCA(n_components=10).fit(sigs)
            sigs = pca.transform(sigs)
            inds = np.array(list(map(g, cmpds)))
            ids = np.array(list(map(h, cmpds)))
            # random_state fixed so the split and clustering are reproducible.
            sigs_train, sigs_test, inds_train, inds_test, ids_train, ids_test = train_test_split(sigs, inds, ids,
                                                                                                 test_size=0.20,
                                                                                                 random_state=1)
            clusters = KMeans(n_clusters, random_state=1).fit(sigs_train)
            return clusters, sigs_test, inds_train, inds_test, ids_train, ids_test
        # Calculate the K means clusters for all compound signatures
        cs, sigs_test, inds_train, inds_test, ids_train, ids_test = cluster_kmeans(self.compounds)
        labels = cs.labels_
        # Determine how many compounds are in each cluster
        # Plot the results and output the mean, median, and range
        c_clusters = [0] * n_clusters
        for l in labels:
            c_clusters[l] += 1
        '''
        all_clusters = range(n_clusters)
        plt.scatter(all_clusters,c_clusters)
        plt.text(1, 1, "Average cluster size = {}".format(np.mean(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
        plt.text(1, 1, "Median cluster size = {}".format(np.median(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
        plt.text(1, 1, "Range of cluster sizes = {}".format(np.min(c_clusters), np.max(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
        plt.savefig("cluster_size.png")
        '''
        # Map the labels for each compound to the cluster_id for each compound object
        # NOTE(review): indexes self.compounds with compound ids — assumes a
        # compound's id equals its position in the list; confirm with the mapping.
        for ci in range(len(labels)):
            self.compounds[ids_train[ci]].cluster_id = labels[ci]
        total_acc = 0.0
        total_count = 0
        # Calculate the benchmark accuracy by
        # mimicking classic benchmark -- leave one out
        # and recapture at least one for each indication-drug pair
        for i in range(len(sigs_test)):
            # Predict the cluster of the held-out signature, then check whether
            # any training compound sharing an indication landed in that cluster.
            lab = cs.predict(sigs_test[i].reshape(1,-1))
            for ind in inds_test[i]:
                for c in range(len(inds_train)):
                    done = False
                    for ind_train in inds_train[c]:
                        if ind.name == ind_train.name and lab[0] == labels[c]:
                            total_acc+=1.0
                            done = True
                            break
                    if done:
                        break
                total_count += 1
        print("Number of cluster = {}".format(n_clusters))
        print("Mean cluster size = {}".format(np.mean(c_clusters)))
        print("Median cluster size = {}".format(np.median(c_clusters)))
        print("Range of cluster sizes = [{},{}]".format(np.min(c_clusters), np.max(c_clusters)))
        print("% Accuracy = {}".format(total_acc / total_count * 100.0))
def compounds_analysed(self, f, metrics):
fo = open(f, 'w')
cmpds = list(self.accuracies.keys())
cmpds_sorted = sorted(cmpds, key=lambda x: (len(x[0].compounds), x[0].id_))[::-1]
l = len(cmpds)
final_accs = {}
for m in metrics:
final_accs[m] = 0.0
for cmpd, c in cmpds_sorted:
fo.write("{0}\t{1}\t".format(cmpd.id_, c))
accs = self.accuracies[(cmpd,c)]
for m in metrics:
n = accs[m]
y = str(n / c * 100)[0:4]
fo.write("{}\t".format(y))
final_accs[m] += n / c / l
fo.write("|\t{}\n".format(cmpd.name))
fo.close()
return final_accs
def canbenchmark_compounds(self, file_name, adrs=[], continuous=False,
bottom=False, ranking='standard'):
"""!
Benchmarks the platform based on compound similarity of those known to interact with other compounds.
@param file_name str: Name to be used for the various results files (e.g. file_name=test --> summary_test.tsv)
@param adrs list: List of ADR ids to be used for this benchmark, otherwise all will be used.
@param continuous bool: Use the percentile of distances from the similarity matrix as the cutoffs for
benchmarking
@param bottom bool: Reverse the ranking (descending) for the benchmark
@param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,
modified, and ordinal)
@return Returns None
"""
if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):
print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')
exit()
if not self.indication_proteins and not self.indication_pathways:
if not self.compounds[0].similar_sorted:
for c in self.compounds:
sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
c.similar = sorted_scores
c.similar_sorted = True
if not os.path.exists('./results_analysed_named'):
print("Directory 'results_analysed_named' does not exist, creating directory")
os.system('mkdir results_analysed_named')
if not os.path.exists('./raw_results'):
print("Directory 'raw_results' does not exist, creating directory")
os.system('mkdir raw_results')
ra_named = 'results_analysed_named/results_analysed_named_' + file_name + '-cmpds.tsv'
ra = 'raw_results/raw_results_' + file_name + '-cmpds.csv'
summ = 'summary_' + file_name + '-cmpds.tsv'
ra_out = open(ra, 'w')
def effect_type():
if adrs:
return 'ADR'
else:
return 'disease'
def competitive_standard_bottom(sims, r):
rank = 0
for sim in sims:
if sim[1] > r:
rank += 1.0
else:
return rank
return len(sims)
def competitive_modified_bottom(sims, r):
rank = 0
for sim in sims:
if sim[1] >= r:
rank += 1.0
else:
return rank
return len(sims)
# Competitive modified ranking code
def competitive_modified(sims, r):
rank = 0
for sim in sims:
if sim[1] <= r:
rank += 1.0
else:
return rank
return len(sims)
# Competitive standard ranking code
def competitive_standard(sims, r):
rank = 0
for sim in sims:
if sim[1] < r:
rank += 1.0
else:
return rank
return len(sims)
cmpd_dct = {}
ss = []
c_per_cmpd = 0
def cont_metrics():
all_v = []
for c in self.compounds:
for s in c.similar:
if s[1] != 0.0:
all_v.append(s[1])
avl = len(all_v)
all_v_sort = sorted(all_v)
# for tuple 10, have to add the '-1' for index out of range reasons
metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),
(4, all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),
(7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),
(10, all_v_sort[int(avl/1.0)-1])]
return metrics
x = (len(self.compounds)) / 100.0 # changed this...no reason to use similar instead of compounds
# had to change from 100.0 to 100.0001 because the int function
# would chop off an additional value of 1 for some reason...
if continuous:
metrics = cont_metrics()
else:
metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),
(6, int(x*1.0001)), (7, int(x*5.0001)), (8, int(x*10.0001)),
(9, int(x*50.0001)), (10, int(x*100.0001))]
if continuous:
ra_out.write("compound_id,compound_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),"
"1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),"
"50%({:.3f}),100%({:.3f}),value\n".format(metrics[0][1], metrics[1][1],
metrics[2][1], metrics[3][1], metrics[4][1],
metrics[5][1], metrics[6][1], metrics[7][1],
metrics[8][1], metrics[9][1]))
else:
ra_out.write("compound_id,compound_id,top10,top25,top50,top100,"
"topAll,top1%,top5%,top10%,top50%,top100%,rank\n")
for cmpd in self.compounds:
count = len(cmpd.compounds)
if count < 2:
continue
c_per_cmpd += count
cmpd_dct[(cmpd, count)] = {}
for m in metrics:
cmpd_dct[(cmpd, count)][m] = 0.0
# retrieve the appropriate proteins/pathway indices here, should be
# incorporated as part of the ind object during file reading
vs = []
for c in cmpd.compounds:
for cs in c.similar:
if cs[0] in cmpd.compounds:
#if cmpd in cs[0].compounds:
cs_rmsd = cs[1]
else:
continue
value = 0.0
if continuous:
value = cs_rmsd
elif bottom:
if ranking == 'modified':
value = competitive_modified_bottom(c.similar, cs_rmsd)
elif ranking == 'standard':
value = competitive_standard_bottom(c.similar, cs_rmsd)
elif ranking == 'ordinal':
value = c.similar.index(cs)
else:
print("Ranking function {} is incorrect.".format(ranking))
exit()
elif ranking == 'modified':
value = competitive_modified(c.similar, cs_rmsd)
elif ranking == 'standard':
value = competitive_standard(c.similar, cs_rmsd)
elif ranking == 'ordinal':
value = c.similar.index(cs)
else:
print("Ranking function {} is incorrect.".format(ranking))
exit()
s = [str(c.index), str(cmpd.id_)]
for x in metrics:
if value <= x[1]:
cmpd_dct[(cmpd, count)][x] += 1.0
s.append('1')
else:
s.append('0')
if continuous:
s.append(str(value))
else:
s.append(str(value))
ss.append(s)
break
self.accuracies = cmpd_dct
final_accs = self.compounds_analysed(ra_named, metrics)
ss = sorted(ss, key=lambda xx: int(xx[0]))
top_pairwise = [0.0] * 10
for s in ss:
if s[2] == '1':
top_pairwise[0] += 1.0
if s[3] == '1':
top_pairwise[1] += 1.0
if s[4] == '1':
top_pairwise[2] += 1.0
if s[5] == '1':
top_pairwise[3] += 1.0
if s[6] == '1':
top_pairwise[4] += 1.0
if s[7] == '1':
top_pairwise[5] += 1.0
if s[8] == '1':
top_pairwise[6] += 1.0
if s[9] == '1':
top_pairwise[7] += 1.0
if s[10] == '1':
top_pairwise[8] += 1.0
if s[11] == '1':
top_pairwise[9] += 1.0
sj = ','.join(s)
sj += '\n'
ra_out.write(sj)
ra_out.close()
cov = [0] * 10
for cmpd, c in list(self.accuracies.keys()):
accs = self.accuracies[cmpd, c]
for m_i in range(len(metrics)):
v = accs[metrics[m_i]]
if v > 0.0:
cov[m_i] += 1
if continuous:
headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',
'10%ile', '20%ile', '33%ile', '50%ile', '100%ile']
else:
headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compounds)),
'top1%', 'top5%', 'top10%', 'top50%', 'top100%']
# Create average indication accuracy list in percent
ia = []
for m in metrics:
ia.append(final_accs[m] * 100.0)
# Create average pairwise accuracy list in percent
pa = [(x * 100.0 / len(ss)) for x in top_pairwise]
# Indication coverage
cov = map(int, cov)
# Append 3 lists to df and write to file
with open(summ, 'w') as sf:
sf.write("\t" + '\t'.join(headers) + '\n')
ast = "\t".join(map(str, [format(x, ".3f") for x in ia]))
pst = "\t".join(map(str, [format(x, ".3f") for x in pa]))
cst = "\t".join(map(str, cov)) + '\n'
sf.write('aia\t{}\napa\t{}\nic\t{}\n'.format(ast, pst, cst))
# pretty print the average indication accuracies
cut = 0
print("\taia")
for m in metrics:
print("{}\t{:.3f}".format(headers[cut], final_accs[m] * 100.0))
cut += 1
print('\n')
def canbenchmark_ddi(self, file_name, adrs=[], continuous=False,
bottom=False, ranking='standard'):
"""!
Benchmarks the platform based on compound pairs known to cause ADRs
@param file_name str: Name to be used for the results files (file_name=test --> summary_test-ddi_adr.tsv)
@param continuous bool: Use the percentile of distances from the similarity matrix as the cutoffs for
benchmarking
@param bottom bool: Reverse the ranking (descending) for the benchmark
@param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,
modified, and ordinal)
@return Returns None
"""
adrs = True
'''
if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):
print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')
exit()
'''
if not self.indication_proteins and not self.indication_pathways:
if not self.compound_pairs[0].similar_sorted:
#if not self.compound_pairs[0].similar_sorted and not associated:
for cm_p in self.compound_pairs:
cm_p.similar = {k: v for k, v in sorted(cm_p.similar.items(), key=lambda item: item[1])}
#sorted_scores = sorted(cm_p.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
#cm_p.similar = sorted_scores
cm_p.similar_sorted = True
if not os.path.exists('./results_analysed_named'):
print("Directory 'results_analysed_named' does not exist, creating directory")
os.system('mkdir results_analysed_named')
if not os.path.exists('./raw_results'):
print("Directory 'raw_results' does not exist, creating directory")
os.system('mkdir raw_results')
ra_named = 'results_analysed_named/results_analysed_named_' + file_name + '-ddi_adr.tsv'
ra = 'raw_results/raw_results_' + file_name + '-ddi_adr.csv'
summ = 'summary_' + file_name + '-ddi_adr.tsv'
ra_out = open(ra, 'w')
def effect_type():
if adrs:
return 'ADR'
else:
return 'disease'
def competitive_standard_bottom(sims, r):
rank = 0
for sim in sims:
if sims[sim] > r:
rank += 1.0
else:
return rank
return len(sims)
def competitive_modified_bottom(sims, r):
rank = 0
for sim in sims:
if sims[sim] >= r:
rank += 1.0
else:
return rank
return len(sims)
# Competitive modified ranking code
def competitive_modified(sims, r):
rank = 0
for sim in sims:
if sims[sim] <= r:
rank += 1.0
else:
return rank
return len(sims)
# Competitive standard ranking code
def competitive_standard(sims, r):
rank = 0
for sim in sims:
if sims[sim] < r:
rank += 1.0
else:
return rank
return len(sims)
effect_dct = {}
ss = []
c_per_effect = 0
if adrs:
effects = self.adrs
else:
effects = self.indications
def cont_metrics():
all_v = []
for c in self.compound_pairs:
for c_sim in c.similar:
c_dist = c.similar[c_sim]
if c_dist != 0.0:
all_v.append(c_dist)
avl = len(all_v)
all_v_sort = sorted(all_v)
# for tuple 10, have to add the '-1' for index out of range reasons
metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),
(4, all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),
(7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),
(10, all_v_sort[int(avl/1.0)-1])]
return metrics
x = (len(self.compound_pairs)) / 100.0 # changed this...no reason to use similar instead of compounds
# had to change from 100.0 to 100.0001 because the int function
# would chop off an additional value of 1 for some reason...
if continuous:
metrics = cont_metrics()
else:
metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),
(6, int(x*1.0001)), (7, int(x*5.0001)), (8, int(x*10.0001)),
(9, int(x*50.0001)), (10, int(x*100.0001))]
if continuous:
ra_out.write("compound_id,{}_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),"
"1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),"
"50%({:.3f}),100%({:.3f}),value\n".format(effect_type(), metrics[0][1], metrics[1][1],
metrics[2][1], metrics[3][1], metrics[4][1],
metrics[5][1], metrics[6][1], metrics[7][1],
metrics[8][1], metrics[9][1]))
else:
ra_out.write("compound_id,{}_id,top10,top25,top50,top100,"
"topAll,top1%,top5%,top10%,top50%,top100%,rank\n".format(effect_type()))
print("Running canbenchmark...")
for effect in effects:
count = len(effect.compound_pairs)
if count < 2:
continue
if not adrs:
if self.indication_pathways:
if len(effect.pathways) == 0:
print('No associated pathways for {}, skipping'.format(effect.id_))
continue
elif len(effect.pathways) < 1:
#print('Less than 5 associated pathways for {}, skipping'.format(effect.id_))
continue
c_per_effect += count
effect_dct[(effect, count)] = {}
for m in metrics:
effect_dct[(effect, count)][m] = 0.0
# retrieve the appropriate proteins/pathway indices here, should be
# incorporated as part of the ind object during file reading
vs = []
if self.pathways:
if self.indication_pathways:
if self.pathway_quantifier == 'proteins':
for pw in effect.pathways:
for p in pw.proteins:
if p not in vs:
vs.append(p)
else:
self.quantify_pathways(indication=effect)
# Retrieve the appropriate protein indices here, should be
# incorporated as part of the ind object during file reading
if self.indication_proteins:
dg = []
for p in effect.proteins:
if p not in dg:
dg.append(p)
c = effect.compound_pairs
if self.pathways:
if self.indication_pathways:
if self.pathway_quantifier == 'proteins':
if not vs:
print('Warning: protein list empty for {}, using all proteins'.format(effect.id_))
self.generate_some_similar_sigs(c, sort=True, proteins=None, aux=True)
else:
self.generate_some_similar_sigs(c, sort=True, proteins=vs, aux=True)
else:
self.generate_some_similar_sigs(c, sort=True, aux=True)
elif self.indication_proteins:
if len(dg) < 2:
self.generate_some_similar_sigs(c, sort=True, proteins=None)
else:
self.generate_some_similar_sigs(c, sort=True, proteins=dg)
# call c.generate_similar_sigs()
# use the proteins/pathways specified above
for c in effect.compound_pairs:
for c_sim in c.similar:
c_dist = c.similar[c_sim]
if adrs:
if effect not in c_sim.adrs:
continue
else:
if effect not in c_sim.indications:
continue
value = 0.0
if continuous:
value = c_dist
elif bottom:
if ranking == 'modified':
value = competitive_modified_bottom(c.similar, c_dist)
elif ranking == 'standard':
value = competitive_standard_bottom(c.similar, c_dist)
elif ranking == 'ordinal':
#value = c.similar.index(cs)
value = list(c.similar).index(c_sim)
else:
print("Ranking function {} is incorrect.".format(ranking))
exit()
elif ranking == 'modified':
value = competitive_modified(c.similar, c_dist)
elif ranking == 'standard':
value = competitive_standard(c.similar, c_dist)
elif ranking == 'ordinal':
#value = c.similar.index(cs)
value = list(c.similar).index(c_sim)
else:
print("Ranking function {} is incorrect.".format(ranking))
exit()
if adrs:
s = [str(c.index), effect.name]
else:
s = [str(c.index), effect.id_]
for x in metrics:
if value <= x[1]:
effect_dct[(effect, count)][x] += 1.0
s.append('1')
else:
s.append('0')
if continuous:
s.append(str(value))
else:
s.append(str(int(value)))
ss.append(s)
break
self.accuracies = effect_dct
final_accs = self.results_analysed(ra_named, metrics, effect_type())
ss = sorted(ss, key=lambda xx: xx[0])
#ss = sorted(ss, key=lambda xx: int(xx[0]))
top_pairwise = [0.0] * 10
for s in ss:
if s[2] == '1':
top_pairwise[0] += 1.0
if s[3] == '1':
top_pairwise[1] += 1.0
if s[4] == '1':
top_pairwise[2] += 1.0
if s[5] == '1':
top_pairwise[3] += 1.0
if s[6] == '1':
top_pairwise[4] += 1.0
if s[7] == '1':
top_pairwise[5] += 1.0
if s[8] == '1':
top_pairwise[6] += 1.0
if s[9] == '1':
top_pairwise[7] += 1.0
if s[10] == '1':
top_pairwise[8] += 1.0
if s[11] == '1':
top_pairwise[9] += 1.0
sj = ','.join(s)
sj += '\n'
ra_out.write(sj)
ra_out.close()
cov = [0] * 10
for effect, c in list(self.accuracies.keys()):
accs = self.accuracies[effect, c]
for m_i in range(len(metrics)):
v = accs[metrics[m_i]]
if v > 0.0:
cov[m_i] += 1
if continuous:
headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',
'10%ile', '20%ile', '33%ile', '50%ile', '100%ile']
else:
headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compound_pairs)),
'top1%', 'top5%', 'top10%', 'top50%', 'top100%']
# Create average indication accuracy list in percent
ia = []
for m in metrics:
ia.append(final_accs[m] * 100.0)
# Create average pairwise accuracy list in percent
pa = [(x * 100.0 / len(ss)) for x in top_pairwise]
# Indication coverage
cov = map(int, cov)
# Append 3 lists to df and write to file
with open(summ, 'w') as sf:
sf.write("\t" + '\t'.join(headers) + '\n')
ast = "\t".join(map(str, [format(x, ".3f") for x in ia]))
pst = "\t".join(map(str, [format(x, ".3f") for x in pa]))
cst = "\t".join(map(str, cov)) + '\n'
sf.write('aia\t{}\napa\t{}\nic\t{}\n'.format(ast, pst, cst))
# pretty print the average indication accuracies
cut = 0
print("\taia")
for m in metrics:
print("{}\t{:.3f}".format(headers[cut], final_accs[m] * 100.0))
cut += 1
print('\n')
def ml(self, method='rf', effect=None, benchmark=False, adrs=False, predict=None, threshold=0.5,
       negative='random', seed=42, out=''):
    """!
    Create an ML classifier for a specified indication to make drug-disease predictions or all inds for benchmarking
    @param method str: type of machine learning algorithm to use ('rf' or 'log')
    @param effect Indication or ADR: provide a specific Indication or ADR object to train a classifer
    @param benchmark bool: benchmark the ML pipeline by training a classifier with LOOCV for each Indication or ADR
    @param adrs bool: if the models are trained with ADRs instead of Indications
    @param predict list: provide a list of Compound objects to classify with the model (only used in
    combination with effect=Indication/ADR object)
    @param threshold float: decision threshold for positive vs negative classification
    @param negative str: choose random negative samples (default) or 'inverse' for most opposite signatures
    @param seed int: choose a seed for reproducibility
    @param out str: file name extension for the output of benchmark (note: must have benchmark=True)
    @return Returns None
    """
    # 'predict' previously defaulted to a shared mutable list ([]); a None
    # sentinel behaves identically in every truthiness test below and can
    # never be accidentally shared across calls.
    if predict is None:
        predict = []
    if method in ['1csvm', 'svm']:
        print('SVMs are currently unsupported by this version of cando.py. Please choose "log" or "rf" - quitting.')
        quit()
    if out:
        if not os.path.exists('./raw_results/'):
            os.system('mkdir raw_results')
        if not os.path.exists('./results_analysed_named/'):
            os.system('mkdir results_analysed_named')
    # maps each positive compound to the negative compound chosen for it
    paired_negs = {}

    # gather approved compound signatures for training
    def split_cs(efct, cmpd=None):
        # Build the positive training matrix from the compounds associated
        # with 'efct', optionally holding out 'cmpd' for LOOCV. Uses the
        # effect-protein sub-signature when indication_proteins is set and
        # at least 3 proteins are associated, otherwise the full signature.
        mtrx = []
        for cm in efct.compounds:
            if cmpd:
                if cm.id_ == cmpd.id_:
                    continue
            if self.indication_proteins:
                if len(efct.proteins) >= 3:
                    eps = []
                    for ep in efct.proteins:
                        ep_index = self.protein_id_to_index[ep.id_]
                        eps.append(cm.sig[ep_index])
                    mtrx.append(eps)
            else:
                mtrx.append(cm.sig)
        return mtrx, [1] * len(mtrx)

    def choose_negatives(efct, neg_set=negative, s=None, hold_out=None, avoid=None, test=None):
        # Pick one 'negative' (non-associated) compound per positive compound,
        # either at random or from the far end of the similarity ranking
        # ('inverse'). Returns (signatures, labels, used_ids).
        # BUG FIX: 'avoid' previously defaulted to a mutable [] which this
        # function mutates (via used.append below) -- the shared default list
        # leaked exclusions across calls and across effects. The None
        # sentinel restores independent calls.
        if avoid is None:
            avoid = []
        if neg_set == 'inverse':
            if not self.compute_distance and not self.read_dists:
                print('Please compute all compound-compound distances before using inverse_negatives().\n'
                      'Re-run with "compute_distance=True" or read in pre-computed distance file "read_dists="'
                      'in the CANDO object instantiation -- quitting.')
                quit()
        negatives = []
        used = avoid

        def pick_first_last(cmpd, s):
            # 'inverse' -> walk the least-similar half of cmpd.similar
            # (most dissimilar first); otherwise shuffle all compound ids
            # (seeded for reproducibility) and take the first usable one.
            if neg_set == 'inverse':
                r = int(len(self.compounds) / 2)
                shuffled = [cx[0].id_ for cx in cmpd.similar][::-1][0:r]
            else:
                shuffled = [cx.id_ for cx in self.compounds]
                if s:
                    random.seed(s)
                    random.shuffle(shuffled)
                else:
                    s = random.randint(0, len(self.compounds) - 1)
                    random.seed(s)
                    random.shuffle(shuffled)
            for si in range(len(shuffled)):
                n = shuffled[si]
                if n in used:
                    continue
                inv = self.get_compound(n)
                if inv not in efct.compounds:
                    if n not in used:
                        paired_negs[cmpd] = inv
                        return inv

        if test:
            # Previously called pick_first_last(c, s), silently relying on the
            # enclosing loop variable 'c' being the same object as 'test' at
            # every call site; 'test' is the explicit (and equivalent) intent.
            inv = pick_first_last(test, s)
            return inv
        for ce in efct.compounds:
            if hold_out:
                if ce.id_ == hold_out.id_:
                    continue
            inv = pick_first_last(ce, s)
            if self.indication_proteins:
                if len(efct.proteins) >= 3:
                    eps = []
                    for ep in efct.proteins:
                        ep_index = self.protein_id_to_index[ep.id_]
                        eps.append(inv.sig[ep_index])
                    negatives.append(eps)
            else:
                negatives.append(inv.sig)
            used.append(inv.id_)
        return negatives, [0] * len(negatives), used

    def model(meth, samples, labels, params=None, seed=None):
        # Train and return the requested classifier ('params' is currently
        # unused but kept for interface stability).
        if meth == 'rf':
            m = RandomForestClassifier(n_estimators=100, random_state=seed)
            m.fit(samples, labels)
            return m
        elif meth == 'svm':
            m = svm.SVC(kernel='rbf', gamma='scale', degree=3, random_state=seed)
            m.fit(samples, labels)
            return m
        elif meth == '1csvm':
            # one-class SVM trains on positives only
            keep = []
            for i in range(len(samples)):
                if labels[i] == 1:
                    keep.append(samples[i])
            m = svm.OneClassSVM(kernel='poly', gamma='scale', degree=2)
            m.fit(keep)
            return m
        elif meth == 'log':
            m = LogisticRegression(penalty='l2', solver='newton-cg', random_state=seed)
            m.fit(samples, labels)
            return m
        else:
            print("Please enter valid machine learning method ('rf', '1csvm', 'log', or 'svm')")
            quit()

    if benchmark:
        # benchmark all effects, largest compound sets first
        if adrs:
            effects = sorted(self.adrs, key=lambda x: (len(x.compounds), x.id_))[::-1]
        else:
            effects = sorted(self.indications, key=lambda x: (len(x.compounds), x.id_))[::-1]
        if out:
            frr = open('./raw_results/raw_results_ml_{}'.format(out), 'w')
            frr.write('Compound,Effect,Prob,Neg,Neg_prob\n')
            fran = open('./results_analysed_named/results_analysed_named_ml_{}'.format(out), 'w')
            fsum = open('summary_ml-{}'.format(out), 'w')
    else:
        if len(effect.compounds) < 1:
            print('No compounds associated with {} ({}), quitting.'.format(effect.name, effect.id_))
            quit()
        elif self.indication_proteins and len(effect.proteins) <= 2:
            print('Less than 3 proteins associated with {} ({}), quitting.'.format(effect.name, effect.id_))
            # BUG FIX: the message says 'quitting' but the original fell
            # through and silently did nothing; actually exit here.
            quit()
        effects = [effect]
    rf_scores = []
    for e in effects:
        if len(e.compounds) < 2:
            continue
        if self.indication_proteins:
            if not len(e.proteins) >= 3:
                continue
        # leave-one-out cross validation counts: [true pos, false neg] and
        # [false pos, true neg]
        tp_fn = [0, 0]
        fp_tn = [0, 0]
        for c in e.compounds:
            pos = split_cs(e, cmpd=c)
            negs = choose_negatives(e, s=seed, hold_out=c, avoid=[])
            already_used = negs[2]
            train_samples = np.array(pos[0] + negs[0])
            train_labels = np.array(pos[1] + negs[1])
            mdl = model(method, train_samples, train_labels, seed=seed)
            # held-out negative paired with the held-out positive 'c'
            test_neg = choose_negatives(e, s=seed, avoid=already_used, test=c)
            if self.indication_proteins:
                eps_pos = []
                eps_neg = []
                for ep in e.proteins:
                    ep_index = self.protein_id_to_index[ep.id_]
                    eps_pos.append(c.sig[ep_index])
                    eps_neg.append(test_neg.sig[ep_index])
                pred = mdl.predict_proba(np.array([eps_pos]))
                pred_neg = mdl.predict_proba(np.array([eps_neg]))
            else:
                pred = mdl.predict_proba(np.array([c.sig]))
                pred_neg = mdl.predict_proba(np.array([test_neg.sig]))
            pos_class = list(mdl.classes_).index(1)
            if pred[0][pos_class] > threshold:
                tp_fn[0] += 1
            else:
                tp_fn[1] += 1
            if pred_neg[0][pos_class] > threshold:
                fp_tn[0] += 1
            else:
                fp_tn[1] += 1
            if benchmark and out:
                frr.write('{},{},{},{},{}\n'.format(c.id_, e.id_, pred[0][pos_class],
                                                    test_neg.id_, pred_neg[0][pos_class]))
        # predict whether query drugs are associated with this indication
        if predict:
            print('Indication: {}'.format(e.name))
            print('Leave-one-out cross validation: TP={}, FP={}, FN={}, TN={}, Acc={:0.3f}'.format(
                tp_fn[0], fp_tn[0], tp_fn[1], fp_tn[1], 100 * ((tp_fn[0]+fp_tn[1]) / (float(len(e.compounds))*2))))
            negs = choose_negatives(e, s=seed)
            pos = split_cs(e)
            train_samples = np.array(pos[0] + negs[0])
            train_labels = np.array(pos[1] + negs[1])
            mdl = model(method, train_samples, train_labels, seed=seed)
            print('\tCompound\tProb')
            for c in predict:
                inv = choose_negatives(effect, s=seed, test=c, avoid=negs[2])
                if self.indication_proteins:
                    eps_pos = []
                    eps_neg = []
                    for ep in e.proteins:
                        ep_index = self.protein_id_to_index[ep.id_]
                        eps_pos.append(c.sig[ep_index])
                        # NOTE(review): 'test_neg' here is left over from the
                        # LOOCV loop above -- 'inv' looks intended; preserved
                        # as-is pending confirmation.
                        eps_neg.append(test_neg.sig[ep_index])
                    pred = mdl.predict_proba(np.array([eps_pos]))
                    pred_neg = mdl.predict_proba(np.array([test_neg.sig]))
                else:
                    pred = mdl.predict_proba(np.array([c.sig]))
                    pred_inv = mdl.predict_proba(np.array([inv.sig]))
                pos_class = list(mdl.classes_).index(1)
                print('\t{}\t{:0.3f}'.format(c.name, pred[0][pos_class]))
                #print('\t{}\t{:0.3f}\t(random negative of {})'.format(inv.name, pred_inv[0][pos_class], c.name))
        # append loocv results to combined list
        rf_scores.append((e, tp_fn, fp_tn))
    # sm = [total compounds, summed accuracy, weighted accuracy, n effects > 0.5]
    sm = [0, 0, 0, 0]
    if benchmark:
        for rf_score in rf_scores:
            efct = rf_score[0]
            tfp = rf_score[1]
            ffp = rf_score[2]
            acc = (tfp[0] + ffp[1]) / (float(len(efct.compounds) * 2))
            sm[0] += len(efct.compounds)
            sm[1] += acc
            sm[2] += (acc * len(efct.compounds))
            if acc > 0.5:
                sm[3] += 1
            if out:
                fran.write('{}\t{}\t{}\t{}\t{:0.3f}\t{}\n'.format(efct.id_, len(efct.compounds),
                                                                  tfp[0], tfp[1], 100 * acc, efct.name))
        if out:
            fsum.write('aia\t{:0.3f}\n'.format(100 * (sm[1]/len(rf_scores))))
            fsum.write('apa\t{:0.3f}\n'.format(100 * (sm[2] / sm[0])))
            fsum.write('ic\t{}\n'.format(sm[3]))
        print('aia\t{:0.3f}'.format(100 * (sm[1]/len(rf_scores))))
        print('apa\t{:0.3f}'.format(100 * (sm[2] / sm[0])))
        print('ic\t{}'.format(sm[3]))
    return
def raw_results_roc(self, rr_files, labels, save='roc-raw_results.pdf'):
    """!
    Plot ROC curves (with AUC-ROC and AUPR in the legend) for one or more
    raw results files produced by ml().
    @param rr_files list: paths to raw results csv files (header line skipped)
    @param labels list: one legend label per input file
    @param save str: figure output path ('' to skip saving)
    @return Returns None
    """
    if len(labels) != len(rr_files):
        print('Please enter a label for each input raw results file '
              '({} files, {} labels).'.format(len(rr_files), len(labels)))
        quit()
    metrics = {}
    for rr_file in rr_files:
        # BUG FIX: truth/scores were previously initialized once before this
        # loop, so the metrics for each file accumulated all earlier files'
        # values; reset per file so each curve reflects only its own file.
        truth = []
        scores = []
        for l in open(rr_file, 'r').readlines()[1:]:
            ls = l.strip().split(',')
            # column 2 = probability for the positive, column 4 = probability
            # for its paired negative
            pp = float(ls[2])
            truth.append(1)
            scores.append(pp)
            # renamed from 'np' to avoid shadowing the numpy alias
            neg_p = float(ls[4])
            truth.append(0)
            scores.append(neg_p)
        pr = average_precision_score(truth, scores)
        fpr, tpr, thrs = roc_curve(truth, scores)
        area = roc_auc_score(truth, scores)
        metrics[rr_file] = [fpr, tpr, thrs, area, pr]
    plt.figure()
    lw = 2
    for rr_file in rr_files:
        i = rr_files.index(rr_file)
        [fpr, tpr, thrs, area, pr] = metrics[rr_file]
        plt.plot(fpr, tpr, lw=lw, label='{} (AUC-ROC={}, AUPR={})'.format(labels[i], format(area, '.3f'),
                                                                          format(pr, '.3f')))
    # chance diagonal
    plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right", prop={'size': 8})
    if save:
        plt.savefig(save, dpi=300)
    plt.show()
def canpredict_denovo(self, method='count', threshold=0.0, topX=10, ind_id=None, proteins=None,
                      minimize=None, consensus=True, cmpd_set='all', save=''):
    """!
    This function is used for predicting putative therapeutics for an indication
    of interest by summing/counting the number of interactions above a certain input interaction
    threshold for all proteins or a specified subset of proteins. An indication can be specified to
    mark drugs associated with that indication in the output. The threshold will vary based on the
    values of the input matrix. Method can be 'count' (score1), which ranks compounds based on the
    number of interactions above the threshold, 'sum' (score2), which ranks the compounds based on the
    highest total sum for interaction scores above the threshold (these two are highly correlated but can
    differ for larger sets of proteins or lower thresholds), 'min', which first ranks by 'count' then re-ranks
    based on the summed interactions with the proteins in the input 'minimize' list - this list should contain
    proteins IDs towards which the user wants low interaction scores - or 'diff', which ranks by the difference of
    sums and the summed scores from off-targets in 'minimize'. A fifth option is 'targets', which inspects
    and outputs the top protein interactions on an individual basis without summing/counting per drug (the
    output format differs from the other two options). If indication_proteins flag is used for
    the CANDO object instantiation, the proteins associated with the input indication will automatically
    be used. Otherwise, the 'proteins=' input can be used. The output can be saved to a file specified
    by 'save='. If ind_id is used, compounds associated with the indication will be included and marked
    in the output for comparison.
    @param method str: 'sum', 'count', or 'targets'
    @param threshold float: a interaction score cutoff to use (ignores values for sum/count less than threshold)
    @param topX int: top number of predicted Compounds to be printed/saved
    @param ind_id str: an indication id for marking drug output/ specifying protein set
    @param proteins List str: list of protein IDs to use from the matrix
    @param minimize List str: list of protein IDs to treat as 'off targets' to avoid, ranking
    @param consensus bool: if True, only compounds with score1 >= 2 will be printed
    @param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')
    @param save str: name of a file to save results
    @return Returns None
    """
    if ind_id:
        ind = self.get_indication(ind_id)
    c_dct = {}
    # (protein_id, compound, score, on_target?) tuples for the 'targets' method
    top_hits = []
    # Resolve the protein indices to scan: indication-associated proteins,
    # an explicit list (ids, indices, or Protein objects), or all proteins.
    if self.indication_proteins and ind_id:
        indices = []
        for p in ind.proteins:
            indices.append(self.protein_id_to_index[p.id_])
    elif proteins:
        indices = []
        for p in proteins:
            if type(p) is str:
                indices.append(self.protein_id_to_index[p])
            elif type(p) is int:
                indices.append(p)
            elif type(p) is Protein:
                indices.append(self.protein_id_to_index[p.id_])
    else:
        indices = range(len(self.proteins))
    if minimize is None:
        minimize = []
    for c in self.compounds:
        ss = 0.0          # summed on-target scores above threshold (score2)
        count = 0         # number of on-target scores above threshold (score1)
        min_ss = 0.0      # summed off-target ('minimize') scores
        min_count = 0     # number of off-target scores above threshold
        for pi in indices:
            si = float(c.sig[pi])
            p = self.proteins[pi]
            if si >= threshold:
                if p.id_ in minimize:
                    min_ss += si
                    min_count += 1
                    top_hits.append((p.id_, c, si, False))
                else:
                    ss += si
                    count += 1
                    top_hits.append((p.id_, c, si, True))
        if ind_id:
            already_approved = ind in c.indications
        else:
            already_approved = False  # Not relevant since there is no indication
        c_dct[c.id_] = [ss, count, already_approved, min_ss, min_count]
    if method == 'sum':
        sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], x[1][1]))[::-1]
    elif method == 'count':
        sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][0]))[::-1]
    elif method == 'min':
        sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][3]*-1))[::-1]
    elif method == 'diff':
        sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0] - x[1][3]))[::-1]
    elif method == 'targets':
        # per-interaction output, highest scores first
        sp = sorted(top_hits, key=lambda x: x[2])[::-1]
        print('target \tscore\toff_target\tid\tapproved\tname')
        if save:
            fo = open(save, 'w')
            fo.write('target \tscore\toff_target\tid\tapproved\tname\n')
        for s in sp:
            co = s[1]
            if cmpd_set == 'approved':
                # NOTE(review): 'ind' is only defined when ind_id is given --
                # cmpd_set='approved' without ind_id would raise NameError here.
                if co.status == 'approved' or (co in ind.compounds):
                    pass
                else:
                    continue
            st = '{}\t{}\t{}\t{}\t{}\t{}'.format(s[0].ljust(8), round(s[2], 3), co.id_,
                                                 str(s[3]).lower().ljust(10),
                                                 (str(co.status == 'approved').lower()).ljust(8), co.name)
            print(st)
            # BUG FIX: this write was unconditional, raising NameError on 'fo'
            # whenever save was not given.
            if save:
                fo.write(st + '\n')
        if save:
            fo.close()
        return
    else:
        sorted_x = []
        print('Please enter a valid ranking method -- quitting.')
        quit()
    if save:
        fo = open(save, 'w')
        fo.write('rank\tscore1\tscore2\toffhits\tdiff\tid\tapproved\tname\n')
    print("Printing the {} highest predicted compounds...\n".format(topX))
    i = 0
    print('rank\tscore1\tscore2\toffhits\tdiff\tid\tapproved\tname')
    for p in enumerate(sorted_x):
        # stop after topX printed results (topX == -1 means print all)
        if i >= topX != -1:
            break
        else:
            if consensus and p[1][1][1] <= 1:
                if i == 0:
                    print('\n\tFAILED - there are no compounds with score1 >= 2 -- change the\n'
                          '\targuments to include "consensus=False" to print results with\n'
                          '\tscore1 == 1, or lower the threshold.\n')
                break
            co = self.get_compound(p[1][0])
            if cmpd_set == 'approved':
                if co.status != 'approved':
                    if ind_id:
                        if co in ind.compounds:
                            pass
                        else:
                            continue
                    else:
                        continue
            # '+' marks compounds already associated with ind_id; the two
            # previously-duplicated format branches differed only by this mark
            mark = '+' if p[1][1][2] else ''
            diff = str(round(p[1][1][0] - p[1][1][3], 3))[0:7].ljust(7)
            st = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(i + 1, p[1][1][1], str(round(p[1][1][0], 3))[0:7],
                                                         str(round(p[1][1][3], 3))[0:7].ljust(7), diff, co.id_,
                                                         (str(co.status == 'approved').lower() + mark).ljust(8),
                                                         co.name)
            print(st)
            i += 1
            if save:
                fo.write(st + '\n')
    if save:
        fo.close()
    return
def canpredict_compounds(self, ind_id, n=10, topX=10, consensus=True, keep_associated=False, cmpd_set='all',
                         save=''):
    """!
    This function is used for predicting putative therapeutics for an indication
    of interest using a homology-based approach. Input an ind_id id and for each of the
    associated compounds, it will generate the similar compounds (based on distance) and add
    them to a dictionary with a value of how many times it shows up (enrichment). If a
    compound not approved for the indication of interest keeps showing
    up, that means it is similar in signature to the drugs that are
    ALREADY approved for the indication, so it may be a target for repurposing.
    Control how many similar compounds to consider with the argument 'n'. In the output, 'score1'
    refers to the number of times the compound shows up in the top 'n' drugs associated with
    the indication and 'score2' is the average of the ranks for 'score1' (note: 'score2' <= 'n').
    @param ind_id str: Indication id
    @param n int: top number of similar Compounds to be used for each Compound associated with the given Indication
    @param topX int: top number of predicted Compounds to be printed
    @param consensus bool: if True, only compounds with at least 2 votes will be printed
    @param keep_associated bool: Print Compounds that are already approved/associated for the Indication
    @param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')
    @param save str: name of a file to save results
    @return Returns None
    """
    if int(topX) == -1:
        topX = len(self.compounds)-1
    if int(n) == -1:
        n = len(self.compounds)-1
    i = self.indication_ids.index(ind_id)
    ind = self.indications[i]
    print("{0} compounds found for {1} --> {2}".format(len(ind.compounds), ind.id_, ind.name))
    # make sure every associated compound has its similarity ranking computed
    if self.pathways:
        if self.indication_pathways:
            self.quantify_pathways(ind)
        else:
            self.quantify_pathways()
    for c in ind.compounds:
        if c.similar_computed:
            continue
        if self.pathways:
            self.generate_similar_sigs(c, aux=True, sort=True)
        elif self.indication_proteins:
            self.generate_similar_sigs(c, sort=True, proteins=ind.proteins)
        else:
            self.generate_similar_sigs(c, sort=True)
    print("Generating compound predictions using top{} most similar compounds...\n".format(n))
    # c_dct: compound id -> [votes (score1), already_approved, summed ranks]
    c_dct = {}
    for c in ind.compounds:
        c2_i = 0
        c_count = 0
        while c_count < n:
            # ROBUSTNESS: stop when the ranking is exhausted instead of
            # raising IndexError (possible when skips push c2_i past the end)
            if c2_i >= len(c.similar):
                break
            c2 = c.similar[c2_i]
            if c2[0].status != 'approved' and cmpd_set == 'approved':
                c2_i += 1
                continue
            # skip exact-duplicate signatures (distance 0.0)
            if c2[1] == 0.0:
                c2_i += 1
                continue
            already_approved = ind in c2[0].indications
            k = c2[0].id_
            if k not in c_dct:
                c_dct[k] = [1, already_approved, c_count]
            else:
                c_dct[k][0] += 1
                c_dct[k][2] += c_count
            c2_i += 1
            c_count += 1
    # rank by votes, tie-broken by lower average rank
    sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], (-1 * (x[1][2] / x[1][0]))))[::-1]
    i = 0
    if save:
        fo = open(save, 'w')
        fo.write('rank\tscore1\tscore2\tprobability\tid\tapproved\tname\n')
    else:
        print('rank\tscore1\tscore2\tprobability\tid\tapproved\tname')
    # cache of binomial tail probabilities keyed by vote count
    hg_dct = {}
    for p in enumerate(sorted_x):
        if i >= topX != -1:
            break
        co = self.get_compound(p[1][0])
        if cmpd_set == 'approved':
            if co.status != 'approved':
                continue
        if not keep_associated and p[1][1][1]:
            continue
        if consensus and p[1][1][0] <= 1:
            if i == 0:
                print('\n\tFAILED - there are no compounds with score1 >= 2 -- change the\n'
                      '\targuments to include "consensus=False" to print results with\n'
                      '\tscore1 == 1, and/or increase "n". \n')
            break
        if p[1][1][0] in hg_dct:
            prb = hg_dct[p[1][1][0]]
        else:
            # P(>= votes) under a binomial null of random neighbor draws
            prb_success = 1 / (len(self.compounds) - 1) * n
            prb = '%.2e' % Decimal(1.0 - stats.binom.cdf(p[1][1][0], len(ind.compounds), prb_success))
            hg_dct[p[1][1][0]] = prb
        # '*' marks compounds already associated with the indication; the two
        # previously-duplicated format branches differed only by this mark
        mark = '*' if p[1][1][1] else ''
        st = "{}\t{}\t{}\t{}\t{}\t{}\t{}".format(i + 1, p[1][1][0], round(p[1][1][2] / p[1][1][0], 1),
                                                 prb.ljust(11), co.id_,
                                                 (str(co.status == 'approved').lower() + mark).ljust(8), co.name)
        if save:
            fo.write(st + '\n')
        else:
            print(st)
        i += 1
    # BUG FIX: the output file was previously never closed
    if save:
        fo.close()
    print('\n')
def canpredict_indications(self, cmpd, n=10, topX=10, consensus=True, sorting='prob', save=''):
    """!
    This function is the inverse of canpredict_compounds. Input a compound
    of interest cando_cmpd (or a novel protein signature of interest new_sig)
    and the most similar compounds to it will be computed. The indications
    associated with the top n most similar compounds to the query compound will
    be examined to see if any are repeatedly enriched.
    @param cmpd Compound: Compound object to be used
    @param n int: top number of similar Compounds to be used for prediction
    @param topX int: top number of predicted Indications to be printed
    @param consensus bool: if True, only indications with at least 2 votes will be printed
    @param sorting str: whether to sort the indications by probability ('prob') or score ('score')
    @param save str: path to file to save the output
    @return Returns None
    """
    if n == -1:
        n = len(self.compounds) - 1
    if topX == -1:
        topX = len(self.indications)
    # accept either a Compound object or an integer compound id
    if type(cmpd) is int:
        cmpd = self.get_compound(cmpd)
    print("Using CANDO compound {}".format(cmpd.name))
    print("Compound has id {} and index {}".format(cmpd.id_, cmpd.index))
    print("Comparing signature to all CANDO compound signatures...")
    self.generate_similar_sigs(cmpd, sort=True)
    print("Generating indication predictions using top{} most similar compounds...".format(n))
    # tally: indication id -> [votes, number of compounds approved for it]
    vote_counts = {}
    for neighbor in cmpd.similar[0:n]:
        for ind in neighbor[0].indications:
            if ind.id_ in vote_counts:
                vote_counts[ind.id_][0] += 1
            else:
                vote_counts[ind.id_] = [1, len(ind.compounds)]
    # convert each tally to (votes, chance probability) using the
    # hypergeometric survival function
    scored = {}
    for ind_key in vote_counts:
        votes, n_assoc = vote_counts[ind_key]
        if consensus and votes == 1:
            continue
        chance = 1.0 - stats.hypergeom.cdf(votes, len(self.compounds) - 1, n_assoc, n)
        scored[ind_key] = (votes, chance)
    if consensus and not scored:
        print('\n\tFAILED - there are no compounds with score1 >= 2 -- change the\n'
              '\targuments to include "consensus=False" to print results with\n'
              '\tscore1 == 1, and/or increase "n".\n')
        quit()
    if sorting == 'score':
        ranked = sorted(scored.items(), key=lambda kv: kv[1][0], reverse=True)
    elif sorting == 'prob':
        ranked = sorted(scored.items(), key=lambda kv: kv[1][1], reverse=False)
    else:
        print('Please enter proper sorting method: "prob" or "score" -- quitting.')
        quit()
    if save:
        fo = open(save, 'w')
        print("Saving the {} highest predicted indications...\n".format(topX))
        fo.write("rank\tprobability\tscore\tind_id\tindication\n")
    else:
        print("Printing the {} highest predicted indications...\n".format(topX))
        print("rank\tprobability\tscore\tind_id \tindication")
    for rank in range(min(topX, len(ranked))):
        indd = self.get_indication(ranked[rank][0])
        prb = '%.2e' % Decimal(ranked[rank][1][1])
        if save:
            fo.write("{}\t{}\t{}\t{}\t{}\n".format(rank+1, prb, ranked[rank][1][0], indd.id_, indd.name))
        else:
            print("{}\t{}\t{}\t{}\t{}".format(rank+1, prb.ljust(11), ranked[rank][1][0], indd.id_, indd.name))
    if save:
        fo.close()
    print('')
def canpredict_adr(self, cmpd, n=10, topX=10, consensus=True, sorting='prob', save=''):
    """!
    This function is the inverse of canpredict_compounds. Input a compound
    of interest cando_cmpd (or a novel protein signature of interest new_sig)
    and the most similar compounds to it will be computed. The ADRs
    associated with the top n most similar compounds to the query compound will
    be examined to see if any are repeatedly enriched.
    @param cmpd Compound: Compound object to be used
    @param n int: top number of similar Compounds to be used for prediction
    @param topX int: top number of predicted Indications to be printed
    @param consensus bool: if True, only ADRs with at least 2 votes will be printed
    @param sorting str: whether to sort the ADRs by probability ('prob') or score ('score')
    @param save str: path to file to save output
    @return Returns None
    """
    if n == -1:
        n = len(self.compounds) - 1
    if topX == -1:
        topX = len(self.adrs)
    # accept either a Compound object or an integer compound id
    if type(cmpd) is int:
        cmpd = self.get_compound(cmpd)
    print("Using CANDO compound {}".format(cmpd.name))
    print("Compound has id {} and index {}".format(cmpd.id_, cmpd.index))
    print("Comparing signature to all CANDO compound signatures...")
    self.generate_similar_sigs(cmpd, sort=True)
    print("Generating ADR predictions using top{} most similar compounds...".format(n))
    # tally: ADR id -> [votes, number of compounds associated with it]
    vote_counts = {}
    for neighbor in cmpd.similar[0:n]:
        for adr in neighbor[0].adrs:
            if adr.id_ in vote_counts:
                vote_counts[adr.id_][0] += 1
            else:
                vote_counts[adr.id_] = [1, len(adr.compounds)]
    # convert each tally to (votes, chance probability) using the
    # hypergeometric survival function
    scored = {}
    for adr_key in vote_counts:
        votes, n_assoc = vote_counts[adr_key]
        if consensus and votes == 1:
            continue
        chance = 1.0 - stats.hypergeom.cdf(votes, len(self.compounds) - 1, n_assoc, n)
        scored[adr_key] = (votes, chance)
    if consensus and not scored:
        print('\n\tFAILED - there are no compounds with score1 >= 2 -- change the\n'
              '\targuments to include "consensus=False" to print results with\n'
              '\tscore1 == 1, and/or increase "n".\n')
        quit()
    if sorting == 'score':
        ranked = sorted(scored.items(), key=lambda kv: kv[1][0], reverse=True)
    elif sorting == 'prob':
        ranked = sorted(scored.items(), key=lambda kv: kv[1][1], reverse=False)
    else:
        print('Please enter proper sorting method: "prob" or "score" -- quitting.')
        quit()
    if save:
        fo = open(save, 'w')
        print("Saving the {} highest predicted ADRs...\n".format(topX))
        fo.write("rank\tprobability\tscore\tadr_id\tadr\n")
    else:
        print("Printing the {} highest predicted ADRs...\n".format(topX))
        print("rank\tprobability\tscore\tadr_id \tadr")
    for rank in range(min(topX, len(ranked))):
        adrr = self.get_adr(ranked[rank][0])
        prb = '%.2e' % Decimal(ranked[rank][1][1])
        if save:
            fo.write("{}\t{}\t{}\t{}\t{}\n".format(rank+1, prb, ranked[rank][1][0], adrr.id_, adrr.name))
        else:
            print("{}\t{}\t{}\t{}\t{}".format(rank+1, prb.ljust(11), ranked[rank][1][0], adrr.id_, adrr.name))
    if save:
        fo.close()
    print('')
def canpredict_ddi_cmpds(self, cmpd, n=10, topX=10, save=''):
    """!
    Input a compound of interest cando_cmpd and the most similar compounds to it will be computed
    and outputted as potential drug-drug-interactions.
    @param cmpd Compound: Compound object to be used
    @param n int: top number of similar Compounds to be used for prediction
    @param topX int: top number of predicted Drug-drug Interactions to be printed
    @return Returns None
    """
    if n == -1:
        n = len(self.compounds) - 1
    if topX == -1:
        topX = len(self.compounds) - 1
    # accept either a Compound object or an integer compound id
    if type(cmpd) is int:
        cmpd = self.get_compound(cmpd)
    print("Using CANDO compound {}".format(cmpd.name))
    print("Compound has id {} and index {}".format(cmpd.id_, cmpd.index))
    print("Comparing signature to all CANDO compound signatures...")
    self.generate_similar_sigs(cmpd, sort=True)
    print("Generating interaction predictions using top{} most similar compounds...".format(n))
    # count how often each partner appears among the interaction lists of
    # the top n most similar compounds
    tallies = {}
    for neighbor in cmpd.similar[0:n]:
        for partner in neighbor[0].compounds:
            tallies[partner.id_] = tallies.get(partner.id_, 0) + 1
    ranked = sorted(tallies.items(), key=operator.itemgetter(1), reverse=True)
    if save:
        fo = open(save, 'w')
        print("Saving the {} highest predicted compounds...\n".format(topX))
        fo.write("rank\tscore\tcmpd_id\tcompound\n")
    else:
        print("Printing the {} highest predicted compounds...\n".format(topX))
        print("rank\tscore\tcmpd_id \tcompound")
    # clamp so a short ranking never raises IndexError
    for rank in range(min(topX, len(ranked))):
        partner = self.get_compound(ranked[rank][0])
        line = "{}\t{}\t{}\t{}".format(rank+1, ranked[rank][1], partner.id_, partner.name)
        if save:
            fo.write(line + "\n")
        else:
            print(line)
    if save:
        fo.close()
    print('')
def canpredict_ddi_adrs(self, cmpd_pair, n=10, topX=10, save=''):
    """!
    Similarly to canpredict_adrs(), input a compound pair of interest (cmpd_pair)
    and the most similar compound pairs to it will be computed. The ADRs associated
    with the top n most similar compound pairs to the query pair will be examined
    to see if any are repeatedly enriched.
    @param cmpd_pair Compound_pair: Compound_pair object to be used
    @param n int: top number of similar Compounds to be used for prediction
    @param topX int: top number of predicted Indications to be printed
    @param save str: path to file to save output
    @return Returns None
    """
    if n == -1:
        n = len(self.compound_pairs)-1
    if topX == -1:
        topX = len(self.adrs)
    if type(cmpd_pair) is Compound_pair:
        cmpd_pair = cmpd_pair
    elif type(cmpd_pair) is tuple:
        # NOTE(review): this assigns the looked-up pair to 'cmpd' (never read),
        # so an existing pair is never reused and a fresh Compound_pair is
        # always built below. 'cmpd_pair = ...' looks intended; preserved
        # as-is pending confirmation of get_compound_pair()'s semantics.
        cmpd = self.get_compound_pair(cmpd_pair)
    if type(cmpd_pair) is tuple:
        # build an ad-hoc pair whose signature is the element-wise sum of
        # the two member signatures, and register it with the CANDO object
        c1 = self.get_compound(cmpd_pair[0])
        c2 = self.get_compound(cmpd_pair[1])
        cmpd_pair = Compound_pair((c1.name,c2.name),cmpd_pair,cmpd_pair)
        self.compound_pairs.append(cmpd_pair)
        self.compound_pair_ids.append(cmpd_pair.id_)
        cmpd_pair.sig = [i+j for i,j in zip(c1.sig,c2.sig)]
    print("Using CANDO compound pair {}".format(cmpd_pair.name))
    print("Compound pair has id {} and index {}".format(cmpd_pair.id_, cmpd_pair.index))
    print("Comparing signature to all CANDO compound pair signatures...")
    self.generate_similar_sigs_cp(cmpd_pair, sort=True)
    print("Generating ADR predictions using top{} most similar compound pairs...".format(n))
    # tally: ADR id -> number of appearances among the top n similar pairs
    a_dct = {}
    for c in cmpd_pair.similar[0:n]:
        for adr in c[0].adrs:
            if adr.id_ not in a_dct:
                a_dct[adr.id_] = 1
            else:
                a_dct[adr.id_] += 1
    sorted_x = sorted(a_dct.items(), key=operator.itemgetter(1), reverse=True)
    if save:
        fo = open(save, 'w')
        print("Saving the {} highest predicted indications...\n".format(topX))
        fo.write("rank\tscore\tadr_id\tadverse_reaction\n")
    else:
        print("Printing the {} highest predicted indications...\n".format(topX))
        print("rank\tscore\tadr_id \tadverse_reaction")
    # BUG FIX: clamp like canpredict_ddi_cmpds() so fewer than topX
    # predictions does not raise an IndexError
    topX = min(topX, len(sorted_x))
    for i in range(topX):
        adr = self.get_adr(sorted_x[i][0])
        if save:
            fo.write("{}\t{}\t{}\t{}\n".format(i+1, sorted_x[i][1], adr.id_, adr.name))
        else:
            print("{}\t{}\t{}\t{}".format(i+1, sorted_x[i][1], adr.id_, adr.name))
    if save:
        fo.close()
    print('')
def similar_compounds(self, cmpd, n=10):
"""!
Computes and prints the top n most similar compounds to an input
Compound object cando_cmpd or input novel signature new_sig
@param cmpd Compound: Compound object
@param n int: top number of similar Compounds to be used for prediction
@return Returns None
"""
if type(cmpd) is Compound:
cmpd = cmpd
elif type(cmpd) is int:
cmpd = self.get_compound(cmpd)
print("Using CANDO compound {}".format(cmpd.name))
print("Compound has id {} and index {}".format(cmpd.id_, cmpd.index))
print("Comparing signature to all CANDO compound signatures...")
self.generate_similar_sigs(cmpd, sort=True)
print("Printing top{} most similar compounds...\n".format(n))
print("rank\tdist\tid\tname")
for i in range(n+1):
print("{}\t{:.3f}\t{}\t{}".format(i+1, cmpd.similar[i][1], cmpd.similar[i][0].id_, cmpd.similar[i][0].name))
print('\n')
return
def add_cmpd(self, new_sig, new_name=''):
"""!
Add a new Compound object to the platform
@param new_sig str: Path to the tab-separated interaction scores
@param new_name str: Name for the new Compound
@return Returns None
"""
with open(new_sig, 'r') as nsf:
n_sig = [0.00] * len(self.proteins)
for l in nsf:
[pr, sc] = l.strip().split('\t')
pr_i = self.protein_id_to_index[pr]
n_sig[pr_i] = sc
i = max([cm.id_ for cm in self.compounds]) + 1
if not new_name:
new_name = 'compound_{}'.format(i)
cmpd = Compound(new_name, i, i)
cmpd.sig = n_sig
self.compounds.append(cmpd)
if self.compounds[0].similar_computed or len(self.compounds[0].similar) > 1:
dists = self.generate_similar_sigs(cmpd, sort=True)
for c, dist in dists:
c.similar.append((cmpd, dist))
c.similar = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
print("New compound is " + cmpd.name)
print("New compound has id {} and index {}".format(cmpd.id_, cmpd.index))
    def sigs(self, rm):
        """!
        Return a list of all signatures, rm is a list of compound ids you do not want in the list
        @param rm list: List of compound ids to remove from list of signatures
        @return list: List of all signatures
        """
        # NOTE(review): this iterates self.proteins, although the docstring and
        # the 'rm' parameter refer to compound ids -- confirm whether this
        # should be self.compounds.
        return [x.sig for x in self.proteins if x.id_ not in rm]
def save_dists_to_file(self, f):
"""!
Write calculated distances of all compounds to all compounds to file
@param f File name to save distances
"""
def dists_to_str(cmpd):
o = ''
for s in cmpd.similar:
o += '{}\t'.format(s[1])
o = o + '\n'
return o
with open(f, 'w') as srf:
for c in self.compounds:
srf.write(dists_to_str(c))
    def fusion(self, cando_objs, out_file='', method='sum'):
        """!
        This function re-ranks the compounds according to the desired comparison specified by
        'method' -> currently supports 'min', 'avg', 'mult', and 'sum'
        @param cando_objs list: List of CANDO objects
        @param out_file str: Path to where the result will be written
        @param method str: Method of fusion to be used (e.g., sum, mult, etc.)
        @return Returns CANDO object
        """
        print("Fusing CANDO objects using " + method)
        # Fresh CANDO object to hold the fused rankings.
        cnd = CANDO(self.c_map, self.i_map)
        if self.rm_cmpds:
            # Reuse this object's (filtered) compound/indication sets, but
            # clear per-compound state so it can be repopulated below.
            cnd.compounds = self.compounds
            cnd.indications = self.indications
            for c in cnd.compounds:
                c.similar = []
                c.sig = []
        dn = [self.data_name]
        for obj in cando_objs:
            dn.append(obj.data_name)
        cnd.data_name = "-".join(dn) + '-' + method
        # cid_to_ranks[a][b] collects the rank position of compound b in
        # compound a's similarity list, one entry per fused CANDO object.
        cid_to_ranks = {}
        for c in self.compounds:
            cid_to_ranks[c.id_] = {}
            sims = c.similar
            for i in range(len(sims)):
                cid_to_ranks[c.id_][sims[i][0].id_] = [i]
        for cando_obj in cando_objs:
            for c2 in cando_obj.compounds:
                sims2 = c2.similar
                for j in range(len(sims2)):
                    cid_to_ranks[c2.id_][sims2[j][0].id_].append(j)
        # Combine the per-object rank lists into a single fused score.
        for c3 in cnd.compounds:
            ranks_dct = cid_to_ranks[c3.id_]
            for c4 in cnd.compounds:
                if c4.id_ == c3.id_:
                    continue
                ranks = ranks_dct[c4.id_]
                if method == 'min':
                    c3.similar.append((c4, float(min(ranks))))
                if method == 'sum':
                    c3.similar.append((c4, float(sum(ranks))))
                if method == 'avg':
                    c3.similar.append((c4, (float(sum(ranks))) / len(ranks)))
                if method == 'mult':
                    m = 1.0
                    for r in ranks:
                        m *= r
                    c3.similar.append((c4, m))
        if out_file:
            with open(out_file, 'w') as fo:
                for co in cnd.compounds:
                    s = list(map(str, [x[1] for x in co.similar]))
                    fo.write('\t'.join(s) + '\n')
        # Sort fused scores ascending; NaNs are pushed to the end.
        for cf in cnd.compounds:
            sorted_scores = sorted(cf.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
            cf.similar = sorted_scores
            cf.similar_computed = True
            cf.similar_sorted = True
        return cnd
def normalize(self):
"""!
Normalize the distance scores to between [0,1]. Simply divides all scores by the largest distance
between any two compounds.
@return Returns None
"""
if len(self.compounds[0].similar) == 0:
print('Similar scores not computed yet -- quitting')
return
mx = 0
for c in self.compounds:
for s in c.similar:
if s[1] > mx:
mx = s[1]
print('Max value is {}'.format(mx))
def norm(x):
v = x[1] / mx
return x[0], v
for c in self.compounds:
c.similar = list(map(norm, c.similar))
return
def cando_methods(self):
return []
def compounds_drugs_methods(self):
return ['canpredict_compounds']
def indications_methods(self):
return ['canpredict_indications', 'canbenchmark', 'canbenchmark_associated', 'canbenchmark_bottom', 'canbenchmark_cluster',
'canbenchmark_compounds', 'canbenchmark_ddi', 'canbenchmark_ndcg']
def adr_methods(self):
return ['canpredict_adr']
def inspect_method(self, method_name):
return inspect.getargspec(getattr(CANDO, method_name)).args[1:]
def __str__(self):
"""!
Print stats about the CANDO object
"""
nc = len(self.compounds)
b = self.compounds[0].similar_computed
ni = len(self.indications)
np = len(self.proteins)
if np:
return 'CANDO: {0} compounds, {1} proteins, {2} indications\n' \
'\tMatrix - {3}\nIndication mapping - {4}\n' \
'\tDistances computed - {5}'.format(nc, np, ni, self.matrix, self.i_map, b)
elif self.read_dists:
return 'CANDO: {0} compounds, {1} indications\n' \
'\tCompound comparison file - {2}\n' \
'\tIndication mapping - {3}'.format(nc, ni, self.read_dists, self.i_map)
else:
return 'CANDO: {0} compounds, {1} indications\n' \
'\tIndication mapping - {2}'.format(nc, ni, self.i_map)
class Matrix(object):
    """!
    An object to represent a matrix
    Intended for easier handling of matrices.
    Convert between fpt and tsv, as well as distance to similarity (and vice versa)
    """
    def __init__(self, matrix_file, dist=False, convert_to_tsv=False):
        """!
        Parse a matrix file into self.proteins and self.values.
        @param matrix_file str: Path to file with interaction scores
        @param dist bool: the file holds rows of tab-separated distances only
        @param convert_to_tsv bool: also write a .tsv copy of an old .fpt file
        """
        ## @var matrix_file
        # str: Path to file with interaction scores
        self.matrix_file = matrix_file
        ## @var dist
        # bool: if the matrix_file is an dist file
        self.dist = dist
        ## @var convert_to_tsv
        # bool: Convert old matrix format (.fpt) to .tsv
        self.convert_to_tsv = convert_to_tsv
        ## @var proteins
        # list: Proteins in the Matrix
        self.proteins = []
        ## @var values
        # list: Values in the Matrix
        self.values = []
        # Recover the protein name from a fixed-width .fpt line: collect
        # characters from the start of the line until the first space.
        def pro_name(l):
            name = l[0]
            curr = l[1]
            index = 1
            while curr != ' ':
                name += curr
                index += 1
                curr = l[index]
            return name
        if not dist:
            with open(matrix_file, 'r') as f:
                lines = f.readlines()
                if convert_to_tsv:
                    # Old fixed-width .fpt layout: scores are 5 characters wide
                    # with a stride of 8, starting at column 24 -- TODO confirm
                    if matrix_file[-4:] == '.fpt':
                        out_file = '.'.join(matrix_file.split('.')[:-1]) + '.tsv'
                    else:
                        out_file = matrix_file + '.tsv'
                    of = open(out_file, 'w')
                    for l_i in range(len(lines)):
                        name = pro_name(lines[l_i])
                        scores = []
                        i = 24
                        while i < len(lines[l_i]):
                            score = lines[l_i][i:i + 5]
                            i += 8
                            scores.append(score)
                        self.proteins.append(name)
                        self.values.append(list(map(float, scores)))
                        of.write("{0}\t{1}\n".format(name, '\t'.join(scores)))
                    of.close()
                else:
                    # Modern .tsv layout: "<protein>\t<score>\t<score>...".
                    for l_i in range(len(lines)):
                        vec = lines[l_i].strip().split('\t')
                        if len(vec) < 2:
                            print('The matrix file {} is in the old fpt format -- please '
                                  'convert to tsv with the following line of code:'.format(self.matrix_file))
                            print('-> Matrix("{}", convert_to_tsv=True) <-'.format(self.matrix_file))
                            quit()
                        name = vec[0]
                        scores = vec[1:]
                        self.proteins.append(name)
                        self.values.append(list(map(float, scores)))
        else:
            # Distance file: rows of tab-separated floats, no protein names.
            with open(matrix_file, 'r') as rrs:
                lines = rrs.readlines()
                for i in range(len(lines)):
                    scores = list(map(float, lines[i].strip().split('\t')))
                    self.values.append(scores)
    def convert(self, out_file):
        """!
        Convert similarity matrix to distance matrix or vice versa. The
        first value in the matrix will determine the type of conversion
        (0.0 means distance to similarity, 1.0 means similarity to distance).
        @param out_file str: File path to which write the converted matrix.
        @return Returns None
        """
        if self.values[0][0] == 0.0:
            metric = 'd'
        elif self.values[0][0] == 1.0:
            metric = 's'
        else:
            metric = None
            print('The first value is not 0.0 or 1.0; '
                  'please ensure the matrix is generated properly')
            quit()
        # Note: the two conversions are not inverses of each other
        # (similarity -> distance uses 1 - s; distance -> similarity uses 1/(1+d)).
        def to_dist(s):
            return 1 - s
        def to_sim(d):
            return 1 / (1 + d)
        of = open(out_file, 'w')
        if metric == 'd':
            for vs in self.values:
                vs = list(map(to_sim, vs))
                of.write("{}\n".format('\t'.join(list(map(str, vs)))))
        else:
            if metric == 's':
                for vs in self.values:
                    vs = list(map(to_dist, vs))
                    of.write("{}\n".format('\t'.join(list(map(str, vs)))))
        of.close()
    def normalize(self, outfile, dimension='drugs', method='avg'):
        """!
        Normalize the interaction scores across drugs (default) or proteins (not implemented yet).
        @param outfile str: File path to which is written the converted matrix.
        @param dimension str: which vector to normalize - either 'drugs' to normalize all
        scores within the proteomic vector or 'proteins' to normalize for a protein against
        all drug scores.
        @param method str: normalize by the average or max within the vectors
        @return Returns None
        """
        # dimensions include drugs or features (e.g. "proteins")
        # methods are average ('avg') or max ('max')
        dvs = {} # drug vectors
        cc = 0
        # Only the 'drugs' dimension is handled; any other value leaves dvs
        # empty and produces an empty output file.
        if dimension == 'drugs':
            # Transpose: collect one column vector per drug across all rows.
            for vec in self.values:
                for vi in range(len(vec)):
                    if cc == 0:
                        dvs[vi] = []
                    dvs[vi].append(vec[vi])
                cc += 1
        new_dvecs = []
        for i in range(len(dvs)):
            vec = dvs[i]
            if method == 'avg':
                norm_val = np.average(vec)
            elif method == 'max':
                norm_val = max(vec)
            else:
                print('Please enter a proper normalization method: "max" or "avg"')
                quit()
            # Divide each entry by the vector's norm value (0-norm maps to 0.0).
            def norm(x):
                if norm_val == 0:
                    return 0.0
                else:
                    return x/norm_val
            new_dvecs.append(list(map(norm, vec)))
        # Transpose back to per-protein rows for writing.
        pvs = {}
        for dvi in range(len(new_dvecs)):
            for p in range(len(self.proteins)):
                try:
                    pvs[p].append(new_dvecs[dvi][p])
                except KeyError:
                    pvs[p] = [new_dvecs[dvi][p]]
        with open(outfile, 'w') as fo:
            for p in range(len(self.proteins)):
                fo.write('{}\t{}\n'.format(self.proteins[p], '\t'.join(list(map(str, pvs[p])))))
def single_interaction(c_id, p_id, v="v2.2", fp="rd_ecfp4", vect="int",
                       dist="dice", org="nrpdb", bs="coach",
                       c_cutoff=0.0, p_cutoff=0.0, percentile_cutoff=0.0,
                       i_score="P", nr_ligs=True, approved_only=False, lig_name=False,
                       lib_path='',prot_path=''):
    """!
    Compute a single compound-protein BANDOCK interaction score, downloading
    any missing fingerprint/protein data files on demand.
    @param c_id int: CANDO compound id
    @param p_id str: protein id present in the chosen protein library
    @param v str: compound library version (e.g. 'v2.2')
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @param dist str: similarity metric ('dice', 'tani', or 'cos')
    @param org str: protein library to use ('nrpdb' or 'homo_sapien')
    @param bs str: binding-site prediction method (use 'coach')
    @param c_cutoff float: minimum Cscore to consider for scoring
    @param p_cutoff float: minimum Pscore to consider for scoring
    @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores
    @param i_score str: scoring protocol ('P', 'C', 'dC', 'CxP', 'dCxP', ...)
    @param nr_ligs bool: use only the non-redundant ligand set (recommended)
    @param approved_only bool: unused here; kept for interface parity with generate_matrix
    @param lig_name bool: return the chosen ligand name instead of the score
    @param lib_path str: specify a local compound fingerprint set for custom analyses
    @param prot_path str: specify a local protein library for custom analyses
    @return The interaction score (or ligand name) for the compound-protein pair
    """
    def print_time(s):
        # Pretty-print elapsed seconds; divmod keeps the remainders correct
        # (the original subtracted un-floored minutes/hours, zeroing them).
        if s >= 60:
            m, s = divmod(s, 60.0)
            if m >= 60.0:
                h, m = divmod(m, 60.0)
                print("Interaciton calculation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
            else:
                print("Interaciton calculation took {:.0f} min {:.0f} s to finish.".format(m, s))
        else:
            print("Interaciton calculation took {:.0f} s to finish.".format(s))
    print("Calculating BANDOCK interaction...")
    start = time.time()
    c_id = int(c_id)
    pre = os.path.dirname(__file__) + "/data/v2.2+/"
    lig_path = "{}/ligs/fps".format(pre)
    if not lib_path:
        cmpd_path = "{}/cmpds/fps-{}".format(pre,v)
        map_path = "{}/mappings".format(pre)
    else:
        cmpd_path = "{0}/{1}/cmpds/fps-{1}".format(lib_path,v)
        map_path = "{0}/{1}/mappings".format(lib_path,v)
    # Remove redundant ligands from full list
    # Especially important for percentile calculations
    if nr_ligs:
        if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
            dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
        nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
        nr_ligs = nr_ligs[0].values.flatten()
    # Download protein matrix if it does not exist
    if not prot_path:
        if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
            dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
        p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
    else:
        p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
    # Create dictionary of lists
    # Keys == proteins
    # Values == list of predicted bs + bs scores
    p_dict = {}
    for p in p_matrix.itertuples():
        p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
    # Keep only the requested protein.
    try:
        p_dict = {p_id: p_dict[p_id]}
    except KeyError:
        print("{} does not exist in protein library".format(p_id))
        sys.exit()
    if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
        print("{} is not an applicable interaction score.".format(i_score))
        return
    if not os.path.exists("{}/{}-{}_vect.pickle".format(cmpd_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))
    if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
    # Load compound and ligand fingerprint pickles
    with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:
        c_fps = pickle.load(f)
    with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
        l_fps = pickle.load(f)
    # Bail out early if the compound id is unknown.
    try:
        check = c_fps[c_id]
    except KeyError:
        print("{} does not exist in compound library".format(c_id))
        sys.exit()
    print("Interaction between {} and {}.".format(c_id,p_id))
    score = calc_scores(c_id,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name)
    # calc_scores returns (compound_id, [per-protein scores]); with a single
    # protein in p_dict the result is score[1][0].
    print("Interaction score between {} and {} = {}".format(c_id,p_id,score[1][0]))
    end = time.time()
    print_time(end-start)
    return(score[1][0])
def generate_matrix(v="v2.2", fp="rd_ecfp4", vect="int", dist="dice", org="nrpdb", bs="coach", c_cutoff=0.0,
                    p_cutoff=0.0, percentile_cutoff=0.0, i_score="P", out_file='', out_path=".", nr_ligs=True,
                    approved_only=False, lig_name=False, lib_path='', prot_path='', ncpus=1):
    """!
    Generate a matrix using our in-house protocol BANDOCK.
    @param v str: version to use (supports v2.2 - v2.5)
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @param dist str: use Sorenson-Dice "dice" for vect="int" and Tanimoto "tani" for vect="bit"
    @param org str: protein library to use ('nrpdb' or 'homo_sapien')
    @param bs str: the method to use, just use "coach"
    @param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring
    @param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring
    @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols
    @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')
    @param out_file str: filename of the output matrix
    @param out_path str: path to the output matrix
    @param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)
    @param approved_only bool: use only approved drugs to create the matrix
    @param lig_name bool: output the ligand chosen for the compound-protein interaction score instead of the score
    @param lib_path str: specify a local compound fingerprint set for custom analyses
    @param prot_path str: specify a local protein library for custom analyses
    @param ncpus int: number of cores to run on
    @return Returns None
    """
    def print_time(s):
        # Pretty-print elapsed seconds; divmod keeps the remainders correct
        # (the original subtracted un-floored minutes/hours, zeroing them).
        if s >= 60:
            m, s = divmod(s, 60.0)
            if m >= 60.0:
                h, m = divmod(m, 60.0)
                print("Matrix generation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
            else:
                print("Matrix generation took {:.0f} min {:.0f} s to finish.".format(m, s))
        else:
            print("Matrix generation took {:.0f} s to finish.".format(s))
    print("Generating CANDO matrix...")
    start = time.time()
    pre = os.path.dirname(__file__) + "/data/v2.2+/"
    lig_path = "{}/ligs/fps".format(pre)
    if not lib_path:
        cmpd_path = "{}/cmpds/fps-{}".format(pre,v)
        map_path = "{}/mappings".format(pre)
    else:
        cmpd_path = "{0}/{1}/cmpds/fps-{1}".format(lib_path,v)
        map_path = "{0}/{1}/mappings".format(lib_path,v)
    # Default output name encodes all scoring parameters.
    if out_file == '':
        if percentile_cutoff != 0.0:
            if approved_only:
                out_file = "{}-{}-{}-{}-{}-percentile{}-p{}-{}-approved.tsv".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
            else:
                out_file = "{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
        else:
            if approved_only:
                out_file = "{}-{}-{}-{}-{}-c{}-p{}-{}-approved.tsv".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
            else:
                out_file = "{}-{}-{}-{}-{}-c{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
    # NOTE(review): out_path defaults to "." (truthy), so these fallbacks only
    # trigger when the caller passes out_path='' explicitly.
    if not out_path and not lib_path:
        out_path = '{}/matrices/{}'.format(pre,v)
    elif not out_path and lib_path:
        out_path = '{}/{}/matrices'.format(lib_path,v)
    os.makedirs(out_path, exist_ok=True)
    # Remove redundant ligands from full list
    # Especially important for percentile calculations
    if nr_ligs:
        if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
            dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
        nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
        nr_ligs = nr_ligs[0].values.flatten()
    # Download protein matrix if it does not exist
    if not prot_path:
        if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
            dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
        p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
    else:
        p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
    # Create dictionary of lists
    # Keys == proteins
    # Values == list of predicted bs + bs scores
    p_dict = {}
    for p in p_matrix.itertuples():
        p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
    if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
        print("{} is not an applicable interaction score.".format(i_score))
        return
    if not os.path.exists("{}/{}-{}_vect.pickle".format(cmpd_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))
    if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
    # Load compound and ligand fingerprint pickles
    with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:
        c_fps = pickle.load(f)
    with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
        l_fps = pickle.load(f)
    if approved_only:
        if not os.path.exists("{}/drugbank-{}-approved.tsv".format(map_path,v)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)
            dl_file(url, '{}/drugbank-{}-approved.tsv'.format(map_path,v))
        approved_df = pd.read_csv('{}/drugbank-{}-approved.tsv'.format(map_path,v),sep='\t',index_col=0)
        c_list = approved_df.index
    else:
        c_list = list(c_fps.keys())
    if ncpus > 1:
        pool = mp.Pool(ncpus)
        scores = pool.starmap_async(calc_scores, [(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]).get()
        # Bug fix: close/join were previously referenced without being called
        # (no parentheses), leaving worker processes alive.
        pool.close()
        pool.join()
    else:
        scores = [calc_scores(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]
    # calc_scores yields (compound_id, per-protein score list) pairs.
    scores = {d[0]:d[1] for d in scores}
    mat = pd.DataFrame.from_dict(scores)
    mat.sort_index(axis=1,inplace=True)
    mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)
    mat.to_csv("{}/{}".format(out_path,out_file), sep='\t', index=True, header=False, float_format='%.3f')
    end = time.time()
    print("Matrix written to {}/{}.".format(out_path,out_file))
    print_time(end-start)
def calc_scores(c,c_fps,l_fps,p_dict,dist,pscore_cutoff=0.0,cscore_cutoff=0.0,percentile_cutoff=0.0,i_score='P',nr_ligs=[],lig_name=False):
    """!
    Compute BANDOCK interaction scores for one compound against every protein
    in p_dict.
    @param c: compound id (key into c_fps)
    @param c_fps dict: compound id -> RDKit fingerprint
    @param l_fps: ligand fingerprint table indexed by ligand id (uses .loc/.index)
    @param p_dict dict: protein id -> list of (ligand id, Pscore) pairs
    @param dist str: similarity metric ('dice', 'tani', or 'cos')
    @param pscore_cutoff float: minimum binding-site (P) score to keep a ligand
    @param cscore_cutoff float: minimum fingerprint similarity (C) score
    @param percentile_cutoff float: if nonzero, cscore_cutoff is derived from
    this percentile of the compound's similarity to the non-redundant ligand set
    @param i_score str: scoring protocol ('P','C','dC','CxP','dCxP','avgC','medC','avgP','medP')
    @param nr_ligs: ids of the non-redundant ligand set (reference distribution).
    NOTE(review): mutable default list; it appears to be read-only here, but confirm.
    @param lig_name bool: record the chosen ligand id instead of the score
    @return tuple: (c, list of scores, one per protein in p_dict order)
    """
    # 'dC' protocols and percentile cutoffs need the compound's similarity to
    # the whole non-redundant ligand set as a reference distribution.
    if i_score in ['dC','dCxP'] or percentile_cutoff != 0.0:
        if dist == 'dice':
            all_scores = DataStructs.BulkDiceSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
        elif dist == 'tani':
            all_scores = DataStructs.BulkTanimotoSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
        elif dist == 'cos':
            all_scores = DataStructs.BulkCosineSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
        if percentile_cutoff != 0.0:
            cscore_cutoff = np.percentile(all_scores,percentile_cutoff)
    scores = []
    for p in p_dict.keys():
        # Binding-site ligands for this protein that have a fingerprint and
        # pass the Pscore cutoff. li_bs holds ligand ids, li_score the Pscores
        # (as strings, parsed with float() when used).
        li = [i[0:2] for i in p_dict[p] if i[0] in l_fps.index and float(i[1]) >= pscore_cutoff]
        if li:
            li_bs, li_score = zip(*li)
            li_bs = list(li_bs)
            li_score = list(li_score)
        else:
            li_bs = li_score = []
        x = l_fps.loc[li_bs,0].values.tolist()
        y = l_fps.loc[li_bs].index.tolist()
        # Pscore
        if i_score in ['P','CxP','dCxP','avgP','medP']:
            # The bare except below converts any failure (e.g. empty
            # temp_scores after the cutoffs) into a 0.000 / "None" entry.
            try:
                if dist == 'dice':
                    temp_scores = list(zip(y,DataStructs.BulkDiceSimilarity(c_fps[c],x)))
                elif dist == 'tani':
                    temp_scores = list(zip(y,DataStructs.BulkTanimotoSimilarity(c_fps[c],x)))
                elif dist == 'cos':
                    temp_scores = list(zip(y,DataStructs.BulkCosineSimilarity(c_fps[c],x)))
                #Cscore cutoff
                temp_scores = [i for i in temp_scores if float(i[1]) >= cscore_cutoff]
                if i_score == 'dCxP':
                    # Best-matching ligand; its Cscore is converted to a
                    # percentile of the reference distribution.
                    temp_c = max(temp_scores, key = lambda i:i[1])
                    if not lig_name:
                        c_score = stats.percentileofscore(all_scores,temp_c[1])/100.0
                        p_score = li_score[li_bs.index(temp_c[0])]
                        scores.append(float(c_score) * float(p_score))
                    else:
                        scores.append(temp_c[0])
                elif i_score == 'CxP':
                    temp_c = max(temp_scores, key = lambda i:i[1])
                    if not lig_name:
                        c_score = temp_c[1]
                        p_score = li_score[li_bs.index(temp_c[0])]
                        scores.append(float(c_score) * float(p_score))
                        continue
                    else:
                        scores.append(temp_c[0])
                elif i_score == 'P':
                    # Pscore of the ligand with the best Cscore.
                    temp_c = max(temp_scores, key = lambda i:i[1])
                    if not lig_name:
                        p_score = li_score[li_bs.index(temp_c[0])]
                        scores.append(float(p_score))
                    else:
                        scores.append(temp_c[0])
                elif i_score == 'avgP':
                    # Will produce a warning when li_score is empty
                    # temp_p will then == nan, so we check for that
                    # append 0.00 if True.
                    # Note: averages all Pscores, ignoring the Cscore cutoff.
                    temp_p = np.mean(li_score)
                    if not np.isnan(temp_p):
                        scores.append(temp_p)
                    else:
                        scores.append(0.000)
                elif i_score == 'medP':
                    temp_p = np.median(li_score)
                    if not np.isnan(temp_p):
                        scores.append(temp_p)
                    else:
                        scores.append(0.000)
            except:
                if not lig_name:
                    scores.append(0.000)
                else:
                    scores.append("None")
        # Cscore
        elif i_score in ['dC','C','avgC','medC']:
            try:
                if dist == 'dice':
                    temp_scores = DataStructs.BulkDiceSimilarity(c_fps[c],x)
                elif dist == 'tani':
                    temp_scores = DataStructs.BulkTanimotoSimilarity(c_fps[c],x)
                elif dist == 'cos':
                    temp_scores = DataStructs.BulkCosineSimilarity(c_fps[c],x)
                #Cscore cutoff
                temp_scores = [i for i in temp_scores if float(i) >= cscore_cutoff]
                if i_score == 'dC':
                    temp_c = max(temp_scores)
                    if not lig_name:
                        scores.append(stats.percentileofscore(all_scores, temp_c) / 100.0)
                    else:
                        # NOTE(review): searches the Pscore list for a
                        # similarity value; likely meant temp_scores.index --
                        # as written this raises and falls through to "None".
                        scores.append(li_bs[li_score.index(temp_c)])
                elif i_score == 'C':
                    temp_c = max(temp_scores)
                    if not lig_name:
                        scores.append(temp_c)
                    else:
                        # NOTE(review): same li_score.index concern as above.
                        scores.append(li_bs[li_score.index(temp_c)])
                elif i_score == 'avgC':
                    temp_c = np.mean(temp_scores)
                    if not np.isnan(temp_c):
                        scores.append(temp_c)
                    else:
                        scores.append(0.000)
                elif i_score == 'medC':
                    temp_c = np.median(temp_scores)
                    if not np.isnan(temp_c):
                        scores.append(temp_c)
                    else:
                        scores.append(0.000)
            except:
                if not lig_name:
                    scores.append(0.000)
                else:
                    scores.append("None")
    return (c, scores)
def generate_signature(cmpd_file, fp="rd_ecfp4", vect="int", dist="dice", org="nrpdb", bs="coach", c_cutoff=0.0,
                       p_cutoff=0.0, percentile_cutoff=0.0, i_score="P", out_file='', out_path=".", nr_ligs=True,
                       prot_path=''):
    """!
    Generate an interaction signature for a query compound using our in-house protocol BANDOCK. Note: the parameters
    for this function MUST MATCH the parameters used to generate the matrix in use. Otherwise, the scores will be
    incompatible.
    @param cmpd_file str: filepath to an input mol file
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @param dist str: use Sorenson-Dice "dice" for vect="int" and Tanimoto "tani" for vect="bit"
    @param org str: protein library to use ('nrpdb' or 'homo_sapien')
    @param bs str: the method to use, just use "coach"
    @param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring
    @param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring
    @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols
    @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')
    @param out_file str: filename of the output signature
    @param out_path str: path to the output signature
    @param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)
    @param prot_path str: specify a local protein library for custom analyses
    @return Returns None
    """
    def print_time(s):
        # Pretty-print elapsed seconds; divmod keeps the remainders correct
        # (the original subtracted un-floored minutes/hours, zeroing them).
        if s >= 60:
            m, s = divmod(s, 60.0)
            if m >= 60.0:
                h, m = divmod(m, 60.0)
                print("Signature generation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
            else:
                print("Signature generation took {:.0f} min {:.0f} s to finish.".format(m, s))
        else:
            print("signature generation took {:.0f} s to finish.".format(s))
    print("Generating CANDO signature...")
    start = time.time()
    pre = os.path.dirname(__file__) + "/data/v2.2+/"
    lig_path = "{}/ligs/fps/".format(pre)
    # Default output name encodes all scoring parameters (compound id 0).
    if out_file == '':
        if percentile_cutoff != 0.0:
            out_file = "{}/cmpd_0-{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv".format(out_path,fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
        else:
            out_file = "{}/cmpd_0-{}-{}-{}-{}-{}-c{}-p{}-{}.tsv".format(out_path,fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
    os.makedirs(out_path, exist_ok=True)
    # Remove redundant ligands from full list
    # Especially important for percentile calculations
    if nr_ligs:
        if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
            dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
        nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
        nr_ligs = nr_ligs[0].values.flatten()
    # Download protein matrix if it does not exist
    if not prot_path:
        if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
            dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
        p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
    else:
        p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
    # Create dictionary of lists
    # Keys == proteins
    # Values == list of predicted bs + bs scores
    p_dict = {}
    for p in p_matrix.itertuples():
        p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
    if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
        print("{} is not an applicable interaction score.".format(i_score))
        return
    # Read and fingerprint the query molecule (assigned compound id 0).
    nc = Chem.MolFromMolFile(cmpd_file)
    nc = Chem.RemoveHs(nc)
    name = nc.GetProp("_Name")
    c_fps = {}
    # Morgan radius is half the ECFP diameter encoded in the fingerprint name
    # (e.g. 'rd_ecfp4' -> radius 2).
    rad = int(int(fp[7:])/2)
    # 4th character distinguishes feature-based fcfp from ecfp -- presumably
    # names like 'rd_fcfp4'; confirm against the supported fp names.
    if fp[3]=='f':
        features = True
    else:
        features = False
    if vect=='int':
        c_fps[0] = AllChem.GetMorganFingerprint(nc,rad,useFeatures=features)
    else:
        # assumes the bit count is encoded in vect's first 4 chars -- TODO confirm
        bits = int(vect[:4])
        c_fps[0] = AllChem.GetMorganFingerprintAsBitVect(nc,rad,useFeatures=features,nBits=bits)
    if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
    # Load ligand fingerprint pickles
    with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
        l_fps = pickle.load(f)
    scores = calc_scores(0,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs)
    #scores = pool.starmap_async(calc_scores, [(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs) for c in c_list]).get()
    scores = {scores[0]:scores[1]}
    mat = pd.DataFrame.from_dict(scores)
    mat.sort_index(axis=1,inplace=True)
    mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)
    mat.to_csv("{}/{}".format(out_path,out_file), sep='\t', index=True, header=False, float_format='%.3f')
    end = time.time()
    print("Signature written to {}/{}.".format(out_path,out_file))
    print_time(end-start)
    return(mat.iloc[:,0].values)
def add_cmpds(cmpd_list, file_type='smi', fp="rd_ecfp4", vect="int", cmpd_dir=".", v=None, map_indications='v2.3'):
    """!
    Add new compounds to an existing CANDO Compound library, or create a new Compound library using our in-house protocol
    BANDOCK.
    @param cmpd_list str: filepath to all input compounds
    @param file_type str: 'mol' reads "{cmpd_dir}/{id}.mol" files; 'smi' reads SMILES strings from cmpd_list
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @param cmpd_dir str: directory containing the input .mol files (only used when file_type='mol')
    @param v str: released version to extend (v2.2-v2.5 or test.0); any other non-empty string creates a
    new library with that name; None creates a new 'v0.0' library
    @param map_indications str: CANDO version number to string match exact names from compound file to existing ind_map
    @return Returns None
    """
    start = time.time()
    pre = os.path.dirname(__file__) + "/data/v2.2+/"
    # List of new compounds loaded into df
    ncs = pd.read_csv(cmpd_list, sep='\t', header=None)
    vs = ['v2.2', 'v2.3', 'v2.4', 'v2.5', 'test.0']
    if v in vs:
        # Case 1: extend a released library. The current version's fingerprint
        # pickle and mappings are copied under a bumped version name (e.g.
        # v2.3 -> v2.4) and the new compounds are appended on top.
        # Redundant with future lines.
        # Remove future lines and implement them into get_data()
        #get_data(v=v, org=None)
        curr_v = v
        print("Adding new compounds to compound library {}...".format(curr_v))
        # Bump the last version component to name the new library
        t = curr_v.split('.')
        t[-1] = str(int(t[-1])+1)
        new_v = '.'.join(t)
        print("New compound library is {}.".format(new_v))
        curr_cmpd_path = "{}/cmpds/fps-{}/".format(pre, curr_v)
        # Fetch the current version's fingerprint pickle if not cached locally
        if not os.path.exists("{}/cmpds/fps-{}/{}-{}_vect.pickle".format(pre, curr_v, fp, vect)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(curr_v,
                                                                                                              fp, vect)
            dl_file(url, '{}/cmpds/fps-{}/{}-{}_vect.pickle'.format(pre, curr_v, fp, vect))
        cmpd_path = "{}/cmpds/fps-{}/".format(pre, new_v)
        os.makedirs(cmpd_path, exist_ok=True)
        # Seed the new version's fingerprints with a copy of the current ones
        os.system("cp {0}/{2}-{3}_vect.pickle {1}/{2}-{3}_vect.pickle".format(curr_cmpd_path, cmpd_path, fp, vect))
        if not os.path.exists("{}/mappings/drugbank-{}.tsv".format(pre, curr_v)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}.tsv'.format(curr_v)
            dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, curr_v))
        d_map = pd.read_csv("{}/mappings/drugbank-{}.tsv".format(pre, curr_v), sep='\t')
        if not os.path.exists("{}/mappings/drugbank2ctd-{}.tsv".format(pre, curr_v)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(curr_v)
            dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, curr_v))
        # The indication mapping is carried over unchanged for the new version
        os.system("cp {0}/mappings/drugbank2ctd-{1}.tsv {0}/mappings/drugbank2ctd-{2}.tsv".format(pre, curr_v, new_v))
        if not os.path.exists("{}/cmpds/fps-{}/inchi_keys.pickle".format(pre, curr_v)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/inchi_keys.pickle'.format(curr_v)
            dl_file(url, '{}/cmpds/fps-{}/inchi_keys.pickle'.format(pre, curr_v))
        # inchi_dict maps InChIKey -> CANDO id; used below to detect duplicates
        with open('{}/inchi_keys.pickle'.format(curr_cmpd_path), 'rb') as f:
            inchi_dict = pickle.load(f)
        # New compounds get ids starting after the last existing one
        cmpd_num = len(inchi_dict)
        for c in ncs.itertuples(index=False):
            try:
                if file_type == 'mol':
                    nc = Chem.MolFromMolFile("{}/{}.mol".format(cmpd_dir, c[0]))
                    name = nc.GetProp("_Name")
                elif file_type == 'smi':
                    nc = Chem.MolFromSmiles("{}".format(c[0]))
                    name = c[1]
                    nc.SetProp("_Name", name)
                nc = Chem.RemoveHs(nc)
            except:
                print("{} cannot load this molecule.".format(c[0]))
                continue
            inchi_key = Chem.MolToInchiKey(nc)
            try:
                match = str(inchi_dict[inchi_key])
            except:
                match = None
            if match:
                # Duplicate of an existing compound -- report and skip.
                # NOTE(review): .format receives 4 args for 3 placeholders;
                # the trailing `match` is silently ignored.
                print(" {} is the same as {} - {} in the library".format(name, int(match),
                                                                         d_map.loc[(d_map['CANDO_ID'] == int(match)),
                                                                                   'GENERIC_NAME'].values[0], match))
                continue
            else:
                print(" Adding compound {} - {}".format(cmpd_num,name))
                with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:
                    inchi_dict[inchi_key] = cmpd_num
                    pickle.dump(inchi_dict, f)
                d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],
                                                  columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),
                                     ignore_index=True)
                # Morgan radius from the fingerprint name, e.g. rd_ecfp4 -> radius 2
                rad = int(int(fp[7:])/2)
                # rd_fcfp* variants use feature-based invariants
                if fp[3] == 'f':
                    features = True
                else:
                    features = False
                if vect == 'int':
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                else:
                    # Bit-vector length is encoded in the first four characters of vect
                    bits = int(vect[:4])
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                cmpd_num += 1
    elif v and v not in vs:
        # Case 2: build a brand-new library named after v (in the current
        # working directory), optionally matching compound names to an
        # existing indication mapping.
        new_v = v
        print("Creating new compound library {}...".format(new_v))
        print("The library will be built at {}/{}.".format(os.getcwd(), new_v))
        os.makedirs(new_v, exist_ok=True)
        os.makedirs("{}/cmpds".format(new_v), exist_ok=True)
        os.makedirs("{}/mappings".format(new_v), exist_ok=True)
        cmpd_path = "{0}/cmpds/fps-{0}/".format(new_v)
        os.makedirs(cmpd_path, exist_ok=True)
        d_map = pd.DataFrame(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])
        cid2name = {}
        cname2inds = {}
        if map_indications:
            # Pull the reference compound/indication mappings used for
            # exact-name matching
            if not os.path.exists("{}/mappings/drugbank-{}.tsv".format(pre, map_indications)):
                url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \
                      'drugbank-{}.tsv'.format(map_indications)
                dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, map_indications))
            if not os.path.exists("{}/mappings/drugbank2ctd-{}.tsv".format(pre, map_indications)):
                url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \
                      'drugbank2ctd-{}.tsv'.format(map_indications)
                dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications))
            fcm = open('{}/mappings/drugbank-{}.tsv'.format(pre, map_indications), 'r')
            cmls = fcm.readlines()
            fcm.close()
            # cid2name: reference CANDO id -> compound name (header row skipped)
            for cml in cmls[1:]:
                cls = cml.split('\t')
                cid = cls[0]
                cname = cls[2]
                cid2name[cid] = cname
            fim = open('{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications), 'r')
            imls = fim.readlines()
            fim.close()
            # cname2inds: compound name -> unique (indication name, id) pairs
            for iml in imls[1:]:
                ils = iml.split('\t')
                cid = ils[0]
                indname = ils[1]
                indid = ils[2]
                cname = cid2name[cid]
                if cname in cname2inds:
                    if (indname, indid) not in cname2inds[cname]:
                        cname2inds[cname].append((indname, indid))
                else:
                    cname2inds[cname] = [(indname, indid)]
        cmpd_num = 0
        # Create new fingerprint dict and save it to pickle for future use
        c_fps = {}
        if vect == 'int':
            with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                pickle.dump(c_fps, f)
        else:
            # Bit-vector length is encoded in the first four characters of vect
            bits = int(vect[:4])
            with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                pickle.dump(c_fps, f)
        # Create new inchi dict
        inchi_dict = {}
        if map_indications:
            # NOTE(review): foind is never explicitly closed -- relies on
            # interpreter cleanup to flush the indication mapping file.
            foind = open("{0}/mappings/inds-{0}.tsv".format(new_v), 'w')
            foind.write('CANDO_ID\tINDICATION_NAME\tMESH_ID\tINDICATION_ID\n')
            ind2id = {}
            curr_ind_id = 0
        for c in ncs.itertuples(index=False):
            try:
                if file_type == 'mol':
                    nc = Chem.MolFromMolFile("{}/{}.mol".format(cmpd_dir, c[0]))
                    name = nc.GetProp("_Name")
                elif file_type == 'smi':
                    nc = Chem.MolFromSmiles("{}".format(c[0]))
                    name = c[1]
                    nc.SetProp("_Name", name)
                # NOTE(review): unlike the other two branches, hydrogens are
                # not stripped (Chem.RemoveHs) here -- confirm intentional.
            except:
                print("{} cannot load this molecule.".format(c[0]))
                continue
            inchi_key = Chem.MolToInchiKey(nc)
            try:
                match = str(inchi_dict[inchi_key])
            except:
                match = None
            if match:
                # Duplicate within this input set -- report and skip
                print(" {} is the same as {} - {} in the library".format(name, int(match),
                                                                         d_map.loc[(d_map['CANDO_ID'] == int(match)),
                                                                                   'GENERIC_NAME'].values[0], match))
                continue
            else:
                print(" Adding compound {} - {}".format(cmpd_num, name))
                with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:
                    inchi_dict[inchi_key] = cmpd_num
                    pickle.dump(inchi_dict, f)
                d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],
                                                  columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),
                                     ignore_index=True)
                if map_indications:
                    if name in cname2inds:
                        # Write one row per mapped indication, assigning new
                        # sequential indication ids as they are first seen
                        inds = cname2inds[name]
                        for ind in inds:
                            if ind in ind2id:
                                indid = ind2id[ind]
                            else:
                                indid = curr_ind_id
                                ind2id[ind] = curr_ind_id
                                curr_ind_id += 1
                            foind.write('{}\t{}\t{}\t{}\n'.format(cmpd_num, ind[0], ind[1], indid))
                # Morgan radius from the fingerprint name, e.g. rd_ecfp4 -> radius 2
                rad = int(int(fp[7:])/2)
                if fp[3] == 'f':
                    features = True
                else:
                    features = False
                if vect == 'int':
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                else:
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                cmpd_num += 1
    elif not v:
        # Case 3: no version given -- build a default 'v0.0' library from
        # .mol files only (no SMILES path, no indication mapping).
        new_v = "v0.0"
        print("Creating new compound library {}...".format(new_v))
        cmpd_path = "{0}/cmpds/fps-{0}/".format(new_v)
        os.makedirs(cmpd_path, exist_ok=True)
        d_map = pd.DataFrame(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])
        cmpd_num = 0
        # Create new fingerprint dict and save it to pickle for future use
        c_fps = {}
        if vect=='int':
            with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'wb') as f:
                pickle.dump(c_fps, f)
        else:
            # Bit-vector length is encoded in the first four characters of vect
            bits = int(vect[:4])
            with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'wb') as f:
                pickle.dump(c_fps, f)
        # Create new inchi dict
        inchi_dict = {}
        for c in ncs.itertuples(index=False):
            try:
                nc = Chem.MolFromMolFile("{}/{}.mol".format(cmpd_dir, c[0]))
                nc = Chem.RemoveHs(nc)
            except:
                print("{} cannot load this molecule.".format(c[0]))
                continue
            name = nc.GetProp("_Name")
            inchi_key = Chem.MolToInchiKey(nc)
            try:
                match = str(inchi_dict[inchi_key])
            except:
                match = None
            if match:
                # Duplicate within this input set -- report and skip
                print(" {} is the same as {} - {} in the library".format(name, int(match),
                                                                         d_map.loc[(d_map['CANDO_ID'] == int(match)),
                                                                                   'GENERIC_NAME'].values[0], match))
                continue
            else:
                print(" Adding compound {} - {}".format(cmpd_num, name))
                with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:
                    inchi_dict[inchi_key] = cmpd_num
                    pickle.dump(inchi_dict, f)
                d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],
                                                  columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),
                                     ignore_index=True)
                # Morgan radius from the fingerprint name, e.g. rd_ecfp4 -> radius 2
                rad = int(int(fp[7:])/2)
                if fp[3] == 'f':
                    features = True
                else:
                    features = False
                if vect == 'int':
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                else:
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
                        c_fps = pickle.load(f)
                    c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)
                    with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
                        pickle.dump(c_fps, f)
                cmpd_num += 1
        os.makedirs("{}/mappings".format(new_v), exist_ok=True)
    # Write the (possibly extended) compound mapping for the new library.
    # NOTE(review): this path is relative to the working directory -- in the
    # "extend released version" branch no {new_v}/mappings dir is created
    # beforehand; confirm to_csv does not fail there.
    d_map.to_csv("{0}/mappings/cmpds-{0}.tsv".format(new_v), sep='\t', index=False, na_rep='NA')
    print("Added compounds to compound library {}.\n".format(new_v))
# Need to add functionality to handle loading a new version created by user.
def cosine_dist(A):
    """!
    Compute the pairwise cosine distance matrix for the rows of A.
    @param A ndarray: 2D array whose rows are signature vectors
    @return Returns ndarray: square matrix with entry (i, j) = 1 - cosine_similarity(row_i, row_j);
    zero-magnitude rows are treated as having similarity 0 to everything
    """
    # Dot products between every pair of rows
    similarity = np.dot(A, A.T)
    # squared magnitude of each row vector sits on the diagonal
    square_mag = np.diag(similarity)
    # inverse squared magnitude; divide-by-zero is handled just below, so
    # suppress the spurious RuntimeWarning for zero rows
    with np.errstate(divide='ignore'):
        inv_square_mag = 1 / square_mag
    # if a row doesn't occur, set its inverse magnitude to zero (instead of inf)
    inv_square_mag[np.isinf(inv_square_mag)] = 0
    # inverse of the magnitude
    inv_mag = np.sqrt(inv_square_mag)
    # cosine similarity: scale rows and columns elementwise by inverse magnitudes
    cos_sim = (similarity * inv_mag).T * inv_mag
    # vectorized 1 - sim replaces the original per-row list comprehension
    return np.asarray(1 - cos_sim)
def tanimoto_sparse(str1, str2):
    """!
    Calculate the tanimoto coefficient for a pair of sparse vectors
    @param str1 str: String of 1s and 0s representing the first compound fingerprint
    @param str2 str: String of 1s and 0s representing the second compound fingerprint
    @return Returns float
    """
    on_first = 0.0   # bits set in str1
    on_second = 0.0  # bits set in str2
    on_both = 0.0    # bits set in both fingerprints
    for idx in range(len(str1)):
        a_set = str1[idx] == '1'
        b_set = str2[idx] == '1'
        if a_set:
            on_first += 1
        if b_set:
            on_second += 1
        if a_set and b_set:
            on_both += 1
    # Two all-zero fingerprints: define the coefficient as 0 rather than 0/0
    if on_both + on_first + on_second == 0:
        return 0.000
    return float(on_both / (on_first + on_second - on_both))
def tanimoto_dense(list1, list2):
    """!
    Calculate the tanimoto coefficient for a pair of dense vectors
    @param list1 list: List of positions that have a 1 in first compound fingerprint
    @param list2 list: List of positions that have a 1 in second compound fingerprint
    @return Returns float
    """
    # Set lookup turns the original O(len1*len2) membership scan into
    # O(len1+len2); duplicates in list1 are still counted as before.
    positions2 = set(list2)
    c = [common_item for common_item in list1 if common_item in positions2]
    return float(len(c))/(len(list1) + len(list2) - len(c))
def get_fp_lig(fp):
    """!
    Download precompiled binding site ligand fingerprints using the given fingerprint method.
    @param fp str: Fingerprinting method used to compile each binding site ligand fingerprint
    @return Returns None
    """
    pre = os.path.dirname(__file__)
    out_file = '{}/v2.2+/ligs/{}.pickle'.format(pre, fp)
    if os.path.exists(out_file):
        # Already cached on disk -- nothing to fetch.
        print("{} ligand fingerprints have already been downloaded.".format(fp))
        print("This file can be found at {}".format(out_file))
    else:
        print('Downloading ligand fingerprints for {}...'.format(fp))
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/{}.pickle'.format(fp)
        dl_file(url, out_file)
        print("{} ligand fingerprints downloaded.".format(fp))
def get_data(v="v2.2", org='nrpdb', fp='rd_ecfp4', vect='int'):
    """!
    Download CANDO v2.2+ data.
    @param v str: version to use (supports v2.2 - v2.8 and test.0)
    @param org str: protein library to use ('all', 'nrpdb', 'homo_sapien', 'cryptococcus', 'test', or 'tutorial')
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @returns Returns None
    """
    # Check v and org before moving on; exit outright on a bad value
    vs = ['v2.2','v2.3','v2.4','v2.5','v2.6','v2.7','v2.8','test.0']
    orgs = ['all','nrpdb','homo_sapien','cryptococcus','test','tutorial']
    if v not in vs:
        print("{} is not a correct version.".format(v))
        sys.exit()
    if org not in orgs:
        print("{} is not a correct organism.".format(org))
        sys.exit()
    print('Downloading data for {}...'.format(v))
    # All data is cached under the installed package's data directory
    pre = os.path.dirname(__file__) + "/data/v2.2+"
    # Dirs
    os.makedirs(pre, exist_ok=True)
    os.makedirs('{}/mappings'.format(pre), exist_ok=True)
    #os.makedirs('{}/matrices'.format(pre), exist_ok=True)
    os.makedirs('{}/prots'.format(pre), exist_ok=True)
    os.makedirs('{}/cmpds'.format(pre), exist_ok=True)
    # Mappings (dl_file itself skips files already on disk)
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}.tsv'.format(v)
    dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre,v))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)
    dl_file(url, '{}/mappings/drugbank-{}-approved.tsv'.format(pre, v))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(v)
    dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre,v))
    # Compounds: per-version compound fingerprints plus the shared ligand set
    if not os.path.exists("{}/cmpds/fps-{}/{}-{}_vect.pickle".format(pre,v,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)
        dl_file(url, '{}/cmpds/fps-{}/{}-{}_vect.pickle'.format(pre,v,fp,vect))
    if not os.path.exists("{}/ligs/fps/{}-{}_vect.pickle".format(pre,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
        dl_file(url, '{}/ligs/fps/{}-{}_vect.pickle'.format(pre,fp,vect))
    # Matrices
    '''
    if matrix == 'all':
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv'
        dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv')
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv'
        dl_file(url, 'v2.0/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv')
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-human.tsv'
        dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-human.tsv')
    elif matrix == 'nrpdb':
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv'
        dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv')
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv'
        dl_file(url, 'v2.0/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv')
    elif matrix == 'human':
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-human.tsv'
        dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-human.tsv')
    '''
    print('Downloading data for {}...'.format(org))
    # Proteins: 'all' fetches every organism's COACH scores except the
    # test/tutorial sets; otherwise just the requested organism
    if org=='all':
        for o in orgs[1:]:
            if o=='test' or o=='tutorial':
                continue
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-coach.tsv'.format(o)
            dl_file(url, '{}/prots/{}-coach.tsv'.format(pre,o))
    else:
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-coach.tsv'.format(org)
        dl_file(url, '{}/prots/{}-coach.tsv'.format(pre,org))
    '''
    if not os.path.exists('v2.0/cmpds/scores/drugbank-approved-rd_ecfp4.tsv'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/cmpds/scores/drugbank-approved-rd_ecfp4.tsv.gz'
        dl_file(url, 'v2.0/cmpds/scores/drugbank-approved-rd_ecfp4.tsv.gz')
        os.chdir("v2.0/cmpds/scores")
        os.system("gunzip -f drugbank-approved-rd_ecfp4.tsv.gz")
        os.chdir("../../..")
    '''
    print('All data for {} and {} downloaded.'.format(v,org))
def clear_cache():
    """!
    Clear files in "data/" directory.
    @returns Returns None
    """
    import shutil
    pre = os.path.dirname(__file__) + "/data/"
    # Use shutil.rmtree instead of shelling out to `rm -r` -- portable,
    # and the path is never interpreted by a shell. ignore_errors keeps the
    # original best-effort behavior (a missing dir is not an error).
    shutil.rmtree(pre, ignore_errors=True)
    print("{} directory has been removed.".format(pre))
def get_tutorial():
    """!
    Download data for tutorial.
    @returns Returns None
    """
    print('Downloading data for tutorial...')
    pre = os.path.dirname(__file__) + "/data/v2.2+"
    if not os.path.exists('tutorial'):
        os.mkdir('tutorial')
    # Example matrix (rd_ecfp4 w/ 64 prots x 2,449 drugs)
    if not os.path.exists('./tutorial/tutorial_matrix-all.tsv'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial_matrix-all.tsv'
        dl_file(url, './tutorial/tutorial_matrix-all.tsv')
    if not os.path.exists('./tutorial/tutorial_matrix-approved.tsv'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial_matrix-approved.tsv'
        dl_file(url, './tutorial/tutorial_matrix-approved.tsv')
    # Mappings
    if not os.path.exists('./tutorial/cmpds-v2.2.tsv'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds-v2.2.tsv'
        dl_file(url, './tutorial/cmpds-v2.2.tsv')
    #if not os.path.exists('./tutorial/cmpds-v2.2-approved.tsv'):
    #    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds-v2.2-approved.tsv'
    #    dl_file(url, './tutorial/cmpds-v2.2-approved.tsv')
    if not os.path.exists('./tutorial/cmpds2inds-v2.2.tsv'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds2inds-v2.2.tsv'
        dl_file(url, './tutorial/cmpds2inds-v2.2.tsv')
    # Protein scores
    if not os.path.exists('{}/prots/tutorial-coach.tsv'.format(pre)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/tutorial-coach.tsv'
        dl_file(url, '{}/prots/tutorial-coach.tsv'.format(pre))
    # New compound set
    if not os.path.exists('./tutorial/tki_set-test.smi'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tki_set-test.smi'
        dl_file(url, './tutorial/tki_set-test.smi')
    # New compound
    # BUG FIX: the existence check previously tested './tutorial/lm235.mol'
    # (typo), which never matched the file actually downloaded below.
    if not os.path.exists('./tutorial/lmk235.mol'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/lmk235.mol'
        dl_file(url, './tutorial/lmk235.mol')
    # Protein subset
    if not os.path.exists('./tutorial/tutorial-bac-prots.txt'):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial-bac-prots.txt'
        dl_file(url, './tutorial/tutorial-bac-prots.txt')
    print('All data for tutorial downloaded.\n')
def get_test():
    """!
    Download data for test script.
    @returns Returns None
    """
    print('Downloading data for test...')
    pre = os.path.dirname(__file__) + "/data/v2.2+/test"
    os.makedirs(pre,exist_ok=True)
    #url = 'http://protinfo.compbio.buffalo.edu/cando/data/test/test-cmpd_scores.tsv'
    #dl_file(url, '{}/test-cmpd_scores.tsv'.format(pre))
    #url = 'http://protinfo.compbio.buffalo.edu/cando/data/test/test-prot_scores.tsv'
    #dl_file(url, '{}/test-prot_scores.tsv'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds.tsv'
    dl_file(url, '{}/test-cmpds.tsv'.format(pre))
    # Collect the per-compound .mol filenames from the mapping's first
    # column (header skipped) so dl_dir can fetch them all
    with open('{}/test-cmpds.tsv'.format(pre), 'r') as f:
        l = []
        f.readline()
        for i in f:
            i = i.split('\t')[0]
            i = "{}.mol".format(i)
            l.append(i)
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds_mol'
    out = '{}/test-cmpds_mol'.format(pre)
    dl_dir(url, out, l)
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-inds.tsv'
    dl_file(url, '{}/test-inds.tsv'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds_mol/8100.mol'
    dl_file(url, '{}/test-cmpds_mol/8100.mol'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test_set.smi'
    dl_file(url, '{}/tki_set-test.smi'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-uniprot_set.tsv'
    dl_file(url, '{}/test-uniprot_set.tsv'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/vina64x.fpt'
    dl_file(url, '{}/vina64x.fpt'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/toy64x.fpt'
    dl_file(url, '{}/toy64x.fpt'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-pathway-prot.tsv'
    dl_file(url, '{}/test-pathway-prot.tsv'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-pathway-mesh.tsv'
    dl_file(url, '{}/test-pathway-mesh.tsv'.format(pre))
    url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-new_cmpds.tsv'
    dl_file(url, '{}/test-new_cmpds.tsv'.format(pre))
    # Removed a trailing dead assignment to `url` (test-uniprot_set.tsv)
    # that had no matching dl_file call.
    print('All test data downloaded.\n')
def dl_dir(url, out, l):
    """!
    Function to recursively download a directory.
    Prints the name of the directory and a progress bar.
    @param url str: URL of the dir to be downloaded
    @param out str: Path to where the dir will be downloaded
    @param l list: List of files in dir to be downloaded
    @returns Returns None
    """
    if not os.path.exists(out):
        os.makedirs(out)
    else:
        # BUG FIX: the original returned as soon as the *first* file existed,
        # without checking the rest. Only skip the download when every
        # expected file is already present.
        for n in l:
            if not os.path.exists("{}/{}".format(out, n)):
                break
        else:
            return
    format_custom_text = progressbar.FormatCustomText(
        '%(f)s',
        dict(
            f='',
        ),
    )
    widgets = [
        format_custom_text,
        ' [', progressbar.DataSize(format='%(scaled)i files',), '] ',
        progressbar.Bar(left='[', right=']'),
        ' [', progressbar.ETA(), '] ',
    ]
    num_bars = len(l)
    bar = progressbar.ProgressBar(max_value=num_bars, widgets=widgets).start()
    i = 0
    # Fetch each file individually and write it to disk
    for n in l:
        format_custom_text.update_mapping(f=out)
        url2 = "{}/{}".format(url, n)
        r = requests.get(url2)
        out_file = "{}/{}".format(out, n)
        with open(out_file, 'wb') as f:
            f.write(r.content)
        bar.update(i)
        i += 1
    bar.finish()
def dl_file(url, out_file):
    """!
    Function to download a file.
    Prints the name of the file and a progress bar.
    @param url str: URL of the file to be downloaded
    @param out_file str: File path to where the file will be downloaded
    @returns Returns None
    """
    if os.path.exists(out_file):
        # Already downloaded -- never re-fetch
        print("{} exists.".format(out_file))
        return
    elif not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
        time.sleep(1)
    r = requests.get(url, stream=True)
    format_custom_text = progressbar.FormatCustomText(
        '%(f)s',
        dict(
            f='',
        ),
    )
    widgets = [
        format_custom_text,
        ' [', progressbar.DataSize(prefixes=('K', 'M', 'G')), '] ',
        progressbar.Bar(left='[', right=']'),
        ' [', progressbar.ETA(), '] ',
    ]
    with open(out_file, 'wb') as f:
        # NOTE(review): int() raises TypeError if the server omits
        # Content-Length -- confirm all mirrors send it.
        total_length = int(r.headers.get('content-length'))
        # Collapsed the original duplicated if/else: only chunk_size differed,
        # num_bars was computed identically in both branches.
        chunk_size = 1024 if total_length >= 1024 else 1
        num_bars = total_length / chunk_size
        bar = progressbar.ProgressBar(max_value=num_bars, widgets=widgets).start()
        i = 0
        for chunk in r.iter_content(chunk_size=chunk_size):
            format_custom_text.update_mapping(f=out_file)
            f.write(chunk)
            f.flush()
            os.fsync(f.fileno())
            bar.update(i)
            i += 1
        bar.finish()
def load_version(v='v2.3', protlib='nrpdb', i_score='CxP', approved_only=False, compute_distance=False,
                 dist_metric='cosine', protein_set='', ncpus=1):
    """!
    Directly load a pre-compiled version of CANDO.
    @param v str: version to use (supports v2.2 - v2.5)
    @param protlib str: protein library to use ('nrpdb' or 'homo_sapien')
    @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')
    @param approved_only bool: use only approved drugs to create the matrix
    @param compute_distance bool: compute distance between compounds for specified matrix
    @param dist_metric str: the distance metric to use if compute_distance=True ('cosine', 'rmsd', etc)
    @param protein_set str: path to a file containing a subset of proteins of interest
    @param ncpus int: number of cores to run on
    @return Returns CANDO object
    """
    # Make sure the mappings/fingerprints for this version are on disk
    get_data(v=v, org=protlib)
    # Matrix files are hosted separately (for now); derive the name and fetch
    app = 'approved' if approved_only else 'all'
    mat_name = 'rd_ecfp4-{}-{}-{}-int_vect-dice-{}.tsv'.format(protlib, v, app, i_score)
    dl_file('http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/matrices/{}'.format(mat_name),
            './data/v2.2+/matrices/{}'.format(mat_name))
    # Pick the compound mapping matching the approved/all choice; the matrix
    # path reuses the same "approved"/"all" tag already baked into mat_name
    if approved_only:
        cmpd_map_path = 'data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)
    else:
        cmpd_map_path = 'data/v2.2+/mappings/drugbank-{}.tsv'.format(v)
    matrix_path = 'data/v2.2+/matrices/{}'.format(mat_name)
    ind_map_path = 'data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(v)
    return CANDO(cmpd_map_path, ind_map_path, matrix=matrix_path, compound_set=app,
                 compute_distance=compute_distance, dist_metric=dist_metric, protein_set=protein_set, ncpus=ncpus)
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"progressbar.DataSize",
"math.log2",
"rdkit.DataStructs.BulkCosineSimilarity",
"time.sleep",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.metrics.roc_curve",
"sys.exit",... | [((178783, 178794), 'time.time', 'time.time', ([], {}), '()\n', (178792, 178794), False, 'import time\n'), ((181917, 181928), 'time.time', 'time.time', ([], {}), '()\n', (181926, 181928), False, 'import time\n'), ((184372, 184383), 'time.time', 'time.time', ([], {}), '()\n', (184381, 184383), False, 'import time\n'), ((185588, 185624), 'os.makedirs', 'os.makedirs', (['out_path'], {'exist_ok': '(True)'}), '(out_path, exist_ok=True)\n', (185599, 185624), False, 'import os, sys, pickle\n'), ((188749, 188779), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['scores'], {}), '(scores)\n', (188771, 188779), True, 'import pandas as pd\n'), ((189036, 189047), 'time.time', 'time.time', ([], {}), '()\n', (189045, 189047), False, 'import time\n'), ((196969, 196980), 'time.time', 'time.time', ([], {}), '()\n', (196978, 196980), False, 'import time\n'), ((197429, 197465), 'os.makedirs', 'os.makedirs', (['out_path'], {'exist_ok': '(True)'}), '(out_path, exist_ok=True)\n', (197440, 197465), False, 'import os, sys, pickle\n'), ((198880, 198910), 'rdkit.Chem.MolFromMolFile', 'Chem.MolFromMolFile', (['cmpd_file'], {}), '(cmpd_file)\n', (198899, 198910), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((198920, 198937), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['nc'], {}), '(nc)\n', (198933, 198937), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((200032, 200062), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['scores'], {}), '(scores)\n', (200054, 200062), True, 'import pandas as pd\n'), ((200319, 200330), 'time.time', 'time.time', ([], {}), '()\n', (200328, 200330), False, 'import time\n'), ((201134, 201145), 'time.time', 'time.time', ([], {}), '()\n', (201143, 201145), False, 'import time\n'), ((201252, 201297), 'pandas.read_csv', 'pd.read_csv', (['cmpd_list'], {'sep': '"""\t"""', 'header': 'None'}), "(cmpd_list, sep='\\t', header=None)\n", (201263, 201297), True, 'import pandas as pd\n'), ((215957, 215971), 'numpy.dot', 
'np.dot', (['A', 'A.T'], {}), '(A, A.T)\n', (215963, 215971), True, 'import numpy as np\n'), ((216059, 216078), 'numpy.diag', 'np.diag', (['similarity'], {}), '(similarity)\n', (216066, 216078), True, 'import numpy as np\n'), ((216320, 216343), 'numpy.sqrt', 'np.sqrt', (['inv_square_mag'], {}), '(inv_square_mag)\n', (216327, 216343), True, 'import numpy as np\n'), ((216529, 216549), 'numpy.asarray', 'np.asarray', (['cos_dist'], {}), '(cos_dist)\n', (216539, 216549), True, 'import numpy as np\n'), ((217903, 217928), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (217918, 217928), False, 'import os, sys, pickle\n'), ((219350, 219381), 'os.makedirs', 'os.makedirs', (['pre'], {'exist_ok': '(True)'}), '(pre, exist_ok=True)\n', (219361, 219381), False, 'import os, sys, pickle\n'), ((225768, 225799), 'os.makedirs', 'os.makedirs', (['pre'], {'exist_ok': '(True)'}), '(pre, exist_ok=True)\n', (225779, 225799), False, 'import os, sys, pickle\n'), ((229532, 229556), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (229546, 229556), False, 'import os, sys, pickle\n'), ((229725, 229738), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (229735, 229738), False, 'import time\n'), ((229747, 229777), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (229759, 229777), False, 'import requests\n'), ((47608, 47669), 'difflib.get_close_matches', 'difflib.get_close_matches', (['name', 'cando_drugs'], {'n': 'n', 'cutoff': '(0.5)'}), '(name, cando_drugs, n=n, cutoff=0.5)\n', (47633, 47669), False, 'import difflib\n'), ((51569, 51629), 'difflib.get_close_matches', 'difflib.get_close_matches', (['name', 'cando_inds'], {'n': 'n', 'cutoff': '(0.3)'}), '(name, cando_inds, n=n, cutoff=0.3)\n', (51594, 51629), False, 'import difflib\n'), ((54373, 54419), 'pandas.read_csv', 'pd.read_csv', (['cmpds_file'], {'sep': '"""\t"""', 'header': 'None'}), "(cmpds_file, sep='\\t', header=None)\n", (54384, 54419), True, 
'import pandas as pd\n'), ((61516, 61533), 'numpy.array', 'np.array', (['[c_sig]'], {}), '([c_sig])\n', (61524, 61533), True, 'import numpy as np\n'), ((62179, 62199), 'numpy.array', 'np.array', (['other_sigs'], {}), '(other_sigs)\n', (62187, 62199), True, 'import numpy as np\n'), ((64419, 64437), 'numpy.array', 'np.array', (['[cp_sig]'], {}), '([cp_sig])\n', (64427, 64437), True, 'import numpy as np\n'), ((65104, 65124), 'numpy.array', 'np.array', (['other_sigs'], {}), '(other_sigs)\n', (65112, 65124), True, 'import numpy as np\n'), ((67677, 67691), 'numpy.asarray', 'np.asarray', (['ca'], {}), '(ca)\n', (67687, 67691), True, 'import numpy as np\n'), ((67705, 67719), 'numpy.asarray', 'np.asarray', (['oa'], {}), '(oa)\n', (67715, 67719), True, 'import numpy as np\n'), ((136946, 136958), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (136956, 136958), True, 'import matplotlib.pyplot as plt\n'), ((137332, 137379), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], lw=lw, linestyle='--')\n", (137340, 137379), True, 'import matplotlib.pyplot as plt\n'), ((137388, 137408), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (137396, 137408), True, 'import matplotlib.pyplot as plt\n'), ((137417, 137438), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (137425, 137438), True, 'import matplotlib.pyplot as plt\n'), ((137447, 137480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (137457, 137480), True, 'import matplotlib.pyplot as plt\n'), ((137489, 137521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (137499, 137521), True, 'import matplotlib.pyplot as plt\n'), ((137530, 137577), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': "{'size': 8}"}), "(loc='lower right', prop={'size': 
8})\n", (137540, 137577), True, 'import matplotlib.pyplot as plt\n'), ((137642, 137652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (137650, 137652), True, 'import matplotlib.pyplot as plt\n'), ((178828, 178853), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (178843, 178853), False, 'import os, sys, pickle\n'), ((181385, 181399), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (181396, 181399), False, 'import os, sys, pickle\n'), ((181491, 181505), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (181502, 181505), False, 'import os, sys, pickle\n'), ((184395, 184420), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (184410, 184420), False, 'import os, sys, pickle\n'), ((187700, 187714), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (187711, 187714), False, 'import os, sys, pickle\n'), ((187806, 187820), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (187817, 187820), False, 'import os, sys, pickle\n'), ((188337, 188351), 'multiprocessing.Pool', 'mp.Pool', (['ncpus'], {}), '(ncpus)\n', (188344, 188351), True, 'import multiprocessing as mp\n'), ((189798, 189842), 'numpy.percentile', 'np.percentile', (['all_scores', 'percentile_cutoff'], {}), '(all_scores, percentile_cutoff)\n', (189811, 189842), True, 'import numpy as np\n'), ((196992, 197017), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (197007, 197017), False, 'import os, sys, pickle\n'), ((199132, 199191), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['nc', 'rad'], {'useFeatures': 'features'}), '(nc, rad, useFeatures=features)\n', (199160, 199191), False, 'from rdkit.Chem import AllChem, rdmolops\n'), ((199248, 199333), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['nc', 'rad'], {'useFeatures': 'features', 'nBits': 'bits'}), '(nc, rad, useFeatures=features, nBits=bits\n )\n', (199285, 199333), False, 'from rdkit.Chem 
import AllChem, rdmolops\n'), ((199715, 199729), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (199726, 199729), False, 'import os, sys, pickle\n'), ((201156, 201181), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (201171, 201181), False, 'import os, sys, pickle\n'), ((202325, 202362), 'os.makedirs', 'os.makedirs', (['cmpd_path'], {'exist_ok': '(True)'}), '(cmpd_path, exist_ok=True)\n', (202336, 202362), False, 'import os, sys, pickle\n'), ((216245, 216269), 'numpy.isinf', 'np.isinf', (['inv_square_mag'], {}), '(inv_square_mag)\n', (216253, 216269), True, 'import numpy as np\n'), ((217997, 218021), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (218011, 218021), False, 'import os, sys, pickle\n'), ((219120, 219130), 'sys.exit', 'sys.exit', ([], {}), '()\n', (219128, 219130), False, 'import os, sys, pickle\n'), ((219222, 219232), 'sys.exit', 'sys.exit', ([], {}), '()\n', (219230, 219232), False, 'import os, sys, pickle\n'), ((219293, 219318), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (219308, 219318), False, 'import os, sys, pickle\n'), ((223018, 223043), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (223033, 223043), False, 'import os, sys, pickle\n'), ((223302, 223327), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (223317, 223327), False, 'import os, sys, pickle\n'), ((223355, 223381), 'os.path.exists', 'os.path.exists', (['"""tutorial"""'], {}), "('tutorial')\n", (223369, 223381), False, 'import os, sys, pickle\n'), ((223391, 223411), 'os.mkdir', 'os.mkdir', (['"""tutorial"""'], {}), "('tutorial')\n", (223399, 223411), False, 'import os, sys, pickle\n'), ((223481, 223533), 'os.path.exists', 'os.path.exists', (['"""./tutorial/tutorial_matrix-all.tsv"""'], {}), "('./tutorial/tutorial_matrix-all.tsv')\n", (223495, 223533), False, 'import os, sys, pickle\n'), ((223707, 223764), 'os.path.exists', 'os.path.exists', 
(['"""./tutorial/tutorial_matrix-approved.tsv"""'], {}), "('./tutorial/tutorial_matrix-approved.tsv')\n", (223721, 223764), False, 'import os, sys, pickle\n'), ((223963, 224006), 'os.path.exists', 'os.path.exists', (['"""./tutorial/cmpds-v2.2.tsv"""'], {}), "('./tutorial/cmpds-v2.2.tsv')\n", (223977, 224006), False, 'import os, sys, pickle\n'), ((224391, 224439), 'os.path.exists', 'os.path.exists', (['"""./tutorial/cmpds2inds-v2.2.tsv"""'], {}), "('./tutorial/cmpds2inds-v2.2.tsv')\n", (224405, 224439), False, 'import os, sys, pickle\n'), ((224877, 224922), 'os.path.exists', 'os.path.exists', (['"""./tutorial/tki_set-test.smi"""'], {}), "('./tutorial/tki_set-test.smi')\n", (224891, 224922), False, 'import os, sys, pickle\n'), ((225101, 225139), 'os.path.exists', 'os.path.exists', (['"""./tutorial/lm235.mol"""'], {}), "('./tutorial/lm235.mol')\n", (225115, 225139), False, 'import os, sys, pickle\n'), ((225308, 225359), 'os.path.exists', 'os.path.exists', (['"""./tutorial/tutorial-bac-prots.txt"""'], {}), "('./tutorial/tutorial-bac-prots.txt')\n", (225322, 225359), False, 'import os, sys, pickle\n'), ((225717, 225742), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (225732, 225742), False, 'import os, sys, pickle\n'), ((228332, 228351), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (228346, 228351), False, 'import os, sys, pickle\n'), ((228361, 228377), 'os.makedirs', 'os.makedirs', (['out'], {}), '(out)\n', (228372, 228377), False, 'import os, sys, pickle\n'), ((228683, 228730), 'progressbar.DataSize', 'progressbar.DataSize', ([], {'format': '"""%(scaled)i files"""'}), "(format='%(scaled)i files')\n", (228703, 228730), False, 'import progressbar\n'), ((228747, 228783), 'progressbar.Bar', 'progressbar.Bar', ([], {'left': '"""["""', 'right': '"""]"""'}), "(left='[', right=']')\n", (228762, 228783), False, 'import progressbar\n'), ((228799, 228816), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (228814, 228816), 
False, 'import progressbar\n'), ((229056, 229074), 'requests.get', 'requests.get', (['url2'], {}), '(url2)\n', (229068, 229074), False, 'import requests\n'), ((229957, 230003), 'progressbar.DataSize', 'progressbar.DataSize', ([], {'prefixes': "('K', 'M', 'G')"}), "(prefixes=('K', 'M', 'G'))\n", (229977, 230003), False, 'import progressbar\n'), ((230019, 230055), 'progressbar.Bar', 'progressbar.Bar', ([], {'left': '"""["""', 'right': '"""]"""'}), "(left='[', right=']')\n", (230034, 230055), False, 'import progressbar\n'), ((230071, 230088), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (230086, 230088), False, 'import progressbar\n'), ((22538, 22574), 'pandas.read_csv', 'pd.read_csv', (['ddi_compounds'], {'sep': '"""\t"""'}), "(ddi_compounds, sep='\\t')\n", (22549, 22574), True, 'import pandas as pd\n'), ((23106, 23137), 'pandas.read_csv', 'pd.read_csv', (['ddi_adrs'], {'sep': '"""\t"""'}), "(ddi_adrs, sep='\\t')\n", (23117, 23137), True, 'import pandas as pd\n'), ((46888, 46935), 'pandas.read_csv', 'pd.read_csv', (['protein_map'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(protein_map, sep='\\t', index_col=0)\n", (46899, 46935), True, 'import pandas as pd\n'), ((59313, 59338), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (59328, 59338), False, 'import os, sys, pickle\n'), ((74695, 74737), 'os.path.exists', 'os.path.exists', (['"""./results_analysed_named"""'], {}), "('./results_analysed_named')\n", (74709, 74737), False, 'import os, sys, pickle\n'), ((74842, 74883), 'os.system', 'os.system', (['"""mkdir results_analysed_named"""'], {}), "('mkdir results_analysed_named')\n", (74851, 74883), False, 'import os, sys, pickle\n'), ((74899, 74930), 'os.path.exists', 'os.path.exists', (['"""./raw_results"""'], {}), "('./raw_results')\n", (74913, 74930), False, 'import os, sys, pickle\n'), ((75024, 75054), 'os.system', 'os.system', (['"""mkdir raw_results"""'], {}), "('mkdir raw_results')\n", (75033, 75054), False, 'import os, sys, 
pickle\n'), ((91721, 91732), 'numpy.sum', 'np.sum', (['dcg'], {}), '(dcg)\n', (91727, 91732), True, 'import numpy as np\n'), ((93559, 93602), 'os.path.exists', 'os.path.exists', (['"""./results_analysed_named/"""'], {}), "('./results_analysed_named/')\n", (93573, 93602), False, 'import os, sys, pickle\n'), ((93616, 93657), 'os.system', 'os.system', (['"""mkdir results_analysed_named"""'], {}), "('mkdir results_analysed_named')\n", (93625, 93657), False, 'import os, sys, pickle\n'), ((94320, 94352), 'os.path.exists', 'os.path.exists', (['"""./raw_results/"""'], {}), "('./raw_results/')\n", (94334, 94352), False, 'import os, sys, pickle\n'), ((94366, 94396), 'os.system', 'os.system', (['"""mkdir raw_results"""'], {}), "('mkdir raw_results')\n", (94375, 94396), False, 'import os, sys, pickle\n'), ((96594, 96658), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sigs', 'inds', 'ids'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(sigs, inds, ids, test_size=0.2, random_state=1)\n', (96610, 96658), False, 'from sklearn.model_selection import train_test_split\n'), ((101640, 101682), 'os.path.exists', 'os.path.exists', (['"""./results_analysed_named"""'], {}), "('./results_analysed_named')\n", (101654, 101682), False, 'import os, sys, pickle\n'), ((101787, 101828), 'os.system', 'os.system', (['"""mkdir results_analysed_named"""'], {}), "('mkdir results_analysed_named')\n", (101796, 101828), False, 'import os, sys, pickle\n'), ((101844, 101875), 'os.path.exists', 'os.path.exists', (['"""./raw_results"""'], {}), "('./raw_results')\n", (101858, 101875), False, 'import os, sys, pickle\n'), ((101969, 101999), 'os.system', 'os.system', (['"""mkdir raw_results"""'], {}), "('mkdir raw_results')\n", (101978, 101999), False, 'import os, sys, pickle\n'), ((112210, 112252), 'os.path.exists', 'os.path.exists', (['"""./results_analysed_named"""'], {}), "('./results_analysed_named')\n", (112224, 112252), False, 'import os, sys, pickle\n'), ((112357, 112398), 
'os.system', 'os.system', (['"""mkdir results_analysed_named"""'], {}), "('mkdir results_analysed_named')\n", (112366, 112398), False, 'import os, sys, pickle\n'), ((112414, 112445), 'os.path.exists', 'os.path.exists', (['"""./raw_results"""'], {}), "('./raw_results')\n", (112428, 112445), False, 'import os, sys, pickle\n'), ((112539, 112569), 'os.system', 'os.system', (['"""mkdir raw_results"""'], {}), "('mkdir raw_results')\n", (112548, 112569), False, 'import os, sys, pickle\n'), ((136673, 136711), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['truth', 'scores'], {}), '(truth, scores)\n', (136696, 136711), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((136741, 136765), 'sklearn.metrics.roc_curve', 'roc_curve', (['truth', 'scores'], {}), '(truth, scores)\n', (136750, 136765), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((136785, 136813), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'scores'], {}), '(truth, scores)\n', (136798, 136813), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((137607, 137633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {'dpi': '(300)'}), '(save, dpi=300)\n', (137618, 137633), True, 'import matplotlib.pyplot as plt\n'), ((180533, 180543), 'sys.exit', 'sys.exit', ([], {}), '()\n', (180541, 180543), False, 'import os, sys, pickle\n'), ((181632, 181642), 'sys.exit', 'sys.exit', ([], {}), '()\n', (181640, 181642), False, 'import os, sys, pickle\n'), ((203663, 203677), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (203674, 203677), False, 'import os, sys, pickle\n'), ((204302, 204324), 'rdkit.Chem.MolToInchiKey', 'Chem.MolToInchiKey', (['nc'], {}), '(nc)\n', (204320, 204324), 
False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((206560, 206593), 'os.makedirs', 'os.makedirs', (['new_v'], {'exist_ok': '(True)'}), '(new_v, exist_ok=True)\n', (206571, 206593), False, 'import os, sys, pickle\n'), ((206782, 206819), 'os.makedirs', 'os.makedirs', (['cmpd_path'], {'exist_ok': '(True)'}), '(cmpd_path, exist_ok=True)\n', (206793, 206819), False, 'import os, sys, pickle\n'), ((206836, 206921), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']"}), "(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']\n )\n", (206848, 206921), True, 'import pandas as pd\n'), ((228862, 228922), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'num_bars', 'widgets': 'widgets'}), '(max_value=num_bars, widgets=widgets)\n', (228885, 228922), False, 'import progressbar\n'), ((42864, 42884), 'numpy.array', 'np.array', (['signatures'], {}), '(signatures)\n', (42872, 42884), True, 'import numpy as np\n'), ((62530, 62593), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['ca', 'oa', 'self.dist_metric'], {'n_jobs': 'self.ncpus'}), '(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n', (62548, 62593), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((65455, 65518), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['ca', 'oa', 'self.dist_metric'], {'n_jobs': 'self.ncpus'}), '(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n', (65473, 65518), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((68050, 68113), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['ca', 'oa', 'self.dist_metric'], {'n_jobs': 'self.ncpus'}), '(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n', (68068, 68113), False, 'from sklearn.metrics import pairwise_distances, 
pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((76538, 76597), 'os.path.exists', 'os.path.exists', (['"""v2.0/mappings/group_disease-top_level.tsv"""'], {}), "('v2.0/mappings/group_disease-top_level.tsv')\n", (76552, 76597), False, 'import os, sys, pickle\n'), ((99156, 99175), 'numpy.mean', 'np.mean', (['c_clusters'], {}), '(c_clusters)\n', (99163, 99175), True, 'import numpy as np\n'), ((99226, 99247), 'numpy.median', 'np.median', (['c_clusters'], {}), '(c_clusters)\n', (99235, 99247), True, 'import numpy as np\n'), ((99306, 99324), 'numpy.min', 'np.min', (['c_clusters'], {}), '(c_clusters)\n', (99312, 99324), True, 'import numpy as np\n'), ((99326, 99344), 'numpy.max', 'np.max', (['c_clusters'], {}), '(c_clusters)\n', (99332, 99344), True, 'import numpy as np\n'), ((125523, 125555), 'os.path.exists', 'os.path.exists', (['"""./raw_results/"""'], {}), "('./raw_results/')\n", (125537, 125555), False, 'import os, sys, pickle\n'), ((125573, 125603), 'os.system', 'os.system', (['"""mkdir raw_results"""'], {}), "('mkdir raw_results')\n", (125582, 125603), False, 'import os, sys, pickle\n'), ((125623, 125666), 'os.path.exists', 'os.path.exists', (['"""./results_analysed_named/"""'], {}), "('./results_analysed_named/')\n", (125637, 125666), False, 'import os, sys, pickle\n'), ((125684, 125725), 'os.system', 'os.system', (['"""mkdir results_analysed_named"""'], {}), "('mkdir results_analysed_named')\n", (125693, 125725), False, 'import os, sys, pickle\n'), ((128917, 128976), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'random_state': 'seed'}), '(n_estimators=100, random_state=seed)\n', (128939, 128976), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((131356, 131382), 'numpy.array', 'np.array', (['(pos[0] + negs[0])'], {}), '(pos[0] + negs[0])\n', (131364, 131382), True, 'import numpy as np\n'), ((131414, 131440), 'numpy.array', 'np.array', (['(pos[1] 
+ negs[1])'], {}), '(pos[1] + negs[1])\n', (131422, 131440), True, 'import numpy as np\n'), ((133302, 133328), 'numpy.array', 'np.array', (['(pos[0] + negs[0])'], {}), '(pos[0] + negs[0])\n', (133310, 133328), True, 'import numpy as np\n'), ((133360, 133386), 'numpy.array', 'np.array', (['(pos[1] + negs[1])'], {}), '(pos[1] + negs[1])\n', (133368, 133386), True, 'import numpy as np\n'), ((154898, 154924), 'decimal.Decimal', 'Decimal', (['sorted_x[i][1][1]'], {}), '(sorted_x[i][1][1])\n', (154905, 154924), False, 'from decimal import Decimal\n'), ((158542, 158568), 'decimal.Decimal', 'Decimal', (['sorted_x[i][1][1]'], {}), '(sorted_x[i][1][1])\n', (158549, 158568), False, 'from decimal import Decimal\n'), ((160288, 160310), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (160307, 160310), False, 'import operator\n'), ((163081, 163103), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (163100, 163103), False, 'import operator\n'), ((177069, 177084), 'numpy.average', 'np.average', (['vec'], {}), '(vec)\n', (177079, 177084), True, 'import numpy as np\n'), ((204147, 204164), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['nc'], {}), '(nc)\n', (204160, 204164), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((205072, 205098), 'pickle.dump', 'pickle.dump', (['inchi_dict', 'f'], {}), '(inchi_dict, f)\n', (205083, 205098), False, 'import os, sys, pickle\n'), ((205144, 205263), 'pandas.DataFrame', 'pd.DataFrame', (["[[cmpd_num, 'NA', name, 'other']]"], {'columns': "['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']"}), "([[cmpd_num, 'NA', name, 'other']], columns=['CANDO_ID',\n 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])\n", (205156, 205263), True, 'import pandas as pd\n'), ((205706, 205765), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['nc', 'rad'], {'useFeatures': 'features'}), '(nc, rad, useFeatures=features)\n', (205734, 205765), False, 'from rdkit.Chem import AllChem, 
rdmolops\n'), ((206120, 206205), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['nc', 'rad'], {'useFeatures': 'features', 'nBits': 'bits'}), '(nc, rad, useFeatures=features, nBits=bits\n )\n', (206157, 206205), False, 'from rdkit.Chem import AllChem, rdmolops\n'), ((209879, 209901), 'rdkit.Chem.MolToInchiKey', 'Chem.MolToInchiKey', (['nc'], {}), '(nc)\n', (209897, 209901), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((212600, 212637), 'os.makedirs', 'os.makedirs', (['cmpd_path'], {'exist_ok': '(True)'}), '(cmpd_path, exist_ok=True)\n', (212611, 212637), False, 'import os, sys, pickle\n'), ((212654, 212739), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']"}), "(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']\n )\n", (212666, 212739), True, 'import pandas as pd\n'), ((229646, 229671), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (229661, 229671), False, 'import os, sys, pickle\n'), ((229694, 229719), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (229709, 229719), False, 'import os, sys, pickle\n'), ((230414, 230474), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'num_bars', 'widgets': 'widgets'}), '(max_value=num_bars, widgets=widgets)\n', (230437, 230474), False, 'import progressbar\n'), ((30637, 30650), 'numpy.array', 'np.array', (['snp'], {}), '(snp)\n', (30645, 30650), True, 'import numpy as np\n'), ((36803, 36823), 'numpy.array', 'np.array', (['signatures'], {}), '(signatures)\n', (36811, 36823), True, 'import numpy as np\n'), ((43277, 43304), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (43287, 43304), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((91655, 91671), 'math.log2', 'math.log2', (['(i + 1)'], {}), '(i + 1)\n', (91664, 91671), False, 'import 
math\n'), ((93267, 93282), 'numpy.mean', 'np.mean', (['acc[k]'], {}), '(acc[k])\n', (93274, 93282), True, 'import numpy as np\n'), ((96347, 96367), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (96350, 96367), False, 'from sklearn.decomposition import PCA\n'), ((96877, 96911), 'sklearn.cluster.KMeans', 'KMeans', (['n_clusters'], {'random_state': '(1)'}), '(n_clusters, random_state=1)\n', (96883, 96911), False, 'from sklearn.cluster import KMeans\n'), ((127391, 127405), 'random.seed', 'random.seed', (['s'], {}), '(s)\n', (127402, 127405), False, 'import random\n'), ((127426, 127450), 'random.shuffle', 'random.shuffle', (['shuffled'], {}), '(shuffled)\n', (127440, 127450), False, 'import random\n'), ((127560, 127574), 'random.seed', 'random.seed', (['s'], {}), '(s)\n', (127571, 127574), False, 'import random\n'), ((127595, 127619), 'random.shuffle', 'random.shuffle', (['shuffled'], {}), '(shuffled)\n', (127609, 127619), False, 'import random\n'), ((129093, 129158), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '"""scale"""', 'degree': '(3)', 'random_state': 'seed'}), "(kernel='rbf', gamma='scale', degree=3, random_state=seed)\n", (129100, 129158), False, 'from sklearn import svm\n'), ((205657, 205671), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (205668, 205671), False, 'import os, sys, pickle\n'), ((205876, 205897), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (205887, 205897), False, 'import os, sys, pickle\n'), ((206071, 206085), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (206082, 206085), False, 'import os, sys, pickle\n'), ((206311, 206332), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (206322, 206332), False, 'import os, sys, pickle\n'), ((206531, 206542), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (206540, 206542), False, 'import os, sys, pickle\n'), ((208847, 208868), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, 
f)\n', (208858, 208868), False, 'import os, sys, pickle\n'), ((209018, 209039), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (209029, 209039), False, 'import os, sys, pickle\n'), ((210650, 210676), 'pickle.dump', 'pickle.dump', (['inchi_dict', 'f'], {}), '(inchi_dict, f)\n', (210661, 210676), False, 'import os, sys, pickle\n'), ((210722, 210841), 'pandas.DataFrame', 'pd.DataFrame', (["[[cmpd_num, 'NA', name, 'other']]"], {'columns': "['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']"}), "([[cmpd_num, 'NA', name, 'other']], columns=['CANDO_ID',\n 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])\n", (210734, 210841), True, 'import pandas as pd\n'), ((211813, 211872), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['nc', 'rad'], {'useFeatures': 'features'}), '(nc, rad, useFeatures=features)\n', (211841, 211872), False, 'from rdkit.Chem import AllChem, rdmolops\n'), ((212190, 212275), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['nc', 'rad'], {'useFeatures': 'features', 'nBits': 'bits'}), '(nc, rad, useFeatures=features, nBits=bits\n )\n', (212227, 212275), False, 'from rdkit.Chem import AllChem, rdmolops\n'), ((213577, 213599), 'rdkit.Chem.MolToInchiKey', 'Chem.MolToInchiKey', (['nc'], {}), '(nc)\n', (213595, 213599), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((15182, 15192), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15190, 15192), False, 'import os, sys, pickle\n'), ((31005, 31032), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (31015, 31032), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((37159, 37186), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (37169, 37186), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((43437, 43541), 'sklearn.metrics.pairwise_distances_chunked', 
'pairwise_distances_chunked', (['snp'], {'metric': 'self.dist_metric', 'force_all_finite': '(False)', 'n_jobs': 'self.ncpus'}), '(snp, metric=self.dist_metric, force_all_finite=\n False, n_jobs=self.ncpus)\n', (43463, 43541), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((44027, 44068), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {'checks': '(False)'}), '(distance_matrix, checks=False)\n', (44037, 44068), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((62377, 62398), 'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (62384, 62398), True, 'import numpy as np\n'), ((65302, 65323), 'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (65309, 65323), True, 'import numpy as np\n'), ((67897, 67918), 'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (67904, 67918), True, 'import numpy as np\n'), ((129436, 129491), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""poly"""', 'gamma': '"""scale"""', 'degree': '(2)'}), "(kernel='poly', gamma='scale', degree=2)\n", (129451, 129491), False, 'from sklearn import svm\n'), ((131985, 132004), 'numpy.array', 'np.array', (['[eps_pos]'], {}), '([eps_pos])\n', (131993, 132004), True, 'import numpy as np\n'), ((132055, 132074), 'numpy.array', 'np.array', (['[eps_neg]'], {}), '([eps_neg])\n', (132063, 132074), True, 'import numpy as np\n'), ((132143, 132160), 'numpy.array', 'np.array', (['[c.sig]'], {}), '([c.sig])\n', (132151, 132160), True, 'import numpy as np\n'), ((132211, 132235), 'numpy.array', 'np.array', (['[test_neg.sig]'], {}), '([test_neg.sig])\n', (132219, 132235), True, 'import numpy as np\n'), ((193041, 193084), 'rdkit.DataStructs.BulkDiceSimilarity', 'DataStructs.BulkDiceSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (193071, 193084), False, 'from rdkit import Chem, DataStructs, 
RDConfig\n'), ((211764, 211778), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (211775, 211778), False, 'import os, sys, pickle\n'), ((211983, 212004), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (211994, 212004), False, 'import os, sys, pickle\n'), ((212141, 212155), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (212152, 212155), False, 'import os, sys, pickle\n'), ((212381, 212402), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (212392, 212402), False, 'import os, sys, pickle\n'), ((212974, 212995), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (212985, 212995), False, 'import os, sys, pickle\n'), ((213143, 213164), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (213154, 213164), False, 'import os, sys, pickle\n'), ((213383, 213400), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['nc'], {}), '(nc)\n', (213396, 213400), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((214348, 214374), 'pickle.dump', 'pickle.dump', (['inchi_dict', 'f'], {}), '(inchi_dict, f)\n', (214359, 214374), False, 'import os, sys, pickle\n'), ((214420, 214539), 'pandas.DataFrame', 'pd.DataFrame', (["[[cmpd_num, 'NA', name, 'other']]"], {'columns': "['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']"}), "([[cmpd_num, 'NA', name, 'other']], columns=['CANDO_ID',\n 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])\n", (214432, 214539), True, 'import pandas as pd\n'), ((214995, 215054), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['nc', 'rad'], {'useFeatures': 'features'}), '(nc, rad, useFeatures=features)\n', (215023, 215054), False, 'from rdkit.Chem import AllChem, rdmolops\n'), ((215372, 215457), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['nc', 'rad'], {'useFeatures': 'features', 'nBits': 'bits'}), '(nc, rad, useFeatures=features, nBits=bits\n )\n', (215409, 215457), False, 'from rdkit.Chem import 
AllChem, rdmolops\n'), ((37319, 37414), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['snp'], {'metric': 'self.dist_metric', 'force_all_finite': '(False)', 'n_jobs': 'self.ncpus'}), '(snp, metric=self.dist_metric, force_all_finite=False,\n n_jobs=self.ncpus)\n', (37337, 37414), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\n'), ((37682, 37723), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {'checks': '(False)'}), '(distance_matrix, checks=False)\n', (37692, 37723), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((44215, 44256), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {'checks': '(False)'}), '(distance_matrix, checks=False)\n', (44225, 44256), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((129597, 129668), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'solver': '"""newton-cg"""', 'random_state': 'seed'}), "(penalty='l2', solver='newton-cg', random_state=seed)\n", (129615, 129668), False, 'from sklearn.linear_model import LogisticRegression\n'), ((134038, 134057), 'numpy.array', 'np.array', (['[eps_pos]'], {}), '([eps_pos])\n', (134046, 134057), True, 'import numpy as np\n'), ((134112, 134136), 'numpy.array', 'np.array', (['[test_neg.sig]'], {}), '([test_neg.sig])\n', (134120, 134136), True, 'import numpy as np\n'), ((134213, 134230), 'numpy.array', 'np.array', (['[c.sig]'], {}), '([c.sig])\n', (134221, 134230), True, 'import numpy as np\n'), ((134285, 134304), 'numpy.array', 'np.array', (['[inv.sig]'], {}), '([inv.sig])\n', (134293, 134304), True, 'import numpy as np\n'), ((190416, 190459), 'rdkit.DataStructs.BulkDiceSimilarity', 'DataStructs.BulkDiceSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (190446, 190459), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((191016, 191062), 
'scipy.stats.percentileofscore', 'stats.percentileofscore', (['all_scores', 'temp_c[1]'], {}), '(all_scores, temp_c[1])\n', (191039, 191062), False, 'from scipy import stats\n'), ((193155, 193202), 'rdkit.DataStructs.BulkTanimotoSimilarity', 'DataStructs.BulkTanimotoSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (193189, 193202), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((214946, 214960), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (214957, 214960), False, 'import os, sys, pickle\n'), ((215165, 215186), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (215176, 215186), False, 'import os, sys, pickle\n'), ((215323, 215337), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (215334, 215337), False, 'import os, sys, pickle\n'), ((215563, 215584), 'pickle.dump', 'pickle.dump', (['c_fps', 'f'], {}), '(c_fps, f)\n', (215574, 215584), False, 'import os, sys, pickle\n'), ((63114, 63130), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (63124, 63130), False, 'import math\n'), ((66064, 66080), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (66074, 66080), False, 'import math\n'), ((89645, 89661), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (89655, 89661), False, 'import math\n'), ((169365, 169381), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (169375, 169381), False, 'import math\n'), ((190543, 190590), 'rdkit.DataStructs.BulkTanimotoSimilarity', 'DataStructs.BulkTanimotoSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (190577, 190590), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((192299, 192316), 'numpy.mean', 'np.mean', (['li_score'], {}), '(li_score)\n', (192306, 192316), True, 'import numpy as np\n'), ((193272, 193317), 'rdkit.DataStructs.BulkCosineSimilarity', 'DataStructs.BulkCosineSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (193304, 193317), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((194069, 194089), 'numpy.mean', 
'np.mean', (['temp_scores'], {}), '(temp_scores)\n', (194076, 194089), True, 'import numpy as np\n'), ((29596, 29612), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (29606, 29612), False, 'import math\n'), ((31491, 31524), 'scipy.spatial.distance.cdist', 'cdist', (['[snp[i]]', 'snp', 'dist_metric'], {}), '([snp[i]], snp, dist_metric)\n', (31496, 31524), False, 'from scipy.spatial.distance import squareform, cdist\n'), ((41222, 41238), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (41232, 41238), False, 'import math\n'), ((43141, 43162), 'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (43148, 43162), True, 'import numpy as np\n'), ((68733, 68749), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (68743, 68749), False, 'import math\n'), ((165954, 165970), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (165964, 165970), False, 'import math\n'), ((190673, 190718), 'rdkit.DataStructs.BulkCosineSimilarity', 'DataStructs.BulkCosineSimilarity', (['c_fps[c]', 'x'], {}), '(c_fps[c], x)\n', (190705, 190718), False, 'from rdkit import Chem, DataStructs, RDConfig\n'), ((192344, 192360), 'numpy.isnan', 'np.isnan', (['temp_p'], {}), '(temp_p)\n', (192352, 192360), True, 'import numpy as np\n'), ((192548, 192567), 'numpy.median', 'np.median', (['li_score'], {}), '(li_score)\n', (192557, 192567), True, 'import numpy as np\n'), ((193591, 193634), 'scipy.stats.percentileofscore', 'stats.percentileofscore', (['all_scores', 'temp_c'], {}), '(all_scores, temp_c)\n', (193614, 193634), False, 'from scipy import stats\n'), ((194117, 194133), 'numpy.isnan', 'np.isnan', (['temp_c'], {}), '(temp_c)\n', (194125, 194133), True, 'import numpy as np\n'), ((194321, 194343), 'numpy.median', 'np.median', (['temp_scores'], {}), '(temp_scores)\n', (194330, 194343), True, 'import numpy as np\n'), ((35863, 35885), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (35882, 35885), False, 'import operator\n'), ((37080, 37101), 
'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (37087, 37101), True, 'import numpy as np\n'), ((74559, 74575), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (74569, 74575), False, 'import math\n'), ((92486, 92502), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (92496, 92502), False, 'import math\n'), ((101504, 101520), 'math.isnan', 'math.isnan', (['x[1]'], {}), '(x[1])\n', (101514, 101520), False, 'import math\n'), ((192595, 192611), 'numpy.isnan', 'np.isnan', (['temp_p'], {}), '(temp_p)\n', (192603, 192611), True, 'import numpy as np\n'), ((194371, 194387), 'numpy.isnan', 'np.isnan', (['temp_c'], {}), '(temp_c)\n', (194379, 194387), True, 'import numpy as np\n')] |
import os
import os.path as osp
import sys
import pdb
import argparse
import librosa
import numpy as np
from tqdm import tqdm
import h5py
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
import torchvision
from data.stereo_dataset import generate_spectrogram
from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init
def audio_normalize(samples, desired_rms=0.1, eps=1e-4):
    """Rescale *samples* so their RMS level equals *desired_rms*.

    The measured RMS is floored at *eps* so near-silent input does not
    trigger a division by (almost) zero.

    Returns a tuple ``(normalizer, scaled_samples)`` where ``normalizer``
    is ``rms / desired_rms`` — multiplying the scaled samples back by the
    original RMS ratio undoes the normalization.
    """
    rms_floor = np.maximum(eps, np.sqrt(np.mean(np.square(samples))))
    scaled = samples * (desired_rms / rms_floor)
    return rms_floor / desired_rms, scaled
def main():
    """Spatialize input clips into binaural audio with a trained model.

    Reads test clip paths from an HDF5 file (``opt.hdf5FolderPath``), runs
    a ResNet-18 visual backbone plus an audio U-Net (optionally a fusion
    head: AssoConv or APNet) over sliding windows of each clip,
    reconstructs left/right waveforms via ISTFT, and writes
    predicted/mixed/input WAVs under ``opt.output_dir_root/<clip id>``.
    """
    #load test arguments
    opt = TestOptions().parse()
    opt.device = torch.device("cuda")
    ## build network
    # visual net: ImageNet-pretrained ResNet-18 wrapped by the chosen model class
    original_resnet = torchvision.models.resnet18(pretrained=True)
    if opt.visual_model == 'VisualNet':
        net_visual = VisualNet(original_resnet)
    elif opt.visual_model == 'VisualNetDilated':
        net_visual = VisualNetDilated(original_resnet)
    else:
        raise TypeError("please input correct visual model type")
    if len(opt.weights_visual) > 0:
        print('Loading weights for visual stream')
        net_visual.load_state_dict(torch.load(opt.weights_visual), strict=True)
    # audio net: U-Net that predicts binaural spectrograms from the mixture
    net_audio = AudioNet(
        ngf=opt.unet_ngf,
        input_nc=opt.unet_input_nc,
        output_nc=opt.unet_output_nc,
    )
    net_audio.apply(weights_init)
    if len(opt.weights_audio) > 0:
        print('Loading weights for audio stream')
        net_audio.load_state_dict(torch.load(opt.weights_audio), strict=True)
    # fusion net (optional third head combining audio and visual features)
    if opt.fusion_model == 'none':
        net_fusion = None
    elif opt.fusion_model == 'AssoConv':
        net_fusion = AssoConv()
    elif opt.fusion_model == 'APNet':
        net_fusion = APNet()
    else:
        raise TypeError("Please input correct fusion model type")
    if net_fusion is not None and len(opt.weights_fusion) > 0:
        net_fusion.load_state_dict(torch.load(opt.weights_fusion), strict=True)
    # move all networks to the device and switch to inference mode
    net_visual.to(opt.device)
    net_audio.to(opt.device)
    net_visual.eval()
    net_audio.eval()
    if net_fusion is not None:
        net_fusion.to(opt.device)
        net_fusion.eval()
    test_h5_path = opt.hdf5FolderPath
    print("---Testing---: ", test_h5_path)
    testf = h5py.File(test_h5_path, 'r')
    audio_list = testf['audio'][:]
    # ensure output dir
    if not osp.exists(opt.output_dir_root):
        os.mkdir(opt.output_dir_root)
    for audio_file in tqdm(audio_list):
        # h5py stores the paths as bytes; the frame directory mirrors the
        # audio layout with 'audio_resave' replaced by 'frames'
        audio_file = bytes.decode(audio_file)
        video_path = audio_file.replace('audio_resave', 'frames')[:-4]
        input_audio_path = audio_file
        video_frame_path = video_path
        audio_id = audio_file.split('/')[-1][:-4]
        cur_output_dir_root = os.path.join(opt.output_dir_root, audio_id)
        #load the audio to perform separation
        # assumes each clip is 2-channel, i.e. audio has shape (2, n_samples)
        # (indexing audio[0,:] / audio[1,:] below requires it) — TODO confirm
        audio, audio_rate = librosa.load(input_audio_path, sr=opt.audio_sampling_rate, mono=False)
        audio_channel1 = audio[0,:]
        audio_channel2 = audio[1,:]
        #define the transformation to perform on visual frames
        # (224x448 resize + ImageNet normalization)
        vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
        vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
        vision_transform = transforms.Compose(vision_transform_list)
        #perform spatialization over the whole audio using a sliding window approach
        overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated
        binaural_audio = np.zeros((audio.shape))
        #perform spatialization over the whole spectrogram in a siliding-window fashion
        sliding_window_start = 0
        data = {}  # NOTE(review): never used below — dead variable
        samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
        while sliding_window_start + samples_per_window < audio.shape[-1]:
            sliding_window_end = sliding_window_start + samples_per_window
            # normalize the window to a fixed RMS; 'normalizer' undoes it later
            normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])
            audio_segment_channel1 = audio_segment[0,:]
            audio_segment_channel2 = audio_segment[1,:]
            audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
            # difference and mixture spectrograms are the network inputs
            audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
            audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
            #get the frame index for current window
            # (frames presumably extracted at 10 fps; names are '<index>.jpg' — verify)
            frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))
            image = Image.open(os.path.join(video_frame_path, str(frame_index) + '.jpg')).convert('RGB')
            #image = image.transpose(Image.FLIP_LEFT_RIGHT)
            frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
            # data to device
            audio_diff = audio_diff.to(opt.device)
            audio_mix = audio_mix.to(opt.device)
            frame = frame.to(opt.device)
            vfeat = net_visual(frame)
            if net_fusion is not None:
                upfeatures, output = net_audio(audio_diff, audio_mix, vfeat, return_upfeatures=True)
                output.update(net_fusion(audio_mix, vfeat, upfeatures))
            else:
                output = net_audio(audio_diff, audio_mix, vfeat)
            #ISTFT to convert back to audio
            if opt.use_fusion_pred:
                # fusion head predicts left/right complex spectrograms directly
                # (channel 0 = real part, channel 1 = imaginary part)
                pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()
                pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]
                reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
                pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()
                pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]
                reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
            else:
                # base head predicts the (left - right) difference spectrogram;
                # recover the channels from the mono mix and the difference
                predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
                reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
                reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
                reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
                reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
            # undo the per-window RMS normalization and stack into (2, n) shape
            reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
            # accumulate overlapping windows; divided by overlap_count at the end
            binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural
            overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1
            sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
        #deal with the last segment
        # (same pipeline as above, applied to the final samples_per_window samples)
        normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])
        audio_segment_channel1 = audio_segment[0,:]
        audio_segment_channel2 = audio_segment[1,:]
        audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
        audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
        #get the frame index for last window
        frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))
        image = Image.open(os.path.join(video_frame_path, str(frame_index) + '.jpg')).convert('RGB')
        #image = image.transpose(Image.FLIP_LEFT_RIGHT)
        frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
        # data to device
        audio_diff = audio_diff.to(opt.device)
        audio_mix = audio_mix.to(opt.device)
        frame = frame.to(opt.device)
        vfeat = net_visual(frame)
        if net_fusion is not None:
            upfeatures, output = net_audio(audio_diff, audio_mix, vfeat, return_upfeatures=True)
            output.update(net_fusion(audio_mix, vfeat, upfeatures))
        else:
            output = net_audio(audio_diff, audio_mix, vfeat)
        #ISTFT to convert back to audio
        if opt.use_fusion_pred:
            pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()
            pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]
            reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
            pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()
            pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]
            reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
        else:
            predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
            reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
            reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
            reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
            reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
        reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
        #add the spatialized audio to reconstructed_binaural
        binaural_audio[:,-samples_per_window:] = binaural_audio[:,-samples_per_window:] + reconstructed_binaural
        overlap_count[:,-samples_per_window:] = overlap_count[:,-samples_per_window:] + 1
        #divide aggregated predicted audio by their corresponding counts
        predicted_binaural_audio = np.divide(binaural_audio, overlap_count)
        #check output directory
        if not os.path.isdir(cur_output_dir_root):
            os.mkdir(cur_output_dir_root)
        mixed_mono = (audio_channel1 + audio_channel2) / 2
        # NOTE(review): librosa.output.write_wav was removed in librosa >= 0.8;
        # this script assumes librosa < 0.8 (otherwise soundfile.write is needed) — confirm pinned version
        librosa.output.write_wav(os.path.join(cur_output_dir_root, 'predicted_binaural.wav'), predicted_binaural_audio, opt.audio_sampling_rate)
        librosa.output.write_wav(os.path.join(cur_output_dir_root, 'mixed_mono.wav'), mixed_mono, opt.audio_sampling_rate)
        librosa.output.write_wav(os.path.join(cur_output_dir_root, 'input_binaural.wav'), audio, opt.audio_sampling_rate)
if __name__ == '__main__':
    main()
| [
"librosa.istft",
"torchvision.models.resnet18",
"models.networks.VisualNet",
"numpy.divide",
"librosa.load",
"os.path.exists",
"numpy.mean",
"models.networks.VisualNetDilated",
"models.networks.APNet",
"os.path.isdir",
"os.mkdir",
"models.networks.AudioNet",
"torchvision.transforms.ToTensor"... | [((737, 757), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (749, 757), False, 'import torch\n'), ((819, 863), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (846, 863), False, 'import torchvision\n'), ((1333, 1422), 'models.networks.AudioNet', 'AudioNet', ([], {'ngf': 'opt.unet_ngf', 'input_nc': 'opt.unet_input_nc', 'output_nc': 'opt.unet_output_nc'}), '(ngf=opt.unet_ngf, input_nc=opt.unet_input_nc, output_nc=opt.\n unet_output_nc)\n', (1341, 1422), False, 'from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init\n'), ((2374, 2402), 'h5py.File', 'h5py.File', (['test_h5_path', '"""r"""'], {}), "(test_h5_path, 'r')\n", (2383, 2402), False, 'import h5py\n'), ((2568, 2584), 'tqdm.tqdm', 'tqdm', (['audio_list'], {}), '(audio_list)\n', (2572, 2584), False, 'from tqdm import tqdm\n'), ((925, 951), 'models.networks.VisualNet', 'VisualNet', (['original_resnet'], {}), '(original_resnet)\n', (934, 951), False, 'from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init\n'), ((2474, 2505), 'os.path.exists', 'osp.exists', (['opt.output_dir_root'], {}), '(opt.output_dir_root)\n', (2484, 2505), True, 'import os.path as osp\n'), ((2515, 2544), 'os.mkdir', 'os.mkdir', (['opt.output_dir_root'], {}), '(opt.output_dir_root)\n', (2523, 2544), False, 'import os\n'), ((2859, 2902), 'os.path.join', 'os.path.join', (['opt.output_dir_root', 'audio_id'], {}), '(opt.output_dir_root, audio_id)\n', (2871, 2902), False, 'import os\n'), ((2978, 3048), 'librosa.load', 'librosa.load', (['input_audio_path'], {'sr': 'opt.audio_sampling_rate', 'mono': '(False)'}), '(input_audio_path, sr=opt.audio_sampling_rate, mono=False)\n', (2990, 3048), False, 'import librosa\n'), ((3405, 3446), 'torchvision.transforms.Compose', 'transforms.Compose', (['vision_transform_list'], {}), 
'(vision_transform_list)\n', (3423, 3446), True, 'import torchvision.transforms as transforms\n'), ((3550, 3571), 'numpy.zeros', 'np.zeros', (['audio.shape'], {}), '(audio.shape)\n', (3558, 3571), True, 'import numpy as np\n'), ((3653, 3674), 'numpy.zeros', 'np.zeros', (['audio.shape'], {}), '(audio.shape)\n', (3661, 3674), True, 'import numpy as np\n'), ((10627, 10667), 'numpy.divide', 'np.divide', (['binaural_audio', 'overlap_count'], {}), '(binaural_audio, overlap_count)\n', (10636, 10667), True, 'import numpy as np\n'), ((546, 567), 'numpy.mean', 'np.mean', (['(samples ** 2)'], {}), '(samples ** 2)\n', (553, 567), True, 'import numpy as np\n'), ((698, 711), 'options.test_options.TestOptions', 'TestOptions', ([], {}), '()\n', (709, 711), False, 'from options.test_options import TestOptions\n'), ((1022, 1055), 'models.networks.VisualNetDilated', 'VisualNetDilated', (['original_resnet'], {}), '(original_resnet)\n', (1038, 1055), False, 'from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init\n'), ((1255, 1285), 'torch.load', 'torch.load', (['opt.weights_visual'], {}), '(opt.weights_visual)\n', (1265, 1285), False, 'import torch\n'), ((1602, 1631), 'torch.load', 'torch.load', (['opt.weights_audio'], {}), '(opt.weights_audio)\n', (1612, 1631), False, 'import torch\n'), ((1787, 1797), 'models.networks.AssoConv', 'AssoConv', ([], {}), '()\n', (1795, 1797), False, 'from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init\n'), ((2041, 2071), 'torch.load', 'torch.load', (['opt.weights_fusion'], {}), '(opt.weights_fusion)\n', (2051, 2071), False, 'import torch\n'), ((3211, 3240), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 448)'], {}), '((224, 448))\n', (3228, 3240), True, 'import torchvision.transforms as transforms\n'), ((3241, 3262), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3260, 3262), True, 'import torchvision.transforms as 
transforms\n'), ((3301, 3376), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3321, 3376), True, 'import torchvision.transforms as transforms\n'), ((9122, 9227), 'librosa.istft', 'librosa.istft', (['pred_left_spec'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(pred_left_spec, hop_length=160, win_length=400, center=True,\n length=samples_per_window)\n', (9135, 9227), False, 'import librosa\n'), ((9430, 9536), 'librosa.istft', 'librosa.istft', (['pred_right_spec'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(pred_right_spec, hop_length=160, win_length=400, center=True,\n length=samples_per_window)\n', (9443, 9536), False, 'import librosa\n'), ((9790, 9904), 'librosa.istft', 'librosa.istft', (['reconstructed_stft_diff'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(reconstructed_stft_diff, hop_length=160, win_length=400,\n center=True, length=samples_per_window)\n', (9803, 9904), False, 'import librosa\n'), ((10709, 10743), 'os.path.isdir', 'os.path.isdir', (['cur_output_dir_root'], {}), '(cur_output_dir_root)\n', (10722, 10743), False, 'import os\n'), ((10757, 10786), 'os.mkdir', 'os.mkdir', (['cur_output_dir_root'], {}), '(cur_output_dir_root)\n', (10765, 10786), False, 'import os\n'), ((10880, 10939), 'os.path.join', 'os.path.join', (['cur_output_dir_root', '"""predicted_binaural.wav"""'], {}), "(cur_output_dir_root, 'predicted_binaural.wav')\n", (10892, 10939), False, 'import os\n'), ((11025, 11076), 'os.path.join', 'os.path.join', (['cur_output_dir_root', '"""mixed_mono.wav"""'], {}), "(cur_output_dir_root, 'mixed_mono.wav')\n", (11037, 11076), False, 'import os\n'), ((11148, 11203), 'os.path.join', 'os.path.join', (['cur_output_dir_root', 
'"""input_binaural.wav"""'], {}), "(cur_output_dir_root, 'input_binaural.wav')\n", (11160, 11203), False, 'import os\n'), ((1857, 1864), 'models.networks.APNet', 'APNet', ([], {}), '()\n', (1862, 1864), False, 'from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init\n'), ((5912, 6017), 'librosa.istft', 'librosa.istft', (['pred_left_spec'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(pred_left_spec, hop_length=160, win_length=400, center=True,\n length=samples_per_window)\n', (5925, 6017), False, 'import librosa\n'), ((6232, 6338), 'librosa.istft', 'librosa.istft', (['pred_right_spec'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(pred_right_spec, hop_length=160, win_length=400, center=True,\n length=samples_per_window)\n', (6245, 6338), False, 'import librosa\n'), ((6608, 6722), 'librosa.istft', 'librosa.istft', (['reconstructed_stft_diff'], {'hop_length': '(160)', 'win_length': '(400)', 'center': '(True)', 'length': 'samples_per_window'}), '(reconstructed_stft_diff, hop_length=160, win_length=400,\n center=True, length=samples_per_window)\n', (6621, 6722), False, 'import librosa\n'), ((7736, 7805), 'data.stereo_dataset.generate_spectrogram', 'generate_spectrogram', (['(audio_segment_channel1 - audio_segment_channel2)'], {}), '(audio_segment_channel1 - audio_segment_channel2)\n', (7756, 7805), False, 'from data.stereo_dataset import generate_spectrogram\n'), ((7894, 7963), 'data.stereo_dataset.generate_spectrogram', 'generate_spectrogram', (['(audio_segment_channel1 + audio_segment_channel2)'], {}), '(audio_segment_channel1 + audio_segment_channel2)\n', (7914, 7963), False, 'from data.stereo_dataset import generate_spectrogram\n'), ((10135, 10184), 'numpy.expand_dims', 'np.expand_dims', (['reconstructed_signal_left'], {'axis': '(0)'}), '(reconstructed_signal_left, axis=0)\n', (10149, 10184), True, 'import 
numpy as np\n'), ((10186, 10236), 'numpy.expand_dims', 'np.expand_dims', (['reconstructed_signal_right'], {'axis': '(0)'}), '(reconstructed_signal_right, axis=0)\n', (10200, 10236), True, 'import numpy as np\n'), ((4379, 4448), 'data.stereo_dataset.generate_spectrogram', 'generate_spectrogram', (['(audio_segment_channel1 - audio_segment_channel2)'], {}), '(audio_segment_channel1 - audio_segment_channel2)\n', (4399, 4448), False, 'from data.stereo_dataset import generate_spectrogram\n'), ((4541, 4610), 'data.stereo_dataset.generate_spectrogram', 'generate_spectrogram', (['(audio_segment_channel1 + audio_segment_channel2)'], {}), '(audio_segment_channel1 + audio_segment_channel2)\n', (4561, 4610), False, 'from data.stereo_dataset import generate_spectrogram\n'), ((6965, 7014), 'numpy.expand_dims', 'np.expand_dims', (['reconstructed_signal_left'], {'axis': '(0)'}), '(reconstructed_signal_left, axis=0)\n', (6979, 7014), True, 'import numpy as np\n'), ((7016, 7066), 'numpy.expand_dims', 'np.expand_dims', (['reconstructed_signal_right'], {'axis': '(0)'}), '(reconstructed_signal_right, axis=0)\n', (7030, 7066), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Image Processing
#
# ## Section table of contents
#
# ```{tableofcontents}
# ```
# ## Set up Environment
# I suppose I better import some modules:
# * scipy
# * numpy
# * matplotlib
# In[1]:
# Demonstration script (exported notebook): load a photo, inspect its
# pixel data, convert it to grayscale, and take a 2-D Fourier transform.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
from scipy import ndimage, fft
# In[3]:
from PIL import Image
photo = Image.open('../images/HNI_0079.JPG')
photo
# In[4]:
print(photo.size)
# In[5]:
# View the image as an array and display only the first (red) channel.
pixel_array = np.asarray(photo)
red_channel = pixel_array[:, :, 0]
plt.imshow(red_channel)
# PIL pixel-access object
# In[6]:
px = photo.load()
px[0, 0]
# In[7]:
import matplotlib.image as mpimg
# Read the same file straight into an ndarray via matplotlib.
img_data = mpimg.imread('../images/HNI_0079.JPG')
#print(img_data)
imgplt = plt.imshow(img_data)
# Grayscale ("L" mode) version of the photo.
# In[8]:
gray_photo = photo.convert("L")
gray_photo
# In[9]:
spectrum = fft.fft2(gray_photo)
# In[10]:
spectrum.shape
# In[11]:
print(spectrum)
# ## Image Data
#
# box of numbers
#
# ![flatiron](../images/fibonacci.jpg)
#
# ## Filters/Kernels
#
# ## Convolution
# ## Fourier Transform
# In[ ]:
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"scipy.fft.fft2",
"matplotlib.image.imread",
"numpy.asarray"
] | [((460, 496), 'PIL.Image.open', 'Image.open', (['"""../images/HNI_0079.JPG"""'], {}), "('../images/HNI_0079.JPG')\n", (470, 496), False, 'from PIL import Image\n'), ((557, 574), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (567, 574), True, 'import numpy as np\n'), ((605, 629), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tint_imarray'], {}), '(tint_imarray)\n', (615, 629), True, 'import matplotlib.pyplot as plt\n'), ((739, 777), 'matplotlib.image.imread', 'mpimg.imread', (['"""../images/HNI_0079.JPG"""'], {}), "('../images/HNI_0079.JPG')\n", (751, 777), True, 'import matplotlib.image as mpimg\n'), ((801, 818), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gehry'], {}), '(gehry)\n', (811, 818), True, 'import matplotlib.pyplot as plt\n'), ((928, 947), 'scipy.fft.fft2', 'fft.fft2', (['grayimage'], {}), '(grayimage)\n', (936, 947), False, 'from scipy import ndimage, fft\n')] |
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import math
class driver_analysis:
    """Media-driver response-curve (saturation) analysis.

    Fits an exponential S-curve ``beta1 ** (alpha ** index)`` to weekly
    volumes of a media variable and derives four saturation levels from
    the curve's derivatives:

    1. Break Through     -- maximum of the 2nd derivative
    2. Optimal           -- maximum of the 1st derivative
    3. Saturation Begins -- maximum of the 3rd derivative
    4. Full Saturation   -- first point where the curve reaches ~0.992
    """

    def __init__(self, beta):
        # beta is stored scaled by 1e10; every consumer rescales it back
        # with * 10**-10 before use.
        self.beta = beta*(10**10)

    def get_sat_lvl(self, data, ds, alpha, media_var):
        """Build an indexed response curve with the four saturation levels.

        Note: ``data`` is modified in place (datetime conversion plus a
        'week' column) and is aggregated to weekly level before fitting.

        Parameters
        ----------
        data : pd.DataFrame
            Input data containing a date column and the media variable.
        ds : str
            Name of the date column.
        alpha : float
            Decay parameter of the S-curve (0 < alpha < 1).
        media_var : str
            Name of the media-variable (volume) column.

        Returns
        -------
        pd.DataFrame
            Curve dataframe with 'Index', 'var_volume', the fitted curve
            column, derivative columns, and the (x, y) coordinates of each
            saturation level. Also stored on self as opt/bt/sb/fs_{x,y}.
        """
        # Aggregate to weekly level (weeks start on Sunday).
        data[ds] = pd.to_datetime(data[ds])
        data['week'] = data[ds].map(lambda x: x - timedelta(days=x.isoweekday() % 7))
        data = data[['week', media_var]].groupby("week").sum().reset_index()

        df_curve = pd.DataFrame()
        # Step chosen so the mean weekly volume lands at index 10.
        index = ((np.mean(data[media_var]) / 10) * 100) / np.max(data[media_var])
        df_curve['Index'] = np.arange(0, 300, index)
        df_curve['var_volume'] = df_curve['Index'] * np.max(data[media_var]) / 100

        def s_curve_chart(data, column_name, alpha, beta):
            # S-curve response beta1 ** (alpha ** index), rounded to 8 dp.
            media_input_index = data['Index']
            # BUGFIX: np.float was removed in NumPy 1.24 -- use builtin float.
            beta1 = float(beta * (10 ** -10))
            column_name1 = str(column_name) + '_alpha_' + str(alpha).replace('.', '')
            data[column_name1] = round(beta1 ** (alpha ** media_input_index), 8)
            return column_name1

        df_curve['var_curve'] = s_curve_chart(df_curve, 'var_volume', alpha, self.beta)
        df_curve['max_var'] = np.max(data[media_var])
        df_curve['mean_var'] = np.mean(data[media_var])
        df_curve.drop('var_curve', axis=1, inplace=True)
        df_curve.sort_values(by='var_volume', inplace=True)

        # Name of the fitted curve column, e.g. 'var_volume_alpha_085'.
        curve_col = 'var_volume_alpha_' + str(alpha).replace('.', '')
        # log of the (rescaled) beta parameter, reused by every derivative.
        log_beta = np.log(float(self.beta * (10 ** -10)))

        ########################################################################
        ##########Calculate optimal point 1st derivative of the curve###########
        ########################################################################
        def deri_1st(data, var_column, index_column):
            data['deri_1st'] = alpha ** (data[index_column]) * data[var_column] * np.log(alpha) * log_beta

        deri_1st(df_curve, curve_col, 'Index')
        self.opt_x = df_curve[df_curve['deri_1st'] == df_curve['deri_1st'].max()]['var_volume']
        self.opt_y = df_curve[df_curve['deri_1st'] == df_curve['deri_1st'].max()][curve_col]
        df_curve['opt_x'] = self.opt_x
        df_curve['opt_y'] = self.opt_y

        ############################################################
        #######Calculate breakthrough point 2nd derivative #########
        ############################################################
        def deri_2nd(data, var_column, index_column, frist_column):
            data['deri_2nd'] = (data[frist_column] * np.log(alpha)
                                + alpha ** (2 * data[index_column]) * data[var_column]
                                * np.log(alpha) * np.log(alpha) * log_beta * log_beta)

        deri_2nd(df_curve, curve_col, 'Index', 'deri_1st')
        self.bt_x = df_curve[df_curve['deri_2nd'] == df_curve['deri_2nd'].max()]['var_volume']
        self.bt_y = df_curve[df_curve['deri_2nd'] == df_curve['deri_2nd'].max()][curve_col]
        df_curve['bt_x'] = self.bt_x
        df_curve['bt_y'] = self.bt_y

        ##################################################################
        #########Calculate saturation begins point 3rd derivative#########
        ##################################################################
        def deri_3rd(data, var_column, index_column, frist_column):
            data['deri_3rd'] = data[frist_column] * (
                alpha ** (2 * data[index_column]) * np.log(float(self.beta * (10 ** -10)) ** 2)
                + 3 * alpha ** data[index_column] * log_beta + 1)

        deri_3rd(df_curve, curve_col, 'Index', 'deri_1st')
        self.sb_x = df_curve[df_curve['deri_3rd'] == df_curve['deri_3rd'].max()]['var_volume']
        self.sb_y = df_curve[df_curve['deri_3rd'] == df_curve['deri_3rd'].max()][curve_col]
        df_curve['sb_x'] = self.sb_x
        df_curve['sb_y'] = self.sb_y

        #################################################
        #########Calculate full saturation point#########
        #################################################
        # First indexed point where the curve reaches >= 99.2% of its asymptote.
        self.fs_x = df_curve[df_curve[curve_col] >= 0.992]['var_volume'][0:1]
        self.fs_y = df_curve[df_curve[curve_col] >= 0.992][curve_col][0:1]
        df_curve['fs_x'] = self.fs_x
        df_curve['fs_y'] = self.fs_y
        return df_curve

    def readable_number(self, n):
        """Format *n* with a scale suffix, e.g. 1500 -> '1.5 Thousand'."""
        mill_lst = ['', ' Thousand', ' Million', ' Billion', ' Trillion']
        n = float(n)
        millidx = max(0, min(len(mill_lst) - 1,
                             int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))
        return '{:.1f}{}'.format(n / 10 ** (3 * millidx), mill_lst[millidx])

    def plot_sat_lvl(self, df_curve, model_df, ds, var):
        """Plot the response curve (left) and weekly volumes against the
        saturation thresholds (right).

        Parameters
        ----------
        df_curve : pd.DataFrame
            Output of :meth:`get_sat_lvl`.
        model_df : pd.DataFrame
            Raw model data with the date column and the variable column.
        ds, var : str
            Date column name and variable column name in ``model_df``.
        """
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 10))
        plt.style.use('ggplot')
        # Response-curve line (the fitted '..._alpha_...' column vs volume).
        lm = sns.lineplot(x='var_volume', y=[col for col in df_curve.columns if "alpha" in col][0],
                          data=df_curve, color='#37536d', ax=ax1)
        # Reformat tick labels into human-readable units.
        y_ticks = lm.get_yticks()
        x_ticks = lm.get_xticks()
        lm.set_yticklabels(['{:,.0%}'.format(i) for i in y_ticks])
        lm.set_xticklabels([self.readable_number(i) for i in x_ticks])
        # Mark the four saturation levels on the curve.
        # NOTE(review): 'Satuation' spelling kept everywhere -- it doubles as
        # a dict/column key, so correcting it here alone would break lookups.
        ax1.plot(df_curve['bt_x'], df_curve['bt_y'], 'ro', label="Break Through", marker='o', markersize=10, color='m')
        ax1.plot(df_curve['opt_x'], df_curve['opt_y'], 'ro', label="Optimal", marker='o', markersize=10, color='g')
        ax1.plot(df_curve['sb_x'], df_curve['sb_y'], 'ro', label="Satuation Begins", marker='o', markersize=10, color='r')
        ax1.plot(df_curve['fs_x'], df_curve['fs_y'], 'ro', label="Full Satuation", marker='o', markersize=10, color='c')
        # Set plot options and show plot
        ax1.set_xlabel('Variable Volumes', fontsize=20)
        ax1.set_ylabel('Response Index', fontsize=20)
        ax1.set_title(var + ' Response Curve', fontsize=20)
        ax1.legend(loc='center right', fancybox=False, framealpha=0)

        # Weekly volume dataframe for the bar chart on the right.
        df_volume = pd.DataFrame()
        df_volume['period'] = pd.to_datetime(pd.to_datetime(model_df[ds]).map(lambda x: x.strftime("%Y-%m-%d")))
        df_volume['week'] = df_volume['period'].map(lambda x: x - timedelta(days=x.isoweekday() % 7))
        df_volume['week'] = pd.to_datetime(df_volume['week']).map(lambda x: x.strftime("%Y-%m-%d"))
        df_volume['var_volume'] = model_df[var]
        df_volume = df_volume[['week', 'var_volume']].groupby("week").sum().reset_index()
        max_x = df_volume['var_volume'].max()
        df_volume['Optimal'] = int(df_curve['opt_x'].unique()[1])
        df_volume['Break Through'] = int(df_curve['bt_x'].unique()[1])
        df_volume['Satuation Begins'] = int(df_curve['sb_x'].unique()[1])
        try:
            df_volume['Full Satuation'] = int(df_curve['fs_x'].unique()[1])
        except (IndexError, ValueError):
            # Curve never reached full saturation within the indexed range.
            # BUGFIX: the original bare 'except' left the column unset, so the
            # textstr/plot below raised KeyError; fall back to 0 instead.
            print('out of range')
            df_volume['Full Satuation'] = 0
        df_volume['Max'] = max_x
        df_volume['var_name'] = var
        # plot volume against saturation level
        textstr = '\n'.join((
            r'Breakthrough: ${}'.format(self.readable_number(int(df_volume['Break Through'].unique()[0]))),
            r'Optimal: ${}'.format(self.readable_number(int(df_volume['Optimal'].unique()[0]))),
            r'Saturation Begins: ${}'.format(self.readable_number(int(df_volume['Satuation Begins'].unique()[0]))),
            r'Full Saturation: ${}'.format(self.readable_number(int(df_volume['Full Satuation'].unique()[0]))),
        ))
        ax2 = sns.barplot(x=df_volume['week'], y=df_volume['var_volume'], color='#37536d', ax=ax2)
        y_ticks2 = ax2.get_yticks()
        ax2.set_yticklabels([self.readable_number(i) for i in y_ticks2])
        # Dashed horizontal lines marking each saturation threshold.
        ax2.plot('week', 'Break Through', data=df_volume, color='m', linewidth=5, linestyle='dashed')
        ax2.plot('week', 'Optimal', data=df_volume, color='g', linewidth=5, linestyle='dashed')
        ax2.plot('week', 'Satuation Begins', data=df_volume, color='r', linewidth=5, linestyle='dashed')
        ax2.plot('week', 'Full Satuation', data=df_volume, color='c', linewidth=5, linestyle='dashed')
        ax2.set_title(var + ' Volume Against Weekly Saturation Levels', fontsize=20)
        ax2.set_xlabel("Week", fontsize=20)
        ax2.set_xticks(df_volume['week'])
        ax2.set_xticklabels(df_volume['week'], rotation=40, ha='right')
        ax2.set_ylabel("Volume", fontsize=20)
        ax2.set_yticks(y_ticks2)
        props = dict(boxstyle='round', alpha=0.5)
        ax2.text(0.6, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
                 verticalalignment='top', bbox=props)
        ax2.legend(loc='upper right', fancybox=True, framealpha=5, bbox_to_anchor=(1, 0.95))
        plt.tight_layout(pad=5)
        plt.show()
| [
"numpy.mean",
"numpy.float",
"numpy.arange",
"numpy.log",
"matplotlib.pyplot.style.use",
"numpy.max",
"seaborn.lineplot",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"seaborn.barplot",
"matplotlib.pyplot.subplots",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((663, 687), 'pandas.to_datetime', 'pd.to_datetime', (['data[ds]'], {}), '(data[ds])\n', (677, 687), True, 'import pandas as pd\n'), ((869, 883), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (881, 883), True, 'import pandas as pd\n'), ((985, 1009), 'numpy.arange', 'np.arange', (['(0)', '(300)', 'index'], {}), '(0, 300, index)\n', (994, 1009), True, 'import numpy as np\n'), ((1543, 1566), 'numpy.max', 'np.max', (['data[media_var]'], {}), '(data[media_var])\n', (1549, 1566), True, 'import numpy as np\n'), ((1598, 1622), 'numpy.mean', 'np.mean', (['data[media_var]'], {}), '(data[media_var])\n', (1605, 1622), True, 'import numpy as np\n'), ((5307, 5343), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(30, 10)'}), '(1, 2, figsize=(30, 10))\n', (5319, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5352, 5375), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (5365, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5415, 5545), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""var_volume"""', 'y': "[col for col in df_curve.columns if 'alpha' in col][0]", 'data': 'df_curve', 'color': '"""#37536d"""', 'ax': 'ax1'}), "(x='var_volume', y=[col for col in df_curve.columns if 'alpha' in\n col][0], data=df_curve, color='#37536d', ax=ax1)\n", (5427, 5545), True, 'import seaborn as sns\n'), ((6712, 6726), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6724, 6726), True, 'import pandas as pd\n'), ((8216, 8304), 'seaborn.barplot', 'sns.barplot', ([], {'x': "df_volume['week']", 'y': "df_volume['var_volume']", 'color': '"""#37536d"""', 'ax': 'ax2'}), "(x=df_volume['week'], y=df_volume['var_volume'], color='#37536d',\n ax=ax2)\n", (8227, 8304), True, 'import seaborn as sns\n'), ((9413, 9436), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(5)'}), '(pad=5)\n', (9429, 9436), True, 'import matplotlib.pyplot as plt\n'), ((9445, 9455), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (9453, 9455), True, 'import matplotlib.pyplot as plt\n'), ((935, 958), 'numpy.max', 'np.max', (['data[media_var]'], {}), '(data[media_var])\n', (941, 958), True, 'import numpy as np\n'), ((1212, 1238), 'numpy.float', 'np.float', (['(beta * 10 ** -10)'], {}), '(beta * 10 ** -10)\n', (1220, 1238), True, 'import numpy as np\n'), ((1057, 1080), 'numpy.max', 'np.max', (['data[media_var]'], {}), '(data[media_var])\n', (1063, 1080), True, 'import numpy as np\n'), ((6968, 7001), 'pandas.to_datetime', 'pd.to_datetime', (["df_volume['week']"], {}), "(df_volume['week'])\n", (6982, 7001), True, 'import pandas as pd\n'), ((901, 925), 'numpy.mean', 'np.mean', (['data[media_var]'], {}), '(data[media_var])\n', (908, 925), True, 'import numpy as np\n'), ((2117, 2130), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (2123, 2130), True, 'import numpy as np\n'), ((2138, 2169), 'numpy.float', 'np.float', (['(self.beta * 10 ** -10)'], {}), '(self.beta * 10 ** -10)\n', (2146, 2169), True, 'import numpy as np\n'), ((2868, 2881), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (2874, 2881), True, 'import numpy as np\n'), ((6772, 6800), 'pandas.to_datetime', 'pd.to_datetime', (['model_df[ds]'], {}), '(model_df[ds])\n', (6786, 6800), True, 'import pandas as pd\n'), ((3030, 3061), 'numpy.float', 'np.float', (['(self.beta * 10 ** -10)'], {}), '(self.beta * 10 ** -10)\n', (3038, 3061), True, 'import numpy as np\n'), ((2971, 2984), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (2977, 2984), True, 'import numpy as np\n'), ((2992, 3023), 'numpy.float', 'np.float', (['(self.beta * 10 ** -10)'], {}), '(self.beta * 10 ** -10)\n', (3000, 3023), True, 'import numpy as np\n'), ((2957, 2970), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (2963, 2970), True, 'import numpy as np\n'), ((3903, 3934), 'numpy.float', 'np.float', (['(self.beta * 10 ** -10)'], {}), '(self.beta * 10 ** -10)\n', (3911, 3934), True, 'import numpy as np\n'), ((3820, 3851), 'numpy.float', 
'np.float', (['(self.beta * 10 ** -10)'], {}), '(self.beta * 10 ** -10)\n', (3828, 3851), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
def decision_regions(x, y, classifier, test_idx=None, resolution=0.02, plot_support=False, plot_custom_support=False):
    """Plot 2-D decision regions and margin contours of a fitted classifier.

    x: (n_samples, 2) feature matrix; y: class labels per sample.
    classifier: fitted estimator exposing decision_function(X); when
        plot_support is True it must also expose support_vectors_ (SVM).
    test_idx: optional index selection; those samples get a green ring.
    plot_support: mark the classifier's own support vectors.
    plot_custom_support: mark points with |decision_function| <= 1.
    Draws onto the current matplotlib axes; returns None.
    """
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    # One color per class actually present in y.
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluation grid spanning the data range plus a 1-unit border.
    x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(
        np.arange(x1_min, x1_max, resolution),
        np.arange(x2_min, x2_max, resolution)
    )
    #z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    xy = np.array([xx1.ravel(), xx2.ravel()]).T
    z = classifier.decision_function(xy) #.reshape(x.shape)
    z = z.reshape(xx1.shape)
    # Decision boundary (level 0) plus the two margin lines (levels -1/+1).
    plt.contour(xx1, xx2, z, alpha=0.3, cmap=cmap, levels=[-1, 0, 1], linestyles=['--', '-', '--'])
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Scatter each class with its own marker/color.
    for idx, c1 in enumerate(np.unique(y)):
        plt.scatter(
            x=x[y == c1, 0], y=x[y == c1, 1],
            alpha=0.8, c=colors[idx],
            marker=markers[idx], label=c1,
            edgecolors='none'
        )
    if test_idx:
        # Ring the held-out test samples so they stand out.
        x_test, y_test = x[test_idx,:], y[test_idx]
        plt.scatter(
            x_test[:,0],
            x_test[:,1],
            c='none',
            edgecolors='green',
            alpha=1.0, linewidth=1,
            marker='o',
            s=250,
            label='test set')
    if plot_support:
        # Support vectors as reported by the classifier itself.
        plt.scatter(
            classifier.support_vectors_[:, 0],
            classifier.support_vectors_[:, 1],
            marker='o',
            s=100,
            c='none',
            alpha=1.0,
            linewidth=1,
            edgecolors='purple',
            #facecolors='none',
            label='support set'
        )
    if plot_custom_support:
        # Recompute "support" points directly: any sample lying on or
        # inside the margin band |f(x)| <= 1.
        preds = classifier.decision_function(x)
        support_vectors = np.where(abs(preds) <= 1, 1, 0)
        #print(support_vectors)
        support_vector_idxs = np.where(support_vectors == 1)[0]
        #print(support_vector_idxs)
        x_support = x[support_vector_idxs, :]
        plt.scatter(
            x_support[:, 0],
            x_support[:, 1],
            marker='o',
            s=200,
            c='none',
            alpha=1.0,
            linewidth=1,
            edgecolors='orange',
            facecolors='none',
            label='custom support set'
) | [
"numpy.unique",
"numpy.where",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.scatter",
"numpy.arange"
] | [((848, 947), 'matplotlib.pyplot.contour', 'plt.contour', (['xx1', 'xx2', 'z'], {'alpha': '(0.3)', 'cmap': 'cmap', 'levels': '[-1, 0, 1]', 'linestyles': "['--', '-', '--']"}), "(xx1, xx2, z, alpha=0.3, cmap=cmap, levels=[-1, 0, 1],\n linestyles=['--', '-', '--'])\n", (859, 947), True, 'import matplotlib.pyplot as plt\n'), ((524, 561), 'numpy.arange', 'np.arange', (['x1_min', 'x1_max', 'resolution'], {}), '(x1_min, x1_max, resolution)\n', (533, 561), True, 'import numpy as np\n'), ((571, 608), 'numpy.arange', 'np.arange', (['x2_min', 'x2_max', 'resolution'], {}), '(x2_min, x2_max, resolution)\n', (580, 608), True, 'import numpy as np\n'), ((1044, 1056), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1053, 1056), True, 'import numpy as np\n'), ((1067, 1192), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'x[y == c1, 0]', 'y': 'x[y == c1, 1]', 'alpha': '(0.8)', 'c': 'colors[idx]', 'marker': 'markers[idx]', 'label': 'c1', 'edgecolors': '"""none"""'}), "(x=x[y == c1, 0], y=x[y == c1, 1], alpha=0.8, c=colors[idx],\n marker=markers[idx], label=c1, edgecolors='none')\n", (1078, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1471), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test[:, 0]', 'x_test[:, 1]'], {'c': '"""none"""', 'edgecolors': '"""green"""', 'alpha': '(1.0)', 'linewidth': '(1)', 'marker': '"""o"""', 's': '(250)', 'label': '"""test set"""'}), "(x_test[:, 0], x_test[:, 1], c='none', edgecolors='green', alpha\n =1.0, linewidth=1, marker='o', s=250, label='test set')\n", (1347, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1784), 'matplotlib.pyplot.scatter', 'plt.scatter', (['classifier.support_vectors_[:, 0]', 'classifier.support_vectors_[:, 1]'], {'marker': '"""o"""', 's': '(100)', 'c': '"""none"""', 'alpha': '(1.0)', 'linewidth': '(1)', 'edgecolors': '"""purple"""', 'label': '"""support set"""'}), "(classifier.support_vectors_[:, 0], classifier.support_vectors_[\n :, 1], marker='o', s=100, c='none', alpha=1.0, 
linewidth=1, edgecolors=\n 'purple', label='support set')\n", (1609, 1784), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2425), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_support[:, 0]', 'x_support[:, 1]'], {'marker': '"""o"""', 's': '(200)', 'c': '"""none"""', 'alpha': '(1.0)', 'linewidth': '(1)', 'edgecolors': '"""orange"""', 'facecolors': '"""none"""', 'label': '"""custom support set"""'}), "(x_support[:, 0], x_support[:, 1], marker='o', s=200, c='none',\n alpha=1.0, linewidth=1, edgecolors='orange', facecolors='none', label=\n 'custom support set')\n", (2261, 2425), True, 'import matplotlib.pyplot as plt\n'), ((2125, 2155), 'numpy.where', 'np.where', (['(support_vectors == 1)'], {}), '(support_vectors == 1)\n', (2133, 2155), True, 'import numpy as np\n'), ((354, 366), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (363, 366), True, 'import numpy as np\n')] |
import math
from typing import Union
import cv2 as cv
import numpy as np
import ball_net as bn
import sys
# Global frame counter; incremented once per processed frame in end_gen().
count = 0
# Maximum pixel distance for a detection to be matched to an existing
# blob (see Blob.fit).
R = 60
# Tolerances; not referenced anywhere in this chunk — TODO confirm before removing.
EPS = 1e-6
EPS2 = 0.5
# Blob tracking states: freshly created / not moving / moving consistently
# in one direction.
STATUS_INIT = 0
STATUS_STATIC = 1
STATUS_DIRECTED = 2
def pt_dist(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
class Blob:
    """A cluster of detections tracked across consecutive frames."""

    # Class-wide id counter; each new Blob takes the next value.
    count = 1

    def __init__(self, x, y, radius, age):
        self.id = Blob.count
        Blob.count += 1
        self.points = [[x, y]]        # detection centers, oldest first
        self.pp = [[radius, age]]     # (radius, frame) per detection
        self.status = STATUS_INIT
        self.v = None
        self.age = age                # frame index of the latest detection
        self.nx = None                # last predicted x (see predict)
        self.ny = None                # last predicted y (see predict)

    def fit(self, x, y):
        """Return (is_close, distance) for (x, y) vs this blob's last point."""
        last_x, last_y = self.points[-1]
        dist = pt_dist(last_x, last_y, x, y)
        return dist < R, dist

    def add(self, x, y, r, a):
        """Append a detection and reclassify the blob's movement state."""
        self.points.append([x, y])
        self.pp.append([r, a])
        self.age = a
        if len(self.points) <= 2:
            return
        # Compare the last two displacement vectors: same sign on both
        # axes and both steps longer than 5px means consistent motion.
        prev_dx = self.points[-2][0] - self.points[-3][0]
        prev_dy = self.points[-2][1] - self.points[-3][1]
        cur_dx = x - self.points[-2][0]
        cur_dy = y - self.points[-2][1]
        cur_step = pt_dist(self.points[-2][0], self.points[-2][1], x, y)
        prev_step = pt_dist(self.points[-2][0], self.points[-2][1], self.points[-3][0], self.points[-3][1])
        if prev_dx * cur_dx > 0 and prev_dy * cur_dy > 0 and cur_step > 5 and prev_step > 5:
            self.status = STATUS_DIRECTED
        elif self.status != STATUS_DIRECTED:
            # Once a blob is classified as directed it stays directed.
            self.status = STATUS_STATIC

    def predict(self):
        """Extrapolate the next point via a degree-1 fit over the history."""
        history = np.array(self.points)
        next_index = len(self.points) + 1
        steps = np.array(range(1, next_index))
        fit_x = np.poly1d(np.polyfit(steps, history[:, 0], 1))
        fit_y = np.poly1d(np.polyfit(steps, history[:, 1], 1))
        self.nx = fit_x(next_index)
        self.ny = fit_y(next_index)
        return self.nx, self.ny
# Registry of every blob tracked so far; grows across frames.
Blobs = []
# Blob currently selected as the ball (set in handle_blob, reset each frame).
ball_blob: Union[Blob, None] = None
# Previous frame's ball blob; draw_ball falls back to its predict().
prev_ball_blob: Union[Blob, None] = None
def get_ball_blob():
    """Return the blob currently selected as the ball, or None."""
    return ball_blob
def find_closest_existing_blob(center_x, center_y):
    """Find the tracked blob closest to (center_x, center_y).

    Recently updated blobs (seen within the last 4 frames) win over
    stale static ones; within each group the smallest distance wins.
    Returns None when no blob is within matching range.
    """
    global Blobs, count
    recent = []
    stale_static = []
    for candidate in Blobs:
        # fit() reports whether the detection is within matching range (R).
        is_close, dist = candidate.fit(center_x, center_y)
        if not is_close:
            continue
        if count - candidate.age < 4:
            recent.append((candidate, dist))
        elif candidate.status == STATUS_STATIC:
            stale_static.append((candidate, dist))
    pool = recent if recent else stale_static
    if not pool:
        return None
    best, _ = min(pool, key=lambda pair: pair[1])
    return best
def handle_blob(center_x, center_y, radius):
    """Attach a detection to an existing blob, or start a new blob.

    When the updated blob shows directed motion it becomes the ball
    candidate if it has the longest history so far.
    """
    global Blobs, count, ball_blob
    matched = find_closest_existing_blob(center_x, center_y)
    if matched is None:
        Blobs.append(Blob(center_x, center_y, radius, count))
        return
    matched.add(center_x, center_y, radius, count)
    if matched.status != STATUS_DIRECTED:
        return
    # Prefer the directed blob with the most supporting detections.
    if not ball_blob or len(matched.points) > len(ball_blob.points):
        ball_blob = matched
def begin_gen():
    """Start a new frame: remember last frame's ball and reset the current one."""
    global ball_blob, prev_ball_blob
    prev_ball_blob = ball_blob
    ball_blob = None
def end_gen():
    """Finish the current frame by advancing the global frame counter."""
    # The original also declared `global ball_blob` here, but this function
    # never reads or assigns it; the unused declaration was dropped.
    global count
    count += 1
def handle_blobs(mask, frame):
    """Detect ball-like blobs in a foreground mask and feed them to the tracker.

    mask: single-channel foreground mask for the current frame.
    frame: corresponding BGR frame; candidate patches are cut from it and
        classified by the neural net in ball_net.
    """
    contours, _ = cv.findContours(mask, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
    # Open a new tracking generation (remembers last frame's ball).
    begin_gen()
    for contour in contours:
        rectangle_origin_x, rectangle_origin_y, rectangle_width, rectangle_height = cv.boundingRect(contour)
        rectangle_shorter_side = min(rectangle_width, rectangle_height)
        rectangle_longer_side = max(rectangle_width, rectangle_height)
        rectangle_ratio = rectangle_longer_side / rectangle_shorter_side
        # if sides are too large or bounding rectangle of contour is not close to a 1:1 ratio, like for a ball
        # then try the next one
        if rectangle_shorter_side < 10 or rectangle_longer_side > 40 or rectangle_ratio > 1.5:
            continue
        # cut the rectangle that is bounding the blob out of the mask
        cut_blob_from_mask = mask[
                             rectangle_origin_y: rectangle_origin_y + rectangle_height,
                             rectangle_origin_x: rectangle_origin_x + rectangle_width]
        # cv.imshow("Cut-Blob", cut_blob)
        is_blob, amount_of_non_zeroes = check_blob(cut_blob_from_mask, rectangle_width, rectangle_height)
        # NOTE(review): debug display inside the candidate loop — runs once
        # per contour; consider hoisting if throughput matters.
        cv.imshow('Mask', mask)
        if not is_blob:
            continue
        probability_non_zeroes = amount_of_non_zeroes / (rectangle_width * rectangle_height)
        # at least half of the pixels should be non-zeroes
        if probability_non_zeroes < 0.5:
            continue
        # cut the bounding rectangle from the frame
        cut_frame = frame[
                    rectangle_origin_y: rectangle_origin_y + rectangle_height,
                    rectangle_origin_x: rectangle_origin_x + rectangle_width]
        # cv.imshow("Cut-Frame", cut_frame)
        # Zero out background pixels so only the detected shape (not the
        # rectangular cut-out's surroundings) reaches the classifier.
        cut_c = cv.bitwise_and(cut_frame, cut_frame, mask=cut_blob_from_mask)
        # cv.imshow("Cut-C", cut_c)
        # is the blob a ball? Decided by the NN; 0 means accepted as ball.
        if bn.check_pic(cut_c) != 0:
            continue
        # get data (coordinates) for the enclosing circle of the detected ball
        ((x, y), radius) = cv.minEnclosingCircle(contour)
        # find out if the blob is directed with a previous blob and also add it to blob list
        handle_blob(int(x), int(y), int(radius))
    end_gen()
def check_blob(blob, width, height):
    """Heuristic roundness test on a binary blob patch.

    Cuts the middle fifth of the patch both vertically and horizontally
    and compares their fill against each other and against the whole
    patch. Returns (looks_round, total_nonzero_pixels).
    """
    origin_x = 0
    origin_y = 0
    # Middle vertical strip: columns [2/5 .. 3/5) at full height.
    fifth_w = int(width / 5)
    strip_x = origin_x + 2 * fifth_w
    vertical_strip = blob[origin_y: origin_y + height, strip_x: strip_x + fifth_w]
    # Middle horizontal strip: rows [2/5 .. 3/5) at full width.
    fifth_h = int(height / 5)
    strip_y = origin_y + 2 * fifth_h
    horizontal_strip = blob[strip_y: strip_y + fifth_h, origin_x: origin_x + width]
    nz_horizontal = cv.countNonZero(horizontal_strip)
    nz_vertical = cv.countNonZero(vertical_strip)
    nz_total = cv.countNonZero(blob)
    low = min(nz_horizontal, nz_vertical)
    high = max(nz_horizontal, nz_vertical)
    # Large sentinel when one strip is empty (forces a rejection).
    strip_ratio = high / low if low > 0 else 1000
    horizontal_share = nz_horizontal / nz_total
    vertical_share = nz_vertical / nz_total
    # Thresholds 1.5 / 0.15 are empirical tuning values from the original.
    looks_round = (
        strip_ratio < 1.5
        and horizontal_share > 0.15
        and vertical_share > 0.15
    )
    return looks_round, nz_total
def draw_ball(pic):
    """Draw a green circle at the ball's position (or its prediction).

    Uses the current frame's ball when available; otherwise extrapolates
    from the previous frame's ball blob.
    """
    ball = get_ball_blob()
    if ball is not None:
        last_x, last_y = ball.points[-1][0], ball.points[-1][1]
        cv.circle(pic, (last_x, last_y), 10, (0, 200, 0), 3)
        return
    if prev_ball_blob is not None:
        pred_x, pred_y = prev_ball_blob.predict()
        cv.circle(pic, (int(pred_x), int(pred_y)), 10, (0, 200, 0), 3)
found_points = []
def draw_ball_path(pic):
    """Draw the tracked ball's path and mark predicted lowest points.

    Slides a 4-point window over the ball's history; when the two line
    segments of a window intersect below every window point, the
    intersection is marked red as a candidate bounce point. All path
    points are drawn gray.
    """
    ball = get_ball_blob()
    if ball is None:
        return
    window = 4
    history = ball.points
    for index, current in enumerate(history):
        segment = history[index:index + window]
        if len(segment) == 4:
            crossing = get_intersect(segment[0], segment[1], segment[2], segment[3])
            cross_y = crossing[1]
            segment_ys = map(lambda point: point[1], segment)
            # Finite intersection lying at or below all four points
            # (image y grows downward) marks a candidate lowest point.
            if cross_y < float('inf') and all(y <= cross_y for y in segment_ys):
                print(f'lowest point found: {crossing}')
                cv.circle(pic, (crossing[0], cross_y), 3, (0, 0, 255), -1)
        cv.circle(pic, (current[0], current[1]), 3, (150, 150, 150), -1)
def get_intersect(a1, a2, b1, b2):
    """Intersect the line through a1,a2 with the line through b1,b2.

    Each argument is an [x, y] point. Returns the intersection as a
    tuple of ints (coordinates truncated), or (inf, inf) when the two
    lines are parallel. Uses homogeneous coordinates: a line through two
    points — and the meeting point of two lines — is their cross product.
    """
    stacked = np.vstack([a1, a2, b1, b2])
    homogeneous = np.hstack((stacked, np.ones((4, 1))))
    line_a = np.cross(homogeneous[0], homogeneous[1])
    line_b = np.cross(homogeneous[2], homogeneous[3])
    px, py, pz = np.cross(line_a, line_b)
    if pz == 0:
        # Parallel lines meet "at infinity".
        return (float('inf'), float('inf'))
    return (int(px / pz), int(py / pz))
def draw_blobs(w, h):
    """Render all tracked blobs (and the ball) on a fresh w x h BGR image.

    Color encodes tracking state: gray = init, green = static,
    blue = directed. Returns the rendered image.
    """
    pic = np.zeros((h, w, 3), np.uint8)
    for b in Blobs:
        clr = (200, 200, 200)
        if b.status == STATUS_STATIC:
            clr = (0, 200, 0)
        elif b.status == STATUS_DIRECTED:
            clr = (200, 0, 0)
        # Idiom fix: `not b.v is None` -> `b.v is not None` (PEP 8);
        # behavior unchanged. NOTE(review): b.v is only ever set to None in
        # the code visible here, so this line may be dead — confirm.
        if b.v is not None:
            cv.line(pic, (b.points[0][0], b.points[0][1]), (b.points[-1][0], b.points[-1][1]), (255, 0, 0), 1)
        for p in b.points:
            cv.circle(pic, (p[0], p[1]), 3, clr, -1)
    draw_ball(pic)
    return pic
def test_clip(path):
    """Run the ball tracker over a video file, showing and saving each frame.

    path: path to the input clip. Frames are halved in size, background-
    subtracted, blurred and thresholded before blob handling. Stops at
    end of stream or when ESC (27) is pressed.
    """
    capture = cv.VideoCapture(path)
    subtractor = cv.createBackgroundSubtractorMOG2()
    frame_idx = 0
    while True:
        ok, frame = capture.read()
        if not ok or frame is None:
            break
        half_h = int(frame.shape[0] / 2)
        half_w = int(frame.shape[1] / 2)
        frame = cv.resize(frame, (half_w, half_h))
        # Foreground mask -> dilate -> blur -> Otsu binarization.
        mask = subtractor.apply(frame)
        mask = cv.dilate(mask, None)
        mask = cv.GaussianBlur(mask, (15, 15), 0)
        _, mask = cv.threshold(mask, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
        handle_blobs(mask, frame)
        pic = draw_blobs(half_w, half_h)
        cv.imshow('frame', pic)
        cv.imwrite("frames/frame-{:03d}.jpg".format(frame_idx), pic)
        if cv.waitKey(10) == 27:
            break
        frame_idx += 1
if __name__ == "__main__":
    # CLI entry point: first argument is the path of the clip to track.
    test_clip(sys.argv[1])
    # test_clip("D:/Videos/aus4.avi")
| [
"cv2.createBackgroundSubtractorMOG2",
"numpy.polyfit",
"math.sqrt",
"cv2.imshow",
"numpy.array",
"numpy.poly1d",
"numpy.cross",
"cv2.threshold",
"cv2.line",
"numpy.vstack",
"cv2.waitKey",
"ball_net.check_pic",
"numpy.ones",
"cv2.minEnclosingCircle",
"cv2.circle",
"cv2.resize",
"cv2.G... | [((279, 307), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (288, 307), False, 'import math\n'), ((3817, 3877), 'cv2.findContours', 'cv.findContours', (['mask', 'cv.RETR_CCOMP', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)\n', (3832, 3877), True, 'import cv2 as cv\n'), ((6616, 6656), 'cv2.countNonZero', 'cv.countNonZero', (['horizontal_part_of_blob'], {}), '(horizontal_part_of_blob)\n', (6631, 6656), True, 'import cv2 as cv\n'), ((6692, 6730), 'cv2.countNonZero', 'cv.countNonZero', (['vertical_part_of_blob'], {}), '(vertical_part_of_blob)\n', (6707, 6730), True, 'import cv2 as cv\n'), ((6756, 6777), 'cv2.countNonZero', 'cv.countNonZero', (['blob'], {}), '(blob)\n', (6771, 6777), True, 'import cv2 as cv\n'), ((9549, 9576), 'numpy.vstack', 'np.vstack', (['[a1, a2, b1, b2]'], {}), '([a1, a2, b1, b2])\n', (9558, 9576), True, 'import numpy as np\n'), ((9666, 9686), 'numpy.cross', 'np.cross', (['h[0]', 'h[1]'], {}), '(h[0], h[1])\n', (9674, 9686), True, 'import numpy as np\n'), ((9723, 9743), 'numpy.cross', 'np.cross', (['h[2]', 'h[3]'], {}), '(h[2], h[3])\n', (9731, 9743), True, 'import numpy as np\n'), ((9786, 9802), 'numpy.cross', 'np.cross', (['l1', 'l2'], {}), '(l1, l2)\n', (9794, 9802), True, 'import numpy as np\n'), ((10007, 10036), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.uint8'], {}), '((h, w, 3), np.uint8)\n', (10015, 10036), True, 'import numpy as np\n'), ((10522, 10543), 'cv2.VideoCapture', 'cv.VideoCapture', (['path'], {}), '(path)\n', (10537, 10543), True, 'import cv2 as cv\n'), ((10558, 10593), 'cv2.createBackgroundSubtractorMOG2', 'cv.createBackgroundSubtractorMOG2', ([], {}), '()\n', (10591, 10593), True, 'import cv2 as cv\n'), ((1818, 1839), 'numpy.array', 'np.array', (['self.points'], {}), '(self.points)\n', (1826, 1839), True, 'import numpy as np\n'), ((1923, 1953), 'numpy.polyfit', 'np.polyfit', (['idx', 'npts[:, 0]', '(1)'], {}), '(idx, npts[:, 0], 1)\n', (1933, 
1953), True, 'import numpy as np\n'), ((1968, 1981), 'numpy.poly1d', 'np.poly1d', (['kx'], {}), '(kx)\n', (1977, 1981), True, 'import numpy as np\n'), ((1996, 2026), 'numpy.polyfit', 'np.polyfit', (['idx', 'npts[:, 1]', '(1)'], {}), '(idx, npts[:, 1], 1)\n', (2006, 2026), True, 'import numpy as np\n'), ((2041, 2054), 'numpy.poly1d', 'np.poly1d', (['ky'], {}), '(ky)\n', (2050, 2054), True, 'import numpy as np\n'), ((4008, 4032), 'cv2.boundingRect', 'cv.boundingRect', (['contour'], {}), '(contour)\n', (4023, 4032), True, 'import cv2 as cv\n'), ((4946, 4969), 'cv2.imshow', 'cv.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (4955, 4969), True, 'import cv2 as cv\n'), ((5657, 5718), 'cv2.bitwise_and', 'cv.bitwise_and', (['cut_frame', 'cut_frame'], {'mask': 'cut_blob_from_mask'}), '(cut_frame, cut_frame, mask=cut_blob_from_mask)\n', (5671, 5718), True, 'import cv2 as cv\n'), ((5968, 5998), 'cv2.minEnclosingCircle', 'cv.minEnclosingCircle', (['contour'], {}), '(contour)\n', (5989, 5998), True, 'import cv2 as cv\n'), ((7710, 7786), 'cv2.circle', 'cv.circle', (['pic', '(ball.points[-1][0], ball.points[-1][1])', '(10)', '(0, 200, 0)', '(3)'], {}), '(pic, (ball.points[-1][0], ball.points[-1][1]), 10, (0, 200, 0), 3)\n', (7719, 7786), True, 'import cv2 as cv\n'), ((10798, 10822), 'cv2.resize', 'cv.resize', (['frame', '(w, h)'], {}), '(frame, (w, h))\n', (10807, 10822), True, 'import cv2 as cv\n'), ((10875, 10896), 'cv2.dilate', 'cv.dilate', (['mask', 'None'], {}), '(mask, None)\n', (10884, 10896), True, 'import cv2 as cv\n'), ((10912, 10946), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['mask', '(15, 15)', '(0)'], {}), '(mask, (15, 15), 0)\n', (10927, 10946), True, 'import cv2 as cv\n'), ((10967, 11028), 'cv2.threshold', 'cv.threshold', (['mask', '(0)', '(255)', '(cv.THRESH_BINARY | cv.THRESH_OTSU)'], {}), '(mask, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n', (10979, 11028), True, 'import cv2 as cv\n'), ((11103, 11126), 'cv2.imshow', 'cv.imshow', (['"""frame"""', 
'pic'], {}), "('frame', pic)\n", (11112, 11126), True, 'import cv2 as cv\n'), ((5815, 5834), 'ball_net.check_pic', 'bn.check_pic', (['cut_c'], {}), '(cut_c)\n', (5827, 5834), True, 'import ball_net as bn\n'), ((9149, 9225), 'cv2.circle', 'cv.circle', (['pic', '(point_to_draw[0], point_to_draw[1])', '(3)', '(150, 150, 150)', '(-1)'], {}), '(pic, (point_to_draw[0], point_to_draw[1]), 3, (150, 150, 150), -1)\n', (9158, 9225), True, 'import cv2 as cv\n'), ((9619, 9634), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (9626, 9634), True, 'import numpy as np\n'), ((10413, 10453), 'cv2.circle', 'cv.circle', (['pic', '(p[0], p[1])', '(3)', 'clr', '(-1)'], {}), '(pic, (p[0], p[1]), 3, clr, -1)\n', (10422, 10453), True, 'import cv2 as cv\n'), ((11199, 11213), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (11209, 11213), True, 'import cv2 as cv\n'), ((9067, 9136), 'cv2.circle', 'cv.circle', (['pic', '(intersection[0], intersection_y)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(pic, (intersection[0], intersection_y), 3, (0, 0, 255), -1)\n', (9076, 9136), True, 'import cv2 as cv\n'), ((10275, 10378), 'cv2.line', 'cv.line', (['pic', '(b.points[0][0], b.points[0][1])', '(b.points[-1][0], b.points[-1][1])', '(255, 0, 0)', '(1)'], {}), '(pic, (b.points[0][0], b.points[0][1]), (b.points[-1][0], b.points[-\n 1][1]), (255, 0, 0), 1)\n', (10282, 10378), True, 'import cv2 as cv\n')] |
#
# full_analysis_scoring_for_new_dataset.py
#
# Run trained models on a new data set for which spectrograms have already
# been generated.
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
#%% Imports
import pandas as pd
import numpy as np
import glob
import os
import cv2
from keras.models import model_from_json
#%% Path configuration
# Directory layout of the whale-acoustics pipeline; all paths are
# relative to the working directory.
current_dir = "./Whale_Acoustics/"
model_dir = current_dir + "Model/"
data_dir = current_dir + "Data/"
spectrogram_dir = data_dir + "Extracted_Spectrogram_Full_Analysis/"
output_dir = current_dir + "Output/"

# exist_ok=True replaces the original check-then-create pattern
# (`if not os.path.exists(...)`), avoiding its TOCTOU race; the
# resulting directory state is identical.
os.makedirs(output_dir, exist_ok=True)
#%% Enumerate spectrograms to score
# Every pre-generated spectrogram PNG in the folder will be scored.
spectrogram_filenames = glob.glob(spectrogram_dir + '/*.png')
print("Total number of Spectrograms: ", len(spectrogram_filenames))
#%% Load models
# Each of the four classifiers was saved as a JSON architecture plus an
# HDF5 weights file; rebuild every model and restore its weights.
with open(model_dir + 'cnn_architecture_all_data.json', 'r') as f:
    model_cnn = model_from_json(f.read())
model_cnn.load_weights(model_dir + 'cnn_weights_all_data.h5')

with open(model_dir + 'vgg16_architecture_all_data.json', 'r') as f:
    model_vgg16 = model_from_json(f.read())
model_vgg16.load_weights(model_dir + 'vgg16_weights_all_data.h5')

with open(model_dir + 'ResNet50_architecture_all_data.json', 'r') as f:
    model_ResNet50 = model_from_json(f.read())
model_ResNet50.load_weights(model_dir + 'ResNet50_weights_all_data.h5')

with open(model_dir + 'DenseNet121_architecture_all_data.json', 'r') as f:
    model_DenseNet121 = model_from_json(f.read())
model_DenseNet121.load_weights(model_dir + 'DenseNet121_weights_all_data.h5')
#%% Run models on spectrograms
# Score every spectrogram with the four classifiers and blend their
# probabilities with the pre-optimized ensemble weights.
ncol, nrow = 300, 300

full_analysis_score = pd.DataFrame()
full_analysis_score['spectrogram_filename'] = spectrogram_filenames
full_analysis_score['audio_filename'] = ''
full_analysis_score['spectrogram_start_second'] = ''
full_analysis_score['predicted_probability'] = 0.0

# The optimal per-model ensemble weight for each model was computed in a
# previous step and persisted to opt_weights.xlsx.
opt_weights = pd.read_excel(output_dir + 'opt_weights.xlsx', header=None)[0].values.tolist()

for index, row in full_analysis_score.iterrows():
    if index % 10000 == 0:
        print(index)
    # Filename encodes "<audio>_<start-second>_...".
    # NOTE(review): the '\\' split assumes Windows-style glob paths — confirm.
    audio_filename, spectrogram_start_second = row['spectrogram_filename'].split('\\')[1].split('_')[0:2]
    img = cv2.imread(row['spectrogram_filename'])
    img = cv2.resize(img, (ncol, nrow))
    # Build and normalize the single-image batch ONCE and reuse it for all
    # four models (the original recomputed np.asarray(...) / 255.0 per model).
    batch = np.asarray([img]) / 255.0
    predict_prob_cnn = model_cnn.predict(batch).tolist()[0][0]
    predict_prob_vgg16 = model_vgg16.predict(batch).tolist()[0][0]
    predict_prob_ResNet50 = model_ResNet50.predict(batch).tolist()[0][0]
    predict_prob_DenseNet121 = model_DenseNet121.predict(batch).tolist()[0][0]
    # Weighted ensemble of the four model probabilities.
    predicted_probability = sum(
        p * w
        for p, w in zip(
            [predict_prob_cnn, predict_prob_vgg16, predict_prob_ResNet50, predict_prob_DenseNet121],
            opt_weights,
        )
    )
    # BUG FIX: DataFrame.at is an indexer, not a method. The original
    # `full_analysis_score.at(index, col, value)` raises TypeError
    # ("_AtIndexer object is not callable") and never stored any result.
    full_analysis_score.at[index, 'audio_filename'] = audio_filename
    full_analysis_score.at[index, 'spectrogram_start_second'] = spectrogram_start_second
    full_analysis_score.at[index, 'predicted_probability'] = predicted_probability

# Output filename typo ("ouptut") kept as-is so any downstream consumer
# that already expects this name keeps working.
full_analysis_score.to_excel(output_dir + 'full_analysis_ouptut_predicted_scores.xlsx', index=False)
| [
"os.path.exists",
"os.makedirs",
"numpy.asarray",
"pandas.read_excel",
"pandas.DataFrame",
"cv2.resize",
"cv2.imread",
"glob.glob"
] | [((732, 769), 'glob.glob', 'glob.glob', (["(spectrogram_dir + '/*.png')"], {}), "(spectrogram_dir + '/*.png')\n", (741, 769), False, 'import glob\n'), ((1683, 1697), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1695, 1697), True, 'import pandas as pd\n'), ((609, 635), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (623, 635), False, 'import os\n'), ((641, 664), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (652, 664), False, 'import os\n'), ((2226, 2265), 'cv2.imread', 'cv2.imread', (["row['spectrogram_filename']"], {}), "(row['spectrogram_filename'])\n", (2236, 2265), False, 'import cv2\n'), ((2276, 2305), 'cv2.resize', 'cv2.resize', (['img', '(ncol, nrow)'], {}), '(img, (ncol, nrow))\n', (2286, 2305), False, 'import cv2\n'), ((1928, 1987), 'pandas.read_excel', 'pd.read_excel', (["(output_dir + 'opt_weights.xlsx')"], {'header': 'None'}), "(output_dir + 'opt_weights.xlsx', header=None)\n", (1941, 1987), True, 'import pandas as pd\n'), ((2398, 2422), 'numpy.asarray', 'np.asarray', (['img_reshaped'], {}), '(img_reshaped)\n', (2408, 2422), True, 'import numpy as np\n'), ((2492, 2516), 'numpy.asarray', 'np.asarray', (['img_reshaped'], {}), '(img_reshaped)\n', (2502, 2516), True, 'import numpy as np\n'), ((2592, 2616), 'numpy.asarray', 'np.asarray', (['img_reshaped'], {}), '(img_reshaped)\n', (2602, 2616), True, 'import numpy as np\n'), ((2698, 2722), 'numpy.asarray', 'np.asarray', (['img_reshaped'], {}), '(img_reshaped)\n', (2708, 2722), True, 'import numpy as np\n')] |
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import run_utils as utils
import numpy as np
import argparse
from dpbench_datagen.gaussian_elim import gen_matrix, gen_vec, gen_data_to_file
def run(name, sizes=1, step=2, nopt=2 ** 2):
    """Build and run the native gaussian-elimination benchmark.

    Builds the binary via make (the --usm flag selects the USM variant),
    optionally verifies a small fixed-size problem against a reference
    result, then sweeps `sizes` problem sizes, and finally removes the
    binaries and generated data files.

    Args:
        name: Benchmark display name (not used by the run itself).
        sizes: Number of size steps to sweep.
        step: Multiplicative factor applied to the size each step.
        nopt: Initial problem size (overridden by --size).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--steps", type=int, default=sizes, help="Number of steps")
    parser.add_argument("--step", type=int, default=step, help="Factor for each step")
    parser.add_argument("--size", type=int, default=nopt, help="Initial data size")
    parser.add_argument(
        "--repeat", type=int, default=1, help="Iterations inside measured region"
    )
    parser.add_argument(
        "--test",
        required=False,
        action="store_true",
        help="Check for correctness by comparing output with naieve Python version",
    )
    parser.add_argument(
        "--usm",
        required=False,
        action="store_true",
        help="Use USM Shared or data transfer",
    )
    parser.add_argument(
        "--json",
        required=False,
        default=__file__.replace("py", "json"),
        help="output json data filename",
    )
    # NOTE(review): args.steps and args.json are parsed but never read in
    # this function (the sweep length comes from `sizes`) — confirm intent.
    args = parser.parse_args()

    nopt = args.size
    repeat = args.repeat

    # Rebuild the native binary from scratch.
    clean_string = ["make", "clean"]
    utils.run_command(clean_string, verbose=True)

    if args.usm:
        build_string = ["make", "comp"]
        utils.run_command(build_string, verbose=True)
        exec_name = "./gaussian_comp"
    else:
        build_string = ["make"]
        utils.run_command(build_string, verbose=True)
        exec_name = "./gaussian"

    if args.test:
        # Fixed-size correctness check against a known reference solution.
        reference_result = [5.02e-02, 5.00e-04, 5.00e-04, 5.02e-02]
        ref_size = 4
        gen_data_to_file(ref_size, 1.0)
        # Run the native program in test mode; it writes its solution
        # vector to result.bin.
        run_cmd = [exec_name, str(ref_size), str(1), "-t"]
        utils.run_command(run_cmd, verbose=True)
        result = np.fromfile("result.bin", np.float32)
        if np.allclose(result, reference_result):
            print(
                "Test succeeded. Python result: ",
                reference_result,
                " DPC++ result: ",
                result,
                "\n",
            )
        else:
            print(
                "Test failed. Python result: ",
                reference_result,
                " DPC++ result: ",
                result,
                "\n",
            )
        return

    for _ in range(sizes):
        # Regenerate input data for this size; the coefficient vector (b)
        # is filled with 1.0.
        gen_data_to_file(nopt, 1.0)
        run_cmd = [exec_name, str(nopt), str(repeat)]
        utils.run_command(run_cmd, verbose=True)

        nopt *= step
        repeat -= step
        if repeat < 1:
            repeat = 1

    # Remove binaries and generated data files.
    if os.path.isfile("./gaussian"):
        os.remove("./gaussian")
    if os.path.isfile("./gaussian_comp"):
        os.remove("./gaussian_comp")
    if os.path.isfile("m_data.bin"):
        os.remove("m_data.bin")
    if os.path.isfile("v_data.bin"):
        # BUG FIX: the original removed "m_data.bin" here (copy/paste
        # error), which left v_data.bin behind and raised FileNotFoundError
        # whenever m_data.bin had already been deleted above.
        os.remove("v_data.bin")
if __name__ == "__main__":
    # Script entry point: run the benchmark with its default size sweep.
    run("Gaussian elimination dpcpp")
| [
"dpbench_datagen.gaussian_elim.gen_data_to_file",
"run_utils.run_command",
"numpy.fromfile",
"numpy.allclose",
"argparse.ArgumentParser",
"os.path.isfile",
"os.remove"
] | [((289, 314), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (312, 314), False, 'import argparse\n'), ((1302, 1347), 'run_utils.run_command', 'utils.run_command', (['clean_string'], {'verbose': '(True)'}), '(clean_string, verbose=True)\n', (1319, 1347), True, 'import run_utils as utils\n'), ((2875, 2903), 'os.path.isfile', 'os.path.isfile', (['"""./gaussian"""'], {}), "('./gaussian')\n", (2889, 2903), False, 'import os\n'), ((2945, 2978), 'os.path.isfile', 'os.path.isfile', (['"""./gaussian_comp"""'], {}), "('./gaussian_comp')\n", (2959, 2978), False, 'import os\n'), ((3025, 3053), 'os.path.isfile', 'os.path.isfile', (['"""m_data.bin"""'], {}), "('m_data.bin')\n", (3039, 3053), False, 'import os\n'), ((3095, 3123), 'os.path.isfile', 'os.path.isfile', (['"""v_data.bin"""'], {}), "('v_data.bin')\n", (3109, 3123), False, 'import os\n'), ((1414, 1459), 'run_utils.run_command', 'utils.run_command', (['build_string'], {'verbose': '(True)'}), '(build_string, verbose=True)\n', (1431, 1459), True, 'import run_utils as utils\n'), ((1548, 1593), 'run_utils.run_command', 'utils.run_command', (['build_string'], {'verbose': '(True)'}), '(build_string, verbose=True)\n', (1565, 1593), True, 'import run_utils as utils\n'), ((1764, 1795), 'dpbench_datagen.gaussian_elim.gen_data_to_file', 'gen_data_to_file', (['ref_size', '(1.0)'], {}), '(ref_size, 1.0)\n', (1780, 1795), False, 'from dpbench_datagen.gaussian_elim import gen_matrix, gen_vec, gen_data_to_file\n'), ((1891, 1931), 'run_utils.run_command', 'utils.run_command', (['run_cmd'], {'verbose': '(True)'}), '(run_cmd, verbose=True)\n', (1908, 1931), True, 'import run_utils as utils\n'), ((1981, 2018), 'numpy.fromfile', 'np.fromfile', (['"""result.bin"""', 'np.float32'], {}), "('result.bin', np.float32)\n", (1992, 2018), True, 'import numpy as np\n'), ((2031, 2068), 'numpy.allclose', 'np.allclose', (['result', 'reference_result'], {}), '(result, reference_result)\n', (2042, 2068), True, 'import numpy as 
np\n'), ((2617, 2644), 'dpbench_datagen.gaussian_elim.gen_data_to_file', 'gen_data_to_file', (['nopt', '(1.0)'], {}), '(nopt, 1.0)\n', (2633, 2644), False, 'from dpbench_datagen.gaussian_elim import gen_matrix, gen_vec, gen_data_to_file\n'), ((2736, 2776), 'run_utils.run_command', 'utils.run_command', (['run_cmd'], {'verbose': '(True)'}), '(run_cmd, verbose=True)\n', (2753, 2776), True, 'import run_utils as utils\n'), ((2913, 2936), 'os.remove', 'os.remove', (['"""./gaussian"""'], {}), "('./gaussian')\n", (2922, 2936), False, 'import os\n'), ((2988, 3016), 'os.remove', 'os.remove', (['"""./gaussian_comp"""'], {}), "('./gaussian_comp')\n", (2997, 3016), False, 'import os\n'), ((3063, 3086), 'os.remove', 'os.remove', (['"""m_data.bin"""'], {}), "('m_data.bin')\n", (3072, 3086), False, 'import os\n'), ((3133, 3156), 'os.remove', 'os.remove', (['"""m_data.bin"""'], {}), "('m_data.bin')\n", (3142, 3156), False, 'import os\n')] |
import deb
import numpy as np
import pdb
class ModelInputMode():
    """Base marker class for model input feeding strategies."""
    pass


class MIMFixed(ModelInputMode):
    """Fixed-length input mode: the whole sequence is fed unchanged."""

    def __init__(self):
        pass

    def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):
        """Cast the train batch to float16 and append day-of-year info.

        label_date_id and batch_seq_len are accepted for interface
        compatibility with the variable-length modes but are unused here.
        """
        input_ = batch['in'].astype(np.float16)
        input_ = data.addDoty(input_)
        return input_

    def batchValPreprocess(self, batch, data):
        """Cast the validation batch to float16 and append day-of-year info.

        BUG FIX: the original read `['in'].astype(...)` — a bare list
        literal instead of `batch['in']` — which raises AttributeError
        ("'list' object has no attribute 'astype'") on every call.
        """
        input_ = batch['in'].astype(np.float16)
        input_ = data.addDoty(input_)
        return input_

    def trainingInit(self, batch, data, t_len, model_t_len):
        """Set per-split batch shapes and replicate doty samples.

        Returns (batch, data, None); the None slot is the minimum
        sequence length, which fixed mode does not constrain.
        """
        batch['train']['shape'] = (batch['train']['size'], model_t_len) + data.patches['train']['in'].shape[2:]
        batch['val']['shape'] = (batch['val']['size'], model_t_len) + data.patches['val']['in'].shape[2:]
        batch['test']['shape'] = (batch['test']['size'], model_t_len) + data.patches['test']['in'].shape[2:]
        deb.prints(batch['train']['shape'])
        data.ds.dotyReplicateSamples()
        return batch, data, None

    def valLabelSelect(self, data, label_id=-1):
        """No label selection in fixed mode; return data unchanged."""
        return data
class MIMFixedLabelSeq(MIMFixed):
    """Fixed mode that truncates the train sequence at the label date."""

    def __init__(self):
        pass

    def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):
        """Keep only timesteps before label_date_id, cast, and add doty."""
        truncated = batch['train']['in'][:, :label_date_id].astype(np.float16)
        return data.addDoty(truncated)

    def batchValPreprocess(self, batch, data):
        """Cast the full validation sequence to float16 and add doty."""
        val_input = batch['val']['in'].astype(np.float16)
        return data.addDoty(val_input)
class MIMVariable(ModelInputMode):
    """Variable-length input mode: sequences may be shorter than t_len."""

    def trainingInit(self, batch, data, t_len, model_t_len):
        """Set per-split batch shapes, enable doty, and return the
        minimum usable sequence length (t_len - labeled_dates + 1)."""
        for split in ('train', 'val', 'test'):
            batch[split]['shape'] = (
                (batch[split]['size'], model_t_len)
                + data.patches[split]['in'].shape[2:]
            )
        deb.prints(batch['train']['shape'])
        deb.prints(data.labeled_dates)
        min_seq_len = t_len - data.labeled_dates + 1
        deb.prints(min_seq_len)
        data.ds.setDotyFlag(True)
        return batch, data, min_seq_len

    def valLabelSelect(self, data, label_id=-1):
        """Reduce val/test labels to the single date at label_id."""
        for split in ('val', 'test'):
            data.patches[split]['label'] = data.patches[split]['label'][:, label_id]
        for split in ('val', 'test'):
            deb.prints(data.patches[split]['label'].shape)
        return data
class MIMVarLabel(MIMVariable):
    """Variable mode in which each label date gets its own input window of
    `batch_seq_len` time steps ending at that label date."""

    def __init__(self):
        # Default window length; subclasses (e.g. the padded variant) read this.
        self.batch_seq_len = 12

    def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):
        """Slice the `batch_seq_len` steps ending at `label_date_id` (a
        negative index), cast to float16 and add doty info for those bounds."""
        if label_date_id + 1 != 0:
            input_ = batch['in'][:, label_date_id - batch_seq_len + 1:label_date_id + 1]
        else:
            # label_date_id == -1: an end bound of 0 would yield an empty
            # slice, so slice to the end of the sequence instead.
            input_ = batch['in'][:, label_date_id - batch_seq_len + 1:]
        input_ = input_.astype(np.float16)
        input_ = data.addDoty(input_,
            bounds=[label_date_id - batch_seq_len + 1, label_date_id + 1])
        return input_

    def batchMetricSplitPreprocess(self, batch, data, label_date_id, batch_seq_len=12):
        """Metric-split preprocessing is identical to training preprocessing.

        Bug fix: the original called `batchTrainPreprocess(...)` without
        `self.`, which raised NameError the first time it was invoked.
        """
        return self.batchTrainPreprocess(batch, data, label_date_id, batch_seq_len)
class MIMFixedLabelAllLabels(MIMVarLabel):
    """Per-label input windows (as MIMVarLabel), but validation keeps every
    labeled date instead of selecting a single one."""

    def valLabelSelect(self, data, label_id=-1):
        # Override: no label selection — return the data untouched.
        return data
class MIMVarLabel_PaddedSeq(MIMVarLabel):
    """MIMVarLabel variant for label dates that do not have a full
    self.batch_seq_len window of history: those windows are zero-padded on
    the left up to the fixed batch shape."""
    def batchTrainPreprocess(self, batch, ds, label_date_id, batch_seq_len=None):
        """Build the float16 input window ending at `label_date_id`.

        If `label_date_id` is in ds.padded_dates, fewer than
        self.batch_seq_len steps exist before it, so the available steps
        are left-padded with zeros to batch['shape']; otherwise a plain
        self.batch_seq_len-long slice is taken.  The `batch_seq_len`
        parameter is ignored here — self.batch_seq_len is used instead.
        """
        sample_n = batch['in'].shape[0]
        #print("Label, seq start, seq end",label_date_id,label_date_id-batch_seq_len+1,label_date_id+1)
        if label_date_id+1!=0:
            if label_date_id in ds.padded_dates:
                # Not enough history: keep everything up to the label date
                # and left-pad with zeros to the full batch shape.
                unpadded_input = batch['in'][:, :label_date_id+1]
                len_input_seq = unpadded_input.shape[1]
                #deb.prints(len_input_seq)
                input_ = np.zeros(batch['shape']).astype(np.float16)
                input_[:, -len_input_seq:] = unpadded_input
            else:
                #print(batch['in'].shape,label_date_id-self.batch_seq_len+1,label_date_id+1)
                input_ = batch['in'][:, label_date_id-self.batch_seq_len+1:label_date_id+1]
                ##print(input_.shape)
        else:
            # label_date_id == -1: slice to the end of the sequence (an end
            # bound of 0 would produce an empty slice).
            #print(batch['in'].shape,label_date_id-self.batch_seq_len+1,label_date_id+1)
            input_ = batch['in'][:, label_date_id-self.batch_seq_len+1:]
            ##print(input_.shape)
        #print("exception", input_.shape)
        input_ = input_.astype(np.float16)
        input_ = ds.addDotyPadded(input_,
            bounds = [label_date_id-self.batch_seq_len+1, label_date_id+1],
            seq_len = self.batch_seq_len,
            sample_n = sample_n)
        #print(len(input_), input_[0].shape, input_[1].shape)
        return input_
    def batchMetricSplitPreprocess(self, batch, data, split='val'):
        """Take the last 12 time steps of the given split as the metric input.

        NOTE(review): signature differs from the parent's
        batchMetricSplitPreprocess(batch, data, label_date_id,
        batch_seq_len) — confirm which one callers expect.
        """
        input_ = batch[split]['in'][:,-12:].astype(np.float16)
        input_ = data.addDoty(input_, bounds=[-12, None])
        return input_
    # to do: replace batchMetricSplitPreprocess by iteration of all 12 labels,
    # including padded first input sequences.
    def valLabelSelect(self, data, label_id = -1):
        """Override: keep every labeled date for validation (no selection)."""
        return data
class MIMVarSeqLabel(MIMVariable):
    """Variable mode that samples a random window length per batch, between a
    minimum usable length and the maximum length allowed by the label date."""

    def __init__(self):
        pass

    def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len, t_len):
        """Slice a randomly sized window ending at `label_date_id`, left-pad
        it with zeros to the fixed model length, cast to float16, add doty.

        NOTE(review): the original body read an undefined name
        `min_seq_len` (NameError at call time) while immediately
        overwriting the incoming `batch_seq_len`.  MIMVariable.trainingInit
        returns min_seq_len, which the training loop plausibly forwards
        here as `batch_seq_len` — so the incoming value is treated as the
        minimum sequence length.  Confirm against the caller.
        """
        min_seq_len = batch_seq_len
        # Maximum window that still ends at the label date, e.g. with
        # t_len=20: label -1 -> 20 usable steps, label -12 -> 9.
        max_seq_len = t_len + (label_date_id + 1)
        if min_seq_len == max_seq_len:
            batch_seq_len = min_seq_len
        else:
            # randint's upper bound is exclusive, hence the +1.
            batch_seq_len = np.random.randint(min_seq_len, max_seq_len + 1)
        if label_date_id + 1 != 0:
            batch['train']['in'] = batch['train']['in'][:, label_date_id - batch_seq_len + 1:label_date_id + 1]
        else:
            # label_date_id == -1: slice to the end (an end bound of 0
            # would produce an empty slice).
            batch['train']['in'] = batch['train']['in'][:, label_date_id - batch_seq_len + 1:]
        assert batch['train']['in'].shape[1] == batch_seq_len
        # Left-pad with zeros up to the fixed model input length.
        input_ = np.zeros(batch['train']['shape']).astype(np.float16)
        input_[:, -batch_seq_len:] = batch['train']['in']
        input_ = data.addDoty(input_)
        return input_
| [
"numpy.random.randint",
"numpy.zeros",
"deb.prints"
] | [((878, 913), 'deb.prints', 'deb.prints', (["batch['train']['shape']"], {}), "(batch['train']['shape'])\n", (888, 913), False, 'import deb\n'), ((2119, 2154), 'deb.prints', 'deb.prints', (["batch['train']['shape']"], {}), "(batch['train']['shape'])\n", (2129, 2154), False, 'import deb\n'), ((2196, 2226), 'deb.prints', 'deb.prints', (['data.labeled_dates'], {}), '(data.labeled_dates)\n', (2206, 2226), False, 'import deb\n'), ((2306, 2329), 'deb.prints', 'deb.prints', (['min_seq_len'], {}), '(min_seq_len)\n', (2316, 2329), False, 'import deb\n'), ((2636, 2682), 'deb.prints', 'deb.prints', (["data.patches['val']['label'].shape"], {}), "(data.patches['val']['label'].shape)\n", (2646, 2682), False, 'import deb\n'), ((2692, 2739), 'deb.prints', 'deb.prints', (["data.patches['test']['label'].shape"], {}), "(data.patches['test']['label'].shape)\n", (2702, 2739), False, 'import deb\n'), ((6485, 6532), 'numpy.random.randint', 'np.random.randint', (['min_seq_len', '(max_seq_len + 1)'], {}), '(min_seq_len, max_seq_len + 1)\n', (6502, 6532), True, 'import numpy as np\n'), ((7392, 7425), 'numpy.zeros', 'np.zeros', (["batch['train']['shape']"], {}), "(batch['train']['shape'])\n", (7400, 7425), True, 'import numpy as np\n'), ((4452, 4476), 'numpy.zeros', 'np.zeros', (["batch['shape']"], {}), "(batch['shape'])\n", (4460, 4476), True, 'import numpy as np\n')] |
"""
Spatial loop information extraction and integration.
Always note that the first level in spatial loops is MAC level (pure logic), not a memory level.
Thus the spatial loops always have one more level than temporal loops.
"""
import numpy as np
class SpatialLoop(object):
    """Per-operand ('W'/'I'/'O') bookkeeping of the spatial loop unrolling.

    For every level of each operand's memory system this derives: the
    unrolled size of each loop index (Bu..FXu), total/unique/duplicated
    unit counts, serve scopes between adjacent levels, and the real
    bandwidth-boost factors.  Level 0 of every per-level list is the MAC
    (pure logic) level, so the spatial lists are one level longer than the
    temporal ones (see module docstring).
    """
    def __init__(self, spatial_loop, layer_loop_info):
        """Compute all derived spatial-unrolling attributes.

        spatial_loop: dict mapping 'W'/'I'/'O' to a per-level list (MAC
            level first) of [loop_index, unroll_factor] pairs, with loop
            indices 1..7 = FX, FY, OX, OY, C, K, B (see the if/elif chain).
        layer_loop_info: dict providing at least the strides 'SX', 'SY',
            'SFX', 'SFY' used by the input feature-map size equations.
        """
        self.spatial_loop = spatial_loop
        Bu = {}
        Ku = {}
        Cu = {}
        OYu = {}
        OXu = {}
        FYu = {}
        FXu = {}
        unroll_size = {}
        unit_count = {}
        unit_unique = {}
        unit_duplicate = {}
        loop_levels = {}
        unit_serve_scope = {}
        data_serve_scope = {}
        real_bw_boost = {}
        real_bw_boost_high = {}
        real_bw_boost_low = {}
        for operand in ['W', 'I', 'O']:
            '''
            Initialize the list size to the number of memory levels.
            Bu/Ku/.../FXu: the spatial loop unrolling size for each index
            at different mem level in W/I/O mem system.
            '''
            Bu[operand] = [1] * spatial_loop[operand].__len__()
            Ku[operand] = [1] * spatial_loop[operand].__len__()
            Cu[operand] = [1] * spatial_loop[operand].__len__()
            OYu[operand] = [1] * spatial_loop[operand].__len__()
            OXu[operand] = [1] * spatial_loop[operand].__len__()
            FYu[operand] = [1] * spatial_loop[operand].__len__()
            FXu[operand] = [1] * spatial_loop[operand].__len__()
            '''
            unroll_size: the individual spatial unrolling at each level.
            unit_count: number of MAC/mem unit at certain level in W/I/O mem system,
            i.e. total spatial unrolling at each level.
            unit_unique: number of unique MAC/mem unit at certain level in W/I/O mem system,
            i.e. MACs operate on different values / mem units that hold different values.
            unit_duplicate: number of duplicated MAC/mem unit at certain level in W/I/O mem system,
            i.e. MACs operate on same values / mem units that hold same values.
            loop_levels: number of mem levels in W/I/O mem system + !!! the innermost MAC logic level !!! .
            '''
            unroll_size[operand] = [1] * spatial_loop[operand].__len__()
            unit_count[operand] = [1] * spatial_loop[operand].__len__()
            unit_unique[operand] = [1] * spatial_loop[operand].__len__()
            unit_duplicate[operand] = [1] * spatial_loop[operand].__len__()
            loop_levels[operand] = spatial_loop[operand].__len__()
            '''
            unit_serve_scope: one mem at current level serves how many unit at one level below.
            data_serve_scope: one data element at current level serves how many unit at one level below.
            (!! this parameter can be used as spatially data-sharing hint !!)
            real_bw_boost: one mem at current level serves how many unit at one level below with different data.
            '''
            unit_serve_scope[operand] = [1] * (spatial_loop[operand].__len__() - 1)
            data_serve_scope[operand] = [1] * (spatial_loop[operand].__len__() - 1)
            real_bw_boost[operand] = [1] * (spatial_loop[operand].__len__() - 1)
            real_bw_boost_high[operand] = [1] * (spatial_loop[operand].__len__() - 1)
            real_bw_boost_low[operand] = [1] * (spatial_loop[operand].__len__() - 1)
            # Loop index encoding: 7=B, 6=K, 5=C, 4=OY, 3=OX, 2=FY, 1=FX.
            for level, current_level_loops in enumerate(spatial_loop[operand]):
                for loop in current_level_loops:
                    if loop[0] == 7:
                        Bu[operand][level] *= loop[1]
                    elif loop[0] == 6:
                        Ku[operand][level] *= loop[1]
                    elif loop[0] == 5:
                        Cu[operand][level] *= loop[1]
                    elif loop[0] == 4:
                        OYu[operand][level] *= loop[1]
                    elif loop[0] == 3:
                        OXu[operand][level] *= loop[1]
                    elif loop[0] == 2:
                        FYu[operand][level] *= loop[1]
                    elif loop[0] == 1:
                        FXu[operand][level] *= loop[1]
                    else:
                        raise IndexError('The loop index can only be values from "1" to "7".')
                    unroll_size[operand][level] *= loop[1]
        '''
        unit_count is calculated by multiplying all the spatial loop unrolling index values (unroll_size) for W/I/O
        from current level to top level.
        Using ().item() to change datatype from numpy int64 to python default int.
        '''
        for operand in ['W', 'I', 'O']:
            for level in range(loop_levels[operand]):
                unit_count[operand][level] = (np.prod(unroll_size[operand][level:loop_levels[operand]])).item()
        '''
        unit_unique is calculated by multiplying all the relevant spatial loop unrolling index values for W/I/O
        from current level to top level.
        buf_replicate = unit_count/unit_unique
        Using // (Floor Division) here to get an integer result out from division.
        '''
        # Weight: relevant loops are K, C, FX, FY.
        for level in range(loop_levels['W']):
            unit_unique['W'][level] = (np.prod(Ku['W'][level:loop_levels['W']] +
                                               Cu['W'][level:loop_levels['W']] +
                                               FXu['W'][level:loop_levels['W']] +
                                               FYu['W'][level:loop_levels['W']])).item()
            unit_duplicate['W'][level] = unit_count['W'][level] / unit_unique['W'][level]
        for level in range(loop_levels['I']):
            # only when both FY and OY are spatially unrolled, IYu should be calculated by the 'advanced' equation:
            # IY = SY * (OY - 1) + SFY * (FY - 1) + 1
            # when only one of them is spatially unrolled, IYu should be calculated by the 'basic' equation:
            # IY = OY + FY - 1
            # For example: when stride on IY dimension = 2:
            # OYu 4 & FYu 1 -> IY = OY + FY - 1 = 4 + 1 - 1 = 4
            # we need IX 1, 3, 5, 7. (4 elements in total)
            # OYu 4 & FYu 3 -> IY = SY * (OY - 1) + SFY * (FY - 1) + 1 = 2*(4-1)+1*(3-1)+1 = 9
            # we need IX 1, 2, 3, 4, 5, 6, 7, 8, 9. (9 elements in total)
            if OYu['I'][level] == 1 or FYu['I'][level] == 1:
                IYu = (np.prod(OYu['I'][level:loop_levels['I']]) +
                       np.prod(FYu['I'][level:loop_levels['I']]) - 1).item()
            else:
                IYu = (layer_loop_info['SY'] * (np.prod(OYu['I'][level:loop_levels['I']]) - 1) +
                       layer_loop_info['SFY'] * (np.prod(FYu['I'][level:loop_levels['I']]) - 1) + 1).item()
            if OXu['I'][level] == 1 or FXu['I'][level] == 1:
                IXu = (np.prod(OXu['I'][level:loop_levels['I']]) +
                       np.prod(FXu['I'][level:loop_levels['I']]) - 1).item()
            else:
                IXu = (layer_loop_info['SX'] * (np.prod(OXu['I'][level:loop_levels['I']]) - 1) +
                       layer_loop_info['SFX'] * (np.prod(FXu['I'][level:loop_levels['I']]) - 1) + 1).item()
            # Input: relevant loops are B, C and the derived IY/IX extents.
            unit_unique['I'][level] = (np.prod(Bu['I'][level:loop_levels['I']] +
                                               Cu['I'][level:loop_levels['I']] +
                                               [IYu] + [IXu])).item()
            unit_duplicate['I'][level] = unit_count['I'][level] / unit_unique['I'][level]
        # Output: relevant loops are B, K, OX, OY.
        for level in range(loop_levels['O']):
            unit_unique['O'][level] = (np.prod(Bu['O'][level:loop_levels['O']] +
                                               Ku['O'][level:loop_levels['O']] +
                                               OXu['O'][level:loop_levels['O']] +
                                               OYu['O'][level:loop_levels['O']])).item()
            unit_duplicate['O'][level] = unit_count['O'][level] / unit_unique['O'][level]
        '''
        unit_serve_scope is calculated by dividing unit_count at current level by unit_count at one level above.
        data_serve_scope is calculated by dividing unit_duplicate at current level by unit_count at one level above.
        real_bw_boost can calculated by either dividing unit_unique at current level by unit_count at one level above,
        or by dividing unit_serve_scope by data_serve_scope element-wise.
        Note that the number of level here equals to total memory level.
        MAC level is excluded naturally here.
        e.g. real_bw_boost      = [ 1,  2,  3, 4],
             real_bw_boost_high = [ 1,  2,  6, 24],
             real_bw_boost_low  = [24, 24, 12, 4]
        '''
        for operand in ['W', 'I', 'O']:
            for level in range(int(loop_levels[operand] - 1)):
                unit_serve_scope[operand][level] = unit_count[operand][level] / unit_count[operand][level + 1]
                data_serve_scope[operand][level] = unit_duplicate[operand][level] / unit_duplicate[operand][level + 1]
                real_bw_boost[operand][level] = unit_unique[operand][level] / unit_unique[operand][level + 1]
        # Cumulative products of real_bw_boost from the bottom (high) and
        # from the top (low) of the memory hierarchy.
        for operand in spatial_loop.keys():
            for level in range(int(loop_levels[operand] - 1)):
                real_bw_boost_high[operand][level] = (np.prod(real_bw_boost[operand][0:level + 1]))
                real_bw_boost_low[operand][level] = (np.prod(real_bw_boost[operand][level:loop_levels[operand]]))
        '''
        Simply extract spatial unrolling loops in spatial_loop_list.
        '''
        spatial_loop_list = []
        for loop_list in spatial_loop['W']:
            if not loop_list:
                continue
            else:
                for this_loop in loop_list:
                    spatial_loop_list.append(this_loop)
        '''
        Added for LOMA
        '''
        # Relevant loop type numbers for each operand
        relevant_loop_type_numbers = {'W': [1,2,5,6], 'I': [5,7], 'O': [3,4,6,7]}
        irrelevant_loop_type_numbers = {'W': [3,4,7], 'I': [], 'O': [1,2,5]}
        ## Extract the relevant/irrelevant loop unrolling for each operand
        su_relevant_size_dict = {'W': [], 'I': [], 'O': []}
        su_irrelevant_size_dict = {'W': [], 'I': [], 'O': []}
        # WEIGHT and OUTPUT and INPUT relevant
        for operand in ['W', 'O', 'I']:
            for level in range(0, len(spatial_loop[operand])): # start at 0 = include MAC level
                su_relevant_size = 1
                su_irrelevant_size = 1
                for [loop_type_number, su_factor] in spatial_loop[operand][level]:
                    if loop_type_number in relevant_loop_type_numbers[operand]:
                        su_relevant_size *= su_factor
                    elif loop_type_number in irrelevant_loop_type_numbers[operand]:
                        su_irrelevant_size *= su_factor
                su_relevant_size_dict[operand].append(su_relevant_size)
                su_irrelevant_size_dict[operand].append(su_irrelevant_size)
        # INPUT partially relevant
        su_pr_size_dict_input = {1: [], 2: [], 3: [], 4: []} # 1 = FX, 2 = FY, 3 = OX, 4 = OY
        pr_loops = [1,2,3,4] # 1 = FX, 2 = FY, 3 = OX, 4 = OY
        for level in range(0, len(spatial_loop['I'])):
            su_pr_size = {1: 1, 2: 1, 3: 1, 4: 1}
            # NOTE(review): `operand` here is left over from the loop above
            # and equals 'I' at this point, so spatial_loop[operand] is
            # spatial_loop['I'] — correct as written, but fragile if the
            # loop order above ever changes.
            for [loop_type_number, su_factor] in spatial_loop[operand][level]:
                if loop_type_number in pr_loops:
                    su_pr_size[loop_type_number] *= su_factor
            for key in pr_loops:
                su_pr_size_dict_input[key].append(su_pr_size[key])
        self.Bu = Bu
        self.Ku = Ku
        self.Cu = Cu
        self.OYu = OYu
        self.OXu = OXu
        self.FYu = FYu
        self.FXu = FXu
        self.unroll_size = unroll_size
        self.unit_count = unit_count
        self.unit_unique = unit_unique
        self.unit_duplicate = unit_duplicate
        self.loop_levels = loop_levels
        self.unit_serve_scope = unit_serve_scope
        self.data_serve_scope = data_serve_scope
        self.real_bw_boost = real_bw_boost
        self.real_bw_boost_high = real_bw_boost_high
        self.real_bw_boost_low = real_bw_boost_low
        self.spatial_loop_list = spatial_loop_list
        self.su_relevant_size_dict = su_relevant_size_dict
        self.su_irrelevant_size_dict = su_irrelevant_size_dict
        self.su_pr_size_dict_input = su_pr_size_dict_input
    @classmethod
    def extract_loop_info(cls, spatial_loop, layer_loop_info):
        """Factory wrapper: build a SpatialLoop from the raw loop structures."""
        return cls(spatial_loop, layer_loop_info)
| [
"numpy.prod"
] | [((9412, 9456), 'numpy.prod', 'np.prod', (['real_bw_boost[operand][0:level + 1]'], {}), '(real_bw_boost[operand][0:level + 1])\n', (9419, 9456), True, 'import numpy as np\n'), ((9511, 9570), 'numpy.prod', 'np.prod', (['real_bw_boost[operand][level:loop_levels[operand]]'], {}), '(real_bw_boost[operand][level:loop_levels[operand]])\n', (9518, 9570), True, 'import numpy as np\n'), ((5304, 5452), 'numpy.prod', 'np.prod', (["(Ku['W'][level:loop_levels['W']] + Cu['W'][level:loop_levels['W']] + FXu[\n 'W'][level:loop_levels['W']] + FYu['W'][level:loop_levels['W']])"], {}), "(Ku['W'][level:loop_levels['W']] + Cu['W'][level:loop_levels['W']] +\n FXu['W'][level:loop_levels['W']] + FYu['W'][level:loop_levels['W']])\n", (5311, 5452), True, 'import numpy as np\n'), ((7298, 7392), 'numpy.prod', 'np.prod', (["(Bu['I'][level:loop_levels['I']] + Cu['I'][level:loop_levels['I']] + [IYu] +\n [IXu])"], {}), "(Bu['I'][level:loop_levels['I']] + Cu['I'][level:loop_levels['I']] +\n [IYu] + [IXu])\n", (7305, 7392), True, 'import numpy as np\n'), ((7668, 7816), 'numpy.prod', 'np.prod', (["(Bu['O'][level:loop_levels['O']] + Ku['O'][level:loop_levels['O']] + OXu[\n 'O'][level:loop_levels['O']] + OYu['O'][level:loop_levels['O']])"], {}), "(Bu['O'][level:loop_levels['O']] + Ku['O'][level:loop_levels['O']] +\n OXu['O'][level:loop_levels['O']] + OYu['O'][level:loop_levels['O']])\n", (7675, 7816), True, 'import numpy as np\n'), ((4832, 4889), 'numpy.prod', 'np.prod', (['unroll_size[operand][level:loop_levels[operand]]'], {}), '(unroll_size[operand][level:loop_levels[operand]])\n', (4839, 4889), True, 'import numpy as np\n'), ((6486, 6527), 'numpy.prod', 'np.prod', (["OYu['I'][level:loop_levels['I']]"], {}), "(OYu['I'][level:loop_levels['I']])\n", (6493, 6527), True, 'import numpy as np\n'), ((6553, 6594), 'numpy.prod', 'np.prod', (["FYu['I'][level:loop_levels['I']]"], {}), "(FYu['I'][level:loop_levels['I']])\n", (6560, 6594), True, 'import numpy as np\n'), ((6915, 6956), 'numpy.prod', 
'np.prod', (["OXu['I'][level:loop_levels['I']]"], {}), "(OXu['I'][level:loop_levels['I']])\n", (6922, 6956), True, 'import numpy as np\n'), ((6982, 7023), 'numpy.prod', 'np.prod', (["FXu['I'][level:loop_levels['I']]"], {}), "(FXu['I'][level:loop_levels['I']])\n", (6989, 7023), True, 'import numpy as np\n'), ((6673, 6714), 'numpy.prod', 'np.prod', (["OYu['I'][level:loop_levels['I']]"], {}), "(OYu['I'][level:loop_levels['I']])\n", (6680, 6714), True, 'import numpy as np\n'), ((6771, 6812), 'numpy.prod', 'np.prod', (["FYu['I'][level:loop_levels['I']]"], {}), "(FYu['I'][level:loop_levels['I']])\n", (6778, 6812), True, 'import numpy as np\n'), ((7102, 7143), 'numpy.prod', 'np.prod', (["OXu['I'][level:loop_levels['I']]"], {}), "(OXu['I'][level:loop_levels['I']])\n", (7109, 7143), True, 'import numpy as np\n'), ((7200, 7241), 'numpy.prod', 'np.prod', (["FXu['I'][level:loop_levels['I']]"], {}), "(FXu['I'][level:loop_levels['I']])\n", (7207, 7241), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
from epimargin.model import Model, ModelUnit
from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies
from epimargin.smoothing import convolution
from epimargin.utils import cwd
# model details
CI = 0.99  # credible-interval level used for estimation and plotting
smoothing = 10  # smoothing window length passed to convolution()
if __name__ == "__main__":
    root = cwd()
    data = root/"data"
    output = root/"output"
    # create the data/output folders on first run
    if not data.exists():
        data.mkdir()
    if not output.exists():
        output.mkdir()
    # define data versions for api files
    paths = {
        "v3": [data_path(i) for i in (1, 2)],
        "v4": [data_path(i) for i in (3, 4, 5, 6, 7, 8)]
    }
    # download data from india covid 19 api
    for target in paths['v3'] + paths['v4']:
        download_data(data, target)
    # merge all downloaded case files into one dataframe
    df = load_all_data(
        v3_paths = [data/filepath for filepath in paths['v3']],
        v4_paths = [data/filepath for filepath in paths['v4']]
    )
    data_recency = str(df["date_announced"].max()).split()[0]
    run_date = str(pd.Timestamp.now()).split()[0]  # NOTE(review): computed but not used below
    ts = get_time_series(df[df.detected_state == "Delhi"])
    # estimate Rt and expected daily cases from the positive daily deltas
    (
        dates,
        RR_pred, RR_CI_upper, RR_CI_lower,
        T_pred, T_CI_upper, T_CI_lower,
        total_cases, new_cases_ts,
        anomalies, anomaly_dates
    ) = analytical_MPVS(ts.delta[ts.delta > 0], CI = CI, smoothing = convolution(window = smoothing))
    #= analytical_MPVS(ts.Hospitalized[ts.Hospitalized > 0], CI = CI, smoothing = lambda ts: box_filter(ts, smoothing, 10))
    np.random.seed(33)
    # 14-day forward simulation seeded with the latest estimates
    # (18_000_000 is presumably Delhi's population — confirm)
    delhi = Model([ModelUnit("Delhi", 18_000_000, I0 = T_pred[-1], RR0 = RR_pred[-1], mobility = 0)])
    delhi.run(14, np.zeros((1,1)))
    # forecast dates: one per simulated delta_T step after the last observed date
    t_pred = [dates[-1] + pd.Timedelta(days = i) for i in range(len(delhi[0].delta_T))]
    plot_RR_est(dates, RR_pred, RR_CI_upper, RR_CI_lower, CI)
    PlotDevice().title("Delhi: Reproductive Number Estimate").xlabel("Date").ylabel("Rt", rotation=0, labelpad=20)
    plt.show()
    # anchor the forecast confidence band to the last estimated values
    delhi[0].lower_CI[0] = T_CI_lower[-1]
    delhi[0].upper_CI[0] = T_CI_upper[-1]
    print(delhi[0].delta_T)
    print(delhi[0].lower_CI)
    print(delhi[0].upper_CI)
    plot_T_anomalies(dates, T_pred, T_CI_upper, T_CI_lower, new_cases_ts, anomaly_dates, anomalies, CI)
    plt.scatter(t_pred, delhi[0].delta_T, color = "tomato", s = 4, label = "Predicted Net Cases")
    plt.fill_between(t_pred, delhi[0].lower_CI, delhi[0].upper_CI, color = "tomato", alpha = 0.3, label="99% CI (forecast)")
    plt.legend()
    PlotDevice().title("Delhi: Net Daily Cases").xlabel("Date").ylabel("Cases")
    plt.show()
    # persist the Rt estimates and the combined observed+forecast case series
    pd.DataFrame(data={
        "date"       : dates,
        "Rt"         : RR_pred,
        "Rt_CI_upper": RR_CI_upper,
        "Rt_CI_lower": RR_CI_lower
    }).set_index("date").to_csv(output/"Rt.csv")
    pd.DataFrame(data={
        "date"                    : list(dates) + t_pred[1:],
        "net_daily_cases"         : T_pred + delhi[0].delta_T[1:],
        "net_daily_cases_CI_upper": T_CI_upper + delhi[0].upper_CI[1:],
        "net_daily_cases_CI_lower": T_CI_lower + delhi[0].lower_CI[1:]
    }).set_index("date").to_csv(output/"dT.csv")
| [
"epimargin.plots.plot_RR_est",
"matplotlib.pyplot.fill_between",
"epimargin.smoothing.convolution",
"epimargin.utils.cwd",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"epimargin.etl.covid19india.load_all_data",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"epim... | [((531, 536), 'epimargin.utils.cwd', 'cwd', ([], {}), '()\n', (534, 536), False, 'from epimargin.utils import cwd\n'), ((988, 1121), 'epimargin.etl.covid19india.load_all_data', 'load_all_data', ([], {'v3_paths': "[(data / filepath) for filepath in paths['v3']]", 'v4_paths': "[(data / filepath) for filepath in paths['v4']]"}), "(v3_paths=[(data / filepath) for filepath in paths['v3']],\n v4_paths=[(data / filepath) for filepath in paths['v4']])\n", (1001, 1121), False, 'from epimargin.etl.covid19india import data_path, get_time_series, load_all_data\n'), ((1263, 1312), 'epimargin.etl.covid19india.get_time_series', 'get_time_series', (["df[df.detected_state == 'Delhi']"], {}), "(df[df.detected_state == 'Delhi'])\n", (1278, 1312), False, 'from epimargin.etl.covid19india import data_path, get_time_series, load_all_data\n'), ((1718, 1736), 'numpy.random.seed', 'np.random.seed', (['(33)'], {}), '(33)\n', (1732, 1736), True, 'import numpy as np\n'), ((1968, 2025), 'epimargin.plots.plot_RR_est', 'plot_RR_est', (['dates', 'RR_pred', 'RR_CI_upper', 'RR_CI_lower', 'CI'], {}), '(dates, RR_pred, RR_CI_upper, RR_CI_lower, CI)\n', (1979, 2025), False, 'from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies\n'), ((2145, 2155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2153, 2155), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2438), 'epimargin.plots.plot_T_anomalies', 'plot_T_anomalies', (['dates', 'T_pred', 'T_CI_upper', 'T_CI_lower', 'new_cases_ts', 'anomaly_dates', 'anomalies', 'CI'], {}), '(dates, T_pred, T_CI_upper, T_CI_lower, new_cases_ts,\n anomaly_dates, anomalies, CI)\n', (2351, 2438), False, 'from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies\n'), ((2439, 2531), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t_pred', 'delhi[0].delta_T'], {'color': '"""tomato"""', 's': '(4)', 'label': '"""Predicted Net Cases"""'}), "(t_pred, delhi[0].delta_T, color='tomato', s=4, label=\n 'Predicted Net Cases')\n", (2450, 
2531), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2658), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['t_pred', 'delhi[0].lower_CI', 'delhi[0].upper_CI'], {'color': '"""tomato"""', 'alpha': '(0.3)', 'label': '"""99% CI (forecast)"""'}), "(t_pred, delhi[0].lower_CI, delhi[0].upper_CI, color=\n 'tomato', alpha=0.3, label='99% CI (forecast)')\n", (2553, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2674), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2672, 2674), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2767, 2769), True, 'import matplotlib.pyplot as plt\n'), ((950, 977), 'epimargin.etl.commons.download_data', 'download_data', (['data', 'target'], {}), '(data, target)\n', (963, 977), False, 'from epimargin.etl.commons import download_data\n'), ((1857, 1873), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1865, 1873), True, 'import numpy as np\n'), ((758, 770), 'epimargin.etl.covid19india.data_path', 'data_path', (['i'], {}), '(i)\n', (767, 770), False, 'from epimargin.etl.covid19india import data_path, get_time_series, load_all_data\n'), ((804, 816), 'epimargin.etl.covid19india.data_path', 'data_path', (['i'], {}), '(i)\n', (813, 816), False, 'from epimargin.etl.covid19india import data_path, get_time_series, load_all_data\n'), ((1555, 1584), 'epimargin.smoothing.convolution', 'convolution', ([], {'window': 'smoothing'}), '(window=smoothing)\n', (1566, 1584), False, 'from epimargin.smoothing import convolution\n'), ((1756, 1828), 'epimargin.model.ModelUnit', 'ModelUnit', (['"""Delhi"""', '(18000000)'], {'I0': 'T_pred[-1]', 'RR0': 'RR_pred[-1]', 'mobility': '(0)'}), "('Delhi', 18000000, I0=T_pred[-1], RR0=RR_pred[-1], mobility=0)\n", (1765, 1828), False, 'from epimargin.model import Model, ModelUnit\n'), ((1901, 1921), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'i'}), '(days=i)\n', (1913, 1921), True, 'import pandas as pd\n'), 
((1222, 1240), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (1238, 1240), True, 'import pandas as pd\n'), ((2775, 2884), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'date': dates, 'Rt': RR_pred, 'Rt_CI_upper': RR_CI_upper, 'Rt_CI_lower':\n RR_CI_lower}"}), "(data={'date': dates, 'Rt': RR_pred, 'Rt_CI_upper': RR_CI_upper,\n 'Rt_CI_lower': RR_CI_lower})\n", (2787, 2884), True, 'import pandas as pd\n'), ((2030, 2042), 'epimargin.plots.PlotDevice', 'PlotDevice', ([], {}), '()\n', (2040, 2042), False, 'from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies\n'), ((2679, 2691), 'epimargin.plots.PlotDevice', 'PlotDevice', ([], {}), '()\n', (2689, 2691), False, 'from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies\n')] |
# source KZ-PANDA/bin/activate ##too
# Simple white-object motion detector: keeps only near-white pixels,
# diffs each frame against the first frame and logs a timestamp per frame.
## conda activate KZ37
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # let the camera warm up
# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream (background reference)
firstFrame = None
# interactive tuning parameters
print("Press 'q' or 'Ctrl+C' for break")
min_area = int(input("Input min area (for example, 400): "))
height = int(input("Input 1 parameter like a odd number (for example, 21): "))
width = int(input("Input 2 parameter like a odd number (for example, 21): "))
kernel = (height, width)  # Gaussian blur kernel; both values must be odd
N1 = int(input("Input lower color (for example, 240): "))
N2 = int(input("Input upper color (for example, 255): "))
# Bug fix: this list was named `time`, shadowing the imported `time` module.
timestamps = []
# loop over the frames of the video
while True:
    # grab the current frame (VideoCapture returns (ret, frame) tuples)
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Motionless"
    # if the frame could not be grabbed, we reached the end of the video
    if frame is None:
        break
    # keep only the pixels whose channels all lie in [N1, N2]
    lower_white = np.array([N1, N1, N1])
    upper_white = np.array([N2, N2, N2])
    mask = cv2.inRange(frame, lower_white, upper_white)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    res = imutils.resize(res, width=500)
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, kernel, 0)
    # the very first frame becomes the static background reference
    if firstFrame is None:
        firstFrame = gray
        continue
    # threshold the difference between the current and reference frames
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # dilate the thresholded image to fill in holes, then find contours
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < min_area:
            continue
        # draw the bounding box and flag motion
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Motion"
    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    # Bug fix: the original if/else on `text` had two identical branches,
    # so the status test was dropped — every frame gets a timestamp.
    timestamps.append(datetime.datetime.now().strftime("%I:%M:%S%p"))
    # show the frames and check for a quit key
    cv2.imshow('Result from filter', res)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break
# dump the collected per-frame timestamps
with open('Time_of_movements_motion.txt', 'w') as f:
    f.write(str(timestamps))
# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
# python pech-kz_wind.py --video pech1.avi
# python pech-kz_wind.py --video pech1.avi | [
"cv2.rectangle",
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"imutils.video.VideoStream",
"argparse.ArgumentParser",
"cv2.threshold",
"cv2.contourArea",
"imutils.grab_contours",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.inRange",
"cv2.bitwise_and",
... | [((300, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (323, 325), False, 'import argparse\n'), ((4339, 4362), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4360, 4362), False, 'import cv2\n'), ((666, 681), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (676, 681), False, 'import time\n'), ((742, 773), 'cv2.VideoCapture', 'cv2.VideoCapture', (["args['video']"], {}), "(args['video'])\n", (758, 773), False, 'import cv2\n'), ((1683, 1705), 'numpy.array', 'np.array', (['[N1, N1, N1]'], {}), '([N1, N1, N1])\n', (1691, 1705), True, 'import numpy as np\n'), ((1719, 1741), 'numpy.array', 'np.array', (['[N2, N2, N2]'], {}), '([N2, N2, N2])\n', (1727, 1741), True, 'import numpy as np\n'), ((1748, 1792), 'cv2.inRange', 'cv2.inRange', (['frame', 'lower_white', 'upper_white'], {}), '(frame, lower_white, upper_white)\n', (1759, 1792), False, 'import cv2\n'), ((1800, 1840), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1815, 1840), False, 'import cv2\n'), ((1934, 1966), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(500)'}), '(frame, width=500)\n', (1948, 1966), False, 'import imutils\n'), ((1974, 2004), 'imutils.resize', 'imutils.resize', (['res'], {'width': '(500)'}), '(res, width=500)\n', (1988, 2004), False, 'import imutils\n'), ((2013, 2050), 'cv2.cvtColor', 'cv2.cvtColor', (['res', 'cv2.COLOR_BGR2GRAY'], {}), '(res, cv2.COLOR_BGR2GRAY)\n', (2025, 2050), False, 'import cv2\n'), ((2066, 2099), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', 'kernel', '(0)'], {}), '(gray, kernel, 0)\n', (2082, 2099), False, 'import cv2\n'), ((2319, 2348), 'cv2.absdiff', 'cv2.absdiff', (['firstFrame', 'gray'], {}), '(firstFrame, gray)\n', (2330, 2348), False, 'import cv2\n'), ((2521, 2559), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(2)'}), '(thresh, None, iterations=2)\n', (2531, 2559), False, 'import cv2\n'), ((2654, 2681), 
'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (2675, 2681), False, 'import imutils\n'), ((3613, 3650), 'cv2.imshow', 'cv2.imshow', (['"""Result from filter"""', 'res'], {}), "('Result from filter', res)\n", (3623, 3650), False, 'import cv2\n'), ((3651, 3677), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (3661, 3677), False, 'import cv2\n'), ((2359, 2412), 'cv2.threshold', 'cv2.threshold', (['frameDelta', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(frameDelta, 25, 255, cv2.THRESH_BINARY)\n', (2372, 2412), False, 'import cv2\n'), ((2946, 2965), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (2962, 2965), False, 'import cv2\n'), ((2968, 3028), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2981, 3028), False, 'import cv2\n'), ((3782, 3796), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3793, 3796), False, 'import cv2\n'), ((638, 656), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (649, 656), False, 'from imutils.video import VideoStream\n'), ((2774, 2792), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (2789, 2792), False, 'import cv2\n'), ((3223, 3246), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3244, 3246), False, 'import datetime\n'), ((3409, 3432), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3430, 3432), False, 'import datetime\n'), ((3478, 3501), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3499, 3501), False, 'import datetime\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, ParameterTuple, context, set_seed
from mindspore.train import DatasetHelper, connect_network_with_dataset
import mindspore.dataset as ds
# Run every test in graph mode on GPU; pin the global seed so the random
# inputs generated below are reproducible across runs.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
set_seed(2)
def _exec_preprocess(network, is_train, dataset, dataset_sink_mode, epoch_num, sink_size):
    """Wrap *dataset* in a DatasetHelper and, in sink mode, bind it to *network*.

    Returns a ``(dataset_helper, network)`` pair ready for iteration.
    """
    # Evaluation under data sinking steps the dataset one batch at a time.
    if dataset_sink_mode and not is_train:
        dataset.__loop_size__ = 1
    helper = DatasetHelper(dataset, dataset_sink_mode, sink_size, epoch_num)
    if dataset_sink_mode:
        network = connect_network_with_dataset(network, helper)
    return helper, network
def dynamic_shape_sink_process(network, dataset, is_train=True):
    """Run *network* over *dataset* with data sinking enabled; return the last outputs."""
    helper, net = _exec_preprocess(
        network, is_train, dataset, dataset_sink_mode=True, epoch_num=1, sink_size=1)
    net.set_train(is_train)
    for batch in helper:
        outputs = net(*batch)
    return outputs
def fixed_shape_process(network, dataset, is_train=True):
    """Feed *dataset* to *network* batch by batch (no sinking); return the last outputs."""
    network.set_train(is_train)
    for batch in dataset.create_tuple_iterator():
        result = network(*batch)
    return result
def dataset_generator(data_list):
    """Yield entries of *data_list* one at a time (source callable for GeneratorDataset)."""
    yield from data_list
def get_columns(tensor_num):
    """Return generated dataset column names ``["data0", ..., "data{tensor_num-1}"]``.

    Replaces the manual append loop with the idiomatic list comprehension;
    output is identical for every non-negative *tensor_num*.
    """
    return ["data" + str(i) for i in range(tensor_num)]
def compare(output, expect):
    """Recursively check that *output* matches *expect* within rtol/atol 1e-4.

    Nested tuples/lists are compared element-wise; leaf objects are compared
    through their ``asnumpy()`` views with ``numpy.allclose``.
    """
    if isinstance(output, (tuple, list)):
        assert isinstance(expect, (tuple, list))
        return all(compare(got, want) for got, want in zip(output, expect))
    return bool(np.allclose(output.asnumpy(), expect.asnumpy(),
                       rtol=1.0e-4, atol=1.0e-4))
class GradNetWrtX(nn.Cell):
    """Wrap *net* so calling it returns gradients w.r.t. all inputs and all
    trainable parameters (sens_param expects the sensitivity as the last input)."""

    def __init__(self, net):
        super(GradNetWrtX, self).__init__()
        self.net = net
        self.grad_op = ops.GradOperation(get_all=True, get_by_list=True, sens_param=True)
        self.params = ParameterTuple(net.trainable_params())

    def construct(self, *inputs):
        grad_fn = self.grad_op(self.net, self.params)
        return grad_fn(*inputs)
class ConcatNet(nn.Cell):
    """Concatenate two tensors along a fixed *axis*."""

    def __init__(self, axis):
        super(ConcatNet, self).__init__()
        self.concat = ops.Concat(axis)

    def construct(self, x1, x2):
        return self.concat((x1, x2))
def dynamic_concat_run(is_grad):
    """Compare Concat (optionally its gradient) between dynamic-shape (sink)
    and fixed-shape execution on two batches of different row counts."""
    axis = 1
    dtype = np.float32
    data_list = []
    # Note: np.random.rand calls stay in the original order to keep the
    # seeded values identical.
    for rows in [2, 64]:
        sample = [np.random.rand(rows, 16).astype(dtype),
                  np.random.rand(rows, 32).astype(dtype)]
        if is_grad:
            # extra tensor serves as the sens input of the backward pass
            sample.append(np.random.rand(rows, 48).astype(dtype))
        data_list.append(tuple(sample))
    column_names = get_columns(len(data_list[0]))
    dataset = ds.GeneratorDataset(data_list, column_names, shuffle=False)
    dynamic_columns = {column_names[0]: [None, 16], column_names[1]: [None, 32]}
    if is_grad:
        dynamic_columns[column_names[-1]] = [None, 48]
    dataset.set_dynamic_columns(columns=dynamic_columns)
    net = ConcatNet(axis)
    if is_grad:
        net = GradNetWrtX(net)
    output = dynamic_shape_sink_process(net, dataset)
    output_cmp = fixed_shape_process(net, dataset)
    assert compare(output, output_cmp)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_concat_forward():
    """
    Feature: Test Concat.
    Description: The shape of inputs is dynamic.
    Expectation: Assert that results are consistent with fixed shape.
    """
    # Forward pass only: is_grad=False skips the GradNetWrtX wrapper.
    dynamic_concat_run(False)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_concat_backward():
    """
    Feature: Test backward of Concat.
    Description: The shape of inputs is dynamic.
    Expectation: Assert that results are consistent with fixed shape.
    """
    # is_grad=True wraps the net in GradNetWrtX and adds a sens input tensor.
    dynamic_concat_run(True)
class BatchNormNet(nn.Cell):
    """Apply 1-D batch normalization over *c* features."""

    def __init__(self, c):
        super(BatchNormNet, self).__init__()
        self.bn = nn.BatchNorm1d(c)

    def construct(self, input_data):
        return self.bn(input_data)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_bachnorm():
    """
    Feature: Test BatchNorm and its backward.
    Description: The shape of inputs is dynamic.
    Expectation: Assert that results are consistent with fixed shape.
    """
    c = 256
    dtype = np.float32
    data_list = []
    # RNG calls kept in the original order so the seeded data is unchanged.
    for rows in [2, 64]:
        sample = (np.random.rand(rows, c).astype(dtype),
                  np.random.rand(rows, c).astype(dtype))
        data_list.append(sample)
    column_names = get_columns(len(data_list[0]))
    dataset = ds.GeneratorDataset(data_list, column_names, shuffle=False)
    dataset.set_dynamic_columns(columns={
        column_names[0]: [None, c], column_names[1]: [None, c]})
    net = GradNetWrtX(BatchNormNet(c))
    gradients = dynamic_shape_sink_process(net, dataset)
    gradients_cmp = fixed_shape_process(net, dataset)
    assert compare(gradients, gradients_cmp)
class ReshapeNet(nn.Cell):
    """Reshape *x* to the runtime shape of *y*."""

    def construct(self, x, y):
        target_shape = ops.DynamicShape()(y)
        return ops.Reshape()(x, target_shape)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_reshape():
    """
    Feature: Test Reshape.
    Description: The shape of inputs is dynamic.
    Expectation: Assert that results are consistent with fixed shape.
    """
    dtype = np.float32
    data_list = []
    # RNG calls kept in the original order so the seeded data is unchanged.
    for rows in [2, 96]:
        sample = (np.random.rand(rows, 64, 1).astype(dtype),
                  np.random.rand(rows, 64).astype(dtype))
        data_list.append(sample)
    column_names = get_columns(len(data_list[0]))
    dataset = ds.GeneratorDataset(data_list, column_names, shuffle=False)
    dataset.set_dynamic_columns(columns={
        column_names[0]: [None, 64, 1], column_names[1]: [None, 64]})
    net = ReshapeNet()
    output = dynamic_shape_sink_process(net, dataset)
    output_cmp = fixed_shape_process(net, dataset)
    assert compare(output, output_cmp)
class ReduceSumNet(nn.Cell):
    """Sum *x* over the axes given by tensor *y*."""

    def __init__(self):
        super(ReduceSumNet, self).__init__()
        self.reduce_sum = ops.ReduceSum()

    def construct(self, x, y):
        return self.reduce_sum(x, y)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_reduce_sum():
    """
    Feature: Test ReduceSum.
    Description: The shape of inputs is dynamic.
    Expectation: Assert that results are consistent with result of the numpy compute
    """
    dtype = np.float32
    data_list = []
    # RNG calls kept in the original order so the seeded data is unchanged.
    for rows in [2, 96]:
        sample = (np.random.rand(rows, 256).astype(dtype),
                  np.array([1], dtype=np.int64))
        data_list.append(sample)
    column_names = get_columns(len(data_list[0]))
    dataset = ds.GeneratorDataset(data_list, column_names, shuffle=False)
    dataset.set_dynamic_columns(columns={column_names[0]: [None, 256]})
    output = dynamic_shape_sink_process(ReduceSumNet(), dataset)
    # A dynamic (tensor) axis for ReduceSum is not supported in the
    # fixed-shape path, so the reference result is computed with numpy.
    inputs = data_list[0]
    output_cmp = np.sum(inputs[0], inputs[1][0])
    assert np.allclose(output.asnumpy(), output_cmp, rtol=1.0e-4, atol=1.0e-4)
| [
"numpy.random.rand",
"mindspore.dataset.GeneratorDataset",
"mindspore.ops.Reshape",
"mindspore.context.set_context",
"mindspore.set_seed",
"mindspore.train.connect_network_with_dataset",
"mindspore.train.DatasetHelper",
"numpy.sum",
"mindspore.ops.ReduceSum",
"mindspore.ops.DynamicShape",
"numpy... | [((871, 936), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (890, 936), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((937, 948), 'mindspore.set_seed', 'set_seed', (['(2)'], {}), '(2)\n', (945, 948), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((1141, 1204), 'mindspore.train.DatasetHelper', 'DatasetHelper', (['dataset', 'dataset_sink_mode', 'sink_size', 'epoch_num'], {}), '(dataset, dataset_sink_mode, sink_size, epoch_num)\n', (1154, 1204), False, 'from mindspore.train import DatasetHelper, connect_network_with_dataset\n'), ((3603, 3662), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['data_list', 'column_names'], {'shuffle': '(False)'}), '(data_list, column_names, shuffle=False)\n', (3622, 3662), True, 'import mindspore.dataset as ds\n'), ((5567, 5626), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['data_list', 'column_names'], {'shuffle': '(False)'}), '(data_list, column_names, shuffle=False)\n', (5586, 5626), True, 'import mindspore.dataset as ds\n'), ((6683, 6742), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['data_list', 'column_names'], {'shuffle': '(False)'}), '(data_list, column_names, shuffle=False)\n', (6702, 6742), True, 'import mindspore.dataset as ds\n'), ((7853, 7912), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['data_list', 'column_names'], {'shuffle': '(False)'}), '(data_list, column_names, shuffle=False)\n', (7872, 7912), True, 'import mindspore.dataset as ds\n'), ((8300, 8331), 'numpy.sum', 'np.sum', (['inputs[0]', 'inputs[1][0]'], {}), '(inputs[0], inputs[1][0])\n', (8306, 8331), True, 'import numpy as np\n'), ((1259, 1312), 'mindspore.train.connect_network_with_dataset', 'connect_network_with_dataset', (['network', 'dataset_helper'], {}), '(network, dataset_helper)\n', 
(1287, 1312), False, 'from mindspore.train import DatasetHelper, connect_network_with_dataset\n'), ((2692, 2758), 'mindspore.ops.GradOperation', 'ops.GradOperation', ([], {'get_all': '(True)', 'get_by_list': '(True)', 'sens_param': '(True)'}), '(get_all=True, get_by_list=True, sens_param=True)\n', (2709, 2758), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((3092, 3108), 'mindspore.ops.Concat', 'ops.Concat', (['axis'], {}), '(axis)\n', (3102, 3108), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((4857, 4874), 'mindspore.nn.BatchNorm1d', 'nn.BatchNorm1d', (['c'], {}), '(c)\n', (4871, 4874), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((7182, 7197), 'mindspore.ops.ReduceSum', 'ops.ReduceSum', ([], {}), '()\n', (7195, 7197), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((6039, 6057), 'mindspore.ops.DynamicShape', 'ops.DynamicShape', ([], {}), '()\n', (6055, 6057), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((6076, 6089), 'mindspore.ops.Reshape', 'ops.Reshape', ([], {}), '()\n', (6087, 6089), False, 'from mindspore import ops, nn, ParameterTuple, context, set_seed\n'), ((7720, 7749), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int64'}), '([1], dtype=np.int64)\n', (7728, 7749), True, 'import numpy as np\n'), ((3326, 3347), 'numpy.random.rand', 'np.random.rand', (['i', '(16)'], {}), '(i, 16)\n', (3340, 3347), True, 'import numpy as np\n'), ((3383, 3404), 'numpy.random.rand', 'np.random.rand', (['i', '(32)'], {}), '(i, 32)\n', (3397, 3404), True, 'import numpy as np\n'), ((5373, 5393), 'numpy.random.rand', 'np.random.rand', (['i', 'c'], {}), '(i, c)\n', (5387, 5393), True, 'import numpy as np\n'), ((5429, 5449), 'numpy.random.rand', 'np.random.rand', (['i', 'c'], {}), '(i, c)\n', (5443, 5449), True, 'import numpy as np\n'), ((6484, 6508), 'numpy.random.rand', 'np.random.rand', (['i', '(64)', '(1)'], 
{}), '(i, 64, 1)\n', (6498, 6508), True, 'import numpy as np\n'), ((6544, 6565), 'numpy.random.rand', 'np.random.rand', (['i', '(64)'], {}), '(i, 64)\n', (6558, 6565), True, 'import numpy as np\n'), ((7662, 7684), 'numpy.random.rand', 'np.random.rand', (['i', '(256)'], {}), '(i, 256)\n', (7676, 7684), True, 'import numpy as np\n'), ((3464, 3485), 'numpy.random.rand', 'np.random.rand', (['i', '(48)'], {}), '(i, 48)\n', (3478, 3485), True, 'import numpy as np\n')] |
# py bestfit.py < alphadelta
# Fits cols 2, 3 (e.g., Alpha, Delta) using
# log(Delta/Alpha) ~= a + b*day
# Square errors are weighted by 1/(1/Alpha+1/Delta).
from stuff import *
import numpy as np
import sys
# Optional CLI arg: earliest date (ISO string compare) to include in the fit.
mindate='0000-00-00'
if len(sys.argv)>1: mindate=sys.argv[1]
# A, D: per-day counts of the two variants; dt: the matching date strings.
A=[];D=[];dt=[]
for x in sys.stdin:
  y=x.split()
  a,d=float(y[1]),float(y[2])
  # keep rows on/after mindate with at least one nonzero count
  if y[0]>=mindate and (a>0 or d>0):
    dt.append(y[0])
    A.append(a)
    D.append(d)
n=len(A)
A=np.array(A)
D=np.array(D)
# Weight per the header comment: 1/(1/A + 1/D) = A*D/(A+D).
W=A*D/(A+D)
# X: day offsets from the first kept date (datetoday comes from stuff).
dt0=datetoday(dt[0])
X=[datetoday(d)-dt0 for d in dt]
# Y: log odds ratio; the 1e-30 guards against log(0) when a count is zero.
Y=np.log((D+1e-30)/(A+1e-30))
# Weighted least squares normal equations for Y ~ a + b*X.
# NOTE(review): sum(W*X) is computed twice; could be hoisted into a variable.
m=np.array([[sum(W), sum(W*X)], [sum(W*X), sum(W*X*X)]])
r=np.array([sum(W*Y),sum(W*X*Y)])
c=np.linalg.solve(m,r)
print("Continuous growth rate = %.4f/day"%(c[1]))
# Crossover: day where the fitted log-ratio crosses zero, i.e. X = -a/b.
d=-c[0]/c[1]
d1=int(round(-c[0]/c[1]))
print("Crossover on",daytodate(datetoday(dt[0])+d1)," %+.0f hours"%((d-d1)*24))
| [
"numpy.array",
"numpy.log",
"numpy.linalg.solve"
] | [((453, 464), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (461, 464), True, 'import numpy as np\n'), ((467, 478), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (475, 478), True, 'import numpy as np\n'), ((547, 580), 'numpy.log', 'np.log', (['((D + 1e-30) / (A + 1e-30))'], {}), '((D + 1e-30) / (A + 1e-30))\n', (553, 580), True, 'import numpy as np\n'), ((669, 690), 'numpy.linalg.solve', 'np.linalg.solve', (['m', 'r'], {}), '(m, r)\n', (684, 690), True, 'import numpy as np\n')] |
"""Tests of rasterio.merge"""
import boto3
from hypothesis import given, settings
from hypothesis.strategies import floats
import numpy
import pytest
import affine
import rasterio
from rasterio.merge import merge
# Non-coincident datasets test fixture.
# Three overlapping GeoTIFFs, two to the NW and one to the SE.
@pytest.fixture(scope="function")
def test_data_dir_overlapping(tmp_path):
    """Write three overlapping single-band GeoTIFFs (fill values 1, 3, 2) into
    *tmp_path* and return the directory path."""
    profile = {
        "crs": "EPSG:4326",
        "transform": affine.Affine(0.2, 0, -114, 0, -0.2, 46),
        "count": 1,
        "dtype": rasterio.uint8,
        "driver": "GTiff",
        "width": 10,
        "height": 10,
        "nodata": 0,
    }

    def _write(name, fill):
        # Reads `profile` at call time, so the transform swap below is seen.
        with rasterio.open(tmp_path.joinpath(name), "w", **profile) as dst:
            band = numpy.ones((10, 10), dtype=rasterio.uint8) * fill
            dst.write(band, indexes=1)

    _write("nw1.tif", 1)
    _write("nw3.tif", 3)
    # The third raster is shifted one degree SE so it only partially overlaps.
    profile["transform"] = affine.Affine(0.2, 0, -113, 0, -0.2, 45)
    _write("se.tif", 2)
    return tmp_path
@pytest.mark.parametrize(
    "method,value",
    [("first", 1), ("last", 2), ("min", 1), ("max", 3), ("sum", 6), ("count", 3)],
)
def test_merge_method(test_data_dir_overlapping, method, value):
    """Merge method produces expected values in intersection"""
    paths = sorted(test_data_dir_overlapping.iterdir())  # nw is first.
    datasets = [rasterio.open(p) for p in paths]
    arr, _ = merge(datasets, output_count=1, method=method, dtype=numpy.uint64)
    # The three rasters all overlap in the lower-right 5x5 window.
    numpy.testing.assert_array_equal(arr[:, 5:10, 5:10], value)
def test_issue2163():
    """Demonstrate fix for issue 2163"""
    with rasterio.open("tests/data/float_raster_with_nodata.tif") as src:
        data = src.read()
        result, _ = merge([src])
        # Merging a single dataset must reproduce its pixels exactly.
        rows, cols = data.shape[1], data.shape[2]
        assert numpy.allclose(data, result[:, :rows, :cols])
def test_unsafe_casting():
    """Demonstrate fix for issue 2179"""
    with rasterio.open("tests/data/float_raster_with_nodata.tif") as src:
        result, _ = merge([src], dtype="uint8", nodata=0.0)
        assert not result.any()  # this is why it's called "unsafe".
# Network/S3 test: skipped unless AWS credentials are available; hypothesis
# fuzzes small lat/lon offsets of the AOI polygon.
@pytest.mark.skipif(
    not (boto3.Session().get_credentials()),
    reason="S3 raster access requires credentials",
)
@pytest.mark.network
@pytest.mark.slow
@settings(deadline=None, max_examples=5)
@given(
    dx=floats(min_value=-0.05, max_value=0.05),
    dy=floats(min_value=-0.05, max_value=0.05),
)
def test_issue2202(dx, dy):
    # Imports are local because shapely is an optional test dependency.
    import rasterio.merge
    from shapely import wkt
    from shapely.affinity import translate

    aoi = wkt.loads(
        r"POLYGON((11.09 47.94, 11.06 48.01, 11.12 48.11, 11.18 48.11, 11.18 47.94, 11.09 47.94))"
    )
    # Shift the AOI by the fuzzed offsets; the merge must still succeed.
    aoi = translate(aoi, dx, dy)
    with rasterio.Env(AWS_NO_SIGN_REQUEST=True,):
        ds = [
            rasterio.open(i)
            for i in [
                "/vsis3/copernicus-dem-30m/Copernicus_DSM_COG_10_N47_00_E011_00_DEM/Copernicus_DSM_COG_10_N47_00_E011_00_DEM.tif",
                "/vsis3/copernicus-dem-30m/Copernicus_DSM_COG_10_N48_00_E011_00_DEM/Copernicus_DSM_COG_10_N48_00_E011_00_DEM.tif",
            ]
        ]
        aux_array, aux_transform = rasterio.merge.merge(datasets=ds, bounds=aoi.bounds)
        # NOTE(review): show() renders a plot during the test run — presumably
        # left in for debugging; confirm it is intentional.
        from rasterio.plot import show
        show(aux_array)
| [
"numpy.allclose",
"shapely.affinity.translate",
"numpy.ones",
"shapely.wkt.loads",
"rasterio.open",
"boto3.Session",
"rasterio.Env",
"hypothesis.strategies.floats",
"pytest.mark.parametrize",
"rasterio.plot.show",
"hypothesis.settings",
"affine.Affine",
"pytest.fixture",
"rasterio.merge.me... | [((320, 352), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (334, 352), False, 'import pytest\n'), ((1259, 1381), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method,value"""', "[('first', 1), ('last', 2), ('min', 1), ('max', 3), ('sum', 6), ('count', 3)]"], {}), "('method,value', [('first', 1), ('last', 2), ('min',\n 1), ('max', 3), ('sum', 6), ('count', 3)])\n", (1282, 1381), False, 'import pytest\n'), ((2567, 2606), 'hypothesis.settings', 'settings', ([], {'deadline': 'None', 'max_examples': '(5)'}), '(deadline=None, max_examples=5)\n', (2575, 2606), False, 'from hypothesis import given, settings\n'), ((1021, 1061), 'affine.Affine', 'affine.Affine', (['(0.2)', '(0)', '(-113)', '(0)', '(-0.2)', '(45)'], {}), '(0.2, 0, -113, 0, -0.2, 45)\n', (1034, 1061), False, 'import affine\n'), ((1681, 1758), 'rasterio.merge.merge', 'merge', (['datasets'], {'output_count': 'output_count', 'method': 'method', 'dtype': 'numpy.uint64'}), '(datasets, output_count=output_count, method=method, dtype=numpy.uint64)\n', (1686, 1758), False, 'from rasterio.merge import merge\n'), ((1777, 1836), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['arr[:, 5:10, 5:10]', 'value'], {}), '(arr[:, 5:10, 5:10], value)\n', (1809, 1836), False, 'import numpy\n'), ((2849, 2959), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON((11.09 47.94, 11.06 48.01, 11.12 48.11, 11.18 48.11, 11.18 47.94, 11.09 47.94))"""'], {}), "(\n 'POLYGON((11.09 47.94, 11.06 48.01, 11.12 48.11, 11.18 48.11, 11.18 47.94, 11.09 47.94))'\n )\n", (2858, 2959), False, 'from shapely import wkt\n'), ((2975, 2997), 'shapely.affinity.translate', 'translate', (['aoi', 'dx', 'dy'], {}), '(aoi, dx, dy)\n', (2984, 2997), False, 'from shapely.affinity import translate\n'), ((458, 498), 'affine.Affine', 'affine.Affine', (['(0.2)', '(0)', '(-114)', '(0)', '(-0.2)', '(46)'], {}), '(0.2, 0, -114, 0, -0.2, 46)\n', (471, 498), False, 
'import affine\n'), ((742, 784), 'numpy.ones', 'numpy.ones', (['(10, 10)'], {'dtype': 'rasterio.uint8'}), '((10, 10), dtype=rasterio.uint8)\n', (752, 784), False, 'import numpy\n'), ((1613, 1629), 'rasterio.open', 'rasterio.open', (['x'], {}), '(x)\n', (1626, 1629), False, 'import rasterio\n'), ((1911, 1967), 'rasterio.open', 'rasterio.open', (['"""tests/data/float_raster_with_nodata.tif"""'], {}), "('tests/data/float_raster_with_nodata.tif')\n", (1924, 1967), False, 'import rasterio\n'), ((2030, 2042), 'rasterio.merge.merge', 'merge', (['[src]'], {}), '([src])\n', (2035, 2042), False, 'from rasterio.merge import merge\n'), ((2058, 2121), 'numpy.allclose', 'numpy.allclose', (['data', 'result[:, :data.shape[1], :data.shape[2]]'], {}), '(data, result[:, :data.shape[1], :data.shape[2]])\n', (2072, 2121), False, 'import numpy\n'), ((2203, 2259), 'rasterio.open', 'rasterio.open', (['"""tests/data/float_raster_with_nodata.tif"""'], {}), "('tests/data/float_raster_with_nodata.tif')\n", (2216, 2259), False, 'import rasterio\n'), ((2296, 2335), 'rasterio.merge.merge', 'merge', (['[src]'], {'dtype': '"""uint8"""', 'nodata': '(0.0)'}), "([src], dtype='uint8', nodata=0.0)\n", (2301, 2335), False, 'from rasterio.merge import merge\n'), ((3008, 3046), 'rasterio.Env', 'rasterio.Env', ([], {'AWS_NO_SIGN_REQUEST': '(True)'}), '(AWS_NO_SIGN_REQUEST=True)\n', (3020, 3046), False, 'import rasterio\n'), ((3437, 3489), 'rasterio.merge.merge', 'rasterio.merge.merge', ([], {'datasets': 'ds', 'bounds': 'aoi.bounds'}), '(datasets=ds, bounds=aoi.bounds)\n', (3457, 3489), False, 'import rasterio\n'), ((3538, 3553), 'rasterio.plot.show', 'show', (['aux_array'], {}), '(aux_array)\n', (3542, 3553), False, 'from rasterio.plot import show\n'), ((2622, 2661), 'hypothesis.strategies.floats', 'floats', ([], {'min_value': '(-0.05)', 'max_value': '(0.05)'}), '(min_value=-0.05, max_value=0.05)\n', (2628, 2661), False, 'from hypothesis.strategies import floats\n'), ((2670, 2709), 
'hypothesis.strategies.floats', 'floats', ([], {'min_value': '(-0.05)', 'max_value': '(0.05)'}), '(min_value=-0.05, max_value=0.05)\n', (2676, 2709), False, 'from hypothesis.strategies import floats\n'), ((912, 954), 'numpy.ones', 'numpy.ones', (['(10, 10)'], {'dtype': 'rasterio.uint8'}), '((10, 10), dtype=rasterio.uint8)\n', (922, 954), False, 'import numpy\n'), ((1153, 1195), 'numpy.ones', 'numpy.ones', (['(10, 10)'], {'dtype': 'rasterio.uint8'}), '((10, 10), dtype=rasterio.uint8)\n', (1163, 1195), False, 'import numpy\n'), ((3076, 3092), 'rasterio.open', 'rasterio.open', (['i'], {}), '(i)\n', (3089, 3092), False, 'import rasterio\n'), ((2437, 2452), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (2450, 2452), False, 'import boto3\n')] |
import numpy as np
import panel as pn
import param
from bokeh.plotting import figure
class Shape(param.Parameterized):
    """Base class for parameterized shapes rendered as a bokeh line glyph."""
    # Shared radius parameter, clamped to [0, 1].
    radius = param.Number(default=1, bounds=(0, 1))

    def __init__(self, **params):
        super().__init__(**params)
        # One figure per shape; subclasses mutate self.renderer's data source
        # in place when parameters change.
        self.figure = figure(x_range=(-1, 1), y_range=(-1, 1))
        self.renderer = self.figure.line(*self._get_coords())

    def _get_coords(self):
        # Base shape has no geometry; subclasses return (xs, ys) sequences.
        return [], []

    def view(self):
        # Return the bokeh figure for embedding in a panel layout.
        return self.figure
class Circle(Shape):
    """Circle traced by n+1 evenly spaced points (first and last coincide).

    The redundant ``__init__`` that only delegated to ``super().__init__``
    has been removed; inheritance provides the identical behavior.
    """

    # Hidden from auto-generated widgets (negative precedence): the sampling
    # resolution is an implementation detail, not a user-facing knob.
    n = param.Integer(default=100, precedence=-1)

    def _get_coords(self):
        """Return (xs, ys) arrays of the circle boundary at ``self.radius``."""
        angles = np.linspace(0, 2 * np.pi, self.n + 1)
        return (self.radius * np.sin(angles),
                self.radius * np.cos(angles))

    @param.depends('radius', watch=True)
    def update(self):
        # Push the new coordinates straight into the existing bokeh data source.
        xs, ys = self._get_coords()
        self.renderer.data_source.data.update({'x': xs, 'y': ys})
class NGon(Circle):
    """Regular n-gon: reuses Circle's coordinate math with a small, editable n."""
    # Visible in widgets (positive precedence), unlike Circle.n.
    n = param.Integer(default=3, bounds=(3, 10), precedence=1)

    # Re-declared with an identical body so param.depends also registers 'n'
    # as a trigger; Circle.update only watches 'radius'.
    @param.depends('radius', 'n', watch=True)
    def update(self):
        xs, ys = self._get_coords()
        self.renderer.data_source.data.update({'x': xs, 'y': ys})
# Pre-built shape instances offered by the viewer's selector.
shapes = [NGon(), Circle()]
class ShapeViewer(param.Parameterized):
    """Selector over the shape instances plus reactive title and plot views."""
    shape = param.ObjectSelector(default=shapes[0], objects=shapes)

    # Method names below are referenced by string in param.depends — do not rename.
    @param.depends('shape')
    def view(self):
        return self.shape.view()

    @param.depends('shape', 'shape.radius')
    def title(self):
        return '## %s (radius=%.1f)' % (type(self.shape).__name__, self.shape.radius)

    def panel(self):
        # Title above the plot; both re-render when their dependencies change.
        return pn.Column(self.title, self.view)
def shape_viewer():
    """Build a Row with the viewer's parameter widgets beside its shape panel."""
    viewer = ShapeViewer()
    return pn.Row(viewer.param, viewer.panel())
| [
"param.Number",
"param.ObjectSelector",
"bokeh.plotting.figure",
"param.Integer",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"param.depends",
"panel.Column"
] | [((136, 174), 'param.Number', 'param.Number', ([], {'default': '(1)', 'bounds': '(0, 1)'}), '(default=1, bounds=(0, 1))\n', (148, 174), False, 'import param\n'), ((499, 540), 'param.Integer', 'param.Integer', ([], {'default': '(100)', 'precedence': '(-1)'}), '(default=100, precedence=-1)\n', (512, 540), False, 'import param\n'), ((784, 819), 'param.depends', 'param.depends', (['"""radius"""'], {'watch': '(True)'}), "('radius', watch=True)\n", (797, 819), False, 'import param\n'), ((974, 1028), 'param.Integer', 'param.Integer', ([], {'default': '(3)', 'bounds': '(3, 10)', 'precedence': '(1)'}), '(default=3, bounds=(3, 10), precedence=1)\n', (987, 1028), False, 'import param\n'), ((1035, 1075), 'param.depends', 'param.depends', (['"""radius"""', '"""n"""'], {'watch': '(True)'}), "('radius', 'n', watch=True)\n", (1048, 1075), False, 'import param\n'), ((1283, 1338), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': 'shapes[0]', 'objects': 'shapes'}), '(default=shapes[0], objects=shapes)\n', (1303, 1338), False, 'import param\n'), ((1345, 1367), 'param.depends', 'param.depends', (['"""shape"""'], {}), "('shape')\n", (1358, 1367), False, 'import param\n'), ((1427, 1465), 'param.depends', 'param.depends', (['"""shape"""', '"""shape.radius"""'], {}), "('shape', 'shape.radius')\n", (1440, 1465), False, 'import param\n'), ((267, 307), 'bokeh.plotting.figure', 'figure', ([], {'x_range': '(-1, 1)', 'y_range': '(-1, 1)'}), '(x_range=(-1, 1), y_range=(-1, 1))\n', (273, 307), False, 'from bokeh.plotting import figure\n'), ((656, 693), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(self.n + 1)'], {}), '(0, 2 * np.pi, self.n + 1)\n', (667, 693), True, 'import numpy as np\n'), ((1610, 1642), 'panel.Column', 'pn.Column', (['self.title', 'self.view'], {}), '(self.title, self.view)\n', (1619, 1642), True, 'import panel as pn\n'), ((718, 732), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (724, 732), True, 'import numpy as np\n'), ((762, 776), 
'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (768, 776), True, 'import numpy as np\n')] |
import sys
import typing
import numpy as np
def main():
    """Read a count line and a line of integers from stdin; print the 1-based
    position of the second-largest value (ties resolved by argsort order)."""
    _ = int(input())  # element count; the second line's length is what matters
    values = np.array(sys.stdin.readline().split(), dtype=np.int64)
    order = np.argsort(values)
    # argsort is ascending, so order[-2] indexes the runner-up.
    print(order[-2] + 1)


main()
| [
"numpy.argsort",
"sys.stdin.readline"
] | [((163, 176), 'numpy.argsort', 'np.argsort', (['a'], {}), '(a)\n', (173, 176), True, 'import numpy as np\n'), ((98, 118), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (116, 118), False, 'import sys\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import sys
import numpy as np
import fsps
from past.utils import old_div
from cloudyfsps.ASCIItools import (writeASCII, compileASCII, checkCompiled, compiledExists)
from cloudyfsps.cloudyInputTools import *
from cloudyfsps.generalTools import calcForLogQ
from cloudyfsps.cloudyOutputTools import *
from cloudyfsps.outputFormatting import *
#import ipdb
#runMake, formatAllOutput, writeFormattedOutput)
# this code snippet goes through every step needed
# to integrate FSPS into Cloudy.
# This example uses stellar pops with a constant SFH
# as the input ionizing source.
# 1. Write an ascii file in Cloudy format with grid
# of FSPS spectra in all available ages and
# metallicities
# 2. Compile asii file into binary format required
# for Cloudy use. Assumes $CLOUDY_EXE is set to
# your /path/to/cloudy.exe
# 3. Writes Cloudy input files for a subset of grid
# parameters.
# 4. Runs Cloudy on the *.in files
# 5. Formats the various output files
zsun = 0.019  # solar metallicity used to normalize the FSPS Z grid to logZ

# Stage toggles for the FSPS -> Cloudy pipeline below; flip to rerun a step.
exec_write_ascii = False
exec_write_input = False
exec_run_cloudy = False
exec_write_output = True
exec_gen_FSPS_grid = True
# Function to write the ascii file.
# This is where you set the properties of the
# ionizing spectrum (SSP/CSFH, IMF, FBHB, etc)
def hden_ascii(fileout, **kwargs):
    """Write an FSPS SSP spectral grid (all ages x metallicities) to the
    Cloudy-format ascii file *fileout* via writeASCII.

    The grid is 2-D: parameters are (age, logZ); the flux flattening order
    below must match the (age, logZ) ordering of ``modpars``.
    """
    # change these parameters to modify the ionizing source grid
    # default mode is to produce an ascii grid in age and Z,
    # though different variables and more dimensions are possible.
    sp_dict = dict(zcontinuous=1,
                   imf_type=2,
                   sfh=0,
                   const=0.0,
                   sf_start=0.0)
    sp = fsps.StellarPopulation(**sp_dict)
    # all ages and Zs
    ages = 10.**sp.log_age
    # zlegend / zsun -> logZ relative to solar (zsun defined at module level)
    logZs = np.log10(old_div(sp.zlegend,zsun))
    print (ages,logZs)
    modpars = [(age, logZ) for age in ages for logZ in logZs]
    lam = sp.wavelengths
    all_fluxs = []
    for logZ in logZs:
        sp.params['logzsol'] = logZ
        all_fluxs.append(sp.get_spectrum()[1]) #lsun per hz
    nmod = len(modpars)
    # flatten flux for writing
    # all_fluxs is indexed [logZ][age]; iterate age outer / logZ inner so the
    # flattened order lines up with modpars above.
    flat_flux = np.array([all_fluxs[j][i]
                          for i in range(len(ages))
                          for j in range(len(logZs))])
    # this function is flexible, ndim can be 3/4/n.
    # in this example, however, ndim is 2 (age, logz).
    writeASCII(fileout, lam, flat_flux, modpars,
               nx=len(lam), ndim=2, npar=2, nmod=nmod)
    return
#---------------------------------------------------------------------
# ASCII FILE: WRITE AND COMPILE
#---------------------------------------------------------------------
# assumes you have $CLOUDY_EXE and $CLOUDY_DATA_PATH set as sys vars.
# name of ascii file
ascii_file = "FSPS_PDVA_test.ascii"
# or if there is an already-compiled one you want to use, specify here
# Cloudy's compiler produces <name>.mod from <name>.ascii.
compiled_ascii = "{}.mod".format(ascii_file.split(".")[0])
if exec_write_ascii:
    print("Executing write ascii sequence...")
    if not compiledExists(ascii_file):
        print("No compiled model exists...Writing.")
        hden_ascii(ascii_file)
        print("Compiling {} with Cloudy".format(ascii_file))
        compileASCII(ascii_file)
        print("Checking to see if compilation was successful...")
        if checkCompiled(ascii_file):
            print("Your model {} is ready to run.".format(compiled_ascii))
        else:
            # Abort the whole pipeline: nothing downstream can run without
            # a compiled model.
            sys.exit()
    else:
        print("{} already exists.".format(compiled_ascii))
#---------------------------------------------------------------------
# WRITE CLOUDY INPUT
#---------------------------------------------------------------------
# local folder to read and write *.in, *.out files
mod_dir = '/home/prerak/codes/hden_test/redo/c17_pagb_test/'
#mod_dir = "/home/prerakgarg/redo/c17/"
mod_prefix = 'ZAU'
# GRID PARAMETERS FOR CLOUDY RUN
#--------------
# ages between 1 and 7 Myr
#ages = np.linspace(1., 7., 7)*1.e6i
'''
ages = np.array([0.5, 1, 2, 3, 4, 5, 6, 7, 10, 20])*1.e6
# stellar metallicities
logZs = np.array([-1.98, -1.5, -1.0, -0.6, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.198])
# ionization parameters between -4 and -1
logUs = np.array([-4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0])
'''
# Reduced test grid (ages in yr): 2 ages x 1 metallicity x 1 logU = 2 models.
ages = np.array([1, 2,])*1.e6
# stellar metallicities
logZs = np.array([-1.5])
# ionization parameters between -4 and -1
logUs = np.array([-4.0])
#logUs = [-1.]
# Hydrogen density between 30 and 400
#nhs = np.arange(50., 450., 200) # density of gas, cm-3
nhs = [100]
# Other default parameters based off of Byler+2017
Rinners = np.array([19.]) # inner radius of HII region, 3pc
efrac = -1.0 # calculation is stopped when H is 10^efrac % neutral
set_name='dopita' # abundances from Dopita+2001
dust=False # don't include dust in nebula
extra_output=False # include lots of outputs
#-----------------------------------------------------------------
# iterate through all of the above parameters
# calcForLogQ just calculates Q = U*4*pi*Ri^2*nH
# Full Cartesian product of the grid; each row is one Cloudy model:
# (logZ, age, logU, log Rinner, Q, nH, efrac).
pars = np.array([(Z, a, U, R, calcForLogQ(logU=U, Rinner=10.0**R, nh=n), n, efrac)
                 for Z in logZs
                 for a in ages
                 for U in logUs
                 for R in Rinners
                 for n in nhs])
if exec_write_input:
    print('Writing input files...')
    # Writes one <mod_prefix>N.in file per grid row into mod_dir.
    writeParamFiles(dir_=mod_dir,
                    model_prefix=mod_prefix,
                    cloudy_mod=compiled_ascii,
                    run_cloudy=False, # don't run yet
                    ages=ages,
                    logZs=logZs,
                    logUs=logUs,
                    r_inners=Rinners,
                    nhs=nhs,
                    use_Q=True,
                    # if False, will use logU;
                    # does not matter in this case,
                    # since Q is calculated at
                    # each specified logU.
                    verbose=False, # don't print output to screen
                    set_name=set_name,
                    dust=dust,
                    extra_output=extra_output)
    print('Wrote {} param files'.format(len(pars)))
else:
    print('Skipping input writing.')
#---------------------------------------------------------------------
# RUN CLOUDY ON ALL INPUT FILES
#---------------------------------------------------------------------
if exec_run_cloudy:
    print("Running Cloudy....")
    # Runs Cloudy over every *.in file in mod_dir using 4 parallel workers.
    runMake(dir_=mod_dir, n_proc=4, model_name=mod_prefix)
    print("Cloudy finished.")
else:
    print("Not running Cloudy. Skipping to formatting output.")
#---------------------------------------------------------------------
# FORMAT OUTPUT
#---------------------------------------------------------------------
if exec_write_output:
    print("Formatting output files...\n")
    formatAllOutput(mod_dir, mod_prefix, write_line_lum=False)
else:
    print("\n\nNot formatting output. DONE.")
if exec_gen_FSPS_grid:
    print("Creating FSPS input grids...")
    # Final step: emit the nebular grid files FSPS reads back in.
    writeFormattedOutput(mod_dir, mod_prefix, "")
else:
    print("\n\nNot formatting FSPS output. DONE.")
| [
"cloudyfsps.ASCIItools.compileASCII",
"cloudyfsps.generalTools.calcForLogQ",
"fsps.StellarPopulation",
"past.utils.old_div",
"numpy.array",
"cloudyfsps.ASCIItools.checkCompiled",
"sys.exit",
"cloudyfsps.ASCIItools.compiledExists"
] | [((4389, 4405), 'numpy.array', 'np.array', (['[-1.5]'], {}), '([-1.5])\n', (4397, 4405), True, 'import numpy as np\n'), ((4456, 4472), 'numpy.array', 'np.array', (['[-4.0]'], {}), '([-4.0])\n', (4464, 4472), True, 'import numpy as np\n'), ((4657, 4673), 'numpy.array', 'np.array', (['[19.0]'], {}), '([19.0])\n', (4665, 4673), True, 'import numpy as np\n'), ((1791, 1824), 'fsps.StellarPopulation', 'fsps.StellarPopulation', ([], {}), '(**sp_dict)\n', (1813, 1824), False, 'import fsps\n'), ((4334, 4350), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (4342, 4350), True, 'import numpy as np\n'), ((1895, 1920), 'past.utils.old_div', 'old_div', (['sp.zlegend', 'zsun'], {}), '(sp.zlegend, zsun)\n', (1902, 1920), False, 'from past.utils import old_div\n'), ((3108, 3134), 'cloudyfsps.ASCIItools.compiledExists', 'compiledExists', (['ascii_file'], {}), '(ascii_file)\n', (3122, 3134), False, 'from cloudyfsps.ASCIItools import writeASCII, compileASCII, checkCompiled, compiledExists\n'), ((3289, 3313), 'cloudyfsps.ASCIItools.compileASCII', 'compileASCII', (['ascii_file'], {}), '(ascii_file)\n', (3301, 3313), False, 'from cloudyfsps.ASCIItools import writeASCII, compileASCII, checkCompiled, compiledExists\n'), ((3391, 3416), 'cloudyfsps.ASCIItools.checkCompiled', 'checkCompiled', (['ascii_file'], {}), '(ascii_file)\n', (3404, 3416), False, 'from cloudyfsps.ASCIItools import writeASCII, compileASCII, checkCompiled, compiledExists\n'), ((3519, 3529), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3527, 3529), False, 'import sys\n'), ((5103, 5146), 'cloudyfsps.generalTools.calcForLogQ', 'calcForLogQ', ([], {'logU': 'U', 'Rinner': '(10.0 ** R)', 'nh': 'n'}), '(logU=U, Rinner=10.0 ** R, nh=n)\n', (5114, 5146), False, 'from cloudyfsps.generalTools import calcForLogQ\n')] |
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
import numpy as np
from math import sqrt, log
from .math_tools import lognormal
def calculate_distribution(CSDS):
    r"""
    Compute the log-normal CSDS distribution.

    Returns a tuple ``(TQarr, Rmean)`` where ``TQarr`` is a numpy array
    holding the normalised (log normal) CSDS distribution and ``Rmean`` is
    the arithmetic mean of that distribution.

    Takes a single :class:`~CSDSData` object as argument.
    """
    # Log-normal parameters derived from the average CSDS:
    mu = CSDS.alpha_scale * log(CSDS.average) + CSDS.alpha_offset
    sigma = sqrt(CSDS.beta_scale * log(CSDS.average) + CSDS.beta_offset)
    num_steps = int(CSDS.maximum - CSDS.minimum) + 1
    largest_T = 0
    total_q = 0
    weights = dict()
    for step in range(num_steps):
        # Clamp to a tiny positive value so lognormal never sees T <= 0.
        T = max(CSDS.minimum + step, 1e-50)
        q = lognormal(T, mu, sigma)
        total_q += q
        weights[int(T)] = q
        largest_T = T
    TQarr = np.zeros(shape=(largest_T + 1,), dtype=float)
    Rmean = 0
    for T, q in weights.items():
        TQarr[T] = q / total_q
        Rmean += T * q
    Rmean /= total_q
    return TQarr, Rmean
| [
"numpy.zeros",
"math.log"
] | [((925, 965), 'numpy.zeros', 'np.zeros', ([], {'shape': '(maxT + 1,)', 'dtype': 'float'}), '(shape=(maxT + 1,), dtype=float)\n', (933, 965), True, 'import numpy as np\n'), ((524, 541), 'math.log', 'log', (['CSDS.average'], {}), '(CSDS.average)\n', (527, 541), False, 'from math import sqrt, log\n'), ((593, 610), 'math.log', 'log', (['CSDS.average'], {}), '(CSDS.average)\n', (596, 610), False, 'from math import sqrt, log\n')] |
import numpy as np
import cv2
def abs_sobel_thresh(img, orient='x', thresh=(20, 100)):
    """
    Apply a thresholded absolute Sobel derivative along one axis.

    :param img: single-channel image.
    :param orient: 'x' or 'y', axis of the derivative.
    :param thresh: (low, high) bounds on the scaled gradient.
    :return: binary mask (0/255) of pixels inside the threshold band.
    """
    if orient == 'x':
        gradient = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0))
    if orient == 'y':
        gradient = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1))
    # Rescale the gradient to 8 bits (0 - 255).
    scaled = np.uint8(255 * gradient / np.max(gradient))
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 255
    return mask
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """
    Threshold the gradient magnitude of *img*.

    Computes Sobel derivatives in both directions with the given kernel
    size, rescales the magnitude to 8 bits and returns a binary mask
    (0/255) marking pixels whose magnitude falls inside ``mag_thresh``.
    """
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Rescale to 0-255 and convert to uint8.
    scale = np.max(magnitude) / 255
    magnitude = (magnitude / scale).astype(np.uint8)
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= mag_thresh[0]) & (magnitude <= mag_thresh[1])] = 255
    return mask
def dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):
    """
    Threshold the *direction* of the image gradient.

    The direction is computed as arctan(|gy| / |gx|); pixels whose gradient
    direction falls inside ``thresh`` (radians) are marked 255.
    """
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 255
    return mask.astype(np.uint8)
def get_combined_gradients(img, thresh_x, thresh_y, thresh_mag, thresh_dir):
    """
    Build a combined binary gradient image isolating likely lane-line
    pixels, based on the red channel of a cropped region of *img*.

    The red channel is used because it detects white markings well.
    """
    rows, cols = img.shape[:2]
    # Save the cropped region for documentation purposes.
    crop = np.copy(img)[220:rows - 12, 0:cols, 2]
    cv2.imwrite("./output_images/02_cropped.png", crop)
    # Focus only on the region where lane lines are likely present.
    red = img[220:rows - 12, 0:cols, 2]
    gx_mask = abs_sobel_thresh(red, 'x', thresh_x)
    gy_mask = abs_sobel_thresh(red, 'y', thresh_y)
    mag_mask = mag_thresh(red, 3, thresh_mag)
    dir_mask = dir_thresh(red, 15, thresh_dir)
    # Combine sobel x/y with magnitude & direction measurements.
    combined = np.zeros_like(dir_mask).astype(np.uint8)
    combined[((gx_mask > 1) & (mag_mask > 1) & (dir_mask > 1)) | ((gx_mask > 1) & (gy_mask > 1))] = 255
    return combined
def channel_thresh(channel, thresh=(80, 255)):
    """
    Binarise a single colour channel.

    Pixels strictly greater than thresh[0] and at most thresh[1] become
    255; every other pixel stays 0.
    """
    mask = np.zeros_like(channel)
    mask[(channel > thresh[0]) & (channel <= thresh[1])] = 255
    return mask
def get_combined_hls(img, th_h, th_l, th_s):
    """
    Threshold *img* in HLS colour space and combine the channel masks.

    The S channel tends to work better than the red channel when the road
    is shadowed or changes colour.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    rows, cols = img.shape[:2]
    # Crop to the region where lane lines are expected.
    crop = hls[220:rows - 12, 0:cols]
    h_mask = channel_thresh(crop[:, :, 0], th_h)
    l_mask = channel_thresh(crop[:, :, 1], th_l)
    s_mask = channel_thresh(crop[:, :, 2], th_s)
    hls_comb = np.zeros_like(s_mask).astype(np.uint8)
    hls_comb[((s_mask > 1) & (l_mask == 0)) | ((s_mask == 0) & (h_mask > 1) & (l_mask > 1))] = 255
    return hls_comb
def combine_grad_hls(grad, hls):
    """
    Merge the binary gradient mask and the binary HLS mask into one image.

    Gradient-detected pixels are marked 100 and HLS-detected pixels 255
    (HLS wins where both fire), so the two sources remain distinguishable.
    """
    merged = np.zeros_like(hls).astype(np.uint8)
    merged[grad > 1] = 100
    merged[hls > 1] = 255
    return merged
| [
"numpy.copy",
"cv2.imwrite",
"numpy.sqrt",
"numpy.absolute",
"numpy.max",
"cv2.cvtColor",
"numpy.zeros_like",
"cv2.Sobel"
] | [((694, 721), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (707, 721), True, 'import numpy as np\n'), ((1280, 1332), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (1289, 1332), False, 'import cv2\n'), ((1346, 1398), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (1355, 1398), False, 'import cv2\n'), ((1457, 1491), 'numpy.sqrt', 'np.sqrt', (['(sobelx ** 2 + sobely ** 2)'], {}), '(sobelx ** 2 + sobely ** 2)\n', (1464, 1491), True, 'import numpy as np\n'), ((1729, 1751), 'numpy.zeros_like', 'np.zeros_like', (['gradmag'], {}), '(gradmag)\n', (1742, 1751), True, 'import numpy as np\n'), ((2186, 2238), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (2195, 2238), False, 'import cv2\n'), ((2252, 2304), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (2261, 2304), False, 'import cv2\n'), ((2573, 2598), 'numpy.zeros_like', 'np.zeros_like', (['absgraddir'], {}), '(absgraddir)\n', (2586, 2598), True, 'import numpy as np\n'), ((3162, 3174), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (3169, 3174), True, 'import numpy as np\n'), ((3219, 3270), 'cv2.imwrite', 'cv2.imwrite', (['"""./output_images/02_cropped.png"""', 'temp'], {}), "('./output_images/02_cropped.png', temp)\n", (3230, 3270), False, 'import cv2\n'), ((4157, 4179), 'numpy.zeros_like', 'np.zeros_like', (['channel'], {}), '(channel)\n', (4170, 4179), True, 'import numpy as np\n'), ((4539, 4575), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HLS'], {}), '(img, cv2.COLOR_BGR2HLS)\n', (4551, 4575), False, 'import cv2\n'), ((1574, 1589), 'numpy.max', 'np.max', (['gradmag'], {}), '(gradmag)\n', (1580, 
1589), True, 'import numpy as np\n'), ((2444, 2463), 'numpy.absolute', 'np.absolute', (['sobely'], {}), '(sobely)\n', (2455, 2463), True, 'import numpy as np\n'), ((2465, 2484), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (2476, 2484), True, 'import numpy as np\n'), ((361, 393), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {}), '(img, cv2.CV_64F, 1, 0)\n', (370, 393), False, 'import cv2\n'), ((449, 481), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {}), '(img, cv2.CV_64F, 0, 1)\n', (458, 481), False, 'import cv2\n'), ((593, 610), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (599, 610), True, 'import numpy as np\n'), ((3744, 3769), 'numpy.zeros_like', 'np.zeros_like', (['dir_binary'], {}), '(dir_binary)\n', (3757, 3769), True, 'import numpy as np\n'), ((5282, 5306), 'numpy.zeros_like', 'np.zeros_like', (['s_channel'], {}), '(s_channel)\n', (5295, 5306), True, 'import numpy as np\n'), ((6025, 6043), 'numpy.zeros_like', 'np.zeros_like', (['hls'], {}), '(hls)\n', (6038, 6043), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
#
# Copyright (c) 2017 <NAME><<EMAIL>> All Rights Reserved
#
#
# File: /Users/hain/ai/InsuranceQA-Machine-Learning/deep_qa_1/network.py
# Author: <NAME>
# Date: 2017-08-08:18:32:05
#
#===============================================================================
"""
A data API for learning QA.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 <NAME>. All Rights Reserved"
__author__ = "<NAME>"
__modify__ = "<NAME>"
__date__ = "2017-08-08:18:32:05"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(curdir))
# Python 2 only: reload(sys) re-exposes sys.setdefaultencoding so the
# process-wide default encoding can be forced to UTF-8. On Python 3 this
# branch is skipped entirely (reload is not a builtin there).
if sys.version_info[0] < 3:
    reload(sys)
    sys.setdefaultencoding("utf-8")
    # raise "Must be using Python 3"
import random
import insuranceqa_data as insuranceqa
import numpy as np
# Load the InsuranceQA question/answer pair splits via the
# insuranceqa_data package.
_train_data = insuranceqa.load_pairs_train()
_test_data = insuranceqa.load_pairs_test()
_valid_data = insuranceqa.load_pairs_valid()
'''
build vocab data with more placeholder
'''
# Extend the vocabulary with two placeholder tokens: <PAD> for right-padding
# sequences to a fixed length and <GO> as a sequence-start marker.
vocab_data = insuranceqa.load_pairs_vocab()
vocab_size = len(vocab_data['word2id'].keys())
VOCAB_PAD_ID = vocab_size+1
VOCAB_GO_ID = vocab_size+2
vocab_data['word2id']['<PAD>'] = VOCAB_PAD_ID
vocab_data['word2id']['<GO>'] = VOCAB_GO_ID
vocab_data['id2word'][VOCAB_PAD_ID] = '<PAD>'
vocab_data['id2word'][VOCAB_GO_ID] = '<GO>'
def combine_pos_and_neg_sample(data):
    '''
    Regroup labelled QA pairs into (question, positive, negative) triples.

    For every question, each positive answer is paired with each negative
    sample of the same question. A label of [1, 0] marks a positive answer.
    '''
    grouped = dict()
    for sample in data:
        entry = grouped.setdefault(sample['qid'], ["", [], []])
        entry[0] = sample['question']
        bucket = 1 if sample['label'] == [1, 0] else 2
        entry[bucket].append(sample['utterance'])
    triples = list()
    for qid, (question, positives, negatives) in grouped.items():
        for pos in positives:
            for neg in negatives:
                triples.append({'qid': qid, 'question': question, 'pos_utterance': pos, 'neg_utterance': neg})
    return triples
# Expand the raw training pairs into (question, positive, negative) triples.
_train_data = combine_pos_and_neg_sample(_train_data)
def _get_corpus_metrics():
    '''
    Print max/average token lengths of questions and utterances per split.
    '''
    for split_name, split in zip(["valid", "test", "train"], [_valid_data, _test_data, _train_data]):
        longest_q = longest_u = 0
        sum_q = sum_u = 0
        for sample in split:
            q_len = len(sample['question'])
            u_len = len(sample['utterance'])
            sum_q += q_len
            sum_u += u_len
            longest_q = max(longest_q, q_len)
            longest_u = max(longest_u, u_len)
        print('max len of %s question : %d, average: %d' % (split_name, longest_q, sum_q/len(split)))
        print('max len of %s utterance: %d, average: %d' % (split_name, longest_u, sum_u/len(split)))
# max length of answers
class BatchIter():
    '''
    Iterate over a dataset in shuffled mini-batches.

    Note: ``next()`` shuffles ``self.data`` in place before each pass.
    '''
    def __init__(self, data = None, batch_size = 100):
        assert data is not None, "data should not be None."
        self.batch_size = batch_size
        self.data = data
    def next(self):
        '''
        Generator yielding successive mini-batches of ``self.data``.
        The final batch may be smaller than ``batch_size``; an empty
        batch is never yielded.
        '''
        random.shuffle(self.data)
        index = 0
        total_num = len(self.data)
        # BUGFIX: was `index <= total_num`, which yielded a trailing EMPTY
        # batch whenever len(data) is an exact multiple of batch_size.
        while index < total_num:
            yield self.data[index:index + self.batch_size]
            index += self.batch_size
def padding(lis, pad, size):
    '''
    Right-pad `lis` with `pad` up to `size` elements, or truncate it to
    `size` if it is longer. Returns a NEW list.

    BUGFIX: the old implementation used `lis += [...]`, mutating the
    caller's list in place — since callers pass dataset entries
    (e.g. o['question']), this silently padded the underlying dataset.
    '''
    if size > len(lis):
        return lis + [pad] * (size - len(lis))
    return lis[0:size]
def pack_question_n_utterance(q, p_u, n_u=None, q_length = 20, u_length = 99):
    '''
    Pad a question and its utterance(s) with <PAD> to fixed lengths so they
    can be combined as feed-forward network input.

    Returns (q, p_u) or, when a negative utterance is given, (q, p_u, n_u).
    '''
    assert len(q) > 0 and len(p_u) > 0, "question and utterance must not be empty"
    q = padding(q, VOCAB_PAD_ID, q_length)
    p_u = padding(p_u, VOCAB_PAD_ID, u_length)
    assert len(q) == q_length, "question should be pad to q_length"
    assert len(p_u) == u_length, "utterance should be pad to u_length"
    if n_u is None:
        return q, p_u
    assert len(n_u) > 0, "negative utterance must not be empty"
    n_u = padding(n_u, VOCAB_PAD_ID, u_length)
    assert len(n_u) == u_length, "negative utterance should be pad to u_length"
    return q, p_u, n_u
def scale_digit(x):
    """Scale a vocabulary id into a small float feature value (PEP 8 E731:
    use def instead of binding a lambda to a name)."""
    return x * 0.001
def __resolve_train_data(data, batch_size, question_max_length = 20, utterance_max_length = 99):
    '''
    Yield training mini-batches as (qids, questions, pos_answers, neg_answers),
    with all token ids scaled by scale_digit.
    '''
    batch_iter = BatchIter(data = data, batch_size = batch_size)
    for mini_batch in batch_iter.next():
        qids = []
        questions = []
        pos_answers = []
        neg_answers = []
        for o in mini_batch:
            q, pu, nu = pack_question_n_utterance(o['question'], o['pos_utterance'], o['neg_utterance'], question_max_length, utterance_max_length)
            qids.append(o['qid'])
            questions.append(list(map(scale_digit, q)))
            pos_answers.append(list(map(scale_digit, pu)))
            neg_answers.append(list(map(scale_digit, nu)))
        if len(questions) > 0:
            yield qids, questions, pos_answers, neg_answers
        else:
            # BUGFIX (PEP 479): raising StopIteration inside a generator
            # becomes RuntimeError on Python 3.7+; `return` is the correct
            # way to end the generator early.
            return
def __resolve_valid_data(data, batch_size, question_max_length = 20, utterance_max_length = 99):
    '''
    Yield validation mini-batches as (qids, questions, answers, labels),
    where each label is the argmax of the one-hot label vector.
    '''
    batch_iter = BatchIter(data = data, batch_size = batch_size)
    for mini_batch in batch_iter.next():
        qids = []
        questions = []
        answers = []
        labels = []
        for o in mini_batch:
            q, pu = pack_question_n_utterance(o['question'], o['utterance'], None, question_max_length, utterance_max_length)
            qids.append(o['qid'])
            questions.append(q)
            answers.append(pu)
            labels.append(np.argmax(o['label']))
        if len(questions) > 0:
            # print('data in batch:%d' % len(mini_batch))
            yield qids, questions, answers, labels
        else:
            # BUGFIX (PEP 479): raising StopIteration inside a generator
            # becomes RuntimeError on Python 3.7+; `return` is the correct
            # way to end the generator early.
            return
# export data
def load_train(batch_size = 100, question_max_length = 20, utterance_max_length = 99):
    '''
    Return a generator over shuffled training mini-batches of
    (qids, questions, pos_answers, neg_answers).
    '''
    return __resolve_train_data(
        _train_data, batch_size, question_max_length, utterance_max_length)
def load_test(question_max_length = 20, utterance_max_length = 99):
    '''
    Load the whole test split as an iterator of
    (qid, question, utterance, label) tuples, token ids scaled by scale_digit.
    '''
    qids = []
    questions = []
    utterances = []
    labels = []
    for sample in _test_data:
        q, u = pack_question_n_utterance(sample['question'],
                                        sample['utterance'],
                                        None,
                                        question_max_length,
                                        utterance_max_length)
        qids.append(sample['qid'])
        questions.append(list(map(scale_digit, q)))
        utterances.append(list(map(scale_digit, u)))
        # one-hot [1, 0] (positive) maps to label 1, [0, 1] to label 0
        labels.append(0 if np.argmax(sample['label']) == 1 else 1)
    return zip(qids, questions, utterances, labels)
def load_valid(batch_size = 100, question_max_length = 20, utterance_max_length = 99):
    '''
    Return a generator over validation mini-batches of
    (qids, questions, answers, labels).
    '''
    return __resolve_valid_data(
        _valid_data, batch_size, question_max_length, utterance_max_length)
def test_batch():
    '''
    Smoke test: print the first sample produced by each of the
    train/valid/test loaders, then the placeholder token ids.
    '''
    for wrapped in zip(load_train()):
        for qid, q, pos_a, neg_a in wrapped:
            print(q[0])
            print(pos_a[0])
            print(neg_a[0])
            break
        break
    for wrapped in zip(load_valid()):
        for qid, q, pos_a, labels in wrapped:
            print(q[0])
            print(pos_a[0])
            print(labels[0])
            break
        break
    for (qid, q, pos_a, label) in zip(*load_test()):
        print(q)
        print(pos_a)
        print(label)
        break
    print("VOCAB_PAD_ID", VOCAB_PAD_ID)
    print("VOCAB_GO_ID", VOCAB_GO_ID)
# Run the data-loader smoke test when executed as a script.
if __name__ == '__main__':
    test_batch()
| [
"sys.setdefaultencoding",
"random.shuffle",
"insuranceqa_data.load_pairs_valid",
"numpy.argmax",
"os.path.dirname",
"insuranceqa_data.load_pairs_test",
"os.path.abspath",
"insuranceqa_data.load_pairs_vocab",
"insuranceqa_data.load_pairs_train"
] | [((989, 1019), 'insuranceqa_data.load_pairs_train', 'insuranceqa.load_pairs_train', ([], {}), '()\n', (1017, 1019), True, 'import insuranceqa_data as insuranceqa\n'), ((1033, 1062), 'insuranceqa_data.load_pairs_test', 'insuranceqa.load_pairs_test', ([], {}), '()\n', (1060, 1062), True, 'import insuranceqa_data as insuranceqa\n'), ((1077, 1107), 'insuranceqa_data.load_pairs_valid', 'insuranceqa.load_pairs_valid', ([], {}), '()\n', (1105, 1107), True, 'import insuranceqa_data as insuranceqa\n'), ((1171, 1201), 'insuranceqa_data.load_pairs_vocab', 'insuranceqa.load_pairs_vocab', ([], {}), '()\n', (1199, 1201), True, 'import insuranceqa_data as insuranceqa\n'), ((711, 736), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (726, 736), False, 'import os\n'), ((757, 780), 'os.path.dirname', 'os.path.dirname', (['curdir'], {}), '(curdir)\n', (772, 780), False, 'import os\n'), ((831, 862), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (853, 862), False, 'import sys\n'), ((3385, 3410), 'random.shuffle', 'random.shuffle', (['self.data'], {}), '(self.data)\n', (3399, 3410), False, 'import random\n'), ((6042, 6063), 'numpy.argmax', 'np.argmax', (["o['label']"], {}), "(o['label'])\n", (6051, 6063), True, 'import numpy as np\n'), ((7159, 7180), 'numpy.argmax', 'np.argmax', (["o['label']"], {}), "(o['label'])\n", (7168, 7180), True, 'import numpy as np\n')] |
import cflearn
import unittest
import numpy as np
from typing import Any
from typing import Tuple
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from cftool.ml import ModelPattern
from cftool.misc import timestamp
from cfdata.tabular import TabularDataset
from sklearn.tree import DecisionTreeClassifier
from cflearn.pipeline import Pipeline
class TestTraditional(unittest.TestCase):
    """Compare cflearn's traditional-ML models with their sklearn analogues."""

    @staticmethod
    def _train_traditional(
        model: str,
        dataset: TabularDataset,
        sklearn_model: Any,
    ) -> Tuple[Pipeline, Any, np.ndarray]:
        """Train `model` twice (fitted and zero-epoch), fit the matching
        sklearn estimator on the same processed features, and evaluate both.

        Returns (fitted pipeline, zero-epoch pipeline, processed feature matrix).
        """
        folder = f"_logs/{model}_{timestamp(ensure_different=True)}"
        kwargs = {"cv_split": 0.0, "logging_folder": folder}
        # m is actually trained; m0 keeps its initialization (0 epochs).
        m = cflearn.make(model, num_epoch=1, max_epoch=2, **kwargs)  # type: ignore
        m0 = cflearn.make(model, num_epoch=0, max_epoch=0, **kwargs)  # type: ignore
        m.fit(*dataset.xy)
        m0.fit(*dataset.xy)
        cflearn.evaluate(*dataset.xy, pipelines={"fit": m, "init": m0})
        # Fit the sklearn reference on the exact features cflearn produced.
        x, y = m0.data.processed.xy
        split = m0.model.get_split(x, m0.device)  # type: ignore
        x, sk_y = split.merge().cpu().numpy(), y.ravel()  # type: ignore
        sklearn_model.fit(x, sk_y)
        pattern = ModelPattern(
            init_method=lambda: sklearn_model,
            predict_method=lambda x_: sklearn_model.predict(x_).reshape([-1, 1]),
            predict_prob_method="predict_proba",
        )
        cflearn.evaluate(
            x,
            y,
            metrics=["auc", "acc"],
            other_patterns={"sklearn": pattern},
        )
        return m, m0, x

    def test_nnb_gnb(self) -> None:
        """A zero-epoch NNB should match sklearn's GaussianNB closed-form fit."""
        gnb = GaussianNB()
        dataset = TabularDataset.iris()
        nnb, nnb0, x = self._train_traditional("nnb", dataset, gnb)
        # Priors, means and variances should equal the GaussianNB estimates.
        self.assertTrue(np.allclose(nnb0.model.class_prior, gnb.class_prior_))
        normal = nnb0.model.normal
        self.assertTrue(np.allclose(normal.mu.data.cpu().numpy(), gnb.theta_))
        self.assertTrue(np.allclose(normal.std.data.cpu().numpy() ** 2, gnb.sigma_))
        self.assertTrue(np.allclose(nnb0.predict_prob(dataset.x), gnb.predict_proba(x)))
        cflearn._rmtree("_logs")

    def test_nnb_mnb(self) -> None:
        """A zero-epoch NNB should match sklearn's MultinomialNB."""
        mnb = MultinomialNB()
        dataset = TabularDataset.digits()
        nnb, nnb0, x = self._train_traditional("nnb", dataset, mnb)
        self.assertTrue(
            np.allclose(
                nnb0.model.class_log_prior(numpy=True),
                mnb.class_log_prior_,
            )
        )
        self.assertTrue(
            np.allclose(
                nnb0.predict_prob(dataset.x),
                mnb.predict_proba(x),
                atol=1e-4,
            )
        )
        cflearn._rmtree("_logs")

    def test_ndt(self) -> None:
        """NDT should train alongside a sklearn DecisionTree on several datasets."""
        dt = DecisionTreeClassifier()
        self._train_traditional("ndt", TabularDataset.iris(), dt)
        self._train_traditional("ndt", TabularDataset.digits(), dt)
        self._train_traditional("ndt", TabularDataset.breast_cancer(), dt)
        cflearn._rmtree("_logs")
if __name__ == "__main__":
unittest.main()
| [
"cflearn.evaluate",
"numpy.allclose",
"cfdata.tabular.TabularDataset.iris",
"cfdata.tabular.TabularDataset.digits",
"cfdata.tabular.TabularDataset.breast_cancer",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.naive_bayes.MultinomialNB",
"cflearn.make",
"unittest.main",
"sklearn.naive_bayes.Gauss... | [((3112, 3127), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3125, 3127), False, 'import unittest\n'), ((743, 798), 'cflearn.make', 'cflearn.make', (['model'], {'num_epoch': '(1)', 'max_epoch': '(2)'}), '(model, num_epoch=1, max_epoch=2, **kwargs)\n', (755, 798), False, 'import cflearn\n'), ((828, 883), 'cflearn.make', 'cflearn.make', (['model'], {'num_epoch': '(0)', 'max_epoch': '(0)'}), '(model, num_epoch=0, max_epoch=0, **kwargs)\n', (840, 883), False, 'import cflearn\n'), ((963, 1026), 'cflearn.evaluate', 'cflearn.evaluate', (['*dataset.xy'], {'pipelines': "{'fit': m, 'init': m0}"}), "(*dataset.xy, pipelines={'fit': m, 'init': m0})\n", (979, 1026), False, 'import cflearn\n'), ((1464, 1551), 'cflearn.evaluate', 'cflearn.evaluate', (['x', 'y'], {'metrics': "['auc', 'acc']", 'other_patterns': "{'sklearn': pattern}"}), "(x, y, metrics=['auc', 'acc'], other_patterns={'sklearn':\n pattern})\n", (1480, 1551), False, 'import cflearn\n'), ((1682, 1694), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1692, 1694), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1713, 1734), 'cfdata.tabular.TabularDataset.iris', 'TabularDataset.iris', ([], {}), '()\n', (1732, 1734), False, 'from cfdata.tabular import TabularDataset\n'), ((2178, 2202), 'cflearn._rmtree', 'cflearn._rmtree', (['"""_logs"""'], {}), "('_logs')\n", (2193, 2202), False, 'import cflearn\n'), ((2254, 2269), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2267, 2269), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((2288, 2311), 'cfdata.tabular.TabularDataset.digits', 'TabularDataset.digits', ([], {}), '()\n', (2309, 2311), False, 'from cfdata.tabular import TabularDataset\n'), ((2741, 2765), 'cflearn._rmtree', 'cflearn._rmtree', (['"""_logs"""'], {}), "('_logs')\n", (2756, 2765), False, 'import cflearn\n'), ((2812, 2836), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2834, 
2836), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3054, 3078), 'cflearn._rmtree', 'cflearn._rmtree', (['"""_logs"""'], {}), "('_logs')\n", (3069, 3078), False, 'import cflearn\n'), ((1827, 1880), 'numpy.allclose', 'np.allclose', (['nnb0.model.class_prior', 'gnb.class_prior_'], {}), '(nnb0.model.class_prior, gnb.class_prior_)\n', (1838, 1880), True, 'import numpy as np\n'), ((2876, 2897), 'cfdata.tabular.TabularDataset.iris', 'TabularDataset.iris', ([], {}), '()\n', (2895, 2897), False, 'from cfdata.tabular import TabularDataset\n'), ((2942, 2965), 'cfdata.tabular.TabularDataset.digits', 'TabularDataset.digits', ([], {}), '()\n', (2963, 2965), False, 'from cfdata.tabular import TabularDataset\n'), ((3010, 3040), 'cfdata.tabular.TabularDataset.breast_cancer', 'TabularDataset.breast_cancer', ([], {}), '()\n', (3038, 3040), False, 'from cfdata.tabular import TabularDataset\n'), ((635, 667), 'cftool.misc.timestamp', 'timestamp', ([], {'ensure_different': '(True)'}), '(ensure_different=True)\n', (644, 667), False, 'from cftool.misc import timestamp\n')] |
"""
Script to generate the MNIST+ dataset. The purpose of this dataset is to make a
more challenging MNIST-like dataset, with multiple factors of variation. These
factors can serve to evaluate a model's performance at learning invariant
features, or its ability to disentangle factors of variation in a multi-task
classification setting. The dataset is stored under $PYLEARN2_DATA_PATH.
The dataset variants are created as follows. For each MNIST image, we:
1. Perform a random rotation of the image (optional)
2. Rescale the image from 28x28 to 48x48, yielding variable `image`.
3.1 Extract a random patch `textured_patch` from a fixed or random image of the
Brodatz texture dataset.
3.2 Generate mask of MNIST digit outline, by thresholding MNIST digit at 0.1
3.3 Fuse MNIST digit and textured patch as follows:
textured_patch[mask] <= image[mask]; image <= textured_patch;
4. Randomly select position of light source (optional)
5. Perform embossing operation, given fixed lighting position obtained in 4.
"""
import numpy
from theano.compat.six.moves import xrange, cPickle as pickle
import pylab as pl
from copy import copy
from optparse import OptionParser
from pylearn2.datasets import mnist
from pylearn2.utils import string_utils
import warnings
try:
from PIL import Image
except ImportError:
warnings.warn("Couldn't import Image from PIL, so far make_mnistplus "
"is only supported with PIL")
# Side length (in pixels) of the square MNIST+ output images.
OUTPUT_SIZE = 48
# Default downsampling factor applied to Brodatz texture images.
DOWN_SAMPLE = 1
def to_array(img):
    """
    Convert a PIL image into a flat numpy array of floats in [0, 1].

    :param img: PIL.Image (anything exposing getdata()).
    """
    pixels = numpy.array(img.getdata())
    return pixels / 255.
def to_img(arr, os):
    """
    Convert a flat numpy array (values in [0, 1]) back to a PIL.Image.

    :param arr: numpy.ndarray of length os*os.
    :param os: integer, side length of the square output image.
    """
    square = arr.reshape(os, os)
    return Image.fromarray(square * 255.)
def emboss(img, azi=45., ele=18., dep=2):
    """
    Emboss image `img` as if lit by a directional light source.

    :param img: matrix (anything numpy.asarray accepts) representing the
        image to emboss.
    :param azi: azimuth of the light source (in degrees)
    :param ele: elevation of the light source (in degrees)
    :param dep: depth, (0-100)
    """
    # Convert the angles to radians.
    ele = (ele * 2 * numpy.pi) / 360.
    azi = (azi * 2 * numpy.pi) / 360.
    pixels = numpy.asarray(img).astype('float')
    # Image gradient as two arrays: grad_x and grad_y.
    grad_x, grad_y = numpy.gradient(pixels)
    # Unit incident light ray.
    gd = numpy.cos(ele)  # length of projection of ray on ground plane
    dx = gd * numpy.cos(azi)
    dy = gd * numpy.sin(azi)
    dz = numpy.sin(ele)
    # Scale the gradient by the "depth" factor (GIMP convention).
    grad_x = grad_x * dep / 100.
    grad_y = grad_y * dep / 100.
    # Unit surface normals of the image.
    leng = numpy.sqrt(grad_x**2 + grad_y**2 + 1.)
    uni_x = grad_x/leng
    uni_y = grad_y/leng
    uni_z = 1./leng
    # Shading: dot product of the light ray with the normals, scaled to 8-bit.
    shaded = 255 * (dx*uni_x + dy*uni_y + dz*uni_z)
    shaded = shaded.clip(0, 255)
    # must convert back to uint8 /before/ converting to an image
    return Image.fromarray(shaded.astype('uint8'))
def extract_patch(textid, os, downsample):
    """
    Extract a random patch from texture #textid of the Brodatz dataset.

    :param textid: id of the texture image to load.
    :param os: size (width and height) of the patch, i.e. of MNIST+ images.
    :param downsample: integer, downsampling factor applied to the texture.
    :return: (patch, (x, y)) where (x, y) is the patch's top-left corner.
    """
    temp = '${PYLEARN2_DATA_PATH}/textures/brodatz/D%i.gif' % textid
    fname = string_utils.preprocess(temp)
    img_i = Image.open(fname)
    # BUGFIX: use floor division so the sizes stay integers (PIL.resize
    # requires ints on Python 3; identical to the old Python-2 `/` result
    # for positive ints).
    img_i = img_i.resize((img_i.size[0] // downsample,
                          img_i.size[1] // downsample), Image.BILINEAR)
    # Random top-left corner such that the patch fits inside the texture.
    x = numpy.random.randint(0, img_i.size[0] - os)
    y = numpy.random.randint(0, img_i.size[1] - os)
    patch = img_i.crop((x, y, x + os, y + os))
    return patch, (x, y)
def gendata(enable, os, downsample, textid=None, seed=2313, verbose=False):
    """
    Generate the MNIST+ dataset and pickle it to disk.

    :param enable: dictionary of flags with keys ['texture', 'azimuth',
    'rotation', 'elevation'] to enable/disable a given factor of variation.
    :param textid: if enable['texture'], id number of the Brodatz texture to
    load. If textid is None, we load a random texture for each MNIST image.
    :param os: output size (width and height) of MNIST+ images.
    :param downsample: factor by which to downsample texture.
    :param seed: integer for seeding RNG.
    :param verbose: bool, show each generated image interactively.
    """
    rng = numpy.random.RandomState(seed)
    # Concatenate the MNIST train and test splits into one dataset.
    data = mnist.MNIST('train')
    test = mnist.MNIST('test')
    data.X = numpy.vstack((data.X, test.X))
    data.y = numpy.hstack((data.y, test.y))
    del test
    output = {}
    output['data'] = numpy.zeros((len(data.X), os*os))
    output['label'] = numpy.zeros(len(data.y))
    if enable['azimuth']:
        output['azimuth'] = numpy.zeros(len(data.y))
    if enable['elevation']:
        output['elevation'] = numpy.zeros(len(data.y))
    if enable['rotation']:
        output['rotation'] = numpy.zeros(len(data.y))
    if enable['texture']:
        output['texture_id'] = numpy.zeros(len(data.y))
        output['texture_pos'] = numpy.zeros((len(data.y), 2))
    for i in xrange(len(data.X)):
        # get MNIST image
        frgd_img = to_img(data.X[i], 28)
        frgd_img = frgd_img.convert('L')
        if enable['rotation']:
            rot = rng.randint(0, 360)
            output['rotation'][i] = rot
            frgd_img = frgd_img.rotate(rot, Image.BILINEAR)
        frgd_img = frgd_img.resize((os, os), Image.BILINEAR)
        if enable['texture']:
            # BUGFIX: keep the choice in a local so that `textid is None`
            # selects a *fresh* random texture for every image, as the
            # docstring promises. Previously the first random draw was
            # written back into `textid` and reused for all later images.
            cur_textid = textid
            if cur_textid is None:
                # extract patch from texture database. Note that texture #14
                # does not exist.
                cur_textid = 14
                while cur_textid == 14:
                    cur_textid = rng.randint(1, 113)
            patch_img, (px, py) = extract_patch(cur_textid, os, downsample)
            patch_arr = to_array(patch_img)
            # store output details
            output['texture_id'][i] = cur_textid
            output['texture_pos'][i] = (px, py)
            # generate binary mask for digit outline
            frgd_arr = to_array(frgd_img)
            mask_arr = frgd_arr > 0.1
            # copy contents of masked-MNIST image into background texture
            blend_arr = copy(patch_arr)
            blend_arr[mask_arr] = frgd_arr[mask_arr]
            # this now becomes the image to emboss
            frgd_img = to_img(blend_arr, os)
        azi = 45
        if enable['azimuth']:
            azi = rng.randint(0, 360)
            output['azimuth'][i] = azi
        ele = 18.
        if enable['elevation']:
            ele = rng.randint(0, 60)
            output['elevation'][i] = ele
        mboss_img = emboss(frgd_img, azi=azi, ele=ele)
        mboss_arr = to_array(mboss_img)
        output['data'][i] = mboss_arr
        output['label'][i] = data.y[i]
        if verbose:
            pl.imshow(mboss_arr.reshape(os, os))
            pl.gray()
            pl.show()
    # Encode the enabled factors into the output file name.
    fname = 'mnistplus'
    if enable['azimuth']:
        fname += "_azi"
    if enable['rotation']:
        fname += "_rot"
    if enable['texture']:
        fname += "_tex"
    # BUGFIX: open in binary mode -- a binary pickle protocol requires it
    # on Python 3 and avoids newline mangling on Windows under Python 2.
    fp = open(fname+'.pkl', 'wb')
    pickle.dump(output, fp, protocol=pickle.HIGHEST_PROTOCOL)
    fp.close()
if __name__ == '__main__':
    # Command-line interface: each flag toggles one factor of variation in
    # the generated dataset (see gendata for their meaning).
    parser = OptionParser()
    parser.add_option('-v', action='store_true', dest='verbose')
    parser.add_option('--azimuth', action='store_true', dest='azimuth',
            help='Enable random azimuth for light-source used in embossing.')
    parser.add_option('--elevation', action='store_true', dest='elevation',
            help='Enable random elevation for light-source used in embossing.')
    parser.add_option('--rotation', action='store_true', dest='rotation',
            help='Randomly rotate MNIST digit prior to embossing.')
    parser.add_option('--texture', action='store_true', dest='texture',
            help='Perform joint embossing of fused {MNIST + Texture} image.')
    parser.add_option('--textid', action='store', type='int', dest='textid',
            help='If specified, use a single texture ID for all MNIST images.',
            default=None)
    parser.add_option('--output_size', action='store', type='int', dest='os',
            help='Integer specifying size of (square) output images.',
            default=OUTPUT_SIZE)
    parser.add_option('--downsample', action='store', type='int',
            dest='downsample', default=DOWN_SAMPLE,
            help='Downsampling factor for Brodatz textures.')
    (opts, args) = parser.parse_args()
    enable = {'texture': opts.texture,
              'azimuth': opts.azimuth,
              'rotation': opts.rotation,
              'elevation': opts.elevation}
    # Generate and pickle the dataset with the requested factors enabled.
    gendata(enable=enable, os=opts.os, downsample=opts.downsample,
            verbose=opts.verbose, textid=opts.textid)
| [
"pylearn2.utils.string_utils.preprocess",
"PIL.Image.open",
"numpy.sqrt",
"theano.compat.six.moves.cPickle.dump",
"numpy.hstack",
"pylab.gray",
"numpy.asarray",
"optparse.OptionParser",
"pylearn2.datasets.mnist.MNIST",
"copy.copy",
"numpy.random.randint",
"numpy.cos",
"numpy.vstack",
"nump... | [((2299, 2316), 'numpy.gradient', 'numpy.gradient', (['a'], {}), '(a)\n', (2313, 2316), False, 'import numpy\n'), ((2432, 2446), 'numpy.cos', 'numpy.cos', (['ele'], {}), '(ele)\n', (2441, 2446), False, 'import numpy\n'), ((2560, 2574), 'numpy.sin', 'numpy.sin', (['ele'], {}), '(ele)\n', (2569, 2574), False, 'import numpy\n'), ((2799, 2842), 'numpy.sqrt', 'numpy.sqrt', (['(grad_x ** 2 + grad_y ** 2 + 1.0)'], {}), '(grad_x ** 2 + grad_y ** 2 + 1.0)\n', (2809, 2842), False, 'import numpy\n'), ((3490, 3519), 'pylearn2.utils.string_utils.preprocess', 'string_utils.preprocess', (['temp'], {}), '(temp)\n', (3513, 3519), False, 'from pylearn2.utils import string_utils\n'), ((3533, 3550), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (3543, 3550), False, 'from PIL import Image\n'), ((3681, 3724), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(img_i.size[0] - os)'], {}), '(0, img_i.size[0] - os)\n', (3701, 3724), False, 'import numpy\n'), ((3733, 3776), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(img_i.size[1] - os)'], {}), '(0, img_i.size[1] - os)\n', (3753, 3776), False, 'import numpy\n'), ((4477, 4507), 'numpy.random.RandomState', 'numpy.random.RandomState', (['seed'], {}), '(seed)\n', (4501, 4507), False, 'import numpy\n'), ((4521, 4541), 'pylearn2.datasets.mnist.MNIST', 'mnist.MNIST', (['"""train"""'], {}), "('train')\n", (4532, 4541), False, 'from pylearn2.datasets import mnist\n'), ((4554, 4573), 'pylearn2.datasets.mnist.MNIST', 'mnist.MNIST', (['"""test"""'], {}), "('test')\n", (4565, 4573), False, 'from pylearn2.datasets import mnist\n'), ((4587, 4617), 'numpy.vstack', 'numpy.vstack', (['(data.X, test.X)'], {}), '((data.X, test.X))\n', (4599, 4617), False, 'import numpy\n'), ((4631, 4661), 'numpy.hstack', 'numpy.hstack', (['(data.y, test.y)'], {}), '((data.y, test.y))\n', (4643, 4661), False, 'import numpy\n'), ((7244, 7301), 'theano.compat.six.moves.cPickle.dump', 'pickle.dump', (['output', 'fp'], {'protocol': 
'pickle.HIGHEST_PROTOCOL'}), '(output, fp, protocol=pickle.HIGHEST_PROTOCOL)\n', (7255, 7301), True, 'from theano.compat.six.moves import xrange, cPickle as pickle\n'), ((7358, 7372), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (7370, 7372), False, 'from optparse import OptionParser\n'), ((1317, 1424), 'warnings.warn', 'warnings.warn', (['"""Couldn\'t import Image from PIL, so far make_mnistplus is only supported with PIL"""'], {}), '(\n "Couldn\'t import Image from PIL, so far make_mnistplus is only supported with PIL"\n )\n', (1330, 1424), False, 'import warnings\n'), ((2507, 2521), 'numpy.cos', 'numpy.cos', (['azi'], {}), '(azi)\n', (2516, 2521), False, 'import numpy\n'), ((2536, 2550), 'numpy.sin', 'numpy.sin', (['azi'], {}), '(azi)\n', (2545, 2550), False, 'import numpy\n'), ((2229, 2247), 'numpy.asarray', 'numpy.asarray', (['img'], {}), '(img)\n', (2242, 2247), False, 'import numpy\n'), ((6325, 6340), 'copy.copy', 'copy', (['patch_arr'], {}), '(patch_arr)\n', (6329, 6340), False, 'from copy import copy\n'), ((7000, 7009), 'pylab.gray', 'pl.gray', ([], {}), '()\n', (7007, 7009), True, 'import pylab as pl\n'), ((7022, 7031), 'pylab.show', 'pl.show', ([], {}), '()\n', (7029, 7031), True, 'import pylab as pl\n')] |
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from studio.settings.frames import STYLE, THEME_COLOR, AIDED_COLOR
from studio.settings.frames import FIGSIZE, DPI
from studio.frames.camera import Camera
from studio.charting.components.legends import captioning
plt.style.use(STYLE)
def hist_density(datasets, suptitle, title, captions1, caption2):
fig = plt.figure(figsize=FIGSIZE, dpi=DPI)
fig.suptitle(suptitle)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['left'].set_color(AIDED_COLOR)
ax1.spines['bottom'].set_color(AIDED_COLOR)
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['left'].set_color(AIDED_COLOR)
ax2.spines['bottom'].set_color(AIDED_COLOR)
camera = Camera(fig)
for index in range(len(datasets)):
n = len(datasets[index])
if n < 101: step = int(n/5)
else: step = int(n/10)
for i in range(0, len(datasets[index]), step):
single_histogram(ax1, datasets[index], i+step, title, captions1[index])
single_density(ax2, datasets[index], i+step, title, caption2)
camera.snap()
return camera.animate()
def single_histogram(ax, data, i, title, caption):
max_value = max(data)
min_value = min(data)
bin_width = (max_value - min_value) / float(len(data) - 1)
n_bins = np.arange(min_value, max_value + bin_width, bin_width)
ax.hist(data[:i], n_bins,
linewidth=1.2,
edgecolor=THEME_COLOR,
color=THEME_COLOR,
alpha=0.8)
# captioning
ax, legend = captioning(ax, caption)
ax.set_title(title.format("Histogram"), fontsize=10, loc="left")
# ax.set_xlabel("X")
ax.set_ylabel("Frequency")
ax.tick_params(axis='x', colors=AIDED_COLOR)
ax.tick_params(axis='y', colors=AIDED_COLOR)
return ax
def single_density(ax, data, i, title, caption):
density = scipy.stats.gaussian_kde(data[:i])
x = np.linspace(min(data), max(data), 500)
ax.plot(x, density(x), color=THEME_COLOR)
ax.fill_between(x, density(x), 0, facecolor=THEME_COLOR, alpha=0.5)
# captioning
ax, legend = captioning(ax, caption)
ax.set_title(title.format("Density"), fontsize=10, loc="left")
ax.set_xlabel("X")
ax.set_ylabel("Density")
ax.tick_params(axis='x', colors=AIDED_COLOR)
ax.tick_params(axis='y', colors=AIDED_COLOR)
return ax
| [
"numpy.arange",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"studio.charting.components.legends.captioning",
"studio.frames.camera.Camera"
] | [((285, 305), 'matplotlib.pyplot.style.use', 'plt.style.use', (['STYLE'], {}), '(STYLE)\n', (298, 305), True, 'import matplotlib.pyplot as plt\n'), ((384, 420), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'FIGSIZE', 'dpi': 'DPI'}), '(figsize=FIGSIZE, dpi=DPI)\n', (394, 420), True, 'import matplotlib.pyplot as plt\n'), ((881, 892), 'studio.frames.camera.Camera', 'Camera', (['fig'], {}), '(fig)\n', (887, 892), False, 'from studio.frames.camera import Camera\n'), ((1482, 1536), 'numpy.arange', 'np.arange', (['min_value', '(max_value + bin_width)', 'bin_width'], {}), '(min_value, max_value + bin_width, bin_width)\n', (1491, 1536), True, 'import numpy as np\n'), ((1763, 1786), 'studio.charting.components.legends.captioning', 'captioning', (['ax', 'caption'], {}), '(ax, caption)\n', (1773, 1786), False, 'from studio.charting.components.legends import captioning\n'), ((2325, 2348), 'studio.charting.components.legends.captioning', 'captioning', (['ax', 'caption'], {}), '(ax, caption)\n', (2335, 2348), False, 'from studio.charting.components.legends import captioning\n')] |
import os
from distutils.core import setup, Extension
from distutils.sysconfig import get_config_vars
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy as np
import config
VERBOSE = 0
def path_list(paths):
while ";;" in paths:
paths = paths.replace(";;", ";")
return paths.split(";")
def lib_names(libraries):
names = []
for lib in libraries:
filename = lib.split(os.sep)[-1]
libname = filename.replace("lib", "").replace(".so", "")
names.append(libname)
return names
def strict_prototypes_workaround():
# Workaround to remove '-Wstrict-prototypes' from compiler invocation
(opt,) = get_config_vars('OPT')
os.environ['OPT'] = " ".join(
flag for flag in opt.split() if flag != '-Wstrict-prototypes'
)
if __name__ == "__main__":
strict_prototypes_workaround()
include_dirs = path_list(config.include_dirs)
include_dirs.extend(["..", "../approxik", np.get_include()])
include_dirs = list(set(include_dirs))
# TODO .replace("lib/x86_64-linux-gnu", "lib")?
library_dirs = path_list(config.library_dirs)
library_dirs = list(set(library_dirs))
extra_compile_args = config.extra_compile_args.strip().split(" ")
extra_compile_args = list(set(extra_compile_args))
if VERBOSE >= 1:
print("=== Library directories:")
print(library_dirs)
print("=== Libraries:")
print(libraries)
print("=== Extra compile args:")
print(extra_compile_args)
libraries = lib_names(path_list(config.libraries))
extension = Extension(
name="approxik",
sources=["approxik.pyx"],
include_dirs=include_dirs,
libraries=libraries,
library_dirs=library_dirs,
define_macros=[("NDEBUG", '1')],
language="c++",
extra_compile_args=extra_compile_args +
["-Wno-cpp", "-Wno-unused-function"])
setup(ext_modules=cythonize(extension))
| [
"Cython.Build.cythonize",
"distutils.sysconfig.get_config_vars",
"config.extra_compile_args.strip",
"numpy.get_include",
"distutils.core.Extension"
] | [((688, 710), 'distutils.sysconfig.get_config_vars', 'get_config_vars', (['"""OPT"""'], {}), "('OPT')\n", (703, 710), False, 'from distutils.sysconfig import get_config_vars\n'), ((1609, 1877), 'distutils.core.Extension', 'Extension', ([], {'name': '"""approxik"""', 'sources': "['approxik.pyx']", 'include_dirs': 'include_dirs', 'libraries': 'libraries', 'library_dirs': 'library_dirs', 'define_macros': "[('NDEBUG', '1')]", 'language': '"""c++"""', 'extra_compile_args': "(extra_compile_args + ['-Wno-cpp', '-Wno-unused-function'])"}), "(name='approxik', sources=['approxik.pyx'], include_dirs=\n include_dirs, libraries=libraries, library_dirs=library_dirs,\n define_macros=[('NDEBUG', '1')], language='c++', extra_compile_args=\n extra_compile_args + ['-Wno-cpp', '-Wno-unused-function'])\n", (1618, 1877), False, 'from distutils.core import setup, Extension\n'), ((981, 997), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (995, 997), True, 'import numpy as np\n'), ((1213, 1246), 'config.extra_compile_args.strip', 'config.extra_compile_args.strip', ([], {}), '()\n', (1244, 1246), False, 'import config\n'), ((1964, 1984), 'Cython.Build.cythonize', 'cythonize', (['extension'], {}), '(extension)\n', (1973, 1984), False, 'from Cython.Build import cythonize\n')] |
import numpy as np
import gym
from gym import spaces
from collections import deque
class FramesStack(gym.Wrapper):
def __init__(self, env, num_stack=4):
super(FramesStack, self).__init__(env)
self.num_stack = num_stack
self.obs_shape = self.env.observation_space.shape
self._buffer = deque([], maxlen=self.num_stack)
self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8,
shape=(self.num_stack * self.obs_shape[0],) + self.obs_shape[1:])
def _get_observation(self):
assert len(self._buffer) == self.num_stack
return LazyFrames(list(self._buffer))
def reset(self, **kwargs):
observation, reward, done, info = self.env.reset(**kwargs)
null_observation = np.zeros(self.obs_shape, dtype=np.uint8)
for _ in range(self.num_stack - 1):
self._buffer.append(null_observation)
self._buffer.append(observation)
# reward = None
# done = None
# info = {
# "reward_run": None,
# "reward_ctrl": None,
# "state": None
# }
return (self._get_observation(), reward, done, info)
def step(self, action, **kwargs):
observation, reward, done, info = self.env.step(action, **kwargs)
self._buffer.append(observation)
return (self._get_observation(), reward, done, info)
class RollAxisObservationWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(RollAxisObservationWrapper, self).__init__(env)
obs_shape = self.env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8,
shape=(obs_shape[-1],) + obs_shape[:-1])
def observation(self, observation):
return np.rollaxis(observation, axis=2)
def reset(self):
observation, reward, done, info = self.env.reset()
return self.observation(observation), reward, done, info
class LazyFrames(object):
def __init__(self, frames):
self._frames = frames
self._out = None
@property
def out(self):
if self._out is None:
self._out = np.concatenate(list(map(lambda frame: np.expand_dims(frame, axis=0), self._frames)), axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self.out
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
if self._frames is None:
return len(self.out)
else:
return len(self._frames)
| [
"collections.deque",
"numpy.rollaxis",
"gym.spaces.Box",
"numpy.zeros",
"numpy.expand_dims"
] | [((321, 353), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.num_stack'}), '([], maxlen=self.num_stack)\n', (326, 353), False, 'from collections import deque\n'), ((387, 501), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'dtype': 'np.uint8', 'shape': '((self.num_stack * self.obs_shape[0],) + self.obs_shape[1:])'}), '(low=0, high=255, dtype=np.uint8, shape=(self.num_stack * self.\n obs_shape[0],) + self.obs_shape[1:])\n', (397, 501), False, 'from gym import spaces\n'), ((765, 805), 'numpy.zeros', 'np.zeros', (['self.obs_shape'], {'dtype': 'np.uint8'}), '(self.obs_shape, dtype=np.uint8)\n', (773, 805), True, 'import numpy as np\n'), ((1627, 1715), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'dtype': 'np.uint8', 'shape': '((obs_shape[-1],) + obs_shape[:-1])'}), '(low=0, high=255, dtype=np.uint8, shape=(obs_shape[-1],) +\n obs_shape[:-1])\n', (1637, 1715), False, 'from gym import spaces\n'), ((1780, 1812), 'numpy.rollaxis', 'np.rollaxis', (['observation'], {'axis': '(2)'}), '(observation, axis=2)\n', (1791, 1812), True, 'import numpy as np\n'), ((2199, 2228), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (2213, 2228), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: bstarly
"""
import scipy
import matplotlib.pyplot as plt
from scipy.fft import fft
import numpy as np
import pandas as pd
signal_length = 20 #[ seconds ]
def calc_euclidean(x, y):
return np.sqrt(np.sum((x - y) ** 2))
def calc_mape(x,y):
return np.mean(np.abs((x - y) / x))
plt.figure(figsize=(12, 10), dpi=300)
#read signal from a single file
dataf = pd.read_csv(f'./VF2_1/test0_no tool.csv')
y = dataf['3'].to_list()
sample_rate = len(y)/signal_length # sampling rate [Hz]
dt = 1.0/ sample_rate # time between two samples [s]
df = 1/ signal_length # frequency between points in frequency domain [Hz]
t = np.arange(0, signal_length , dt) #the time vector
n_t=len(t) # length of time vector
# plot input data y against time
plt.subplot (1, 1, 1)
plt.plot (t,y, label ='input data ')
plt.xlabel ('time [s]')
plt.ylabel ('signal ')
# read signal from multiple files
for i in range (1,8):
dataf = pd.read_csv(f'test{str(i)}.csv')
y = dataf['3'].to_list()
sample_rate = len(y)/signal_length # sampling rate [Hz]
dt = 1.0/ sample_rate # time between two samples [s]
df = 1/ signal_length # frequency between points in frequency domain [Hz]
t = np.arange(0, signal_length , dt) #the time vector
n_t=len(t) # length of time vector
# plot input data y against time
plt.subplot (7, 1, i)
plt.plot (t,y, label ='input data ')
plt.xlabel ('time [s]')
plt.ylabel ('signal ')
plt.show() #and display plot on screen
#FIND EUCLIDEAN AND MAPE SCORES between reference and test
colnames=['TIME', 'X', 'Y', 'Z', 'Avg']
refDF = pd.read_csv(f'test1.csv', names=colnames, skiprows=1)
size = refDF.shape[0]
s1 = refDF['Avg'][:size]
for i in range (2,8):
dataf = pd.read_csv(f'test{str(i)}.csv', names=colnames, skiprows=1)
s2 = dataf['Avg'][:size]
euc_dist = calc_euclidean(s1, s2)
mape_dist = calc_mape(s1, s2)
if i!=2:
pct_euc_change = abs(euc_dist - prev_euc_dist) / prev_euc_dist
pct_mape_change = abs(mape_dist - prev_mape_dist) / prev_mape_dist
else:
pct_mape_change = 0
pct_euc_change = 0
print(f" Test {i}: Euclidean= {euc_dist}, %change={pct_euc_change} and MAPE = {mape_dist}, %change = {pct_mape_change}")
prev_mape_dist = mape_dist
prev_euc_dist = euc_dist
| [
"numpy.abs",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((325, 362), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)', 'dpi': '(300)'}), '(figsize=(12, 10), dpi=300)\n', (335, 362), True, 'import matplotlib.pyplot as plt\n'), ((405, 446), 'pandas.read_csv', 'pd.read_csv', (['f"""./VF2_1/test0_no tool.csv"""'], {}), "(f'./VF2_1/test0_no tool.csv')\n", (416, 446), True, 'import pandas as pd\n'), ((664, 695), 'numpy.arange', 'np.arange', (['(0)', 'signal_length', 'dt'], {}), '(0, signal_length, dt)\n', (673, 695), True, 'import numpy as np\n'), ((783, 803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (794, 803), True, 'import matplotlib.pyplot as plt\n'), ((805, 840), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {'label': '"""input data """'}), "(t, y, label='input data ')\n", (813, 840), True, 'import matplotlib.pyplot as plt\n'), ((842, 864), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (852, 864), True, 'import matplotlib.pyplot as plt\n'), ((866, 887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""signal """'], {}), "('signal ')\n", (876, 887), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1504), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1502, 1504), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1696), 'pandas.read_csv', 'pd.read_csv', (['f"""test1.csv"""'], {'names': 'colnames', 'skiprows': '(1)'}), "(f'test1.csv', names=colnames, skiprows=1)\n", (1654, 1696), True, 'import pandas as pd\n'), ((1232, 1263), 'numpy.arange', 'np.arange', (['(0)', 'signal_length', 'dt'], {}), '(0, signal_length, dt)\n', (1241, 1263), True, 'import numpy as np\n'), ((1363, 1383), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(7)', '(1)', 'i'], {}), '(7, 1, i)\n', (1374, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1424), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {'label': '"""input data """'}), "(t, y, label='input data ')\n", (1397, 1424), True, 'import matplotlib.pyplot 
as plt\n'), ((1430, 1452), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (1440, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""signal """'], {}), "('signal ')\n", (1468, 1479), True, 'import matplotlib.pyplot as plt\n'), ((241, 261), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (247, 261), True, 'import numpy as np\n'), ((303, 322), 'numpy.abs', 'np.abs', (['((x - y) / x)'], {}), '((x - y) / x)\n', (309, 322), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
import argparse
import numpy as np
from pandas import read_csv
from gmeterpy.corrections.vgrad import fit_floating_gravity
from gmeterpy.plotting.vgfit import plot_fit
from gmeterpy.corrections.vgrad import generate_report
parser = argparse.ArgumentParser(description='Second-order polynomial fit of the vertical gravity gradients')
parser.add_argument('-i', metavar='in-file', type=argparse.FileType('rt'), required=True)
parser.add_argument('-n', '--name', default='')
#parser.add_argument('--plot', dest='plot', action='store_true')
#parser.add_argument('--no-plot', dest='plot', action='store_false')
#parser.set_defaults(plot=True)
opt = parser.parse_args()
data = read_csv(opt.i)
df, res = fit_floating_gravity(data, deg=2)
a, b = res.params[-2], res.params[-1]
se_a = res.bse[-2]
se_b = res.bse[-1]
covab = res.cov_params()['a']['b']
df['resid'] = res.resid
gp = np.poly1d([b, a, 0])
make_plot = True
make_report = True
station = opt.name
if make_plot:
h_min = min(df.h)
al = (gp(h_min) - gp(1.0)) / (h_min - 1.0)
fig = plot_fit(df, lambda x: gp(x) - al*x, (se_a, se_b, covab), station)
title = '{} ({:.1f} $\mu Gal/m$ substructed)'.format(station, al)
fig.gca().set_title(title, fontsize=14)
plot_file = station + '_all.png'
fig.savefig(plot_file)
if make_report:
report_file = 'vg_' + station + '_fit_all.txt'
generate_report(report_file, data, res, gp, (se_a, se_b, covab), station)
| [
"argparse.FileType",
"argparse.ArgumentParser",
"pandas.read_csv",
"gmeterpy.corrections.vgrad.fit_floating_gravity",
"numpy.poly1d",
"gmeterpy.corrections.vgrad.generate_report"
] | [((275, 380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Second-order polynomial fit of the vertical gravity gradients"""'}), "(description=\n 'Second-order polynomial fit of the vertical gravity gradients')\n", (298, 380), False, 'import argparse\n'), ((716, 731), 'pandas.read_csv', 'read_csv', (['opt.i'], {}), '(opt.i)\n', (724, 731), False, 'from pandas import read_csv\n'), ((743, 776), 'gmeterpy.corrections.vgrad.fit_floating_gravity', 'fit_floating_gravity', (['data'], {'deg': '(2)'}), '(data, deg=2)\n', (763, 776), False, 'from gmeterpy.corrections.vgrad import fit_floating_gravity\n'), ((919, 939), 'numpy.poly1d', 'np.poly1d', (['[b, a, 0]'], {}), '([b, a, 0])\n', (928, 939), True, 'import numpy as np\n'), ((1409, 1482), 'gmeterpy.corrections.vgrad.generate_report', 'generate_report', (['report_file', 'data', 'res', 'gp', '(se_a, se_b, covab)', 'station'], {}), '(report_file, data, res, gp, (se_a, se_b, covab), station)\n', (1424, 1482), False, 'from gmeterpy.corrections.vgrad import generate_report\n'), ((426, 449), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (443, 449), False, 'import argparse\n')] |
'''
<NAME>
A recursive descent parser for STL files
'''
import re
import numpy
__all__ = ['load', 'ParseError']
class ParseError(Exception):
def __init__(self, line, value):
super(ParseError, self).__init__('line %d, %s' % (line, value))
self.line = line
self.value = value
def enumerate_char_from_file(in_file, buffer_size = 4096):
while True:
chunk = in_file.read(buffer_size)
for c in chunk:
yield c
if len(chunk) < buffer_size:
return
def tokenize(in_file):
line_id = 1
token = None
for c in enumerate_char_from_file(in_file):
if c == '\n':
line_id += 1
if c.isspace():
if token is not None:
yield line_id, ''.join(token)
token = None
else:
if token is None:
token = [c]
else:
token.append(c)
if token is not None:
yield line_id, ''.join(token)
keyword_set = frozenset(['solid', 'facet', 'normal', 'outer', 'loop', 'vertex', 'endloop', 'endfacet', 'endsolid'])
def enumerate_lexeme(in_file):
float_point_re = re.compile(r'[-+]?[0-9]*\.?[0-9]+(\e[-+]?[0-9]+)?')
for line_id, token in tokenize(in_file):
float_point_match = float_point_re.match(token)
if token in keyword_set:
yield line_id, token, token
elif float_point_match and len(float_point_match.group(0)) == len(token):
yield line_id, token, float(token)
else:
yield line_id, token, token
class Parser(object):
def __init__(self, lexer):
self.lexer = lexer
self.symbol = None
self.line_id = None
def next(self):
line_id, token, value = self.lexer.next()
self.line_id = line_id
self.lookahead = (token, value)
def accept(self, symbol = None):
if symbol in keyword_set:
return self.lookahead[0] == symbol
elif symbol is str:
return True
elif symbol is float:
return isinstance(self.lookahead[1], float)
return True
def consume(self, symbol = None):
if not self.accept(symbol):
raise ParseError(self.line_id, 'unexpected symbol "%s", excepted "%s"' % (self.lookahead[0], symbol))
def parse_vector(self):
ret = numpy.zeros(3)
for i in range(3):
self.next()
self.consume(float)
ret[i] = self.lookahead[1]
return ret
def parse_triangle(self):
self.consume('outer')
self.next()
self.consume('loop')
self.next()
vertex_list = numpy.zeros((3, 3))
for i in range(3):
vertex_list[i] = self.parse_vertex()
self.next()
self.consume('endloop')
return vertex_list
def parse_vertex(self):
self.consume('vertex')
return self.parse_vector()
def parse_normal(self):
self.consume('normal')
return self.parse_vector()
def parse_facet(self):
self.consume('facet')
self.next()
if self.accept('normal'):
normal = self.parse_normal()
self.next()
vertex_list = self.parse_triangle()
elif self.accept('outer'):
vertex_list = self.parse_triangle()
normal = self.parse_normal()
self.next()
self.consume('endfacet')
return vertex_list, normal
def parse_facet_list(self):
while True:
if self.accept('facet'):
yield self.parse_facet()
try:
self.next()
except StopIteration:
raise ParseError(self.line_id, 'unexpected end of file, excepted "endsolid"')
else:
self.accept('endsolid')
return
def parse_mesh(self):
self.next()
self.consume('solid')
self.next()
if self.accept('facet'):
name = None
else:
name = self.lookahead[1]
self.next()
for vertex_list, normal in self.parse_facet_list():
yield vertex_list, normal
self.consume('endsolid')
if name is not None:
self.consume()
def load(in_file):
parser = Parser(enumerate_lexeme(in_file))
for vertex_list, normal in parser.parse_mesh():
yield vertex_list, normal
| [
"numpy.zeros",
"re.compile"
] | [((995, 1047), 're.compile', 're.compile', (['"""[-+]?[0-9]*\\\\.?[0-9]+(\\\\e[-+]?[0-9]+)?"""'], {}), "('[-+]?[0-9]*\\\\.?[0-9]+(\\\\e[-+]?[0-9]+)?')\n", (1005, 1047), False, 'import re\n'), ((2018, 2032), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (2029, 2032), False, 'import numpy\n'), ((2255, 2274), 'numpy.zeros', 'numpy.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2266, 2274), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Import Modules
################################################################################
from UDFManager import UDFManager
import os
import sys
import math
import cmath
import numpy as np
import platform
import subprocess
import scipy.signal as signal
#
import CognacUtility as CU
from CognacBasicAnalysis import *
from CognacGeometryAnalysis import CognacGeometryAnalysis
################################################################################
################################################################################
def evaluate_nw():
	"""Top-level driver: pick the target UDF from the command line,
	build the strand/chain lists, and evaluate the chain statistics."""
	udf_file = file_select()
	conditions, chains = make_chain_list(udf_file)
	# Compute the polymer-chain characteristics.
	EvaluateChain(conditions, chains, udf_file).eval_chain()
	return
##############################
# Select the target UDF file.
def file_select():
	"""Return the output-UDF path given on the command line.

	Prints a usage message and exits with status 1 when no argument
	is supplied or when the named file is not readable.
	"""
	argv = sys.argv
	if len(argv) == 1:
		print("usage: python", argv[0], "Honya_out.udf")
		exit(1)
	if not os.access(argv[1], os.R_OK):
		print(argv[1], "not exists.")
		exit(1)
	return argv[1]
# From the calculation conditions, decide between homopolymer and network, and read out chain_list.
def make_chain_list(target):
	"""Read the calculation conditions and build the strand list.

	Reads 'target_condition.udf' from the working directory, collects
	the conditions into a list, then asks SelectChain for the
	per-strand atom lists of *target*.

	Args:
		target: path of the output UDF file to analyse.

	Returns:
		(calc_cond, chain_list):
			calc_cond:  [n_seg, l_bond, cn, func, nu, nw_type]
			chain_list: [[molecule_id, [atom ids...]], ...] per strand

	Exits with status 1 when 'target_condition.udf' is missing.
	"""
	# Read the conditions of the calculation target.
	if not os.access('target_condition.udf', os.R_OK):
		print("'target_condition.udf' is not exists.")
		exit(1)
	else:
		cond_u = UDFManager('target_condition.udf')
		nw_type = cond_u.get('TargetCond.Model.TargetModel')
		func = cond_u.get('TargetCond.NetWork.N_Strands')
		n_seg = cond_u.get('TargetCond.NetWork.N_Segments')
		l_bond= cond_u.get('SimulationCond.l_bond')
		cn = cond_u.get('TargetCond.Strand.Characteristic_Ratio')
		nu = cond_u.get('TargetCond.System.Nu')
		calc_cond = [n_seg, l_bond, cn, func, nu, nw_type]
	#
	sc = SelectChain(target)
	# NOTE(review): calc_cond is always built with six elements above, so
	# the len == 3 branch below is unreachable here; it also calls
	# sc.make_chain_list(), which is commented out in SelectChain.
	if len(calc_cond) == 3:
		# Build the chain list for a homopolymer.
		end_list, chain_list = sc.make_chain_list()
	elif len(calc_cond) == 6:
		# Build the list of network strands.
		# ss = Init_Strand_Select(target)
		jp_list, jp_pair_list, chain_list = sc.make_strand_list()
	return calc_cond, chain_list
##############################################################################
class SelectChain:
	"""Extract junction points (JPs) and strand atom lists from a
	Cognac output UDF via UDFManager."""
	def __init__(self, target):
		# target: path of the UDF file to read.
		self.uobj = UDFManager(target)
	# ##########################
	# # Build the chain list for a homopolymer.
	# def make_chain_list(self):
	# 	atom_list = self.uobj.get("Set_of_Molecules.molecule[].atom[]")
	# 	self.uobj.jump(-1)
	# 	chain_list = []
	# 	end_list = []
	# 	for i, target_chain in enumerate(atom_list):
	# 		tmp = []
	# 		for j, atom in enumerate(target_chain):
	# 			tmp.append(j)
	# 		end_list.append([i, [tmp[0], tmp[-1]]])
	# 		chain_list.append([i, tmp])
	# 	return end_list, chain_list
	########################################
	# Lists of junction points and of the atoms making up each strand.
	def make_strand_list(self):
		"""Return (jp_list, jp_pair_list, strand_list) for the network.

		jp_list:      [[molecule_id, atom_id], ...] of all junction beads
		jp_pair_list: [[molecule_id, [start_jp, end_jp]], ...]
		strand_list:  [[molecule_id, [atom ids along the strand]], ...]
		"""
		jp_list = self.make_jp_list()
		#
		jp_pair_list = []
		strand_list = []
		for target_jp in jp_list:
			jp_pair, strand = self.make_jp_pair(target_jp, jp_list)
			for i in jp_pair:
				jp_pair_list.append(i)
			if len(strand) > 0:
				for i in strand:
					strand_list.append(i)
		return jp_list, jp_pair_list, strand_list
	# Build the list of junction points.
	def make_jp_list(self):
		"""Return [[molecule_id, atom_id], ...] for every atom whose
		type name is 'JP_A' or 'JP_B' (the crosslink beads)."""
		self.uobj.jump(-1)
		jp_list = []
		#
		mols = self.uobj.get("Set_of_Molecules.molecule[]")
		for i, mol in enumerate(mols):
			for j, atom in enumerate(mol[1]):
				tmp = []	# NOTE(review): unused
				if atom[1] == 'JP_A' or atom[1] == 'JP_B':
					jp_list.append([i, j])
		return jp_list
	# Build the pairs of junction points (and the strand between them).
	def make_jp_pair(self, target_jp, jp_list):
		"""Walk along the bonds from *target_jp* to the next junction.

		Args:
			target_jp: [molecule_id, atom_id] of the starting JP.
			jp_list: full JP list (currently unused in this method).

		Returns:
			(jp_pair, strand):
				jp_pair: [[molecule_id, [start_jp, end_jp]], ...]
				strand:  [[molecule_id, [atoms from start_jp to end_jp]], ...]
		"""
		molecule = target_jp[0]
		start_jp = target_jp[1]
		jp_pair = []
		strand = []
		bonds = self.uobj.get("Set_of_Molecules.molecule[].bond[]")
		tmp_bonds = bonds[molecule]
		#
		for i, bond in enumerate(tmp_bonds):
			tmp = []
			# Follow each bond leaving the starting JP; the very last
			# bond entry is skipped by the i < len-1 guard.
			if ((bond[1] == start_jp) or (bond[2] == start_jp)) and (i < len(tmp_bonds) - 1):
				if bond[1] == start_jp:
					adj = bond[2]
				else:
					adj = bond[1]
				tmp.append(start_jp)
				tmp.append(adj)
				tmp_id = i + 1
				# Walk consecutive "bond_Strand" entries, collecting the
				# atoms along the strand until a non-strand bond is hit.
				while tmp_bonds[tmp_id][0] == "bond_Strand":
					if tmp_bonds[tmp_id][1] == adj:
						adj = tmp_bonds[tmp_id][2]
					elif tmp_bonds[tmp_id][2] == adj:
						adj = tmp_bonds[tmp_id][1]
					tmp.append(adj)
					tmp_id += 1
				#
				# The bond ending the walk connects to the far junction.
				# NOTE(review): if neither side of that bond matches adj,
				# end_jp stays unbound and the block below would raise —
				# presumably the bond ordering in the UDF guarantees a
				# match; confirm against the Cognac output format.
				if tmp_bonds[tmp_id][1] == adj:
					end_jp = tmp_bonds[tmp_id][2]
				elif tmp_bonds[tmp_id][2] == adj:
					end_jp = tmp_bonds[tmp_id][1]
				if len(tmp)>2:
					tmp.append(end_jp)
					jp_pair.append([molecule, [start_jp, end_jp]])
					strand.append([molecule, tmp])
		return jp_pair, strand
###############################################################################
###############################################################################
class EvaluateChain:
	def __init__(self, calc_cond, chain_list, target):
		"""Set up the chain evaluator.

		Args:
			calc_cond: [n_seg, l_bond, cn, func, nu, nw_type] as built
				by make_chain_list().
			chain_list: per-strand atom-index lists.
			target: path of the output UDF file to analyse.
		"""
		self.calc_cond = calc_cond
		self.n_seg = int(calc_cond[0])		# segments per strand
		self.l_bond = float(calc_cond[1])	# bond length
		#
		self.chain_list = chain_list
		#
		self.target = target
		self.target_name = target.split('.')[0]
		self.uobj = UDFManager(target)
	def eval_chain(self):
		"""Dispatch the analysis.

		For a Green-Kubo target (file name whose first '_'-separated
		token is 'GK') the stress-correlation route is taken; otherwise
		every record is read and bond/angle/end-to-end statistics are
		accumulated and written out.
		"""
		bond_list = []
		angle_list = []
		Rx_list = []
		Ry_list = []
		Rz_list = []
		R_list = []
		#
		gr_list = []
		cn_list = []
		#
		if self.target.split('_')[0] == 'GK':
			self.calc_gk()
			#
			# gt2 = modify(data_list)
			#
			return
		else:
			rec_size = self.uobj.totalRecord()
			# Record 0 is skipped — presumably the initial state; confirm.
			for rec in range(1, rec_size):
				print("Reading Rec=", rec, '/', rec_size)
				# read_chain() is defined later in this class (outside this
				# excerpt).  NOTE(review): r2 is read but never used here.
				bond, angle, e2e_x, e2e_y, e2e_z, e2e, r2, gr, cn = self.read_chain(rec)
				bond_list.extend(bond)
				angle_list.extend(angle)
				Rx_list.extend(e2e_x)
				Ry_list.extend(e2e_y)
				Rz_list.extend(e2e_z)
				R_list.extend(e2e)
				#
				gr_list.append(gr)
				cn_list.append(cn)
			# Average the segment-segment distance along the chain.
			cn_ave, cn_part = self.calc_cn(cn_list)
			#
			self.make_output(bond_list, angle_list, Rx_list, Ry_list, Rz_list, R_list, gr_list, cn_list, cn_ave, cn_part)
		return
####################################################################################
	# Handle the Green-Kubo calculation.
	def calc_gk(self):
		"""Green-Kubo route: build the stress autocorrelation, dump it
		via MakeMulti, and convert G(t) to G*(omega) through irheo().

		MakeMulti presumably comes from the CognacBasicAnalysis
		star-import at the top of the file — confirm.
		"""
		corr, corr_all = self.calc_corr()
		mm = MakeMulti(["Corr_stress", corr, ['Time', 'sigma']], self.target_name)
		mm.make_all()
		mm = MakeMulti(["Corr_stress_all", corr_all, ['Time', 'sigma', 'ave']], self.target_name)
		mm.make_all()
		# mm = MakeMulti(["Corr_stress_semi", corr, ['Time', 'sigma']], self.target_name)
		# mm.make_all()
		self.irheo(corr)
		# mm = MakeMulti(["Corr_stress_mod", corr_mod, ['Time', 'sigma']], self.target_name)
		# mm.make_all()
		return
def calc_corr(self):
self.uobj.jump(self.uobj.totalRecord() -1)
#
vol = self.uobj.get('Statistics_Data.Volume.Total_Average')
corr_all = self.uobj.get('Correlation_Functions.Stress.Correlation[]')
corr = []
prev = 0.
for data in corr_all:
time = data[0]
ave = vol*np.average(np.array(data[2:]))
if data[1] > 0:
g = data[1]
prev = data[1]
else:
g = prev
# g_mod = signal.savgol_filter(g, 3, 2)
corr.append([time, g, ave])
# corr_mod = np.stack([time, g_mod], 1)
return corr, corr_all
##################################
#
def irheo(self, data_list):
minmax = [1e-5, 1e2]
div = 10
#
# mod = self.modify(data_list)
# gt, mod_gt = self.modify_data(mod)
#
gw = self.calcgw(data_list, minmax, div)
self.save_data(gw, 'gw.dat')
#
# self.save_data(data_list, 'modified.dat')
# self.plotgtgw('modified.dat')
# cmd = "corr2gw < modified.dat > gw.dat"
# subprocess.call(cmd, shell=True)
self.plotgtgw('gw.dat')
#
return
def modify_data(self, data_list):
fine_div = 100
#
glist = []
timelist = []
for data in data_list:
time = data[0]
g = data[1]
if time == 0.0:
timelist.append(time)
glist.append(g)
else:
for i in range(1, fine_div + 1):
timelist.append(pre_time + i*(time-pre_time)/fine_div)
glist.append(pre_g + i*(g-pre_g)/fine_div)
pre_time = time
pre_g = g
#
mod_g = signal.savgol_filter(glist, 5, 3)
#
gt = np.stack([timelist, glist], 1)
mod_gt = np.stack([timelist, mod_g], 1)
#
return gt, mod_gt
def calcgw(self, gt, minmax, div):
gw = []
mag = math.log10(minmax[0])
while mag < math.log10(minmax[1]):
for i in range(div):
omega = 10**(mag+i/div)
gstar = self.gs(gt, omega)
gw.append([omega, gstar.real, abs(gstar.imag)])
mag += 1
#
return gw
	def gs(self, gt, omega):
		"""Complex modulus G*(omega) from tabulated G(t).

		Treats G(t) as piecewise linear between the tabulated points
		and sums the analytic transform of each segment — this appears
		to follow the piecewise-linear Fourier scheme used by the
		i-Rheo / Evans et al. method (confirm).  Requires len(gt) >= 2
		and gt[1][0] != 0.
		"""
		gstar = gt[0][1] + (1 - cmath.exp(-1j*omega*gt[1][0]))*(gt[1][1] - gt[0][1])/gt[1][0]/(1j*omega)
		for k in range(len(gt) - 2):
			gstar += (gt[k+2][1] - gt[k+1][1])*(cmath.exp(-1j*omega*gt[k+1][0]) - cmath.exp(-1j*omega*gt[k+2][0]))/(gt[k+2][0] - gt[k+1][0])/(1j*omega)
		#
		return gstar
	#----- Save the computed results under the target file name
def save_data(self, target, f_data):
with open(f_data,'w') as f:
for line in target:
for data in line:
f.write(str(data) + '\t')
f.write('\n')
return
	#----- Plot the results
	def plotgtgw(self, f_data):
		"""Generate the gnuplot script for *f_data* and execute it.

		On Windows the .plt file itself is invoked through the shell
		(relies on the .plt file association); on Linux gnuplot is
		called explicitly.  Other platforms are silently skipped.
		"""
		plt = self.make_gtgw(f_data)
		#
		if platform.system() == "Windows":
			subprocess.call([plt], shell=True)
		elif platform.system() == "Linux":
			subprocess.call(['gnuplot ' + plt], shell=True)
		return
	# Create the required gnuplot script
def make_gtgw(self, f_data):
script = self.gtgw_content(f_data)
plt = f_data.replace('dat', 'plt')
with open(plt, 'w') as f:
f.write(script)
return plt
	# Contents of the script
	def gtgw_content(self, f_data):
		"""Return the gnuplot script text for *f_data*.

		'modified.dat' / 'ave_all_stress.dat' get a log-log stress vs.
		time plot; 'gw.dat' / 'freq_mod.dat' get a G', G'' and
		tan(delta) plot against frequency.  The PNG output name is
		derived from *f_data* ('dat' -> 'png').  All strings below are
		emitted verbatim into the script at runtime.
		"""
		out_png = f_data.replace('dat', 'png')
		# Common header: terminal, palette, data file, output.
		script = 'set term pngcairo font "Arial,14"\n\n'
		script += 'set colorsequence classic\n\n'
		script += 'data = "' + f_data + '"\n\n'
		script += 'set output "' + out_png + '"\n\n'
		script += 'set key left\nset size square\n'
		script += '#set xrange [1:4]\n#set yrange [0:0.2]\n#set xtics 1\n#set ytics 0.1\n'
		if f_data == 'modified.dat' or f_data == 'ave_all_stress.dat':
			# Stress-relaxation plot.
			script += 'set logscale xy\n'
			script += 'set format x "10^{%L}" \nset format y "10^{%L}"\n'
			script += 'set xlabel "Time"\nset ylabel "Stress"\n'
			script += 'plot data u 1:2 axis x1y1 w l lw 2 lt 1 ti "Stress"'
		elif f_data == 'gw.dat' or f_data == 'freq_mod.dat':
			# Dynamic-moduli plot with tan(delta) on the second y axis.
			script += 'set xrange [:1e2]\nset yrange [1e-4:]\nset y2range [1e-1:1e1]\nset y2tics\n'
			script += 'set logscale xyy2\n'
			script += '# 斜辺の傾きが -2 の三角形の準備\n'
			script += 'a = 30; # グラフの中に入るように三角形の高さを調整\n'
			script += 'x1=5e-4; x2=1e-3;\n'
			script += 'y1=a*x1**(1);y2=a*x2**(1);\n'
			script += 'set object 1 polygon from x1,y1 to x2,y1 to x2,y2 to x1,y1 fs empty border\n\n'
			script += 'set format x "10^{%L}" \nset format y "10^{%L}"\nset format y2 "10^{%L}"\n'
			# script += 'set label 1 sprintf("{/Symbol l} = %.1f", deform) at graph 0.6, 0.9\n\n'
			script += 'set xlabel "Frequency"\nset ylabel "G' + "', G''" + '"\nset y2label "tan{/Symbol d}"\n\n'
			script += 'plot '
			script += 'data u 1:2 w lp lt 1 ti "G' + "'" + '", \\\n'
			script += 'data u 1:3 w lp lt 2 ti "G' + "''" + '", \\\n'
			script += 'data u 1:($3/$2) axis x1y2 w lp lt 3 ti "tan{/Symbol d}"'
		script += '\n\nreset'
		return script
def modify(self, data_list):
a = 0.057
tau = 190
fitstart = 500
mod_gt = []
for data in data_list:
time = float(data[0])
g = float(data[1])
if time < fitstart:
# if g > 0:
mod_gt.append([time, g])
else:
break
time = fitstart
while time < 1e5:
tmp = a*np.exp(-time/tau)
if tmp > 1e-10:
mod_gt.append([time, tmp])
time += 10**int(np.log10(time))/100
else:
break
# save_data(mod_gt, 'mod_gt.dat')
return mod_gt
###################################################################
# 鎖に沿ったセグメント間距離の平均を計算
def calc_cn(self, cn_list):
cn_ave = []
cn_part = []
#
l_part = len(cn_list)//10
# データの分割
multi = 0
part_cn = []
tmp = []
for i, part in enumerate(cn_list):
if i < l_part*(multi + 1):
tmp.append(part)
else:
part_cn.append(tmp)
tmp = []
tmp.append(part)
multi += 1
# 各パートごとに平均
for part in part_cn:
tmp = [ [i + 1, 0] for i in range(len(cn_list[0]))]
count = 0
cn_part_ave = []
for data in part:
for i, el in enumerate(data):
tmp[i][1] += el[1]
count += 1
for data in tmp:
cn_part_ave.append([data[0], data[1]/count])
cn_part.append(cn_part_ave)
# パートごとの平均をさらに平均
tmp = [ [i + 1, 0] for i in range(len(cn_list[0]))]
count = 0
for data in cn_part:
for i, el in enumerate(data):
tmp[i][1] += el[1]
count += 1
for data in tmp:
cn_ave.append([data[0], data[1]/count])
return cn_ave, cn_part
def make_output(self, bond_list, angle_list, Rx_list, Ry_list, Rz_list, R_list, gr_list, cn_list, cn_ave, cn_part):
# 結果をヒストグラムで出力
hist_list = [
["bond", bond_list, 200, "True", ['bond length', 'Freq.'], 'box'],
["angle", angle_list, 200, "True", ['angle [deg]', 'Freq.'], 'box'],
["Rx", Rx_list, 200, "True", ['|Rx|', 'Freq.'], [self.n_seg - 1, self.l_bond] ],
["Ry", Ry_list, 200, "True", ['|Ry|', 'Freq.'], [self.n_seg - 1, self.l_bond] ],
["Rz", Rz_list, 200, "True", ['|Rz|', 'Freq.'], [self.n_seg - 1, self.l_bond] ],
["R", R_list, 200, "True", ['|R|', 'Freq.'], [self.n_seg - 1, self.l_bond] ]
]
for cond in hist_list:
mh = MakeHist(cond, self.target_name)
mh.make_hist_all()
# マルチ形式での出力
multi_list = [
["gr", gr_list, ['Distance', 'g(r)']],
["CN", cn_list, ['|i-j|', 'C_{|i-j|}']],
["CN_part", cn_part, ['|i-j|', 'C_{|i-j|}']],
["CN_ave", cn_ave, ['|i-j|', 'C_{|i-j|}']]
]
for cond in multi_list:
mm = MakeMulti(cond, self.target_name)
mm.make_all()
return
	# Polymer-chain statistics for one record
	def read_chain(self, rec):
		"""Collect per-chain statistics for UDF record *rec*.

		Returns (bond_list, angle_list, e2e_x, e2e_y, e2e_z, e2e_list,
		r2_list, gr, cn): bond lengths, bond angles, end-to-end vector
		components and norms, squared end-to-end distances, the radial
		distribution function, and the characteristic-ratio curve C_n.
		"""
		# Initialization: jump to the record and register the periodic cell
		self.uobj.jump(rec)
		self.bound_setup()
		CU.setCell(tuple(self.uobj.get("Structure.Unit_Cell.Cell_Size")))
		# One empty list per segment separation |i-j| along the chain
		r2_ij = [[] for i in range(len(self.chain_list[0][1]))]
		#
		e2e_x = []
		e2e_y = []
		e2e_z = []
		e2e_list = []
		r2_list = []
		bond_list = []
		cn = []
		#
		xp = [[] for i in range(len(self.chain_list[0][1]))]
		#
		ba = CognacBasicAnalysis(self.target, rec)
		for chain in self.chain_list:
			mol = chain[0]
			c_len = len(chain[1])
			atom = self.uobj.get("Set_of_Molecules.molecule[].atom[]", [mol, chain[1][2]])[1]
			# All (start, start+step) segment pairs along the chain
			for step in range(1, c_len):
				for start in range(c_len - step):
					if len(self.calc_cond) == 3: # polymer chain case
						e2e_vec = ba.vector([mol, chain[1][start]], [mol, chain[1][start + step]])
					elif len(self.calc_cond) == 6: # strand case
						end1 = tuple(self.uobj.get("Structure.Position.mol[].atom[]", [mol, chain[1][start]]))
						end2 = tuple(self.uobj.get("Structure.Position.mol[].atom[]", [mol, chain[1][start + step]]))
						e2e_vec = CU.distanceWithBoundary(end1, end2)
					# NOTE(review): e2e_vec is undefined if len(self.calc_cond)
					# is neither 3 nor 6 — confirm callers guarantee this.
					e2e_dist = np.linalg.norm(np.array(e2e_vec))
					r2 = e2e_dist**2
					r2_ij[step].append(r2)
					if step == 1:
						# Adjacent segments: this is a bond length
						bond_list.append(e2e_dist)
					if step == c_len -1:
						# Full chain span: end-to-end vector
						e2e_x.append(e2e_vec[0])
						e2e_y.append(e2e_vec[1])
						e2e_z.append(e2e_vec[2])
						#
						e2e_list.append(e2e_dist)
						r2_list.append(r2)
		#
		# for p in range(c_len):
		# 	xp[p].append(np.linalg.norm(ba.Xp(mol, p)))
		#
		# xp_list = []
		# for i in range(c_len):
		# 	xp_list.append([i+1, np.average(np.array(xp[i]))])
		# print(xp_list)
		# Radial distribution function g(r)
		cg = CognacGeometryAnalysis(self.target, rec)
		gr = cg.gr([atom])
		# Characteristic ratio C_n = <R^2(n)> / (n * b^2)
		for i in range(1, len(r2_ij)):
			cn.append([i, np.average(np.array(r2_ij[i]))/(i*self.l_bond**2)])
		# Bond angles (NaN entries from the analysis are dropped)
		anglename = self.uobj.get("Molecular_Attributes.Angle_Potential[].Name")
		tmp = np.array(ba.angle(anglename[0]))
		angle_list = list(tmp[~np.isnan(tmp)])
		# print(cn[:3])
		# print(r2_list[:3])
		return bond_list, angle_list, e2e_x, e2e_y, e2e_z, e2e_list, r2_list, gr, cn
# 周期境界条件の設定
def bound_setup(self):
axis = self.uobj.get("Simulation_Conditions.Boundary_Conditions")
boundarylist = [0,0,0]
#
for i in range(0,3):
if axis[i] == "NONE" :
boundarylist[i] = 0
elif axis[i] == "PERIODIC" :
boundarylist[i] = 1
elif axis[i] == "REFLECTIVE1" :
boundarylist[i] = 2
elif axis[i] == "REFLECTIVE2" :
boundarylist[i] = 3
CU.setBoundary(tuple(boundarylist))
return
##############################################################################################
##############################################################################################
class MakeHist:
	def __init__(self, cond_list, target_name):
		"""Prepare one histogram job.

		cond_list = [base_name, data_list, bins, normalize, legend, option];
		output files (<base>_hist.dat, <base>.plt, <base>.png) are written
		into <target_name>/<base_name>/.
		"""
		# cond_list = [base_name, data_list, bins, normalize, Legend, option]
		self.list = cond_list[1]
		self.bins = cond_list[2]
		self.dir = os.path.join(target_name, cond_list[0])
		self.base = cond_list[0]
		self.norm = cond_list[3]
		#
		self.f_dat = cond_list[0] + "_hist.dat"
		self.f_plt = cond_list[0] + ".plt"
		self.f_png = cond_list[0] + ".png"
		self.leg = cond_list[4]
		self.option = cond_list[5]
	# Build the histogram graph (full pipeline)
	def make_hist_all(self):
		"""Compute the histogram, write the data file, then render the PNG."""
		# Build the histogram data
		bin_width, hist_data = self.make_hist_data()
		# Write the histogram data file
		self.write_data(hist_data, bin_width)
		# Render the graph with gnuplot
		self.make_graph(bin_width)
		return
	# Build the histogram data
	def make_hist_data(self):
		"""Return (bin_width, Nx2 array of [bin_center, count_or_frequency])."""
		# Uniform weights turn counts into relative frequencies when norm is set
		weights = np.ones(len(self.list))/float(len(self.list))
		if self.norm:
			val, x = np.histogram(self.list, bins=self.bins, weights= weights)
		else:
			val, x = np.histogram(self.list, bins=self.bins)
		# Shift bin edges to bin centers for plotting
		bin_width = (x[1]-x[0])
		mod_x = (x + bin_width/2)[:-1]
		hist_data = np.stack([mod_x, val], axis = 1)
		return bin_width, hist_data
	# Write the histogram data file
	def write_data(self, hist_data, bin_width):
		"""Write tab-separated (bin_center, value) rows to self.dir/self.f_dat."""
		os.makedirs(self.dir, exist_ok=True)
		with open(os.path.join(self.dir, self.f_dat), 'w') as f:
			f.write("# Histgram data:\n\n")
			for line in hist_data:
				f.write(str(line[0]) + '\t' + str(line[1]) + '\n')
		return
	# Render the graph
	def make_graph(self, bin_width):
		"""Create the gnuplot script and run gnuplot from inside self.dir."""
		self.make_script(bin_width)
		cwd = os.getcwd()
		os.chdir(self.dir)
		if platform.system() == "Windows":
			subprocess.call(self.f_plt, shell=True)
		elif platform.system() == "Linux":
			subprocess.call('gnuplot ' + self.f_plt, shell=True)
		os.chdir(cwd)
		return
	# Create the required gnuplot script
	def make_script(self, bin_width):
		"""Write the gnuplot script file for this histogram."""
		with open(os.path.join(self.dir, self.f_plt), 'w') as f:
			script = self.script_content(bin_width)
			f.write(script)
		return
	# Script body
	def script_content(self, bin_width):
		"""Return the gnuplot script text; Rx/Ry/Rz/R also get model fits."""
		script = 'set term pngcairo font "Arial,14" \nset colorsequence classic \n'
		#
		script += '# \ndata = "' + self.f_dat + '" \nset output "' + self.f_png + ' "\n'
		#
		script += '#\nset size square\n#set xrange [0:]\n#set yrange [0:100]\n'
		script += '#\nset xlabel "' + self.leg[0] + '"\nset ylabel "' + self.leg[1] + '"\n\n'
		#
		# Component distributions: fit a 1-D Gaussian to extract C_N
		if self.base == "Rx" or self.base == "Ry" or self.base == "Rz":
			if type(self.option) == list:
				n_seg = self.option[0]
				bond = self.option[1]
				script += 'N = ' + str(n_seg) + '\n'
				script += 'bond = ' + str(bond) + '\n'
				script += 'CN=1.7\n'
				script += 'C=1\n\n'
				#
				script += 'f(x) = C*(3/(2*pi*N*CN*bond**2))**(3/2)*exp(-3*x**2/(2*N*CN*bond**2))\n\n'
				script += 'fit f(x) data via C, CN\n\n'
				script += '#\nset label 1 sprintf("C_N=%.3f", CN) at graph 0.7, 0.8\n\n'
				#
				script += 'set style fill solid 0.5\nset boxwidth ' + str(bin_width) + '\n'
				script += '#\nplot data w boxes noti'
				script += ', \\\n f(x)'
		#
		# |R| distribution: fit the 3-D end-to-end distance distribution
		if self.base == "R":
			if (type(self.option) == list) and len(self.option) == 4:
				n_seg = self.option[0]
				bond = self.option[1]
				cn = self.option[2]
				func = self.option[3]
			elif (type(self.option) == list) and len(self.option) == 2:
				n_seg = self.option[0]
				bond = self.option[1]
				cn = 1.7
				func = 0
			else:
				n_seg = 39
				bond = 0.97
				cn = 1.7
				func = 4
			# NOTE(review): the gnuplot variables CN and f set below are
			# shadowed by the f(x, CN) definition and the fit — confirm
			# whether cn/func were meant only to seed the fit.
			script += 'N = ' + str(n_seg) + '\n'
			script += 'bond = ' + str(bond) + '\n'
			script += 'CN = ' + str(cn) + '\n'
			script += 'f = ' + str(func) + '\n'
			script += 'C = 0.02\n\n'
			script += 'f(x, CN) = C*4.*pi*x**2.*(3./(2.*pi*N*CN*bond**2.))**(3./2.)*exp(-3.*x**2./(2.*N*CN*bond**2.))\n'
			script += 'fit f(x, CN) data via CN, C\n\n'
			script += '#\nset label 1 sprintf("C_N=%.3f", CN) at graph 0.7, 0.8\n\n'
			script += 'set style fill solid 0.5\nset boxwidth ' + str(bin_width) + '\n'
			script += '#\nplot data w boxes noti'
			script += ', \\\n f(x, CN)'
		#
		# Angle histograms are rescaled by the spherical-angle Jacobian
		if self.base == "angle":
			if self.option != "box":
				script += 'plot data u 1:($2/(3.142*sin(3.142*$1/180))) w l noti'
			else:
				script += 'set style fill solid 0.5\nset boxwidth ' + str(bin_width) + '\n'
				script += 'plot data u 1:($2/(3.142*sin(3.142*$1/180))) w boxes noti'
		#
		if self.base == "bond":
			script += 'set style fill solid 0.5\nset boxwidth ' + str(bin_width) + '\n'
			script += '#\nplot data w boxes noti'
		#
		elif self.option == "box":
			script += 'set style fill solid 0.5\nset boxwidth ' + str(bin_width) + '\n'
			script += '#\nplot data w boxes noti'
		return script
#############################################################################################
class MakeMulti:
	def __init__(self, cond_list, target_name):
		"""Prepare one multi-series plot job.

		cond_list = [base_name, data_list, legend]; output files
		(<base>.dat, <base>.plt, <base>.png) go to <target_name>/<base>/.
		"""
		# cond_list = [base_name, data_list, Legend]
		self.list = cond_list[1]
		self.dir = os.path.join(target_name, cond_list[0])
		self.base = cond_list[0]
		self.repeat = len(cond_list[1])
		#
		self.f_dat = cond_list[0] + ".dat"
		self.f_plt = cond_list[0] + ".plt"
		self.f_png = cond_list[0] + ".png"
		self.leg = cond_list[2]
	############################################################
	# Build the multi-series graph
	def make_all(self):
		"""Write the data file, then render the graph."""
		# Write the data file
		self.write_data()
		# Render the graph
		self.make_graph()
		return
	##############################
	# Write the data file
	def write_data(self):
		"""Write self.list to self.dir/self.f_dat.

		Averaged/correlation data is written as one row per record;
		everything else is written as gnuplot index blocks ("# i:").
		"""
		os.makedirs(self.dir, exist_ok=True)
		with open(os.path.join(self.dir, self.f_dat), 'w') as f:
			f.write("# data:\n")
			if self.base == 'CN_ave' or self.base == 'Corr_stress' or self.base == 'Corr_stress_semi' or self.base == 'Corr_stress_mod' or self.base == 'Corr_stress_all':
				for line in self.list:
					for data in line:
						f.write(str(data) + '\t')
					f.write('\n')
			else:
				for i, data in enumerate(self.list):
					f.write("\n\n# " + str(i) +":\n\n")
					for line in data:
						# print(line)
						f.write(str(line[0]) + '\t' + str(line[1]) + '\n')
		return
	###########################################################
	# Render the graph
	def make_graph(self):
		"""Create the gnuplot script and run gnuplot from inside self.dir."""
		self.make_script()
		cwd = os.getcwd()
		os.chdir(self.dir)
		if platform.system() == "Windows":
			subprocess.call(self.f_plt, shell=True)
		elif platform.system() == "Linux":
			subprocess.call('gnuplot ' + self.f_plt, shell=True)
		os.chdir(cwd)
		return
	#######################
	# Create the required gnuplot script
	def make_script(self):
		"""Write the gnuplot script file for this plot."""
		with open(os.path.join(self.dir, self.f_plt), 'w') as f:
			script = self.script_content()
			f.write(script)
		return
	#################
	# Script body
	def script_content(self):
		"""Return the gnuplot script text, specialized per self.base."""
		script = 'set term pngcairo font "Arial,14" \nset colorsequence classic \n'
		#
		script += '# \ndata = "' + self.f_dat + '" \nset output "' + self.f_png + ' "\n'
		#
		script += '#\nset size square\n#set xrange [1:]\n#set yrange [1:]\n'
		script += '#\nset xlabel "' + self.leg[0] + '"\nset ylabel "' + self.leg[1] + '"\n\n'
		#
		# Characteristic-ratio curves: overlay the free-rotation model
		if self.base == "CN" or self.base == "CN_ave" or self.base == "CN_part":
			script += '#\nset xrange [1:]\nset yrange [1:]\n'
			script += 'set key bottom\n\n'
			script += 'ct = 0.274\n'
			script += "f(x) = (1+ct)/(1-ct) -(2*ct*(1-ct**x))/(1-ct)**2/x\n\n"
			script += 'plot '
			if self.base == "CN":
				for i in range(self.repeat):
					script += 'data ind ' + str(i) + ' w l lc ' + str(i) + ' noti, \\\n'
			elif self.base == "CN_part":
				for i in range(self.repeat):
					script += 'data ind ' + str(i) + ' w l lc ' + str(i) + ' ti "part:' + str(i) + '", \\\n'
			else:
				script += 'data w l ti "averaged", \\\n'
			script += 'f(x) w l lw 2 ti "FreeRotationalModel"'
		elif self.base == 'Corr_stress' or self.base == 'Corr_stress_mod':
			# NOTE(review): the plot line below ends with a continuation
			# backslash with nothing following — confirm gnuplot accepts it.
			script += 'set logscale xy \n\nset format x "10^{%L}" \nset format y "10^{%L}"\n\n'
			script += 'plot '
			script += 'data w l ti "Stress" \\\n'
		elif self.base == 'Corr_stress_semi':
			# Semi-log plot with a single-exponential fit over [s:e]
			script += 'set logscale y \n\n#set format x "10^{%L}" \nset format y "10^{%L}"\n\n'
			script += 'a = 1\ntau =1000\n\ns = 100\ne = 1000\n\n'
			script += 'f(x) = a*exp(-1*x/tau) \n'
			script += 'fit [s:e] f(x) data usi 1:2 via a,tau\n\n'
			script += 'set label 1 sprintf("Fitted \\nA = %.1e \\n{/Symbol t} = %.1e \\nFitting Region: %d to %d", a, tau, s, e) at graph 0.35, 0.75\n\n'
			script += 'plot '
			script += 'data w l ti "Stress", \\\n'
			script += '[s:e] f(x) noti'
		elif self.base == 'Corr_stress_all':
			script += 'set logscale xy \n\nset format x "10^{%L}" \nset format y "10^{%L}"\n\n'
			script += 'plot data u 1:2 w l ti "G_t", \\\n'
			script += 'data u 1:3 w l ti "xy", \\\n'
			script += 'data u 1:4 w l ti "yz", \\\n'
			script += 'data u 1:5 w l ti "zx", \\\n'
			script += 'data u 1:6 w l ti "xx-yy", \\\n'
			script += 'data u 1:7 w l ti "yy-zz"'
		else:
			# Generic case: one line per index block
			script += 'plot '
			for i in range(self.repeat):
				script += 'data ind ' + str(i) + ' w l lc ' + str(i) + 'noti, \\\n'
		return script
#######################################################
| [
"numpy.log10",
"scipy.signal.savgol_filter",
"numpy.array",
"cmath.exp",
"math.log10",
"numpy.histogram",
"numpy.exp",
"numpy.stack",
"platform.system",
"subprocess.call",
"CognacUtility.distanceWithBoundary",
"UDFManager.UDFManager",
"os.access",
"numpy.isnan",
"CognacGeometryAnalysis.C... | [((1256, 1298), 'os.access', 'os.access', (['"""target_condition.udf"""', 'os.R_OK'], {}), "('target_condition.udf', os.R_OK)\n", (1265, 1298), False, 'import os\n'), ((1377, 1411), 'UDFManager.UDFManager', 'UDFManager', (['"""target_condition.udf"""'], {}), "('target_condition.udf')\n", (1387, 1411), False, 'from UDFManager import UDFManager\n'), ((2210, 2228), 'UDFManager.UDFManager', 'UDFManager', (['target'], {}), '(target)\n', (2220, 2228), False, 'from UDFManager import UDFManager\n'), ((4908, 4926), 'UDFManager.UDFManager', 'UDFManager', (['target'], {}), '(target)\n', (4918, 4926), False, 'from UDFManager import UDFManager\n'), ((7806, 7839), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['glist', '(5)', '(3)'], {}), '(glist, 5, 3)\n', (7826, 7839), True, 'import scipy.signal as signal\n'), ((7851, 7881), 'numpy.stack', 'np.stack', (['[timelist, glist]', '(1)'], {}), '([timelist, glist], 1)\n', (7859, 7881), True, 'import numpy as np\n'), ((7893, 7923), 'numpy.stack', 'np.stack', (['[timelist, mod_g]', '(1)'], {}), '([timelist, mod_g], 1)\n', (7901, 7923), True, 'import numpy as np\n'), ((8003, 8024), 'math.log10', 'math.log10', (['minmax[0]'], {}), '(minmax[0])\n', (8013, 8024), False, 'import math\n'), ((15014, 15054), 'CognacGeometryAnalysis.CognacGeometryAnalysis', 'CognacGeometryAnalysis', (['self.target', 'rec'], {}), '(self.target, rec)\n', (15036, 15054), False, 'from CognacGeometryAnalysis import CognacGeometryAnalysis\n'), ((16295, 16334), 'os.path.join', 'os.path.join', (['target_name', 'cond_list[0]'], {}), '(target_name, cond_list[0])\n', (16307, 16334), False, 'import os\n'), ((17134, 17164), 'numpy.stack', 'np.stack', (['[mod_x, val]'], {'axis': '(1)'}), '([mod_x, val], axis=1)\n', (17142, 17164), True, 'import numpy as np\n'), ((17265, 17301), 'os.makedirs', 'os.makedirs', (['self.dir'], {'exist_ok': '(True)'}), '(self.dir, exist_ok=True)\n', (17276, 17301), False, 'import os\n'), ((17570, 17581), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17579, 17581), False, 'import os\n'), ((17584, 17602), 'os.chdir', 'os.chdir', (['self.dir'], {}), '(self.dir)\n', (17592, 17602), False, 'import os\n'), ((17778, 17791), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (17786, 17791), False, 'import os\n'), ((20848, 20887), 'os.path.join', 'os.path.join', (['target_name', 'cond_list[0]'], {}), '(target_name, cond_list[0])\n', (20860, 20887), False, 'import os\n'), ((21338, 21374), 'os.makedirs', 'os.makedirs', (['self.dir'], {'exist_ok': '(True)'}), '(self.dir, exist_ok=True)\n', (21349, 21374), False, 'import os\n'), ((22046, 22057), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22055, 22057), False, 'import os\n'), ((22060, 22078), 'os.chdir', 'os.chdir', (['self.dir'], {}), '(self.dir)\n', (22068, 22078), False, 'import os\n'), ((22254, 22267), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (22262, 22267), False, 'import os\n'), ((1043, 1071), 'os.access', 'os.access', (['param[1]', 'os.R_OK'], {}), '(param[1], os.R_OK)\n', (1052, 1071), False, 'import os\n'), ((8039, 8060), 'math.log10', 'math.log10', (['minmax[1]'], {}), '(minmax[1])\n', (8049, 8060), False, 'import math\n'), ((8830, 8847), 'platform.system', 'platform.system', ([], {}), '()\n', (8845, 8847), False, 'import platform\n'), ((8865, 8899), 'subprocess.call', 'subprocess.call', (['[plt]'], {'shell': '(True)'}), '([plt], shell=True)\n', (8880, 8899), False, 'import subprocess\n'), ((16927, 16983), 'numpy.histogram', 'np.histogram', (['self.list'], {'bins': 'self.bins', 'weights': 'weights'}), '(self.list, bins=self.bins, weights=weights)\n', (16939, 16983), True, 'import numpy as np\n'), ((17005, 17044), 'numpy.histogram', 'np.histogram', (['self.list'], {'bins': 'self.bins'}), '(self.list, bins=self.bins)\n', (17017, 17044), True, 'import numpy as np\n'), ((17608, 17625), 'platform.system', 'platform.system', ([], {}), '()\n', (17623, 17625), False, 'import platform\n'), ((17643, 17682), 
'subprocess.call', 'subprocess.call', (['self.f_plt'], {'shell': '(True)'}), '(self.f_plt, shell=True)\n', (17658, 17682), False, 'import subprocess\n'), ((22084, 22101), 'platform.system', 'platform.system', ([], {}), '()\n', (22099, 22101), False, 'import platform\n'), ((22119, 22158), 'subprocess.call', 'subprocess.call', (['self.f_plt'], {'shell': '(True)'}), '(self.f_plt, shell=True)\n', (22134, 22158), False, 'import subprocess\n'), ((8907, 8924), 'platform.system', 'platform.system', ([], {}), '()\n', (8922, 8924), False, 'import platform\n'), ((8940, 8987), 'subprocess.call', 'subprocess.call', (["['gnuplot ' + plt]"], {'shell': '(True)'}), "(['gnuplot ' + plt], shell=True)\n", (8955, 8987), False, 'import subprocess\n'), ((11138, 11157), 'numpy.exp', 'np.exp', (['(-time / tau)'], {}), '(-time / tau)\n', (11144, 11157), True, 'import numpy as np\n'), ((17314, 17348), 'os.path.join', 'os.path.join', (['self.dir', 'self.f_dat'], {}), '(self.dir, self.f_dat)\n', (17326, 17348), False, 'import os\n'), ((17690, 17707), 'platform.system', 'platform.system', ([], {}), '()\n', (17705, 17707), False, 'import platform\n'), ((17723, 17775), 'subprocess.call', 'subprocess.call', (["('gnuplot ' + self.f_plt)"], {'shell': '(True)'}), "('gnuplot ' + self.f_plt, shell=True)\n", (17738, 17775), False, 'import subprocess\n'), ((17864, 17898), 'os.path.join', 'os.path.join', (['self.dir', 'self.f_plt'], {}), '(self.dir, self.f_plt)\n', (17876, 17898), False, 'import os\n'), ((21387, 21421), 'os.path.join', 'os.path.join', (['self.dir', 'self.f_dat'], {}), '(self.dir, self.f_dat)\n', (21399, 21421), False, 'import os\n'), ((22166, 22183), 'platform.system', 'platform.system', ([], {}), '()\n', (22181, 22183), False, 'import platform\n'), ((22199, 22251), 'subprocess.call', 'subprocess.call', (["('gnuplot ' + self.f_plt)"], {'shell': '(True)'}), "('gnuplot ' + self.f_plt, shell=True)\n", (22214, 22251), False, 'import subprocess\n'), ((22354, 22388), 'os.path.join', 
'os.path.join', (['self.dir', 'self.f_plt'], {}), '(self.dir, self.f_plt)\n', (22366, 22388), False, 'import os\n'), ((6697, 6715), 'numpy.array', 'np.array', (['data[2:]'], {}), '(data[2:])\n', (6705, 6715), True, 'import numpy as np\n'), ((15336, 15349), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (15344, 15349), True, 'import numpy as np\n'), ((14488, 14505), 'numpy.array', 'np.array', (['e2e_vec'], {}), '(e2e_vec)\n', (14496, 14505), True, 'import numpy as np\n'), ((8278, 8313), 'cmath.exp', 'cmath.exp', (['(-1.0j * omega * gt[1][0])'], {}), '(-1.0j * omega * gt[1][0])\n', (8287, 8313), False, 'import cmath\n'), ((8421, 8460), 'cmath.exp', 'cmath.exp', (['(-1.0j * omega * gt[k + 1][0])'], {}), '(-1.0j * omega * gt[k + 1][0])\n', (8430, 8460), False, 'import cmath\n'), ((8455, 8494), 'cmath.exp', 'cmath.exp', (['(-1.0j * omega * gt[k + 2][0])'], {}), '(-1.0j * omega * gt[k + 2][0])\n', (8464, 8494), False, 'import cmath\n'), ((11226, 11240), 'numpy.log10', 'np.log10', (['time'], {}), '(time)\n', (11234, 11240), True, 'import numpy as np\n'), ((14421, 14456), 'CognacUtility.distanceWithBoundary', 'CU.distanceWithBoundary', (['end1', 'end2'], {}), '(end1, end2)\n', (14444, 14456), True, 'import CognacUtility as CU\n'), ((15144, 15162), 'numpy.array', 'np.array', (['r2_ij[i]'], {}), '(r2_ij[i])\n', (15152, 15162), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
class LinearRegression:
    """Linear regression fit by batch gradient descent.

    Supports optional L2 (ridge) regularization through ``lamb``
    (``lamb=0``, the default, reproduces plain least squares).
    """

    def fit(self, X, y, lamb=0, add_intercept=True, iters=1000, lr=0.006):
        """Fit the model to training data using gradient descent.

        Args:
            X: (n_samples, n_features) design matrix.
            y: (n_samples, 1) target column vector.
            lamb: L2 regularization strength (applied to all weights,
                including the intercept).
            add_intercept: prepend a column of ones to X.
            iters: number of gradient steps.
            lr: learning rate.
        """
        if add_intercept:
            X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))
        n, p = X.shape
        self.X = X
        self.y = y
        # BUG FIX: the parameter vector must have one entry per column (p);
        # the original used the SAMPLE count (n + 1), which breaks X @ Theta.
        self.Theta = np.random.randn(p, 1)
        # Gradient of 0.5*||X Theta - y||^2 + 0.5*lamb*||Theta||^2, transposed.
        loss_prime = lambda x, y, theta: (x @ theta - y).T @ x + lamb * theta.T
        self._gradient_descent(iters=iters, loss_prime=loss_prime, lr=lr)

    def predict(self, X, add_intercept=True):
        """Return predictions (n_samples, 1) for the given data."""
        if add_intercept:
            X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))
        return X @ self.Theta

    def _gradient_descent(self, iters, loss_prime, lr):
        """Run ``iters`` fixed-step gradient updates on self.Theta."""
        for _ in range(iters):
            grad = loss_prime(self.X, self.y, self.Theta)
            self.Theta -= lr * grad.T
class LogReg:
    """Minimal logistic-probability holder.

    NOTE(review): ``self.Theta`` is never assigned in this class — callers
    must set it before calling :meth:`loss`; confirm this is intended.
    """

    def __init__(self, r=0.5):
        self.r = r

    def loss(self, x, y):
        """Return the model probability of label ``y`` (1 or not-1) for ``x``."""
        expo = np.exp(self.Theta @ x)
        return expo / (1 + expo) if y == 1 else 1 / (1 + expo)
class LogisticRegressionClassifier:
    """Binary logistic regression trained by batch gradient descent.

    Training labels ``y`` are expected in {0, 1}; :meth:`predict` emits
    +1 / -1 using the decision threshold ``r``.
    """

    def __init__(self, r=0.5):
        self.r = r

    def fit(self, X, y, iters=500, lr=0.01):
        """Fit weights (with intercept) by gradient descent.

        Args:
            X: (n_samples, n_features) design matrix.
            y: (n_samples, 1) labels in {0, 1}.
            iters: number of gradient steps.
            lr: learning rate.
        """
        self.y = y
        self.X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))
        self.m, self.n = X.shape
        self.Theta = np.random.randn(self.n + 1, 1)
        # Mean gradient of the logistic log-loss, transposed.
        loss_prime = lambda x, y, theta: 1 / self.m * (self._sigmoid(x @ theta)
                                                       - y).T @ x
        self._gradient_descent(iters=iters, loss_prime=loss_prime, lr=lr)

    def predict(self, X):
        """Return (n_samples, 1) array of +1/-1 class labels."""
        X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))
        probs = self._sigmoid(X @ self.Theta)
        return np.array([[1 if p > self.r else -1] for p in probs.ravel()])

    def _sigmoid(self, Z):
        """Numerically stable sigmoid.

        BUG FIX: the original exp(Z)/(1+exp(Z)) overflows to nan for large
        positive Z; evaluate whichever branch keeps the exponent <= 0.
        """
        Z = np.asarray(Z, dtype=float)
        out = np.empty_like(Z)
        pos = Z >= 0
        out[pos] = 1.0 / (1.0 + np.exp(-Z[pos]))
        ez = np.exp(Z[~pos])
        out[~pos] = ez / (1.0 + ez)
        return out

    def _gradient_descent(self, iters, loss_prime, lr):
        """Run ``iters`` fixed-step gradient updates on self.Theta."""
        for _ in range(iters):
            grad = loss_prime(self.X, self.y, self.Theta)
            self.Theta -= lr * grad.T
| [
"numpy.exp",
"numpy.random.randn",
"numpy.ones"
] | [((426, 451), 'numpy.random.randn', 'np.random.randn', (['(n + 1)', '(1)'], {}), '(n + 1, 1)\n', (441, 451), True, 'import numpy as np\n'), ((1217, 1239), 'numpy.exp', 'np.exp', (['(self.Theta @ x)'], {}), '(self.Theta @ x)\n', (1223, 1239), True, 'import numpy as np\n'), ((1688, 1718), 'numpy.random.randn', 'np.random.randn', (['(self.n + 1)', '(1)'], {}), '(self.n + 1, 1)\n', (1703, 1718), True, 'import numpy as np\n'), ((2266, 2275), 'numpy.exp', 'np.exp', (['Z'], {}), '(Z)\n', (2272, 2275), True, 'import numpy as np\n'), ((1589, 1628), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (1596, 1628), True, 'import numpy as np\n'), ((2026, 2065), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (2033, 2065), True, 'import numpy as np\n'), ((2283, 2292), 'numpy.exp', 'np.exp', (['Z'], {}), '(Z)\n', (2289, 2292), True, 'import numpy as np\n'), ((299, 338), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (306, 338), True, 'import numpy as np\n'), ((809, 848), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (816, 848), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
'''
3D Copter-Hover class
Copyright (C) 2021 <NAME>
MIT License
'''
from time import sleep
from numpy import radians
import threading
from utils import _make_parser
from hover import _Hover
from rendering.threed import ThreeDHoverRenderer
from pidcontrollers import AngularVelocityPidController
from pidcontrollers import PositionHoldPidController
class Hover3D(_Hover):
    """12-state 3D copter-hover task with four motor actions."""

    def __init__(self, obs_size=12):

        _Hover.__init__(self, obs_size, 4)

        # Degrees -> radians, converted once up front
        self.max_angle = radians(self.MAX_ANGLE)

        # Column labels for the CSV logger
        self.STATE_NAMES = ['X', 'dX', 'Y', 'dY', 'Z', 'dZ',
                            'Phi', 'dPhi', 'Theta', 'dTheta', 'Psi', 'dPsi']

        # PID controllers backing the heuristic demo
        self.roll_rate_pid = AngularVelocityPidController()
        self.pitch_rate_pid = AngularVelocityPidController()
        self.yaw_rate_pid = AngularVelocityPidController()
        self.x_poshold_pid = PositionHoldPidController()
        self.y_poshold_pid = PositionHoldPidController()

    def reset(self):
        return _Hover._reset(self)

    def render(self, mode='human'):
        '''
        Returns None because the viewer runs on a separate thread
        '''
        return None

    def demo_pose(self, args):
        '''
        Holds the vehicle frozen at a fixed pose while the viewer is open
        '''
        x, y, z, phi, theta, viewer = args

        while viewer.is_open():

            self._reset(pose=(x, y, z, phi, theta), perturb=False)
            self.render()
            sleep(.01)

        self.close()

    def heuristic(self, state, nopid):
        '''
        PID controller demo: position-hold + rate damping, mixed to motors
        '''
        x, dx, y, dy, z, dz, phi, dphi, theta, dtheta, _, dpsi = state

        roll_todo, pitch_todo, yaw_todo = 0, 0, 0

        if not nopid:

            # Roll = rate damping + Y position hold
            roll_todo = (self.roll_rate_pid.getDemand(dphi)
                         + self.x_poshold_pid.getDemand(y, dy))

            # Pitch = rate damping + X position hold
            pitch_todo = (self.pitch_rate_pid.getDemand(-dtheta)
                          + self.y_poshold_pid.getDemand(x, dx))

            yaw_todo = self.yaw_rate_pid.getDemand(-dpsi)

        hover_todo = self.altpid.getDemand(z, dz)

        t, r, p, yw = (hover_todo+1)/2, roll_todo, pitch_todo, yaw_todo

        # Motor mixer: thrust plus signed roll/pitch/yaw contributions
        signs = ((-1, -1, -1), (+1, +1, -1), (+1, -1, +1), (-1, +1, +1))
        return [t + sr*r + sp*p + sy*yw for sr, sp, sy in signs]

    def _get_motors(self, motors):
        return motors

    def _get_state(self, state):
        return state
# End of Hover3D classes -------------------------------------------------
def make_parser():
    '''
    Exported function to support command-line parsing in scripts.

    You can add your own arguments, then call parse() to get args.
    '''

    # Start from the general-purpose parser of the _Hover superclass
    parser = _make_parser()

    # 3D-specific viewing option
    parser.add_argument('--view', required=False, default='30,120',
                        help='Elevation, azimuth for view perspective')

    # Mutually exclusive display/sensor modes
    group = parser.add_mutually_exclusive_group()
    for flag, desc in (('--vision', 'Use vision sensor'),
                       ('--dvs', 'Use Dynamic Vision Sensor'),
                       ('--nodisplay', 'Suppress display')):
        group.add_argument(flag, action='store_true', help=desc)

    return parser
def parse(parser):
    '''
    Parses command-line args and returns (args, viewangles), where
    viewangles is the --view option decoded into an (elev, azim) int tuple.
    '''
    args = parser.parse_args()

    viewangles = tuple(int(part) for part in args.view.split(','))

    return args, viewangles
def main():
    '''
    Entry point: runs the heuristic demo (or a frozen pose) on a thread
    while the viewer runs on the main thread.
    '''
    parser = make_parser()

    parser.add_argument('--freeze', dest='pose', required=False,
                        default=None, help='Freeze in pose x,y,z,phi,theta')

    args, viewangles = parse(parser)

    env = Hover3D()

    # BUG FIX: viewer was referenced below even when --nodisplay left it
    # undefined (NameError with --nodisplay --freeze); default it to None.
    viewer = None
    if not args.nodisplay:
        viewer = ThreeDHoverRenderer(env, viewangles=viewangles)

    threadfun = env.demo_heuristic
    threadargs = args.seed, args.nopid, args.csvfilename

    if args.pose is not None:

        try:
            x, y, z, phi, theta = (float(s) for s in args.pose.split(','))

        except Exception:
            print('POSE must be x,y,z,phi,theta')
            exit(1)

        if viewer is None:
            # demo_pose loops on viewer.is_open(); it cannot run headless.
            print('--freeze requires a display (remove --nodisplay)')
            exit(1)

        threadfun = env.demo_pose
        threadargs = (x, y, z, phi, theta, viewer)

    thread = threading.Thread(target=threadfun, args=threadargs)

    thread.start()

    if viewer is not None:
        viewer.start()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.radians",
"utils._make_parser",
"hover._Hover._reset",
"hover._Hover.__init__",
"time.sleep",
"pidcontrollers.AngularVelocityPidController",
"rendering.threed.ThreeDHoverRenderer",
"pidcontrollers.PositionHoldPidController",
"threading.Thread"
] | [((2982, 2996), 'utils._make_parser', '_make_parser', ([], {}), '()\n', (2994, 2996), False, 'from utils import _make_parser\n'), ((4513, 4564), 'threading.Thread', 'threading.Thread', ([], {'target': 'threadfun', 'args': 'threadargs'}), '(target=threadfun, args=threadargs)\n', (4529, 4564), False, 'import threading\n'), ((471, 505), 'hover._Hover.__init__', '_Hover.__init__', (['self', 'obs_size', '(4)'], {}), '(self, obs_size, 4)\n', (486, 505), False, 'from hover import _Hover\n'), ((586, 609), 'numpy.radians', 'radians', (['self.MAX_ANGLE'], {}), '(self.MAX_ANGLE)\n', (593, 609), False, 'from numpy import radians\n'), ((869, 899), 'pidcontrollers.AngularVelocityPidController', 'AngularVelocityPidController', ([], {}), '()\n', (897, 899), False, 'from pidcontrollers import AngularVelocityPidController\n'), ((931, 961), 'pidcontrollers.AngularVelocityPidController', 'AngularVelocityPidController', ([], {}), '()\n', (959, 961), False, 'from pidcontrollers import AngularVelocityPidController\n'), ((991, 1021), 'pidcontrollers.AngularVelocityPidController', 'AngularVelocityPidController', ([], {}), '()\n', (1019, 1021), False, 'from pidcontrollers import AngularVelocityPidController\n'), ((1052, 1079), 'pidcontrollers.PositionHoldPidController', 'PositionHoldPidController', ([], {}), '()\n', (1077, 1079), False, 'from pidcontrollers import PositionHoldPidController\n'), ((1110, 1137), 'pidcontrollers.PositionHoldPidController', 'PositionHoldPidController', ([], {}), '()\n', (1135, 1137), False, 'from pidcontrollers import PositionHoldPidController\n'), ((1180, 1199), 'hover._Hover._reset', '_Hover._reset', (['self'], {}), '(self)\n', (1193, 1199), False, 'from hover import _Hover\n'), ((4044, 4091), 'rendering.threed.ThreeDHoverRenderer', 'ThreeDHoverRenderer', (['env'], {'viewangles': 'viewangles'}), '(env, viewangles=viewangles)\n', (4063, 4091), False, 'from rendering.threed import ThreeDHoverRenderer\n'), ((1580, 1591), 'time.sleep', 'sleep', (['(0.01)'], 
{}), '(0.01)\n', (1585, 1591), False, 'from time import sleep\n')] |
from collections import namedtuple
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from rlpyt.agents.base import AgentStep, BaseAgent
from rlpyt.distributions.gaussian import DistInfoStd, Gaussian
from rlpyt.models.qpg.mlp import PiMlpModel, QofMuMlpModel, VMlpModel
from rlpyt.models.utils import update_state_dict
from rlpyt.utils.buffer import buffer_to
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.logging import logger
from rlpyt.utils.quick_args import save__init__args
# from torch.nn.parallel import DistributedDataParallelCPU as DDPC # Deprecated
# Bounds on the policy log-std, applied via exp() when building the Gaussian.
MIN_LOG_STD = -20
MAX_LOG_STD = 2

# Per-step agent info fields and the grouping of the agent's four models.
AgentInfo = namedarraytuple("AgentInfo", ["dist_info"])
Models = namedtuple("Models", ["pi", "q1", "q2", "v"])
class SacAgent(BaseAgent):
    """TO BE DEPRECATED.

    Soft Actor-Critic agent: a stochastic policy model (pi) plus twin Q
    models and a state-value model with a slowly-updated target copy.
    """

    def __init__(
        self,
        ModelCls=PiMlpModel,  # Pi model.
        QModelCls=QofMuMlpModel,
        VModelCls=VMlpModel,
        model_kwargs=None,  # Pi model.
        q_model_kwargs=None,
        v_model_kwargs=None,
        initial_model_state_dict=None,  # All models.
        action_squash=1.0,  # Max magnitude (or None).
        pretrain_std=0.75,  # With squash 0.75 is near uniform.
    ):
        """Saves input arguments; all networks default to two hidden layers of 256."""
        if model_kwargs is None:
            model_kwargs = dict(hidden_sizes=[256, 256])
        if q_model_kwargs is None:
            q_model_kwargs = dict(hidden_sizes=[256, 256])
        if v_model_kwargs is None:
            v_model_kwargs = dict(hidden_sizes=[256, 256])
        super().__init__(
            ModelCls=ModelCls,
            model_kwargs=model_kwargs,
            initial_model_state_dict=initial_model_state_dict,
        )
        save__init__args(locals())
        self.min_itr_learn = 0  # Get from algo.

    def initialize(self, env_spaces, share_memory=False, global_B=1, env_ranks=None):
        """Builds the q1, q2, v, and target-v models (the base class builds pi),
        then restores all state dicts at once if one was provided."""
        _initial_model_state_dict = self.initial_model_state_dict
        self.initial_model_state_dict = None  # Don't let base agent try to load.
        super().initialize(
            env_spaces, share_memory, global_B=global_B, env_ranks=env_ranks
        )
        self.initial_model_state_dict = _initial_model_state_dict
        self.q1_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)
        self.q2_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)
        self.v_model = self.VModelCls(**self.env_model_kwargs, **self.v_model_kwargs)
        self.target_v_model = self.VModelCls(
            **self.env_model_kwargs, **self.v_model_kwargs
        )
        # Target network starts as an exact copy of the value network.
        self.target_v_model.load_state_dict(self.v_model.state_dict())
        if self.initial_model_state_dict is not None:
            self.load_state_dict(self.initial_model_state_dict)
        assert len(env_spaces.action.shape) == 1
        self.distribution = Gaussian(
            dim=env_spaces.action.shape[0],
            squash=self.action_squash,
            min_std=np.exp(MIN_LOG_STD),
            max_std=np.exp(MAX_LOG_STD),
        )

    def to_device(self, cuda_idx=None):
        """Moves every model (not just pi) to the configured device."""
        super().to_device(cuda_idx)
        self.q1_model.to(self.device)
        self.q2_model.to(self.device)
        self.v_model.to(self.device)
        self.target_v_model.to(self.device)

    def data_parallel(self):
        """Wraps q1, q2, and v models in DistributedDataParallel (pi is
        wrapped by the base class); returns the device id used."""
        # BUGFIX: `super().data_parallel` was referenced without calling it,
        # so `device_id` was a bound method (not an int/None) and the pi model
        # was never wrapped.  Must invoke the base-class method.
        device_id = super().data_parallel()
        self.q1_model = DDP(
            self.q1_model,
            device_ids=None if device_id is None else [device_id],  # 1 GPU.
            output_device=device_id,
        )
        self.q2_model = DDP(
            self.q2_model,
            device_ids=None if device_id is None else [device_id],  # 1 GPU.
            output_device=device_id,
        )
        self.v_model = DDP(
            self.v_model,
            device_ids=None if device_id is None else [device_id],  # 1 GPU.
            output_device=device_id,
        )
        return device_id

    def give_min_itr_learn(self, min_itr_learn):
        """Records the iteration before which sampling uses the pretrain std."""
        self.min_itr_learn = min_itr_learn  # From algo.

    def make_env_to_model_kwargs(self, env_spaces):
        """Maps env spaces to model constructor kwargs (1-D action space only)."""
        assert len(env_spaces.action.shape) == 1
        return dict(
            observation_shape=env_spaces.observation.shape,
            action_size=env_spaces.action.shape[0],
        )

    def q(self, observation, prev_action, prev_reward, action):
        """Twin Q-values for the given state-action; returned on CPU."""
        model_inputs = buffer_to(
            (observation, prev_action, prev_reward, action), device=self.device
        )
        q1 = self.q1_model(*model_inputs)
        q2 = self.q2_model(*model_inputs)
        return q1.cpu(), q2.cpu()

    def v(self, observation, prev_action, prev_reward):
        """State value from the learned value model; returned on CPU."""
        model_inputs = buffer_to(
            (observation, prev_action, prev_reward), device=self.device
        )
        v = self.v_model(*model_inputs)
        return v.cpu()

    def pi(self, observation, prev_action, prev_reward):
        """Samples an action and its log-likelihood from the policy."""
        model_inputs = buffer_to(
            (observation, prev_action, prev_reward), device=self.device
        )
        mean, log_std = self.model(*model_inputs)
        dist_info = DistInfoStd(mean=mean, log_std=log_std)
        action, log_pi = self.distribution.sample_loglikelihood(dist_info)
        # action = self.distribution.sample(dist_info)
        # log_pi = self.distribution.log_likelihood(action, dist_info)
        log_pi, dist_info = buffer_to((log_pi, dist_info), device="cpu")
        return action, log_pi, dist_info  # Action stays on device for q models.

    def target_v(self, observation, prev_action, prev_reward):
        """State value from the (frozen) target value model; returned on CPU."""
        model_inputs = buffer_to(
            (observation, prev_action, prev_reward), device=self.device
        )
        target_v = self.target_v_model(*model_inputs)
        return target_v.cpu()

    @torch.no_grad()
    def step(self, observation, prev_action, prev_reward):
        """Samples an action for environment interaction (no gradients)."""
        model_inputs = buffer_to(
            (observation, prev_action, prev_reward), device=self.device
        )
        mean, log_std = self.model(*model_inputs)
        dist_info = DistInfoStd(mean=mean, log_std=log_std)
        action = self.distribution.sample(dist_info)
        agent_info = AgentInfo(dist_info=dist_info)
        action, agent_info = buffer_to((action, agent_info), device="cpu")
        return AgentStep(action=action, agent_info=agent_info)

    def update_target(self, tau=1):
        """Polyak-averages the value model into the target (tau=1 is a hard copy)."""
        update_state_dict(self.target_v_model, self.v_model.state_dict(), tau)

    @property
    def models(self):
        """Named bundle of all four models."""
        return Models(pi=self.model, q1=self.q1_model, q2=self.q2_model, v=self.v_model)

    def pi_parameters(self):
        return self.model.parameters()

    def q1_parameters(self):
        return self.q1_model.parameters()

    def q2_parameters(self):
        return self.q2_model.parameters()

    def v_parameters(self):
        return self.v_model.parameters()

    def train_mode(self, itr):
        """Puts all trainable models into train mode."""
        super().train_mode(itr)
        self.q1_model.train()
        self.q2_model.train()
        self.v_model.train()

    def sample_mode(self, itr):
        """Eval mode for sampling; uses the pretrain std until min_itr_learn."""
        super().sample_mode(itr)
        self.q1_model.eval()
        self.q2_model.eval()
        self.v_model.eval()
        if itr == 0:
            logger.log(f"Agent at itr {itr}, sample std: {self.pretrain_std}")
        if itr == self.min_itr_learn:
            logger.log(f"Agent at itr {itr}, sample std: learned.")
        std = None if itr >= self.min_itr_learn else self.pretrain_std
        self.distribution.set_std(std)  # If None: std from policy dist_info.

    def eval_mode(self, itr):
        """Deterministic evaluation: std forced to zero."""
        super().eval_mode(itr)
        self.q1_model.eval()
        self.q2_model.eval()
        self.v_model.eval()
        self.distribution.set_std(0.0)  # Deterministic (dist_info std ignored).

    def state_dict(self):
        """State dicts for all five networks, keyed by model name."""
        return dict(
            model=self.model.state_dict(),  # Pi model.
            q1_model=self.q1_model.state_dict(),
            q2_model=self.q2_model.state_dict(),
            v_model=self.v_model.state_dict(),
            target_v_model=self.target_v_model.state_dict(),
        )

    def load_state_dict(self, state_dict):
        """Restores all five networks from a dict produced by state_dict()."""
        self.model.load_state_dict(state_dict["model"])
        self.q1_model.load_state_dict(state_dict["q1_model"])
        self.q2_model.load_state_dict(state_dict["q2_model"])
        self.v_model.load_state_dict(state_dict["v_model"])
        self.target_v_model.load_state_dict(state_dict["target_v_model"])
| [
"collections.namedtuple",
"rlpyt.agents.base.AgentStep",
"rlpyt.utils.logging.logger.log",
"rlpyt.distributions.gaussian.DistInfoStd",
"numpy.exp",
"torch.no_grad",
"rlpyt.utils.buffer.buffer_to",
"rlpyt.utils.collections.namedarraytuple",
"torch.nn.parallel.DistributedDataParallel"
] | [((678, 721), 'rlpyt.utils.collections.namedarraytuple', 'namedarraytuple', (['"""AgentInfo"""', "['dist_info']"], {}), "('AgentInfo', ['dist_info'])\n", (693, 721), False, 'from rlpyt.utils.collections import namedarraytuple\n'), ((731, 776), 'collections.namedtuple', 'namedtuple', (['"""Models"""', "['pi', 'q1', 'q2', 'v']"], {}), "('Models', ['pi', 'q1', 'q2', 'v'])\n", (741, 776), False, 'from collections import namedtuple\n'), ((5695, 5710), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5708, 5710), False, 'import torch\n'), ((3352, 3454), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.q1_model'], {'device_ids': '(None if device_id is None else [device_id])', 'output_device': 'device_id'}), '(self.q1_model, device_ids=None if device_id is None else [device_id],\n output_device=device_id)\n', (3355, 3454), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((3532, 3634), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.q2_model'], {'device_ids': '(None if device_id is None else [device_id])', 'output_device': 'device_id'}), '(self.q2_model, device_ids=None if device_id is None else [device_id],\n output_device=device_id)\n', (3535, 3634), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((3711, 3812), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['self.v_model'], {'device_ids': '(None if device_id is None else [device_id])', 'output_device': 'device_id'}), '(self.v_model, device_ids=None if device_id is None else [device_id],\n output_device=device_id)\n', (3714, 3812), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((4331, 4409), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(observation, prev_action, prev_reward, action)'], {'device': 'self.device'}), '((observation, prev_action, prev_reward, action), device=self.device)\n', (4340, 4409), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((4630, 4700), 'rlpyt.utils.buffer.buffer_to', 
'buffer_to', (['(observation, prev_action, prev_reward)'], {'device': 'self.device'}), '((observation, prev_action, prev_reward), device=self.device)\n', (4639, 4700), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((4867, 4937), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(observation, prev_action, prev_reward)'], {'device': 'self.device'}), '((observation, prev_action, prev_reward), device=self.device)\n', (4876, 4937), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((5030, 5069), 'rlpyt.distributions.gaussian.DistInfoStd', 'DistInfoStd', ([], {'mean': 'mean', 'log_std': 'log_std'}), '(mean=mean, log_std=log_std)\n', (5041, 5069), False, 'from rlpyt.distributions.gaussian import DistInfoStd, Gaussian\n'), ((5299, 5343), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(log_pi, dist_info)'], {'device': '"""cpu"""'}), "((log_pi, dist_info), device='cpu')\n", (5308, 5343), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((5512, 5582), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(observation, prev_action, prev_reward)'], {'device': 'self.device'}), '((observation, prev_action, prev_reward), device=self.device)\n', (5521, 5582), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((5793, 5863), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(observation, prev_action, prev_reward)'], {'device': 'self.device'}), '((observation, prev_action, prev_reward), device=self.device)\n', (5802, 5863), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((5956, 5995), 'rlpyt.distributions.gaussian.DistInfoStd', 'DistInfoStd', ([], {'mean': 'mean', 'log_std': 'log_std'}), '(mean=mean, log_std=log_std)\n', (5967, 5995), False, 'from rlpyt.distributions.gaussian import DistInfoStd, Gaussian\n'), ((6130, 6175), 'rlpyt.utils.buffer.buffer_to', 'buffer_to', (['(action, agent_info)'], {'device': '"""cpu"""'}), "((action, agent_info), device='cpu')\n", (6139, 6175), False, 'from rlpyt.utils.buffer import buffer_to\n'), ((6191, 6238), 
'rlpyt.agents.base.AgentStep', 'AgentStep', ([], {'action': 'action', 'agent_info': 'agent_info'}), '(action=action, agent_info=agent_info)\n', (6200, 6238), False, 'from rlpyt.agents.base import AgentStep, BaseAgent\n'), ((7102, 7168), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Agent at itr {itr}, sample std: {self.pretrain_std}"""'], {}), "(f'Agent at itr {itr}, sample std: {self.pretrain_std}')\n", (7112, 7168), False, 'from rlpyt.utils.logging import logger\n'), ((7219, 7274), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Agent at itr {itr}, sample std: learned."""'], {}), "(f'Agent at itr {itr}, sample std: learned.')\n", (7229, 7274), False, 'from rlpyt.utils.logging import logger\n'), ((2950, 2969), 'numpy.exp', 'np.exp', (['MIN_LOG_STD'], {}), '(MIN_LOG_STD)\n', (2956, 2969), True, 'import numpy as np\n'), ((2991, 3010), 'numpy.exp', 'np.exp', (['MAX_LOG_STD'], {}), '(MAX_LOG_STD)\n', (2997, 3010), True, 'import numpy as np\n')] |
import os
import logging
import pandas as pd
import numpy as np
import urllib.request
import requests
import re
import io
import us
import zipfile
import json
from datetime import datetime
from libs.datasets import NYTimesDataset
from libs.datasets import combined_datasets
from libs.datasets.timeseries import TimeseriesDataset
from libs.datasets.dataset_utils import AggregationLevel
from libs.datasets.common_fields import CommonFields
from pyseir.utils import get_run_artifact_path, RunArtifact, ewma_smoothing
from functools import lru_cache
from enum import Enum
log = logging.getLogger(__name__)

# Local on-disk cache directory for downloaded datasets
# (../pyseir_data relative to this file).
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "pyseir_data")

MIN_CUMULATIVE_DATAPOINTS_TO_CONVERT = (
    3  # We wait until the third datapoint to have 2 deltas to forecast
)
class HospitalizationCategory(Enum):
    """Which hospitalization series to look up: all beds or ICU only."""

    HOSPITALIZED = "hospitalized"
    ICU = "icu"

    def __str__(self):
        # Values are already strings; render the raw value directly.
        return self.value
class HospitalizationDataType(Enum):
    """Whether a hospitalization timeseries reports cumulative admissions or a current census."""

    CUMULATIVE_HOSPITALIZATIONS = "cumulative_hospitalizations"
    CURRENT_HOSPITALIZATIONS = "current_hospitalizations"
def hampel_filter__low_outliers_only(input_series, window_size=5, n_sigmas=2):
    """
    Replace points that fall more than n_sigmas median-absolute-deviations
    *below* their local window median with that median.  Only downward
    excursions are corrected, making this a very conservative cleaner for
    case/death counts that should not drop (e.g. Arkansas reporting dips).

    Parameters
    ----------
    input_series: array
        Series to filter (must support .copy() and slicing, e.g. np.ndarray).
    window_size: int
        Half-width of the neighborhood used for the local median.
    n_sigmas: float
        Rejection threshold in scaled-MAD units.

    Returns
    -------
    filtered: array
        Copy of the input with low outliers replaced by the local median.
    flagged: list(int)
        Indices that were replaced.
    """
    GAUSSIAN_SCALE = 1.4826  # converts MAD to a Gaussian-equivalent sigma
    filtered = input_series.copy()
    flagged = []
    # Endpoints are skipped: a full window must fit on both sides.
    for idx in range(window_size, len(input_series) - window_size):
        window = input_series[(idx - window_size) : (idx + window_size)]
        local_median = np.median(window)
        scaled_mad = GAUSSIAN_SCALE * np.median(np.abs(window - local_median))
        # Only flag points *below* the local median (low outliers).
        if (local_median - input_series[idx]) > n_sigmas * scaled_mad:
            filtered[idx] = local_median
            flagged.append(idx)
    return filtered, flagged
def load_zip_get_file(url, file, decoder="utf-8"):
    """
    Load a zipfile from a URL and extract a single file. Note that this is
    not ideal and may fail for large files since the files must fit in memory.

    Parameters
    ----------
    url: str
        URL to read from.
    file: str
        Filename to pull out of the zipfile.
    decoder: str
        Usually None for raw bytes or 'utf-8', or 'latin1'

    Returns
    -------
    file_buffer: io.BytesIO or io.StringIO
        The file buffer for the requested file if decoder is None else return
        a decoded StringIO.
    """
    # BUGFIX: the HTTP response was previously never closed, leaking the
    # connection.  Use a context manager to close it deterministically.
    with urllib.request.urlopen(url) as remotezip:
        zipinmemory = io.BytesIO(remotezip.read())
    zf = zipfile.ZipFile(zipinmemory)
    byte_string = zf.read(file)
    if decoder:
        # Decode to text and hand back a StringIO for pandas-style readers.
        return io.StringIO(byte_string.decode(decoder))
    return io.BytesIO(byte_string)
def cache_county_case_data():
    """
    Cache the NYT county-level covid case timeseries under DATA_DIR.
    """
    log.info("Downloading covid case data")
    # NYT dataset
    cache_path = os.path.join(DATA_DIR, "covid_case_timeseries.pkl")
    load_county_case_data().to_pickle(cache_path)
def cache_mobility_data():
    """
    Download the Descartes Labs county mobility series (m50 and m50_index)
    and pickle one fips-indexed frame per measure under DATA_DIR.

    Pulled from https://github.com/descarteslabs/DL-COVID-19
    """
    log.info("Downloading mobility data.")
    url = "https://raw.githubusercontent.com/descarteslabs/DL-COVID-19/master/DL-us-mobility-daterow.csv"

    column_types = {
        "country_code": str,
        "admin_level": int,
        "admin1": str,
        "admin2": str,
        "fips": str,
        "samples": int,
        "m50": float,
        "m50_index": float,
    }
    raw = pd.read_csv(filepath_or_buffer=url, parse_dates=["date"], dtype=column_types)
    # admin_level 2 rows are counties.
    counties = raw.query("admin_level == 2")

    for measure, outfile in (
        ("m50", "mobility_data__m50.pkl"),
        ("m50_index", "mobility_data__m50_index.pkl"),
    ):
        subset = counties[["fips", "date", measure]]
        # One row per county, with date and measure collapsed to lists.
        per_fips = subset.groupby("fips").agg(list).reset_index()
        per_fips[measure] = per_fips[measure].apply(np.array)
        per_fips.to_pickle(os.path.join(DATA_DIR, outfile))
def cache_public_implementations_data():
    """
    Download per-county public policy implementation dates and pickle them
    under DATA_DIR with normalized snake_case column names.

    Pulled from https://github.com/JieYingWu/COVID-19_US_County-level_Summaries
    """
    log.info("Downloading public implementations data")
    url = "https://raw.githubusercontent.com/JieYingWu/COVID-19_US_County-level_Summaries/master/raw_data/national/public_implementations_fips.csv"

    raw_text = requests.get(url, verify=True).content.decode("utf-8")
    # Dates arrive as e.g. "12-Mar"; append the year so pandas can parse them.
    raw_text = re.sub(r",(\d+)-(\w+)", r",\1-\2-2020", raw_text)  # NOTE: This assumes the year 2020

    date_cols = [
        "stay at home",
        ">50 gatherings",
        ">500 gatherings",
        "public schools",
        "restaurant dine-in",
        "entertainment/gym",
        "Federal guidelines",
        "foreign travel ban",
    ]
    frame = pd.read_csv(io.StringIO(raw_text), parse_dates=date_cols, dtype="str")
    frame = frame.drop(["Unnamed: 1", "Unnamed: 2"], axis=1)
    # Normalize headers: strip '>', snake_case, lower-case.
    frame.columns = [
        col.replace(">", "").replace(" ", "_").replace("/", "_").lower()
        for col in frame.columns
    ]
    # Zero-pad fips codes to the canonical 5 digits.
    frame.fips = frame.fips.apply(lambda value: value.zfill(5))
    frame.to_pickle(os.path.join(DATA_DIR, "public_implementations_data.pkl"))
@lru_cache(maxsize=32)
def load_county_case_data():
    """
    Return the NYT county-level case/death timeseries for the USA.

    Returns
    -------
    : pd.DataFrame
    """
    timeseries = NYTimesDataset.local().timeseries()
    return timeseries.get_data(AggregationLevel.COUNTY, country="USA")
@lru_cache(maxsize=1)
def load_state_case_data():
    """
    Return the NYT state-level case/death timeseries for the USA.

    Returns
    -------
    : pd.DataFrame
    """
    timeseries = NYTimesDataset.local().timeseries()
    return timeseries.get_data(AggregationLevel.STATE, country="USA")
@lru_cache(maxsize=32)
def load_county_metadata():
    """
    Return county level metadata such as age distributions, populations etc..

    Returns
    -------
    : pd.DataFrame
    """
    metadata_path = os.path.join(DATA_DIR, "county_metadata.json")
    metadata = pd.read_json(metadata_path, dtype={"fips": "str"})
    # Fix state names: derive the full state name from the 2-digit fips prefix.
    metadata.loc[:, "state"] = metadata["fips"].apply(
        lambda fips: us.states.lookup(fips[:2]).name
    )
    return metadata
@lru_cache(maxsize=32)
def load_county_metadata_by_state(state=None):
    """
    Generate a dataframe that contains county metadata aggregated at state
    level.

    Parameters
    ----------
    state: str or list(str)
        Name of state to load the metadata for.  If None, all states present
        in the county metadata are aggregated.

    Returns
    -------
    state_metadata: pd.DataFrame
        One row per state, indexed by state name.
    """
    # aggregate into state level metadata
    state_metadata = load_county_metadata()
    if state is not None:
        state = [state] if isinstance(state, str) else list(state)
    else:
        state = state_metadata["state"].unique()
    # Title-case the requested names so the filter is case-insensitive.
    state = [s.title() for s in state]
    state_metadata = state_metadata[state_metadata.state.isin(state)]
    density_measures = ["housing_density", "population_density"]
    # Convert per-capita densities into population-weighted totals so they can
    # be summed across counties; divided back out below.
    for col in density_measures:
        state_metadata.loc[:, col] = state_metadata[col] * state_metadata["total_population"]
    # Elementwise sum of each county's age histogram within a state.
    age_dist = state_metadata.groupby("state")["age_distribution"].apply(
        lambda l: np.stack(np.array(l)).sum(axis=0)
    )
    density_info = state_metadata.groupby("state").agg(
        {
            "population_density": lambda x: sum(x),
            "housing_density": lambda x: sum(x),
            "total_population": lambda x: sum(x),
            "fips": list,
        }
    )
    # Age bin edges are presumably identical across counties — first() keeps
    # one representative set per state.  TODO(review): confirm uniformity.
    age_bins = state_metadata[["state", "age_bin_edges"]].groupby("state").first()
    state_metadata = pd.concat([age_dist, density_info, age_bins], axis=1)
    # Convert the weighted totals back into population-weighted averages.
    for col in density_measures:
        state_metadata[col] /= state_metadata["total_population"]
    return state_metadata
@lru_cache(maxsize=32)
def load_ensemble_results(fips):
    """
    Retrieve ensemble results for a given state or county fips code.

    Parameters
    ----------
    fips: str
        State or county FIPS to load.

    Returns
    -------
    ensemble_results: dict or None
        Parsed JSON results, or None when no artifact exists on disk.
    """
    artifact_path = get_run_artifact_path(fips, RunArtifact.ENSEMBLE_RESULT)
    if not os.path.exists(artifact_path):
        return None
    with open(artifact_path) as f:
        return json.load(f)
@lru_cache(maxsize=32)
def load_county_metadata_by_fips(fips):
    """
    Generate a dictionary for a county which includes county metadata.

    Parameters
    ----------
    fips: str

    Returns
    -------
    county_metadata: dict
        Dictionary of metadata for the county. The keys are:
        ['state', 'county', 'total_population', 'population_density',
        'housing_density', 'age_distribution', 'age_bin_edges']
    """
    merged = load_county_metadata().set_index("fips").loc[fips].to_dict()
    # Cast numpy scalars to plain floats; leave strings and arrays alone.
    for key, value in merged.items():
        if np.isscalar(value) and not isinstance(value, str):
            merged[key] = float(value)
    return merged
@lru_cache(maxsize=32)
def get_all_fips_codes_for_a_state(state: str):
    """Returns a list of fips codes for a state

    Arguments:
        state {str} -- the full state name

    Returns:
        fips [list] -- a list of fips codes for a state
    """
    metadata = load_county_metadata()
    in_state = metadata["state"].str.lower() == state.lower()
    return metadata[in_state].fips
@lru_cache(maxsize=32)
def load_new_case_data_by_fips(
    fips, t0, include_testing_correction=False, testing_correction_smoothing_tau=5
):
    """
    Get data for new cases.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.
    include_testing_correction: bool
        If True, include a correction for new expanded or decreaseed test
        coverage.
    testing_correction_smoothing_tau: float
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    _county_case_data = load_county_case_data()
    county_case_data = _county_case_data[_county_case_data["fips"] == fips]
    # First day is dropped: daily counts are diffs of cumulatives.
    times_new = (county_case_data["date"] - t0).dt.days.iloc[1:]
    observed_new_cases = (
        county_case_data["cases"].values[1:] - county_case_data["cases"].values[:-1]
    )
    if include_testing_correction:
        # NOTE(review): load_new_test_data_by_fips is defined elsewhere in this
        # module (outside this view).
        df_new_tests = load_new_test_data_by_fips(
            fips, t0, smoothing_tau=testing_correction_smoothing_tau
        )
        # Left-join on day index; days without test data get zero correction.
        df_cases = pd.DataFrame({"times": times_new, "new_cases": observed_new_cases})
        df_cases = df_cases.merge(df_new_tests, how="left", on="times")
        df_cases["new_cases"] -= df_cases["expected_positives_from_test_increase"].fillna(0)
        observed_new_cases = df_cases["new_cases"].values
    observed_new_deaths = (
        county_case_data["deaths"].values[1:] - county_case_data["deaths"].values[:-1]
    )
    # Clip because there are sometimes negatives either due to data reporting or
    # corrections in case count. These are always tiny so we just make
    # downstream easier to work with by clipping.
    return times_new, observed_new_cases.clip(min=0), observed_new_deaths.clip(min=0)
def get_hospitalization_data():
    """
    Return a TimeseriesDataset restricted to rows carrying any hospitalization
    signal (current census or cumulative admissions).
    """
    data = combined_datasets.build_us_timeseries_with_all_fields().data
    # Since we're using this data for hospitalized data only, only returning
    # values with hospitalization data. I think as the use cases of this data source
    # expand, we may not want to drop. For context, as of 4/8 607/1821 rows contained
    # hospitalization data.
    current_mask = data[TimeseriesDataset.Fields.CURRENT_HOSPITALIZED].notnull()
    cumulative_mask = data[TimeseriesDataset.Fields.CUMULATIVE_HOSPITALIZED].notnull()
    return TimeseriesDataset(data[current_mask | cumulative_mask])
@lru_cache(maxsize=32)
def load_hospitalization_data(
    fips: str,
    t0: datetime,
    category: HospitalizationCategory = HospitalizationCategory.HOSPITALIZED,
):
    """
    Obtain hospitalization data. We clip because there are sometimes negatives
    either due to data reporting or corrections in case count. These are always
    tiny so we just make downstream easier to work with by clipping.

    Parameters
    ----------
    fips: str
        County fips to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory

    Returns
    -------
    relative_days: array(float)
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int)
        Array of new cases observed each day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    hospitalization_data = get_hospitalization_data().get_data(
        AggregationLevel.COUNTY, country="USA", fips=fips
    )

    if len(hospitalization_data) == 0:
        return None, None, None

    # Prefer the current-census series whenever it has any signal.
    if (hospitalization_data[f"current_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"current_{category}"].notnull()
        ]
        relative_days = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        return (
            relative_days,
            hospitalization_data[f"current_{category}"].values.clip(min=0),
            HospitalizationDataType.CURRENT_HOSPITALIZATIONS,
        )
    elif (hospitalization_data[f"cumulative_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"cumulative_{category}"].notnull()
        ]
        relative_days = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        cumulative = hospitalization_data[f"cumulative_{category}"].values.clip(min=0)
        # Some minor glitches for a few states..
        # NOTE(review): this single forward pass only corrects one-step
        # decreases; a drop spanning multiple days leaves earlier points
        # uncorrected — confirm whether full monotonic enforcement is intended.
        for i, val in enumerate(cumulative[1:]):
            if cumulative[i] > cumulative[i + 1]:
                cumulative[i] = cumulative[i + 1]
        return relative_days, cumulative, HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
    else:
        return None, None, None
@lru_cache(maxsize=32)
def load_hospitalization_data_by_state(
    state: str,
    t0: datetime,
    category: HospitalizationCategory = HospitalizationCategory.HOSPITALIZED,
):
    """
    Obtain hospitalization data. We clip because there are sometimes negatives
    either due to data reporting or corrections in case count. These are always
    tiny so we just make downstream easier to work with by clipping.

    Parameters
    ----------
    state: str
        State to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory
        'icu' for just ICU or 'hospitalized' for all ICU + Acute.

    Returns
    -------
    times: array(float) or NoneType
        List of float days since t0 for the hospitalization data.
    observed_hospitalizations: array(int) or NoneType
        Array of new cases observed each day.
    type: HospitalizationDataType
        Specifies cumulative or current hospitalizations.
    """
    abbr = us.states.lookup(state).abbr
    hospitalization_data = combined_datasets.build_us_timeseries_with_all_fields().get_data(
        AggregationLevel.STATE, country="USA", state=abbr
    )

    if len(hospitalization_data) == 0:
        return None, None, None

    # Prefer the current-census series whenever it has any signal.
    if (hospitalization_data[f"current_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"current_{category}"].notnull()
        ]
        times_new = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        return (
            times_new,
            hospitalization_data[f"current_{category}"].values.clip(min=0),
            HospitalizationDataType.CURRENT_HOSPITALIZATIONS,
        )
    elif (hospitalization_data[f"cumulative_{category}"] > 0).any():
        hospitalization_data = hospitalization_data[
            hospitalization_data[f"cumulative_{category}"].notnull()
        ]
        times_new = (hospitalization_data["date"].dt.date - t0.date()).dt.days.values
        cumulative = hospitalization_data[f"cumulative_{category}"].values.clip(min=0)
        # Some minor glitches for a few states..
        for i, val in enumerate(cumulative[1:]):
            if cumulative[i] > cumulative[i + 1]:
                cumulative[i] = cumulative[i + 1]
        # BUGFIX: previously the raw (uncorrected) clipped values were
        # returned here, silently discarding the glitch-corrected `cumulative`
        # array computed above.  The county-level twin of this function
        # (load_hospitalization_data) returns the corrected array; do the same.
        return (
            times_new,
            cumulative,
            HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS,
        )
    else:
        return None, None, None
def get_current_hospitalized_for_state(state: str, t0: datetime, category: HospitalizationCategory):
    """
    Return the current estimate for the number of people in the given category for a given US state.

    Parameters
    ----------
    state: str
        US state to lookup.
    t0: datetime
        Datetime to offset by.
    category: HospitalizationCategory
        'icu' for just ICU or 'hospitalized' for all ICU + Acute.

    Returns
    -------
    time: float
        Days since t0 for the hospitalization data.
    current estimate: float
        The most recent estimate for the current occupied in the requested category.
    """
    state_abbr = us.states.lookup(state).abbr
    state_timeseries = combined_datasets.build_us_timeseries_with_all_fields().get_data(
        AggregationLevel.STATE, country="USA", state=state_abbr
    )
    return _get_current_hospitalized(state_timeseries, t0, category)
def get_current_hospitalized_for_county(fips: str, t0: datetime, category: HospitalizationCategory):
    """County analogue of get_current_hospitalized_for_state; see _get_current_hospitalized."""
    county_timeseries = get_hospitalization_data().get_data(
        AggregationLevel.COUNTY, country="USA", fips=fips
    )
    return _get_current_hospitalized(county_timeseries, t0, category)
def _get_current_hospitalized(
    df: pd.DataFrame,
    t0: datetime,
    category: HospitalizationCategory,
    min_cumulative_datapoints_to_convert: int = MIN_CUMULATIVE_DATAPOINTS_TO_CONVERT,
):
    """
    Given a DataFrame that contains values icu or hospitalization data
    for a single county/state, this function returns the latest value.
    When only cummulative data is available,
    a small model is used to estimate the latest current value from the cummulative.
    This conversion only occurs if enough data is available.

    Parameters:
    @param df - dataframe containing either current_ or cumulative_ values for a single county or state
    @param t0 - beggining of observation period
    @param category - the type of current data to be returned
    @param min_cumulative_datapoints_to_convert - the required number of cummulative data points before conversion to current will be done.

    Returns:
    (times_new_latest, current_latest) - the date and value of the latest data for a given category.
    """
    if len(df) == 0:
        return None, None
    # If data available in current_{} column, then return latest not-null value
    if (df[f"current_{category}"] > 0).any():
        df = df[df[f"current_{category}"].notnull()]
        # Latest value of the clipped current-census series.
        df_latest = df[f"current_{category}"].values.clip(min=0)[-1]
        times_new = (df["date"].dt.date - t0.date()).dt.days.values
        times_new_latest = times_new[-1]
        return times_new_latest, df_latest  # Return current since available
    # If data is available in cumulative, try to convert to current (not just daily)
    elif (df[f"cumulative_{category}"] > 0).any():
        log.warning("Attempting to convert cummulative data to current.")
        # Remove Null & Enforce Monotonically Increasing Cumulatives
        # NOTE(review): this single forward pass only corrects one-step
        # decreases; a multi-day drop leaves earlier points uncorrected.
        df = df[df[f"cumulative_{category}"].notnull()]
        cumulative = df[f"cumulative_{category}"].values.clip(min=0)
        for i, val in enumerate(cumulative[1:]):
            if cumulative[i] > cumulative[i + 1]:
                cumulative[i] = cumulative[i + 1]
        # Estimate Current from Derived Dailies
        if len(cumulative) >= min_cumulative_datapoints_to_convert:
            current_latest = estimate_current_from_cumulative(cumulative, category)
            times_new = (df["date"].dt.date - t0.date()).dt.days.values
            times_new_latest = times_new[-1]
            return times_new_latest, current_latest  # Return current estimate from cumulative
        else:
            return None, None  # No current, not enough cumulative
    else:
        return None, None  # No current nor cumulative
def estimate_current_from_cumulative(cumulative, category):
    """
    Estimate the latest *current* census from a cumulative admissions series.

    We assume reporting of cumulative admissions starts at a time unrelated to
    any abnormal admission pattern, so we extrapolate backwards to estimate a
    non-zero census at the start of reporting.  This greatly shortens the
    settling time of the simple occupancy model below.

    The model x_{i+1} = x_i + new_i - x_i / avg_length_of_stay has a steady
    state of new * avg_length_of_stay under constant admissions.  We seed it
    with that steady state computed from the average of the first
    `avg_length_of_stay` daily deltas, then step it forward through the data.
    Until enough data has accumulated, estimates remain sensitive to the first
    reported values.

    Parameters
    ----------
    cumulative: array
        Array like sequence of daily cumulative values (expects, but doesn't enforce monotonic)
    category: str
        Either 'hospitalization' or 'icu

    Returns
    -------
    current_estimate: float
        Latest estimate of currently occupied beds.
    """
    average_length_of_stay = get_average_dwell_time(category)

    # New admissions per day are the day-over-day cumulative differences.
    daily_admits = np.diff(cumulative)

    # Seed the model: average the first ~dwell-time days of admissions and
    # assume that rate also held before reporting began, giving the
    # steady-state census at t0.  Without this, the output would be biased low
    # for roughly two dwell times (seen in Utah ICU data).
    max_window = int(np.floor(average_length_of_stay))
    initial_daily_estimate = daily_admits[:max_window].mean()
    seed_census = initial_daily_estimate * average_length_of_stay

    # Step the occupancy model through the observed admissions.
    census_by_day = []
    previous = seed_census
    for new_admissions in daily_admits:
        previous = previous + new_admissions - previous / average_length_of_stay
        census_by_day.append(previous)
    return census_by_day[-1]
def get_average_dwell_time(category):
    """Return the average length of stay (in days) for a hospitalization category.

    :parameter
    category: HospitalizationCategory
        Whether we are asking for 'hospital' or 'icu'
    :return:
    average_length_of_stay: float
        the average length of stay for a given category
    """
    # Imported locally to avoid a circular import. This is required to convert
    # cumulative hosps to current hosps. We also just use a dummy fips and t_list.
    from pyseir.parameters.parameter_ensemble_generator import ParameterEnsembleGenerator

    # Surprisingly long load time if initial call (14sec) but then fast (22ms)
    seir_params = ParameterEnsembleGenerator(
        fips="06",
        t_list=[],
        N_samples=250,  # We want close to the ensemble mean.
        # Eventually replace with constants derived from the mean characteristic.
        # Then we can revert back to 1.
    ).get_average_seir_parameters()
    # TODO: This value is temporarily in this limited scope.
    # Will be added into the params once I decide on some refactoring.
    seir_params["hospitalization_length_of_stay_icu_avg"] = 8.6

    if category != HospitalizationCategory.HOSPITALIZED:
        # This value is a weighted average of icu w & w/o ventilator.
        # It is deterministic. Warning: This param was added in this very local scope.
        return seir_params["hospitalization_length_of_stay_icu_avg"]

    rate_general = seir_params["hospitalization_rate_general"]
    rate_icu = seir_params["hospitalization_rate_icu"]
    vent_fraction = seir_params["fraction_icu_requiring_ventilator"]
    # Weight each stay length by the rate of patients experiencing that outcome,
    # then normalize by the total hospitalization rate.
    weighted_stay_total = (
        rate_general * seir_params["hospitalization_length_of_stay_general"]
        + rate_icu
        * (1 - vent_fraction)
        * seir_params["hospitalization_length_of_stay_icu"]
        + rate_icu
        * vent_fraction
        * seir_params["hospitalization_length_of_stay_icu_and_ventilator"]
    )
    return weighted_stay_total / (rate_general + rate_icu)
@lru_cache(maxsize=32)
def load_new_case_data_by_state(
    state, t0, include_testing_correction=False, testing_correction_smoothing_tau=5
):
    """
    Get data for new cases at state level.

    Parameters
    ----------
    state: str
        State full name.
    t0: datetime
        Datetime to offset by.
    include_testing_correction: bool
        If True, include a correction for new expanded or decreaseed test
        coverage.
    testing_correction_smoothing_tau: float
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.

    Returns
    -------
    times: array(float)
        List of float days since t0 for the case and death counts below
    observed_new_cases: array(int)
        Array of new cases observed each day.
    observed_new_deaths: array(int)
        Array of new deaths observed each day.
    """
    _state_case_data = load_state_case_data()
    state_case_data = _state_case_data[_state_case_data["state"] == us.states.lookup(state).abbr]
    # Drop the first day: new counts are differences of cumulatives, so the
    # first cumulative value has no predecessor.
    times_new = (state_case_data["date"] - t0).dt.days.iloc[1:]
    observed_new_cases = state_case_data["cases"].values[1:] - state_case_data["cases"].values[:-1]
    if include_testing_correction:
        df_new_tests = load_new_test_data_by_fips(
            us.states.lookup(state).fips, t0, smoothing_tau=testing_correction_smoothing_tau
        )
        df_cases = pd.DataFrame({"times": times_new, "new_cases": observed_new_cases})
        df_cases = df_cases.merge(df_new_tests, how="left", on="times")
        # Subtract cases expected purely from increased test volume.
        df_cases["new_cases"] -= df_cases["expected_positives_from_test_increase"].fillna(0)
        observed_new_cases = df_cases["new_cases"].values
    observed_new_deaths = (
        state_case_data["deaths"].values[1:] - state_case_data["deaths"].values[:-1]
    )
    # Remove low outliers (e.g. reporting gaps) flagged by the Hampel filter.
    _, filter_idx = hampel_filter__low_outliers_only(observed_new_cases, window_size=5, n_sigmas=2)
    # Hoist loop invariants: a set gives O(1) membership tests (the original
    # rebuilt `list(filter_idx)` on every iteration), and the times sequence is
    # materialized once instead of once per kept index.
    outlier_indices = set(filter_idx)
    keep_idx = np.array([i for i in range(len(times_new)) if i not in outlier_indices])
    times_list = list(times_new)
    times_new = [int(times_list[idx]) for idx in keep_idx]
    return (
        times_new,
        np.array(observed_new_cases[keep_idx]).clip(min=0),
        observed_new_deaths.clip(min=0)[keep_idx],
    )
@lru_cache(maxsize=32)
def load_new_test_data_by_fips(fips, t0, smoothing_tau=5, correction_threshold=5):
    """
    Return a timeseries of new tests for a geography. Note that due to reporting
    discrepancies county to county, and state-to-state, these often do not go
    back as far as case data.

    Parameters
    ----------
    fips: str
        State or county fips code
    t0: datetime
        Reference datetime to use.
    smoothing_tau: int
        expected_positives_from_test_increase is smoothed based on an
        exponentially weighted moving average of decay factor specified here.
    correction_threshold: int
        Do not apply a correction if the incident cases per day is lower than
        this value. There can be instability if case counts are very low.

    Returns
    -------
    df: pd.DataFrame
        DataFrame containing columns:
        - 'date',
        - 'new_tests': Number of total tests performed that day
        - 'increase_in_new_tests': Increase in tests performed that day vs
          previous day
        - 'positivity_rate':
          Test positivity rate
        - 'expected_positives_from_test_increase':
          Number of positive detections expected just from increased test
          capacity.
        - times: days since t0 for this observation.
    """
    us_timeseries = combined_datasets.build_us_timeseries_with_all_fields()
    # A two-character fips is a state; longer codes identify counties.
    if len(fips) == 2:
        df = us_timeseries.get_data(AggregationLevel.STATE, state=us.states.lookup(fips).abbr)
    else:
        df = us_timeseries.get_data(AggregationLevel.COUNTY, fips=fips)
    # Keep only rows where both test counts are present and at least one test occurred.
    df = df[
        (df[CommonFields.POSITIVE_TESTS].notnull())
        & (df[CommonFields.NEGATIVE_TESTS].notnull())
        & ((df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS]) > 0)
    ]
    df["positivity_rate"] = df[CommonFields.POSITIVE_TESTS] / (
        df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS]
    )
    df["new_positive"] = np.append([0], np.diff(df[CommonFields.POSITIVE_TESTS]))
    # The first derivative gets us new instead of cumulative tests while the second derivative gives us the change in new test rate.
    df["new_tests"] = np.append(
        [0], np.diff(df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS])
    )
    df["increase_in_new_tests"] = np.append([0], np.diff(df["new_tests"]))
    # dPositive / dTotal = 0.65 * positivity_rate was empirically determined by looking at
    # the increase in positives day-over-day relative to the increase in total tests across all 50 states.
    df["expected_positives_from_test_increase"] = (
        df["increase_in_new_tests"] * 0.65 * df["positivity_rate"]
    )
    df = df[
        [
            "date",
            "new_tests",
            "increase_in_new_tests",
            "positivity_rate",
            "expected_positives_from_test_increase",
            "new_positive",
        ]
    ]
    df = df[df.increase_in_new_tests.notnull() & df.positivity_rate.notnull()]
    df["expected_positives_from_test_increase"] = ewma_smoothing(
        df["expected_positives_from_test_increase"], smoothing_tau
    )
    # Suppress the correction when incident cases are below correction_threshold
    # (previously hard-coded to 5, ignoring the parameter). Use .loc instead of
    # chained indexing, which can silently assign to a temporary copy.
    df.loc[df["new_positive"] < correction_threshold, "expected_positives_from_test_increase"] = 0
    df["times"] = [
        int((date - t0).days) for date in pd.to_datetime(df["date"].values).to_pydatetime()
    ]
    return df
def load_cdc_hospitalization_data():
    """
    Return age specific hospitalization rate.
    Source: https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm#T1_down

    Table has columns: lower_age, upper_age, mean_age, lower_{outcome type},
    upper_{outcome type}, and mean_{outcome type}.
    Outcome types and their meanings:
    - hosp: percentage of all hospitalizations among cases
    - icu: percentage of icu admission among cases
    - hgen: percentage of general hospitalization (all hospitalizations - icu)
    - fatality: case fatality rate
    """
    csv_path = os.path.join(DATA_DIR, "cdc_hospitalization_data.csv")
    return pd.read_csv(csv_path)
@lru_cache(maxsize=1)
def load_mobility_data_m50():
    """
    Return mobility data without normalization

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "mobility_data__m50.pkl")
    return pd.read_pickle(pickle_path)
@lru_cache(maxsize=1)
def load_mobility_data_m50_index():
    """
    Return mobility data with normalization: per
    https://github.com/descarteslabs/DL-COVID-19 normal m50 is defined during
    2020-02-17 to 2020-03-07.

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "mobility_data__m50_index.pkl")
    mobility_df = pd.read_pickle(pickle_path)
    return mobility_df.set_index("fips")
@lru_cache(maxsize=1)
def load_public_implementations_data():
    """
    Return public implementations data

    Returns
    -------
    : pd.DataFrame
    """
    pickle_path = os.path.join(DATA_DIR, "public_implementations_data.pkl")
    implementations_df = pd.read_pickle(pickle_path)
    return implementations_df.set_index("fips")
def load_contact_matrix_data_by_fips(fips):
    """
    Load contact matrix for given fips.
    Source: polymod survey in UK
    (https://journals.plos.org/plosmedicine/article?id=10.1371/journal.pmed.0050074).
    Contact matrix at each county has been adjusted by county demographics.

    Parameters
    ----------
    fips: str
        State or county FIPS code.

    Returns
    -------
    : dict
        With fips as keys and values:
        - 'contact_matrix': list(list)
          number of contacts made by age group in rows with age groups in
          columns
        - 'age_bin_edges': list
          lower age limits to define age groups
        - 'age_distribution': list
          population size of each age group
    """
    fips = [fips] if isinstance(fips, str) else list(fips)
    # All requested fips are assumed to share the state of the first entry
    # (the first two characters of a fips code identify the state).
    state_abbr = us.states.lookup(fips[0][:2]).abbr
    path = os.path.join(DATA_DIR, "contact_matrix", "contact_matrix_fips_%s.json" % state_abbr)
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(path).read()` leaked the handle), and let json.load
    # parse the stream directly.
    with open(path) as f:
        contact_matrix_data = json.load(f)
    return {s: contact_matrix_data[s] for s in fips}
def load_whitelist():
    """
    Load the whitelist result.

    Returns
    -------
    whitelist: pd.DataFrame
        DataFrame containing a whitelist of product features for counties.
    """
    # The whitelist artifact path is not fips-specific, so a dummy fips is used.
    path = get_run_artifact_path(
        fips="06", artifact=RunArtifact.WHITELIST_RESULT  # dummy since not used for whitelist.
    )
    # Keep fips codes as strings to preserve leading zeros.
    return pd.read_json(path, dtype={"fips": str})
def cache_all_data():
    """
    Download all datasets locally.
    """
    # Fetch each dataset in turn.
    for cache_fn in (
        cache_county_case_data,
        cache_mobility_data,
        cache_public_implementations_data,
    ):
        cache_fn()
def get_compartment_value_on_date(fips, compartment, date, ensemble_results=None):
    """
    Return the value of compartment at a specified date.

    Parameters
    ----------
    fips: str
        State or County fips.
    compartment: str
        Name of the compartment to retrieve.
    date: datetime
        Date to retrieve values for.
    ensemble_results: NoneType or dict
        Pass in the pre-loaded simulation data to save time, else load it.

    Returns
    -------
    value: float
        Value of compartment on a given date.
    """
    if ensemble_results is None:
        ensemble_results = load_ensemble_results(fips)
    # Circular import avoidance
    from pyseir.inference.fit_results import load_inference_result

    fit_result = load_inference_result(fips)
    simulation_start_date = datetime.fromisoformat(fit_result["t0_date"])
    # Days elapsed since simulation start index into the ci_50 timeseries.
    date_idx = int((date - simulation_start_date).days)
    inferred = ensemble_results["suppression_policy__inferred"]
    return inferred[compartment]["ci_50"][date_idx]
if __name__ == "__main__":
    # Running this module as a script downloads and caches all datasets locally.
    cache_all_data()
| [
"logging.getLogger",
"zipfile.ZipFile",
"pandas.read_csv",
"io.BytesIO",
"numpy.array",
"libs.datasets.NYTimesDataset.local",
"pandas.to_datetime",
"os.path.exists",
"numpy.isscalar",
"numpy.diff",
"pandas.DataFrame",
"io.StringIO",
"us.states.lookup",
"pandas.read_json",
"numpy.abs",
... | [((576, 603), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (593, 603), False, 'import logging\n'), ((5749, 5770), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (5758, 5770), False, 'from functools import lru_cache\n'), ((6050, 6070), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (6059, 6070), False, 'from functools import lru_cache\n'), ((6347, 6368), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (6356, 6368), False, 'from functools import lru_cache\n'), ((6831, 6852), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (6840, 6852), False, 'from functools import lru_cache\n'), ((8391, 8412), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (8400, 8412), False, 'from functools import lru_cache\n'), ((8911, 8932), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (8920, 8932), False, 'from functools import lru_cache\n'), ((9688, 9709), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (9697, 9709), False, 'from functools import lru_cache\n'), ((10063, 10084), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (10072, 10084), False, 'from functools import lru_cache\n'), ((12824, 12845), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (12833, 12845), False, 'from functools import lru_cache\n'), ((15091, 15112), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (15100, 15112), False, 'from functools import lru_cache\n'), ((26460, 26481), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (26469, 26481), False, 'from functools import lru_cache\n'), ((28720, 28741), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (28729, 28741), False, 'from functools 
import lru_cache\n'), ((32723, 32743), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (32732, 32743), False, 'from functools import lru_cache\n'), ((32960, 32980), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (32969, 32980), False, 'from functools import lru_cache\n'), ((33337, 33357), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (33346, 33357), False, 'from functools import lru_cache\n'), ((2868, 2896), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipinmemory'], {}), '(zipinmemory)\n', (2883, 2896), False, 'import zipfile\n'), ((3872, 3951), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'url', 'parse_dates': "['date']", 'dtype': 'dtypes_mapping'}), "(filepath_or_buffer=url, parse_dates=['date'], dtype=dtypes_mapping)\n", (3883, 3951), True, 'import pandas as pd\n'), ((5038, 5085), 're.sub', 're.sub', (['""",(\\\\d+)-(\\\\w+)"""', '""",\\\\1-\\\\2-2020"""', 'data'], {}), "(',(\\\\d+)-(\\\\w+)', ',\\\\1-\\\\2-2020', data)\n", (5044, 5085), False, 'import re\n'), ((8207, 8260), 'pandas.concat', 'pd.concat', (['[age_dist, density_info, age_bins]'], {'axis': '(1)'}), '([age_dist, density_info, age_bins], axis=1)\n', (8216, 8260), True, 'import pandas as pd\n'), ((8688, 8744), 'pyseir.utils.get_run_artifact_path', 'get_run_artifact_path', (['fips', 'RunArtifact.ENSEMBLE_RESULT'], {}), '(fips, RunArtifact.ENSEMBLE_RESULT)\n', (8709, 8744), False, 'from pyseir.utils import get_run_artifact_path, RunArtifact, ewma_smoothing\n'), ((8752, 8783), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (8766, 8783), False, 'import os\n'), ((12749, 12820), 'libs.datasets.timeseries.TimeseriesDataset', 'TimeseriesDataset', (['data[has_current_hospital | has_cumulative_hospital]'], {}), '(data[has_current_hospital | has_cumulative_hospital])\n', (12766, 12820), False, 'from libs.datasets.timeseries import TimeseriesDataset\n'), ((23241, 
23260), 'numpy.diff', 'np.diff', (['cumulative'], {}), '(cumulative)\n', (23248, 23260), True, 'import numpy as np\n'), ((30062, 30117), 'libs.datasets.combined_datasets.build_us_timeseries_with_all_fields', 'combined_datasets.build_us_timeseries_with_all_fields', ([], {}), '()\n', (30115, 30117), False, 'from libs.datasets import combined_datasets\n'), ((31778, 31852), 'pyseir.utils.ewma_smoothing', 'ewma_smoothing', (["df['expected_positives_from_test_increase']", 'smoothing_tau'], {}), "(df['expected_positives_from_test_increase'], smoothing_tau)\n", (31792, 31852), False, 'from pyseir.utils import get_run_artifact_path, RunArtifact, ewma_smoothing\n'), ((34505, 34593), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""contact_matrix"""', "('contact_matrix_fips_%s.json' % state_abbr)"], {}), "(DATA_DIR, 'contact_matrix', 'contact_matrix_fips_%s.json' %\n state_abbr)\n", (34517, 34593), False, 'import os\n'), ((34909, 34980), 'pyseir.utils.get_run_artifact_path', 'get_run_artifact_path', ([], {'fips': '"""06"""', 'artifact': 'RunArtifact.WHITELIST_RESULT'}), "(fips='06', artifact=RunArtifact.WHITELIST_RESULT)\n", (34930, 34980), False, 'from pyseir.utils import get_run_artifact_path, RunArtifact, ewma_smoothing\n'), ((35045, 35084), 'pandas.read_json', 'pd.read_json', (['path'], {'dtype': "{'fips': str}"}), "(path, dtype={'fips': str})\n", (35057, 35084), True, 'import pandas as pd\n'), ((645, 670), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (660, 670), False, 'import os\n'), ((1864, 1920), 'numpy.median', 'np.median', (['input_series[i - window_size:i + window_size]'], {}), '(input_series[i - window_size:i + window_size])\n', (1873, 1920), True, 'import numpy as np\n'), ((3005, 3024), 'io.StringIO', 'io.StringIO', (['string'], {}), '(string)\n', (3016, 3024), False, 'import io\n'), ((3050, 3073), 'io.BytesIO', 'io.BytesIO', (['byte_string'], {}), '(byte_string)\n', (3060, 3073), False, 'import io\n'), ((3326, 3377), 'os.path.join', 
'os.path.join', (['DATA_DIR', '"""covid_case_timeseries.pkl"""'], {}), "(DATA_DIR, 'covid_case_timeseries.pkl')\n", (3338, 3377), False, 'import os\n'), ((4476, 4524), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""mobility_data__m50.pkl"""'], {}), "(DATA_DIR, 'mobility_data__m50.pkl')\n", (4488, 4524), False, 'import os\n'), ((4561, 4615), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""mobility_data__m50_index.pkl"""'], {}), "(DATA_DIR, 'mobility_data__m50_index.pkl')\n", (4573, 4615), False, 'import os\n'), ((5687, 5744), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""public_implementations_data.pkl"""'], {}), "(DATA_DIR, 'public_implementations_data.pkl')\n", (5699, 5744), False, 'import os\n'), ((6581, 6627), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""county_metadata.json"""'], {}), "(DATA_DIR, 'county_metadata.json')\n", (6593, 6627), False, 'import os\n'), ((11469, 11536), 'pandas.DataFrame', 'pd.DataFrame', (["{'times': times_new, 'new_cases': observed_new_cases}"], {}), "({'times': times_new, 'new_cases': observed_new_cases})\n", (11481, 11536), True, 'import pandas as pd\n'), ((12216, 12271), 'libs.datasets.combined_datasets.build_us_timeseries_with_all_fields', 'combined_datasets.build_us_timeseries_with_all_fields', ([], {}), '()\n', (12269, 12271), False, 'from libs.datasets import combined_datasets\n'), ((16065, 16088), 'us.states.lookup', 'us.states.lookup', (['state'], {}), '(state)\n', (16081, 16088), False, 'import us\n'), ((18262, 18285), 'us.states.lookup', 'us.states.lookup', (['state'], {}), '(state)\n', (18278, 18285), False, 'import us\n'), ((23765, 23797), 'numpy.floor', 'np.floor', (['average_length_of_stay'], {}), '(average_length_of_stay)\n', (23773, 23797), True, 'import numpy as np\n'), ((27903, 27970), 'pandas.DataFrame', 'pd.DataFrame', (["{'times': times_new, 'new_cases': observed_new_cases}"], {}), "({'times': times_new, 'new_cases': observed_new_cases})\n", (27915, 27970), True, 'import pandas as pd\n'), ((30713, 
30753), 'numpy.diff', 'np.diff', (['df[CommonFields.POSITIVE_TESTS]'], {}), '(df[CommonFields.POSITIVE_TESTS])\n', (30720, 30753), True, 'import numpy as np\n'), ((30935, 31009), 'numpy.diff', 'np.diff', (['(df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS])'], {}), '(df[CommonFields.POSITIVE_TESTS] + df[CommonFields.NEGATIVE_TESTS])\n', (30942, 31009), True, 'import numpy as np\n'), ((31065, 31089), 'numpy.diff', 'np.diff', (["df['new_tests']"], {}), "(df['new_tests'])\n", (31072, 31089), True, 'import numpy as np\n'), ((32664, 32718), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""cdc_hospitalization_data.csv"""'], {}), "(DATA_DIR, 'cdc_hospitalization_data.csv')\n", (32676, 32718), False, 'import os\n'), ((32907, 32955), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""mobility_data__m50.pkl"""'], {}), "(DATA_DIR, 'mobility_data__m50.pkl')\n", (32919, 32955), False, 'import os\n'), ((34459, 34488), 'us.states.lookup', 'us.states.lookup', (['fips[0][:2]'], {}), '(fips[0][:2])\n', (34475, 34488), False, 'import us\n'), ((4316, 4327), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4324, 4327), True, 'import numpy as np\n'), ((4428, 4439), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4436, 4439), True, 'import numpy as np\n'), ((8852, 8864), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8861, 8864), False, 'import json\n'), ((9545, 9563), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (9556, 9563), True, 'import numpy as np\n'), ((16121, 16176), 'libs.datasets.combined_datasets.build_us_timeseries_with_all_fields', 'combined_datasets.build_us_timeseries_with_all_fields', ([], {}), '()\n', (16174, 16176), False, 'from libs.datasets import combined_datasets\n'), ((18300, 18355), 'libs.datasets.combined_datasets.build_us_timeseries_with_all_fields', 'combined_datasets.build_us_timeseries_with_all_fields', ([], {}), '()\n', (18353, 18355), False, 'from libs.datasets import combined_datasets\n'), ((25049, 25112), 
'pyseir.parameters.parameter_ensemble_generator.ParameterEnsembleGenerator', 'ParameterEnsembleGenerator', ([], {'fips': '"""06"""', 't_list': '[]', 'N_samples': '(250)'}), "(fips='06', t_list=[], N_samples=250)\n", (25075, 25112), False, 'from pyseir.parameters.parameter_ensemble_generator import ParameterEnsembleGenerator\n'), ((36126, 36153), 'pyseir.inference.fit_results.load_inference_result', 'load_inference_result', (['fips'], {}), '(fips)\n', (36147, 36153), False, 'from pyseir.inference.fit_results import load_inference_result\n'), ((1954, 2012), 'numpy.abs', 'np.abs', (['(input_series[i - window_size:i + window_size] - x0)'], {}), '(input_series[i - window_size:i + window_size] - x0)\n', (1960, 2012), True, 'import numpy as np\n'), ((4972, 5002), 'requests.get', 'requests.get', (['url'], {'verify': '(True)'}), '(url, verify=True)\n', (4984, 5002), False, 'import requests\n'), ((5388, 5405), 'io.StringIO', 'io.StringIO', (['data'], {}), '(data)\n', (5399, 5405), False, 'import io\n'), ((6766, 6789), 'us.states.lookup', 'us.states.lookup', (['x[:2]'], {}), '(x[:2])\n', (6782, 6789), False, 'import us\n'), ((27500, 27523), 'us.states.lookup', 'us.states.lookup', (['state'], {}), '(state)\n', (27516, 27523), False, 'import us\n'), ((27793, 27816), 'us.states.lookup', 'us.states.lookup', (['state'], {}), '(state)\n', (27809, 27816), False, 'import us\n'), ((28608, 28646), 'numpy.array', 'np.array', (['observed_new_cases[keep_idx]'], {}), '(observed_new_cases[keep_idx])\n', (28616, 28646), True, 'import numpy as np\n'), ((33260, 33314), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""mobility_data__m50_index.pkl"""'], {}), "(DATA_DIR, 'mobility_data__m50_index.pkl')\n", (33272, 33314), False, 'import os\n'), ((33523, 33580), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""public_implementations_data.pkl"""'], {}), "(DATA_DIR, 'public_implementations_data.pkl')\n", (33535, 33580), False, 'import os\n'), ((5928, 5950), 'libs.datasets.NYTimesDataset.local', 
'NYTimesDataset.local', ([], {}), '()\n', (5948, 5950), False, 'from libs.datasets import NYTimesDataset\n'), ((6227, 6249), 'libs.datasets.NYTimesDataset.local', 'NYTimesDataset.local', ([], {}), '()\n', (6247, 6249), False, 'from libs.datasets import NYTimesDataset\n'), ((30208, 30230), 'us.states.lookup', 'us.states.lookup', (['fips'], {}), '(fips)\n', (30224, 30230), False, 'import us\n'), ((32006, 32039), 'pandas.to_datetime', 'pd.to_datetime', (["df['date'].values"], {}), "(df['date'].values)\n", (32020, 32039), True, 'import pandas as pd\n'), ((7813, 7824), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (7821, 7824), True, 'import numpy as np\n')] |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import funnel_transformer
class SingleLayerModel(tf.keras.Model):
  """Minimal Keras model that forwards its input through one wrapped layer."""

  def __init__(self, layer):
    super().__init__()
    self.layer = layer

  def call(self, inputs):
    outputs = self.layer(inputs)
    return outputs
class FunnelTransformerEncoderTest(parameterized.TestCase, tf.test.TestCase):
def tearDown(self):
super(FunnelTransformerEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
("mix_truncated_avg_rezero", "mixed_float16", tf.float16, "truncated_avg",
"ReZeroTransformer"), ("float32_truncated_avg_rezero", "float32",
tf.float32, "truncated_avg", "ReZeroTransformer"),
("mix_truncated_avg", "mixed_float16", tf.float16, "truncated_avg",
"TransformerEncoderBlock"),
("float32_truncated_avg", "float32", tf.float32, "truncated_avg",
"TransformerEncoderBlock"), ("mix_max", "mixed_float16", tf.float16,
"max", "TransformerEncoderBlock"),
("float32_max", "float32", tf.float32, "max", "TransformerEncoderBlock"),
("mix_avg", "mixed_float16", tf.float16, "avg",
"TransformerEncoderBlock"),
("float32_avg", "float32", tf.float32, "avg", "TransformerEncoderBlock"))
def test_network_creation(self, policy, pooled_dtype, pool_type,
transformer_cls):
tf.keras.mixed_precision.set_global_policy(policy)
hidden_size = 32
sequence_length = 21
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length,
unpool_length=0,
transformer_cls=transformer_cls)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
# For pool_type = max or avg,
# this configuration gives each layer of seq length: 21->11->6->3.
# For pool_type = truncated_avg,
# seq length: 21->10->5->2.
if pool_type in ["max", "avg"]:
expected_data_shape = [None, 3, hidden_size]
else:
expected_data_shape = [None, 2, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(pooled_dtype, pooled.dtype)
def test_network_creation_dense(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
pool_type = "avg"
hidden_size = 32
sequence_length = 21
dense_sequence_length = 3
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length + dense_sequence_length,
unpool_length=0,
transformer_cls="TransformerEncoderBlock")
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
# For pool_type = max or avg,
# this configuration gives each layer of seq length: 24->12->6->3.
expected_data_shape = [None, 3, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
def test_invalid_stride_and_num_layers(self):
hidden_size = 32
num_layers = 3
pool_stride = [2, 2]
unpool_length = 1
with self.assertRaisesRegex(ValueError,
"pool_stride and num_layers are not equal"):
_ = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
@parameterized.named_parameters(
("no_stride_no_unpool", 1, 0),
("stride_list_with_unpool", [2, 3, 4], 1),
("large_stride_with_unpool", 3, 1),
("large_stride_with_large_unpool", 5, 10),
("no_stride_with_unpool", 1, 1),
)
def test_all_encoder_outputs_network_creation(self, pool_stride,
unpool_length):
hidden_size = 32
sequence_length = 21
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, num_layers)
if isinstance(pool_stride, int):
pool_stride = [pool_stride] * num_layers
for layer_pool_stride, data in zip(pool_stride, all_encoder_outputs):
expected_data_shape[1] = unpool_length + (
expected_data_shape[1] + layer_pool_stride - 1 -
unpool_length) // layer_pool_stride
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
  @parameterized.named_parameters(
      # (test name, output_range, expected output sequence length, unpool_length)
      ("all_sequence", None, 3, 0),
      ("output_range", 1, 1, 0),
      ("all_sequence_wit_unpool", None, 4, 1),
      ("output_range_with_unpool", 1, 1, 1),
      ("output_range_with_large_unpool", 1, 1, 2),
  )
  def test_network_invocation(self, output_range, out_seq_len, unpool_length):
    """Builds and invokes FunnelTransformerEncoder variants end to end.

    Covers `output_range`/`unpool_length` combinations, a custom
    `max_sequence_length`, and an `embedding_width` different from
    `hidden_size`.
    """
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    pool_stride = 2
    # Create a small FunnelTransformerEncoder for testing.
    test_network = funnel_transformer.FunnelTransformerEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=output_range,
        pool_stride=pool_stride,
        unpool_length=unpool_length)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    dict_outputs = test_network([word_ids, mask, type_ids])
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[1], out_seq_len)  # output_range
    # Creates a FunnelTransformerEncoder with max_sequence_length !=
    # sequence_length
    max_sequence_length = 128
    test_network = funnel_transformer.FunnelTransformerEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        pool_stride=pool_stride)
    dict_outputs = test_network([word_ids, mask, type_ids])
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    # 21 tokens pooled by stride 2 over 3 layers: ceil(ceil(ceil(21/2)/2)/2) = 3.
    self.assertEqual(outputs[0].shape[1], 3)
    # Creates a FunnelTransformerEncoder with embedding_width != hidden_size
    test_network = funnel_transformer.FunnelTransformerEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        embedding_width=16,
        pool_stride=pool_stride)
    dict_outputs = test_network([word_ids, mask, type_ids])
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    # The narrow embedding must be projected back up to hidden_size.
    self.assertEqual(outputs[0].shape[-1], hidden_size)
    self.assertTrue(hasattr(test_network, "_embedding_projection"))
  def test_serialize_deserialize(self):
    """Round-trips the encoder through get_config/from_config and SavedModel."""
    # Create a network object that sets all of its config options.
    kwargs = dict(
        vocab_size=100,
        hidden_size=32,
        num_layers=3,
        num_attention_heads=2,
        max_sequence_length=21,
        type_vocab_size=12,
        inner_dim=1223,
        inner_activation="relu",
        output_dropout=0.05,
        attention_dropout=0.22,
        initializer="glorot_uniform",
        output_range=-1,
        embedding_width=16,
        embedding_layer=None,
        norm_first=False,
        pool_type="max",
        pool_stride=2,
        unpool_length=0,
        transformer_cls="TransformerEncoderBlock")
    network = funnel_transformer.FunnelTransformerEncoder(**kwargs)
    expected_config = dict(kwargs)
    # get_config() returns serialized Keras objects, so normalize the raw
    # activation/initializer strings the same way before comparing.
    expected_config["inner_activation"] = tf.keras.activations.serialize(
        tf.keras.activations.get(expected_config["inner_activation"]))
    expected_config["initializer"] = tf.keras.initializers.serialize(
        tf.keras.initializers.get(expected_config["initializer"]))
    self.assertEqual(network.get_config(), expected_config)
    # Create another network object from the first object's config.
    new_network = funnel_transformer.FunnelTransformerEncoder.from_config(
        network.get_config())
    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())
    # Tests model saving/loading.
    model_path = self.get_temp_dir() + "/model"
    network_wrapper = SingleLayerModel(network)
    # One forward-path to ensure input_shape.
    batch_size = 3
    sequence_length = 21
    vocab_size = 100
    num_types = 12
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    _ = network_wrapper.predict([word_id_data, mask_data, type_id_data])
    network_wrapper.save(model_path)
    # Loading back must succeed without a custom_objects mapping.
    _ = tf.keras.models.load_model(model_path)
# Run all test cases defined in this module.
if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.keras.Model",
"tensorflow.keras.activations.get",
"absl.testing.parameterized.named_parameters",
"tensorflow.test.main",
"numpy.random.randint",
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.keras.models.load_model",
"tensorflow.keras.initializers.get",
"tensorflow.ke... | [((1208, 1963), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mix_truncated_avg_rezero', 'mixed_float16', tf.float16, 'truncated_avg',\n 'ReZeroTransformer')", "('float32_truncated_avg_rezero', 'float32', tf.float32, 'truncated_avg',\n 'ReZeroTransformer')", "('mix_truncated_avg', 'mixed_float16', tf.float16, 'truncated_avg',\n 'TransformerEncoderBlock')", "('float32_truncated_avg', 'float32', tf.float32, 'truncated_avg',\n 'TransformerEncoderBlock')", "('mix_max', 'mixed_float16', tf.float16, 'max', 'TransformerEncoderBlock')", "('float32_max', 'float32', tf.float32, 'max', 'TransformerEncoderBlock')", "('mix_avg', 'mixed_float16', tf.float16, 'avg', 'TransformerEncoderBlock')", "('float32_avg', 'float32', tf.float32, 'avg', 'TransformerEncoderBlock')"], {}), "(('mix_truncated_avg_rezero', 'mixed_float16',\n tf.float16, 'truncated_avg', 'ReZeroTransformer'), (\n 'float32_truncated_avg_rezero', 'float32', tf.float32, 'truncated_avg',\n 'ReZeroTransformer'), ('mix_truncated_avg', 'mixed_float16', tf.float16,\n 'truncated_avg', 'TransformerEncoderBlock'), ('float32_truncated_avg',\n 'float32', tf.float32, 'truncated_avg', 'TransformerEncoderBlock'), (\n 'mix_max', 'mixed_float16', tf.float16, 'max',\n 'TransformerEncoderBlock'), ('float32_max', 'float32', tf.float32,\n 'max', 'TransformerEncoderBlock'), ('mix_avg', 'mixed_float16', tf.\n float16, 'avg', 'TransformerEncoderBlock'), ('float32_avg', 'float32',\n tf.float32, 'avg', 'TransformerEncoderBlock'))\n", (1238, 1963), False, 'from absl.testing import parameterized\n'), ((6800, 7031), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('no_stride_no_unpool', 1, 0)", "('stride_list_with_unpool', [2, 3, 4], 1)", "('large_stride_with_unpool', 3, 1)", "('large_stride_with_large_unpool', 5, 10)", "('no_stride_with_unpool', 1, 1)"], {}), "(('no_stride_no_unpool', 1, 0), (\n 'stride_list_with_unpool', [2, 3, 4], 1), 
('large_stride_with_unpool', \n 3, 1), ('large_stride_with_large_unpool', 5, 10), (\n 'no_stride_with_unpool', 1, 1))\n", (6830, 7031), False, 'from absl.testing import parameterized\n'), ((8783, 9009), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('all_sequence', None, 3, 0)", "('output_range', 1, 1, 0)", "('all_sequence_wit_unpool', None, 4, 1)", "('output_range_with_unpool', 1, 1, 1)", "('output_range_with_large_unpool', 1, 1, 2)"], {}), "(('all_sequence', None, 3, 0), (\n 'output_range', 1, 1, 0), ('all_sequence_wit_unpool', None, 4, 1), (\n 'output_range_with_unpool', 1, 1, 1), ('output_range_with_large_unpool',\n 1, 1, 2))\n", (8813, 9009), False, 'from absl.testing import parameterized\n'), ((14427, 14441), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (14439, 14441), True, 'import tensorflow as tf\n'), ((1150, 1203), 'tensorflow.keras.mixed_precision.set_global_policy', 'tf.keras.mixed_precision.set_global_policy', (['"""float32"""'], {}), "('float32')\n", (1192, 1203), True, 'import tensorflow as tf\n'), ((2169, 2219), 'tensorflow.keras.mixed_precision.set_global_policy', 'tf.keras.mixed_precision.set_global_policy', (['policy'], {}), '(policy)\n', (2211, 2219), True, 'import tensorflow as tf\n'), ((2384, 2661), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': '(100)', 'hidden_size': 'hidden_size', 'num_attention_heads': '(2)', 'num_layers': 'num_layers', 'pool_stride': 'pool_stride', 'pool_type': 'pool_type', 'max_sequence_length': 'sequence_length', 'unpool_length': '(0)', 'transformer_cls': 'transformer_cls'}), '(vocab_size=100, hidden_size=\n hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=\n pool_stride, pool_type=pool_type, max_sequence_length=sequence_length,\n unpool_length=0, transformer_cls=transformer_cls)\n', (2427, 2661), False, 'from official.nlp.modeling.networks import 
funnel_transformer\n'), ((2805, 2861), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (2819, 2861), True, 'import tensorflow as tf\n'), ((2873, 2929), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (2887, 2929), True, 'import tensorflow as tf\n'), ((2945, 3001), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (2959, 3001), True, 'import tensorflow as tf\n'), ((4256, 4315), 'tensorflow.keras.mixed_precision.set_global_policy', 'tf.keras.mixed_precision.set_global_policy', (['"""mixed_float16"""'], {}), "('mixed_float16')\n", (4298, 4315), True, 'import tensorflow as tf\n'), ((4532, 4848), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': '(100)', 'hidden_size': 'hidden_size', 'num_attention_heads': '(2)', 'num_layers': 'num_layers', 'pool_stride': 'pool_stride', 'pool_type': 'pool_type', 'max_sequence_length': '(sequence_length + dense_sequence_length)', 'unpool_length': '(0)', 'transformer_cls': '"""TransformerEncoderBlock"""'}), "(vocab_size=100, hidden_size=\n hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=\n pool_stride, pool_type=pool_type, max_sequence_length=sequence_length +\n dense_sequence_length, unpool_length=0, transformer_cls=\n 'TransformerEncoderBlock')\n", (4575, 4848), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((4987, 5043), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (5001, 5043), True, 'import tensorflow as tf\n'), ((5055, 5111), 'tensorflow.keras.Input', 'tf.keras.Input', ([], 
{'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (5069, 5111), True, 'import tensorflow as tf\n'), ((5127, 5183), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (5141, 5183), True, 'import tensorflow as tf\n'), ((5204, 5280), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(dense_sequence_length, hidden_size)', 'dtype': 'tf.float32'}), '(shape=(dense_sequence_length, hidden_size), dtype=tf.float32)\n', (5218, 5280), True, 'import tensorflow as tf\n'), ((5307, 5369), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(dense_sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(dense_sequence_length,), dtype=tf.int32)\n', (5321, 5369), True, 'import tensorflow as tf\n'), ((5391, 5453), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(dense_sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(dense_sequence_length,), dtype=tf.int32)\n', (5405, 5453), True, 'import tensorflow as tf\n'), ((7326, 7520), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': '(100)', 'hidden_size': 'hidden_size', 'num_attention_heads': '(2)', 'num_layers': 'num_layers', 'pool_stride': 'pool_stride', 'unpool_length': 'unpool_length'}), '(vocab_size=100, hidden_size=\n hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=\n pool_stride, unpool_length=unpool_length)\n', (7369, 7520), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((7644, 7700), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (7658, 7700), True, 'import tensorflow as tf\n'), ((7712, 7768), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), 
'(shape=(sequence_length,), dtype=tf.int32)\n', (7726, 7768), True, 'import tensorflow as tf\n'), ((7784, 7840), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (7798, 7840), True, 'import tensorflow as tf\n'), ((9292, 9541), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': 'vocab_size', 'hidden_size': 'hidden_size', 'num_attention_heads': '(2)', 'num_layers': '(3)', 'type_vocab_size': 'num_types', 'output_range': 'output_range', 'pool_stride': 'pool_stride', 'unpool_length': 'unpool_length'}), '(vocab_size=vocab_size,\n hidden_size=hidden_size, num_attention_heads=2, num_layers=3,\n type_vocab_size=num_types, output_range=output_range, pool_stride=\n pool_stride, unpool_length=unpool_length)\n', (9335, 9541), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((9678, 9734), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (9692, 9734), True, 'import tensorflow as tf\n'), ((9746, 9802), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (9760, 9802), True, 'import tensorflow as tf\n'), ((9818, 9874), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(sequence_length,)', 'dtype': 'tf.int32'}), '(shape=(sequence_length,), dtype=tf.int32)\n', (9832, 9874), True, 'import tensorflow as tf\n'), ((10082, 10140), 'tensorflow.keras.Model', 'tf.keras.Model', (['[word_ids, mask, type_ids]', '[data, pooled]'], {}), '([word_ids, mask, type_ids], [data, pooled])\n', (10096, 10140), True, 'import tensorflow as tf\n'), ((10323, 10388), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {'size': '(batch_size, sequence_length)'}), 
'(vocab_size, size=(batch_size, sequence_length))\n', (10340, 10388), True, 'import numpy as np\n'), ((10414, 10470), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(batch_size, sequence_length)'}), '(2, size=(batch_size, sequence_length))\n', (10431, 10470), True, 'import numpy as np\n'), ((10490, 10554), 'numpy.random.randint', 'np.random.randint', (['num_types'], {'size': '(batch_size, sequence_length)'}), '(num_types, size=(batch_size, sequence_length))\n', (10507, 10554), True, 'import numpy as np\n'), ((10845, 11078), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': 'vocab_size', 'hidden_size': 'hidden_size', 'max_sequence_length': 'max_sequence_length', 'num_attention_heads': '(2)', 'num_layers': '(3)', 'type_vocab_size': 'num_types', 'pool_stride': 'pool_stride'}), '(vocab_size=vocab_size,\n hidden_size=hidden_size, max_sequence_length=max_sequence_length,\n num_attention_heads=2, num_layers=3, type_vocab_size=num_types,\n pool_stride=pool_stride)\n', (10888, 11078), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((11282, 11340), 'tensorflow.keras.Model', 'tf.keras.Model', (['[word_ids, mask, type_ids]', '[data, pooled]'], {}), '([word_ids, mask, type_ids], [data, pooled])\n', (11296, 11340), True, 'import tensorflow as tf\n'), ((11552, 11805), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': 'vocab_size', 'hidden_size': 'hidden_size', 'max_sequence_length': 'max_sequence_length', 'num_attention_heads': '(2)', 'num_layers': '(3)', 'type_vocab_size': 'num_types', 'embedding_width': '(16)', 'pool_stride': 'pool_stride'}), '(vocab_size=vocab_size,\n hidden_size=hidden_size, max_sequence_length=max_sequence_length,\n num_attention_heads=2, num_layers=3, type_vocab_size=num_types,\n embedding_width=16, 
pool_stride=pool_stride)\n', (11595, 11805), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((12017, 12075), 'tensorflow.keras.Model', 'tf.keras.Model', (['[word_ids, mask, type_ids]', '[data, pooled]'], {}), '([word_ids, mask, type_ids], [data, pooled])\n', (12031, 12075), True, 'import tensorflow as tf\n'), ((12960, 13013), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {}), '(**kwargs)\n', (13003, 13013), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), ((13997, 14062), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {'size': '(batch_size, sequence_length)'}), '(vocab_size, size=(batch_size, sequence_length))\n', (14014, 14062), True, 'import numpy as np\n'), ((14088, 14144), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(batch_size, sequence_length)'}), '(2, size=(batch_size, sequence_length))\n', (14105, 14144), True, 'import numpy as np\n'), ((14164, 14228), 'numpy.random.randint', 'np.random.randint', (['num_types'], {'size': '(batch_size, sequence_length)'}), '(num_types, size=(batch_size, sequence_length))\n', (14181, 14228), True, 'import numpy as np\n'), ((14357, 14395), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (14383, 14395), True, 'import tensorflow as tf\n'), ((6550, 6744), 'official.nlp.modeling.networks.funnel_transformer.FunnelTransformerEncoder', 'funnel_transformer.FunnelTransformerEncoder', ([], {'vocab_size': '(100)', 'hidden_size': 'hidden_size', 'num_attention_heads': '(2)', 'num_layers': 'num_layers', 'pool_stride': 'pool_stride', 'unpool_length': 'unpool_length'}), '(vocab_size=100, hidden_size=\n hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=\n pool_stride, unpool_length=unpool_length)\n', (6593, 6744), False, 'from official.nlp.modeling.networks import funnel_transformer\n'), 
((13131, 13192), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (["expected_config['inner_activation']"], {}), "(expected_config['inner_activation'])\n", (13155, 13192), True, 'import tensorflow as tf\n'), ((13272, 13329), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (["expected_config['initializer']"], {}), "(expected_config['initializer'])\n", (13297, 13329), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
import math
import numpy as np
import bisect
from multiprocessing import Pool
BASE_DIR = os.path.join(os.path.dirname(__file__), '../../../data/S3DIS/prepare_label_rgb')
res = 0.05  # voxel size used for downsampling: 5 cm
max_b = 1.5  # block edge length (metres)
overlap = 0.75  # overlap between adjacent blocks (metres)
block_min_pnum = 600  # blocks with fewer points get merged into a neighbour
# out path: roots for original / downsampled points, labels and transforms
train_ori_pts_root = "../../../data/S3DIS/out_part_rgb/train_ori_pts/"
train_ori_seg_root = "../../../data/S3DIS/out_part_rgb/train_ori_seg/"
train_data_root = "../../../data/S3DIS/out_part_rgb/train_data_downsampling/"
train_label_root = "../../../data/S3DIS/out_part_rgb/train_label_downsampling/"
train_trans_root = "../../../data/S3DIS/out_part_rgb/train_trans_downsampling/"
test_ori_pts_root = "../../../data/S3DIS/out_part_rgb/test_ori_pts/"
test_ori_seg_root = "../../../data/S3DIS/out_part_rgb/test_ori_seg/"
test_data_root = "../../../data/S3DIS/out_part_rgb/test_data_downsampling/"
test_label_root = "../../../data/S3DIS/out_part_rgb/test_label_downsampling/"
test_trans_root = "../../../data/S3DIS/out_part_rgb/test_trans_downsampling/"
def pc_getbbox(pc):
    """Compute the axis-aligned bounding box of a point cloud.

    Replaces three hand-rolled append loops with comprehensions and the
    built-in min/max.

    Args:
        pc: iterable of points; each point is indexable and its first three
            entries are the x, y, z coordinates (extra entries such as RGB
            are ignored).

    Returns:
        list: [x_min, x_max, y_min, y_max, z_min, z_max].

    Raises:
        ValueError: if ``pc`` is empty (as the original did via min([])).
    """
    x = [pts[0] for pts in pc]
    y = [pts[1] for pts in pc]
    z = [pts[2] for pts in pc]
    return [min(x), max(x), min(y), max(y), min(z), max(z)]
def process_block(pf, block_list):
    """Assign each point to every (possibly overlapping) block containing it.

    Args:
        pf: (N, >=2) array-like of points; only x (col 0) and y (col 1)
            are used for the assignment.
        block_list: (M, 4) numpy array of blocks [x_min, y_min, x_max, y_max]
            laid out on a regular grid, row-major over the unique x_min
            values then the unique y_min values.

    Returns:
        dict mapping block index (0..M-1) -> list of point indices into `pf`.
    """
    block_indices = {}
    # Grid origins along each axis; a block's flat index is
    # x_cell * len(y_min_list) + y_cell.
    x_min_list = np.unique(block_list[:,0])
    y_min_list = np.unique(block_list[:,1])
    len_x_min_list = x_min_list.shape[0]
    len_y_min_list = y_min_list.shape[0]
    len_pf = len(pf)
    count = 0
    for i in range(len(block_list)):
        block_indices[i] = []
    for i,p in enumerate(pf):
        # Grid cell whose origin is nearest below the point on each axis.
        # NOTE(review): assumes every point lies inside the grid
        # (x_pos, y_pos >= 1) -- presumably guaranteed because the blocks
        # were built from this cloud's bounding box; confirm for other callers.
        x_pos = bisect.bisect_right(x_min_list,p[0])
        y_pos = bisect.bisect_right(y_min_list,p[1])
        k = int((x_pos-1)*len_y_min_list + (y_pos-1))
        # Adjacent blocks overlap, so an interior point is also appended to
        # the previous block along each axis (up to 4 blocks in total).
        if x_pos > 1 and x_pos < len_x_min_list and y_pos > 1 and y_pos < len_y_min_list:
            block_indices[k].append(i)
            block_indices[k-1].append(i)
            block_indices[k-len_y_min_list].append(i)
            block_indices[k-len_y_min_list-1].append(i)
        elif x_pos > 1 and x_pos < len_x_min_list and (y_pos == 1 or y_pos == len_y_min_list):
            # Point on a y border: current block plus its x-neighbour only.
            block_indices[k].append(i)
            block_indices[k-len_y_min_list].append(i)
        elif y_pos > 1 and y_pos < len_y_min_list and (x_pos == 1 or x_pos == len_x_min_list):
            # Point on an x border: current block plus its y-neighbour only.
            block_indices[k].append(i)
            block_indices[k-1].append(i)
        else:
            # Corner cell: the point belongs to a single block.
            block_indices[k].append(i)
        # Plain-text progress bar, redrawn once per completed percent.
        if int(100*i/len_pf) - count == 1:
            sys.stdout.write("[%s>%s] %s\r" % ('-'*int(100*i/len_pf), ' '*int(100-int(100*i/len_pf)),str(int(100*i/len_pf))+"%"))
            sys.stdout.flush()
            count+=1
    print("")
    print("#### ",os.getpid()," END ####")
    return block_indices
def unpickle(npy_file, out_ori_pts, out_ori_seg, out_data, out_label, out_trans):
    """Split every S3DIS room into overlapping blocks and write them to disk.

    For each Area/Room under ``npy_file``, the room's points ("xyzrgb.npy")
    and labels ("label.npy") are cut into max_b x max_b blocks with
    ``overlap`` metres of overlap. Blocks with fewer than ``block_min_pnum``
    points are merged into the block whose center is nearest their centroid.
    Each surviving block is written four ways: original points (.pts),
    original labels (.seg), voxel-downsampled points with normalized RGB,
    and the matching downsampled labels. Per-block translations
    (camera CS -> block CS) are collected into one .trs file per room.

    Args:
        npy_file: root directory containing Area_*/Room_* numpy files.
        out_ori_pts: output root for original (undownsampled) block points.
        out_ori_seg: output root for original block labels.
        out_data: output root for downsampled block points (xyz + rgb).
        out_label: output root for downsampled block labels.
        out_trans: output root for per-room .trs translation files.
    """
    path_Areas = os.listdir(npy_file)
    for Area in path_Areas:
        # check the path
        if not os.path.exists(out_ori_pts + "Area" + Area[-1] + "_data/01"):
            print(out_ori_pts, "Not Exists! Create", out_ori_pts)
            os.makedirs(out_ori_pts + "Area" + Area[-1] + "_data/01")
        if not os.path.exists(out_ori_seg + "Area" + Area[-1] + "_data/01"):
            print(out_ori_seg, "Not Exists! Create", out_ori_seg)
            os.makedirs(out_ori_seg + "Area" + Area[-1] + "_data/01")
        if not os.path.exists(out_data + "Area" + Area[-1] + "_data/01"):
            print(out_data, "Not Exists! Create", out_data)
            os.makedirs(out_data + "Area" + Area[-1] + "_data/01")
        if not os.path.exists(out_label + "Area" + Area[-1] + "_label/01"):
            print(out_label, "Not Exists! Create", out_label)
            os.makedirs(out_label + "Area" + Area[-1] + "_label/01")
        if not os.path.exists(os.path.join(out_trans, Area)):
            print(out_trans, "Not Exists! Create", out_trans)
            os.makedirs(os.path.join(out_trans, Area))
        path_Rooms = os.listdir(os.path.join(npy_file, Area))
        for Room in path_Rooms:
            path_data = os.path.join(npy_file, Area, Room, "xyzrgb.npy")
            path_seg = os.path.join(npy_file, Area, Room, "label.npy")
            print("\nProcess", path_data)
            pf = np.load(path_data)
            # Labels come as an (N, 1) float array; flatten to (N,) ints.
            sf = np.load(path_seg).astype(int)
            sf = sf.reshape(int(sf.shape[0]))
            pts_num = len(pf)
            seg_num = len(sf)
            print("pts_num", pts_num, "seg_num", seg_num)
            if pts_num == seg_num:
                # cut block: tile the room's xy bounding box with blocks of
                # edge max_b, stepping by (max_b - overlap) along each axis.
                bbox = pc_getbbox(pf)
                split_x = []
                split_y = []
                block_list = []
                dim = [bbox[1] - bbox[0], bbox[3] - bbox[2], bbox[5] - bbox[4]]
                # compute split x
                if dim[0] > max_b:
                    block_num = int(dim[0] / (max_b - overlap))
                    for c in range(block_num):
                        split_x.append([c * (max_b - overlap), c * (max_b - overlap) + max_b])
                else:
                    split_x.append([0, dim[0]])
                # compute split y
                if dim[1] > max_b:
                    block_num = int(dim[1] / (max_b - overlap))
                    for c in range(block_num):
                        split_y.append([c * (max_b - overlap), c * (max_b - overlap) + max_b])
                else:
                    split_y.append([0, dim[1]])
                # Cartesian product of x/y intervals, shifted into room coords:
                # each block is [x_min, y_min, x_max, y_max].
                for px in split_x:
                    for py in split_y:
                        block_list.append([px[0] + bbox[0], py[0] + bbox[2], px[1] + bbox[0], py[1] + bbox[2]])
                # split to blocks
                len_block_list = len(block_list)
                print("need to process :", len_block_list, " blocks")
                np_block_list = np.array(block_list)
                block_indices = process_block(pf[:, :3], np_block_list)
                # Collect undersized blocks: [index, bounds, point count].
                block_needmerge = []
                for i in range(len_block_list):
                    if len(block_indices[i]) < block_min_pnum:
                        block_needmerge.append([i, block_list[i], len(block_indices[i])])
                print("reblock")
                # reblock: append a 4th element to each entry -- the index of
                # the block to merge into, or -1 for an empty block.
                for block_nm in block_needmerge:
                    print("Merge block:", block_nm)
                    if block_nm[2] == 0:
                        block_nm.append(-1)
                    else:
                        # compute the nearest block to merge: the block whose
                        # center is closest to this block's point centroid.
                        # NOTE(review): dis_list includes the block itself, so
                        # the "nearest" block can be the undersized block
                        # itself (a no-op merge) -- confirm this is intended.
                        block_i = block_nm[0]
                        dis_list = []
                        x_sum = 0
                        y_sum = 0
                        for n in block_indices[block_i]:
                            x_sum = x_sum + pf[n][0]
                            y_sum = y_sum + pf[n][1]
                        x_avg = x_sum / block_nm[2]
                        y_avg = y_sum / block_nm[2]
                        for block in block_list:
                            block_center = [(block[2] + block[0]) / 2, (block[3] + block[1]) / 2]
                            dis = math.sqrt((block_center[0] - x_avg) ** 2 + (block_center[1] - y_avg) ** 2)
                            dis_list.append(dis)
                        merge_block = dis_list.index(min(dis_list))
                        block_nm.append(merge_block)
                # save trans file (Camera CS to Block CS)
                trans_list = []
                out_trs = out_trans + Area + "/" + Room + ".trs"
                # save block
                for k, block in enumerate(block_list):
                    # save_list holds the block indices whose points are
                    # written to this output file; save_list[0] names the file.
                    save_list = [k]
                    for block_nm in block_needmerge:
                        if k == block_nm[0]:
                            save_list = [block_nm[3], k]
                        if k == block_nm[3]:
                            save_list = [k, block_nm[0]]
                    # zero block
                    if save_list[0] == -1:
                        print("zero block")
                        continue
                    # NOTE(review): when block k is merged into block t, both
                    # iterations k and t write files named after t -- the
                    # later write overwrites the earlier one; verify.
                    save_id = Room + "%03d" % save_list[0]
                    ori_pts = out_ori_pts + "Area" + Area[-1] + "_data/01/" + save_id + ".pts"
                    ori_seg = out_ori_seg + "Area" + Area[-1] + "_data/01/" + save_id + ".seg"
                    out_pts = out_data + "Area" + Area[-1] + "_data/01/" + save_id + ".pts"
                    out_seg = out_label + "Area" + Area[-1] + "_label/01/" + save_id + ".seg"
                    pf_block = []
                    sf_block = []
                    pf_ori = []
                    for save_k in save_list:
                        for n in block_indices[save_k]:
                            pf_block.append(pf[n])
                            sf_block.append(sf[n])
                    bbox_block = pc_getbbox(pf_block)
                    # Translation: block center in x/y, block floor in z.
                    trans = [(bbox_block[1] - bbox_block[0]) / 2 + bbox_block[0],
                             (bbox_block[3] - bbox_block[2]) / 2 + bbox_block[2], bbox_block[4]]
                    trans_list.append([save_id, trans])
                    # save ori block pts -- written as (x, z, y), i.e. with the
                    # y and z axes swapped, each shifted by trans.
                    with open(ori_pts, "w") as f:
                        for pt in pf_block:
                            pf_ori.append([pt[0] - trans[0], pt[2] - trans[2], pt[1] - trans[1]])
                            f.writelines(str(float(pf_ori[-1][0])) + " " +
                                         str(float(pf_ori[-1][1])) + " " +
                                         str(float(pf_ori[-1][2])) + "\n")
                    # save ori block seg
                    with open(ori_seg, "w") as f:
                        for s in sf_block:
                            f.writelines(str(s) + "\n")
                    print("save ori seg", out_ori_seg)
                    # downsampling: voxelize at resolution `res` and keep the
                    # first point encountered in each occupied voxel.
                    coordmax = np.max(pf_ori, axis=0)
                    coordmin = np.min(pf_ori, axis=0)
                    nvox = np.ceil((coordmax - coordmin) / res)
                    vidx = np.ceil((pf_ori - coordmin) / res)
                    # Flatten the 3-D voxel coordinate into a single index.
                    vidx = vidx[:, 0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
                    uvidx, vpidx = np.unique(vidx, return_index=True)
                    # compute voxel label
                    pf_block = np.array(pf_block)[vpidx].tolist()
                    sf_block = np.array(sf_block)[vpidx].tolist()
                    #########
                    # Downsampled points: translated xyz (y/z swapped as above)
                    # plus RGB normalized from [0, 255] to [-0.5, 0.5].
                    with open(out_pts, "w") as f:
                        for pt in pf_block:
                            f.writelines(str(float((pt[0] - trans[0]))) + " " +
                                         str(float((pt[2] - trans[2]))) + " " +
                                         str(float((pt[1] - trans[1]))) + " " +
                                         str(float(pt[3]) / 255 - 0.5) + " " +
                                         str(float(pt[4]) / 255 - 0.5) + " " +
                                         str(float(pt[5]) / 255 - 0.5) + "\n")
                    print("save pts", out_pts)
                    with open(out_seg, "w") as f:
                        for s in sf_block:
                            f.writelines(str(s) + "\n")
                    print("save seg", out_seg)
                # save trans: one line per block, "save_id tx ty tz".
                with open(out_trs, "w") as f_w:
                    for trans in trans_list:
                        f_w.writelines(trans[0])
                        for t in trans[1]:
                            f_w.writelines(" " + str(t))
                        f_w.writelines("\n")
                print("save trans", out_trs)
            else:
                # Point/label count mismatch: corrupt room; abort the whole run.
                print("pts_num != seg_num!")
                os._exit(0)
if __name__ == '__main__':
    # NOTE(review): both passes scan the same BASE_DIR input tree; only the
    # output roots differ -- confirm the train/test separation is intentional.
    # read and split train
    unpickle(BASE_DIR, train_ori_pts_root, train_ori_seg_root, train_data_root, train_label_root, train_trans_root)
    # read and split test
    unpickle(BASE_DIR, test_ori_pts_root, test_ori_seg_root, test_data_root, test_label_root, test_trans_root)
unpickle(BASE_DIR, test_ori_pts_root, test_ori_seg_root, test_data_root, test_label_root, test_trans_root) | [
"os.path.exists",
"numpy.ceil",
"os.listdir",
"numpy.unique",
"os.makedirs",
"os.path.join",
"math.sqrt",
"numpy.max",
"os.path.dirname",
"bisect.bisect_right",
"numpy.array",
"os._exit",
"os.getpid",
"numpy.min",
"sys.stdout.flush",
"numpy.load"
] | [((259, 284), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (274, 284), False, 'import os, sys\n'), ((1506, 1533), 'numpy.unique', 'np.unique', (['block_list[:, 0]'], {}), '(block_list[:, 0])\n', (1515, 1533), True, 'import numpy as np\n'), ((1551, 1578), 'numpy.unique', 'np.unique', (['block_list[:, 1]'], {}), '(block_list[:, 1])\n', (1560, 1578), True, 'import numpy as np\n'), ((3103, 3123), 'os.listdir', 'os.listdir', (['npy_file'], {}), '(npy_file)\n', (3113, 3123), False, 'import os, sys\n'), ((1824, 1861), 'bisect.bisect_right', 'bisect.bisect_right', (['x_min_list', 'p[0]'], {}), '(x_min_list, p[0])\n', (1843, 1861), False, 'import bisect\n'), ((1878, 1915), 'bisect.bisect_right', 'bisect.bisect_right', (['y_min_list', 'p[1]'], {}), '(y_min_list, p[1])\n', (1897, 1915), False, 'import bisect\n'), ((2946, 2957), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2955, 2957), False, 'import os, sys\n'), ((2871, 2889), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2887, 2889), False, 'import os, sys\n'), ((3195, 3255), 'os.path.exists', 'os.path.exists', (["(out_ori_pts + 'Area' + Area[-1] + '_data/01')"], {}), "(out_ori_pts + 'Area' + Area[-1] + '_data/01')\n", (3209, 3255), False, 'import os, sys\n'), ((3337, 3394), 'os.makedirs', 'os.makedirs', (["(out_ori_pts + 'Area' + Area[-1] + '_data/01')"], {}), "(out_ori_pts + 'Area' + Area[-1] + '_data/01')\n", (3348, 3394), False, 'import os, sys\n'), ((3411, 3471), 'os.path.exists', 'os.path.exists', (["(out_ori_seg + 'Area' + Area[-1] + '_data/01')"], {}), "(out_ori_seg + 'Area' + Area[-1] + '_data/01')\n", (3425, 3471), False, 'import os, sys\n'), ((3553, 3610), 'os.makedirs', 'os.makedirs', (["(out_ori_seg + 'Area' + Area[-1] + '_data/01')"], {}), "(out_ori_seg + 'Area' + Area[-1] + '_data/01')\n", (3564, 3610), False, 'import os, sys\n'), ((3627, 3684), 'os.path.exists', 'os.path.exists', (["(out_data + 'Area' + Area[-1] + '_data/01')"], {}), "(out_data + 'Area' + Area[-1] 
+ '_data/01')\n", (3641, 3684), False, 'import os, sys\n'), ((3760, 3814), 'os.makedirs', 'os.makedirs', (["(out_data + 'Area' + Area[-1] + '_data/01')"], {}), "(out_data + 'Area' + Area[-1] + '_data/01')\n", (3771, 3814), False, 'import os, sys\n'), ((3831, 3890), 'os.path.exists', 'os.path.exists', (["(out_label + 'Area' + Area[-1] + '_label/01')"], {}), "(out_label + 'Area' + Area[-1] + '_label/01')\n", (3845, 3890), False, 'import os, sys\n'), ((3968, 4024), 'os.makedirs', 'os.makedirs', (["(out_label + 'Area' + Area[-1] + '_label/01')"], {}), "(out_label + 'Area' + Area[-1] + '_label/01')\n", (3979, 4024), False, 'import os, sys\n'), ((4242, 4270), 'os.path.join', 'os.path.join', (['npy_file', 'Area'], {}), '(npy_file, Area)\n', (4254, 4270), False, 'import os, sys\n'), ((4330, 4378), 'os.path.join', 'os.path.join', (['npy_file', 'Area', 'Room', '"""xyzrgb.npy"""'], {}), "(npy_file, Area, Room, 'xyzrgb.npy')\n", (4342, 4378), False, 'import os, sys\n'), ((4403, 4450), 'os.path.join', 'os.path.join', (['npy_file', 'Area', 'Room', '"""label.npy"""'], {}), "(npy_file, Area, Room, 'label.npy')\n", (4415, 4450), False, 'import os, sys\n'), ((4516, 4534), 'numpy.load', 'np.load', (['path_data'], {}), '(path_data)\n', (4523, 4534), True, 'import numpy as np\n'), ((4056, 4085), 'os.path.join', 'os.path.join', (['out_trans', 'Area'], {}), '(out_trans, Area)\n', (4068, 4085), False, 'import os, sys\n'), ((4176, 4205), 'os.path.join', 'os.path.join', (['out_trans', 'Area'], {}), '(out_trans, Area)\n', (4188, 4205), False, 'import os, sys\n'), ((6129, 6149), 'numpy.array', 'np.array', (['block_list'], {}), '(block_list)\n', (6137, 6149), True, 'import numpy as np\n'), ((12105, 12116), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (12113, 12116), False, 'import os, sys\n'), ((4553, 4570), 'numpy.load', 'np.load', (['path_seg'], {}), '(path_seg)\n', (4560, 4570), True, 'import numpy as np\n'), ((10237, 10259), 'numpy.max', 'np.max', (['pf_ori'], {'axis': '(0)'}), 
'(pf_ori, axis=0)\n', (10243, 10259), True, 'import numpy as np\n'), ((10292, 10314), 'numpy.min', 'np.min', (['pf_ori'], {'axis': '(0)'}), '(pf_ori, axis=0)\n', (10298, 10314), True, 'import numpy as np\n'), ((10343, 10379), 'numpy.ceil', 'np.ceil', (['((coordmax - coordmin) / res)'], {}), '((coordmax - coordmin) / res)\n', (10350, 10379), True, 'import numpy as np\n'), ((10408, 10442), 'numpy.ceil', 'np.ceil', (['((pf_ori - coordmin) / res)'], {}), '((pf_ori - coordmin) / res)\n', (10415, 10442), True, 'import numpy as np\n'), ((10576, 10610), 'numpy.unique', 'np.unique', (['vidx'], {'return_index': '(True)'}), '(vidx, return_index=True)\n', (10585, 10610), True, 'import numpy as np\n'), ((7433, 7507), 'math.sqrt', 'math.sqrt', (['((block_center[0] - x_avg) ** 2 + (block_center[1] - y_avg) ** 2)'], {}), '((block_center[0] - x_avg) ** 2 + (block_center[1] - y_avg) ** 2)\n', (7442, 7507), False, 'import math\n'), ((10686, 10704), 'numpy.array', 'np.array', (['pf_block'], {}), '(pf_block)\n', (10694, 10704), True, 'import numpy as np\n'), ((10753, 10771), 'numpy.array', 'np.array', (['sf_block'], {}), '(sf_block)\n', (10761, 10771), True, 'import numpy as np\n')] |
from numpy import random
from time import sleep
from silico import Experiment
def experiment_f(mean, sigma, seed):
# All seeds should be initialized using a parameter for reproducibility
random.seed(seed)
# Delay for test purpose
sleep(mean/100)
# Return a dict with the results (must be pickleable)
return {"value": random.normal(mean, sigma)}
experiment = Experiment(
[
("mean", [1, 2, 4]),
("sigma", [1, 2, 3]),
("seed", list(range(20))),
],
experiment_f, # Function
"experiment-demo", # Folder where the results are stored
)
| [
"numpy.random.normal",
"numpy.random.seed",
"time.sleep"
] | [((198, 215), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (209, 215), False, 'from numpy import random\n'), ((249, 266), 'time.sleep', 'sleep', (['(mean / 100)'], {}), '(mean / 100)\n', (254, 266), False, 'from time import sleep\n'), ((344, 370), 'numpy.random.normal', 'random.normal', (['mean', 'sigma'], {}), '(mean, sigma)\n', (357, 370), False, 'from numpy import random\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 20:35:36 2017
@author: Administrator
"""
import numpy as np
from math import isnan
from collections import Counter, OrderedDict
def eventsSummary(dataframe):
print('Total Events: %s' %len(dataframe))
print('Total Sources: %s' %sum(dataframe.NumSources))
print('Total Articles: %s' %sum(dataframe.NumArticles))
print('Total Mentions: %s' %sum(dataframe.NumMentions))
#print('Global Tone: ', dataframe.AvgTone.describe())
#print('Global Tone: ', dataframe.GoldsteinScale.describe())
def actorNames(dataframe):
# Preprocessing: Prominent Actors
ActorNames = Counter(dataframe.Actor1Name) + Counter(dataframe.Actor2Name)
StopNames = ['COLOMBIA', 'COLOMBIAN', 'BOGOTA', 'BARRANQUILLA', 'MEDELLIN', 'BANCOLOMBIA', 'BUCARAMANGA']
for value in ActorNames.keys():
try:
if isnan(value):
del ActorNames[value]
break
except:
continue
for name in StopNames:
del ActorNames[name]
return OrderedDict(sorted(ActorNames.items(), key=lambda x: x[1], reverse=True))
def actorType1Codes(dataframe):
ActorType1Codes = Counter(dataframe.Actor1Type1Code) + Counter(dataframe.Actor2Type1Code)
for value in ActorType1Codes.keys():
try:
if isnan(value):
del ActorType1Codes[value]
break
except:
continue
return OrderedDict(sorted(ActorType1Codes.items(), key=lambda x: x[1], reverse=True))
def movingAverage(values, window):
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'valid')
return sma | [
"collections.Counter",
"numpy.convolve",
"numpy.repeat",
"math.isnan"
] | [((1704, 1741), 'numpy.convolve', 'np.convolve', (['values', 'weights', '"""valid"""'], {}), "(values, weights, 'valid')\n", (1715, 1741), True, 'import numpy as np\n'), ((668, 697), 'collections.Counter', 'Counter', (['dataframe.Actor1Name'], {}), '(dataframe.Actor1Name)\n', (675, 697), False, 'from collections import Counter, OrderedDict\n'), ((700, 729), 'collections.Counter', 'Counter', (['dataframe.Actor2Name'], {}), '(dataframe.Actor2Name)\n', (707, 729), False, 'from collections import Counter, OrderedDict\n'), ((1243, 1277), 'collections.Counter', 'Counter', (['dataframe.Actor1Type1Code'], {}), '(dataframe.Actor1Type1Code)\n', (1250, 1277), False, 'from collections import Counter, OrderedDict\n'), ((1280, 1314), 'collections.Counter', 'Counter', (['dataframe.Actor2Type1Code'], {}), '(dataframe.Actor2Type1Code)\n', (1287, 1314), False, 'from collections import Counter, OrderedDict\n'), ((1663, 1685), 'numpy.repeat', 'np.repeat', (['(1.0)', 'window'], {}), '(1.0, window)\n', (1672, 1685), True, 'import numpy as np\n'), ((914, 926), 'math.isnan', 'isnan', (['value'], {}), '(value)\n', (919, 926), False, 'from math import isnan\n'), ((1393, 1405), 'math.isnan', 'isnan', (['value'], {}), '(value)\n', (1398, 1405), False, 'from math import isnan\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.