from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from contextlib import contextmanager
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.signal import pool
from theano.tensor.fft import rfft, irfft
from theano.printing import Print
from theano.tensor.signal.conv import conv2d as vec_conv
from theano.ifelse import ifelse
try:
import theano.sparse as th_sparse_module
except ImportError:
th_sparse_module = None
try:
from theano.tensor.nnet.nnet import softsign as T_softsign
except ImportError:
from theano.sandbox.softsign import softsign as T_softsign
import numpy as np
from .common import floatx
from .common import epsilon
from .common import normalize_data_format
from ..utils.generic_utils import transpose_shape
from ..utils.generic_utils import has_arg
# Legacy functions
from .common import set_image_dim_ordering, image_dim_ordering
py_all = all
py_any = any
py_sum = sum
py_slice = slice
# INTERNAL UTILS
theano.config.floatX = floatx()
# 0 = test, 1 = train
_LEARNING_PHASE = T.scalar(dtype='uint8', name='keras_learning_phase')
_UID_PREFIXES = defaultdict(int)
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
# Returns
Learning phase (scalar integer tensor or Python integer).
"""
# False = test, True = train
return _LEARNING_PHASE
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
# Arguments
value: Learning phase value, either 0 or 1 (integers).
# Raises
ValueError: if `value` is neither `0` nor `1`.
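    # Example (a minimal sketch)
    ```python
        >>> from keras import backend as K
        >>> K.set_learning_phase(1)  # force training-mode behavior
        >>> K.learning_phase()
        1
    ```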
"""
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('Expected learning phase to be '
'0 or 1.')
_LEARNING_PHASE = value
def get_uid(prefix=''):
"""Provides a unique UID given a string prefix.
# Arguments
prefix: string.
# Returns
An integer.
# Example
```python
>>> keras.backend.get_uid('dense')
1
>>> keras.backend.get_uid('dense')
2
```
"""
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def reset_uids():
"""Resets UIDs to default
:return:
"""
global _UID_PREFIXES
_UID_PREFIXES = defaultdict(int)
# VARIABLE MANIPULATION
def _assert_sparse_module():
if not th_sparse_module:
raise ImportError("Failed to import theano.sparse\n"
"You probably need to pip install nose-parameterized")
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
# Arguments
tensor: A tensor instance.
# Returns
A boolean.
# Example
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return th_sparse_module and isinstance(tensor.type, th_sparse_module.SparseType)
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
# Arguments
tensor: A tensor instance (potentially sparse).
# Returns
A dense tensor.
# Examples
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return th_sparse_module.dense_from_sparse(tensor)
else:
return tensor
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
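    """Context manager that pushes `name` onto the backend's name-scope stack.
    Names of variables created inside the scope are prefixed with the
    stack entries joined by '/' (see `_prepare_name`).
    """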
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def _prepare_name(name, default):
prefix = '/'.join(NAME_SCOPE_STACK)
if name is None:
return prefix + '/' + default
return prefix + '/' + name
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
# Examples
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
_assert_sparse_module()
variable = th_sparse_module.as_sparse_variable(
value, name=_prepare_name(name, 'variable'))
else:
if isinstance(value, (theano.tensor.TensorVariable,
theano.tensor.sharedvar.TensorSharedVariable,
theano.tensor.TensorConstant)):
# Support for RandomStreams().normal(), .uniform().
value = value.eval()
value = np.asarray(value, dtype=dtype)
variable = theano.shared(value=value,
name=_prepare_name(name, 'variable'),
strict=False)
variable._keras_shape = value.shape
variable._uses_learning_phase = False
variable.constraint = constraint
return variable
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
# Arguments
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
# Returns
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = T.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = shape
const._uses_learning_phase = False
return const
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
# Arguments
x: A candidate tensor.
# Returns
A boolean: Whether the argument is a Keras tensor.
# Raises
ValueError: In case `x` is not a symbolic tensor.
# Examples
```python
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> # A variable indirectly created outside of keras is not a Keras tensor.
>>> K.is_keras_tensor(k_var)
False
>>> keras_var = K.variable(np_var)
>>> # A variable created with the keras backend is not a Keras tensor.
>>> K.is_keras_tensor(keras_var)
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> # A placeholder is not a Keras tensor.
>>> K.is_keras_tensor(keras_placeholder)
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> # Any Keras layer output is a Keras tensor.
>>> K.is_keras_tensor(keras_layer_output)
True
```
"""
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (T.TensorVariable,
T.sharedvar.TensorSharedVariable))
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
# Arguments
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
# Returns
Tensor instance (with Keras metadata included).
# Examples
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph._keras_shape
(2, 4, 5)
```
"""
if dtype is None:
dtype = floatx()
if shape is None and ndim is None:
raise ValueError('Specify either a shape or ndim value.')
if shape is not None:
ndim = len(shape)
else:
shape = tuple([None for _ in range(ndim)])
name = _prepare_name(name, 'placeholder')
broadcast = (False,) * ndim
if sparse:
_assert_sparse_module()
x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
else:
x = T.TensorType(dtype, broadcast)(name)
x._keras_shape = shape
x._uses_learning_phase = False
x._theano_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_theano_placeholder') and x._theano_placeholder
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
# Arguments
x: A tensor or variable.
# Returns
A symbolic shape (which is itself a tensor).
# Examples
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
Shape.0
>>> K.shape(inputs)
Shape.0
        # To get an integer shape, evaluate it (or use K.int_shape(x)):
>>> K.shape(kvar).eval()
array([2, 2])
```
"""
return x.shape
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
# Arguments
x: Tensor or variable.
# Returns
A tuple of integers (or None entries).
# Examples
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(inputs)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
if hasattr(x, '_keras_shape'):
return x._keras_shape
else:
return None
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
# Arguments
x: Tensor or variable.
# Returns
Integer (scalar), number of axes.
# Examples
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(inputs)
3
>>> K.ndim(kvar)
2
```
"""
return x.ndim
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
# Arguments
x: Tensor or variable.
# Returns
String, dtype of `x`.
# Examples
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32_ref'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32_ref'
```
"""
return x.dtype
def eval(x):
"""Evaluates the value of a variable.
# Arguments
x: A variable.
# Returns
A Numpy array.
# Examples
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return to_dense(x).eval()
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
# Arguments
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
# Returns
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
# Example
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
return variable(np.zeros(shape), dtype, name)
# Aliases
zeros_symbolic = zeros
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
# Arguments
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
# Returns
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
# Example
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
return variable(np.ones(shape), dtype, name)
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
# Arguments
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
# Returns
A Keras variable, an identity matrix.
# Example
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
if isinstance(size, (list, tuple)):
n, m = size
else:
n, m = size, size
return variable(np.eye(n, m), dtype, name)
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones symbolic variable with the shape of x.
"""
if dtype is None:
dtype = floatx()
return T.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
# Arguments
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
# Returns
A Keras variable with the shape of x filled with zeros.
# Example
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
return T.zeros_like(x, dtype=dtype)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
# Arguments
x: The input tensor.
name: String, name for the variable to create.
# Returns
A tensor of the same shape, type and content.
"""
return x.copy(name=name)
def random_uniform_variable(shape, low, high, dtype=None, name=None):
"""Instantiates a variable with values drawn from a uniform distribution.
# Arguments
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
# Returns
A Keras variable, filled with drawn samples.
# Example
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
return variable(np.random.uniform(low=low, high=high, size=shape),
dtype=dtype, name=name)
def random_normal_variable(shape, mean, scale, dtype=None, name=None):
"""Instantiates a variable with values drawn from a normal distribution.
# Arguments
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
# Returns
A Keras variable, filled with drawn samples.
# Example
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
    return variable(np.random.normal(loc=mean, scale=scale, size=shape),
dtype=dtype, name=name)
def count_params(x):
"""Returns the static number of elements in a Keras variable or tensor.
# Arguments
x: Keras variable or tensor.
# Returns
Integer, the number of elements in `x`, i.e., the product of the
array's static dimensions.
# Example
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
    # We don't want these compilations to show up in the Theano profiler.
f = theano.function([], x.shape, profile=False)
return np.prod(f())
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
# Arguments
x: Keras tensor (or variable).
        dtype: String, either `'float16'`, `'float32'`, or `'float64'`.
# Returns
Keras tensor with dtype `dtype`.
# Example
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
/placeholder
        # Casting does not happen in-place; `input` is unchanged:
        >>> K.cast(input, dtype='float16')
        Elemwise{Cast{float16}}.0
        >>> input
        /placeholder
        # You need to assign the result:
        >>> input = K.cast(input, dtype='float16')
        >>> input
        Elemwise{Cast{float16}}.0
```
"""
return T.cast(x, dtype)
def ceil(x, name=None):
"""Ceils the value of `x`.
# Arguments
x: A `Variable`.
name: Name of the new (ceiled) `Variable`.
# Returns
`Variable` `x` ceiled.
"""
return T.ceil(x)
def floor(x):
"""Floors the value of `x`.
# Arguments
x: A `Variable`.
name: Name of the new (floored) `Variable`.
# Returns
`Variable` `x` floored.
"""
return T.floor(x)
# UPDATES OPS
def update(x, new_x):
"""Update the value of `x` to `new_x`.
# Arguments
x: A `Variable`.
new_x: A tensor of same shape as `x`.
# Returns
The variable `x` updated.
"""
return (x, new_x)
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
# Arguments
x: A `Variable`.
increment: A tensor of same shape as `x`.
# Returns
The variable `x` updated.
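    # Example (a minimal sketch of the update mechanism)
    ```python
        >>> counter = K.variable(0.)
        >>> f = K.function([], [], updates=[K.update_add(counter, 1.)])
        >>> f([])
        >>> K.get_value(counter)  # -> 1.0
    ```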
"""
return (x, x + increment)
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
# Arguments
x: A `Variable`.
decrement: A tensor of same shape as `x`.
# Returns
The variable `x` updated.
"""
return (x, x - decrement)
def moving_average_update(variable, value, momentum):
"""Compute the moving average of a variable.
# Arguments
        variable: A `Variable` to update.
        value: A tensor with the same shape as `variable`.
momentum: The moving average momentum.
# Returns
An operation to update the variable.
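    # Example (a minimal sketch)
    ```python
        >>> avg = K.variable(0.)
        >>> upd = K.moving_average_update(avg, K.constant(1.), momentum=0.9)
        >>> f = K.function([], [], updates=[upd])
        >>> f([])
        >>> K.get_value(avg)  # -> ~0.1 (= 0. * 0.9 + 1. * 0.1)
    ```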
"""
return (variable, variable * momentum + value * (1. - momentum))
# LINEAR ALGEBRA
"""
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
"""
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply a nD tensor
with a nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A tensor, dot product of `x` and `y`.
# Examples
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
dot.0
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
Reshape{3}.0
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if is_sparse(x):
out = th_sparse_module.basic.structured_dot(x, y)
else:
out = T.dot(x, y)
if hasattr(x, '_keras_shape') and hasattr(y, '_keras_shape'):
x_shape = list(x._keras_shape)
y_shape = list(y._keras_shape)
if len(x_shape) > 0:
x_shape.pop()
if len(y_shape) == 1:
y_shape.pop()
elif len(y_shape) > 1:
y_shape.pop(-2)
out._keras_shape = tuple(x_shape + y_shape)
return out
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
    batch_dot results in a tensor with fewer dimensions than the input.
If the number of dimensions is reduced to 1, we use `expand_dims` to
make sure that ndim is at least 2.
# Arguments
x, y: tensors with ndim >= 2
axes: list (or single) int with target dimensions
# Returns
A tensor with shape equal to the concatenation of x's shape
(less the dimension that was summed over) and y's shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to (batch_size, 1).
# Examples
        Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`.
        `batch_dot(x, y, axes=1) = [[17], [53]]`, the main diagonal
        of `x.dot(y.T)`, computed without ever forming the off-diagonal
        elements.
Shape inference:
Let x's shape be (100, 20) and y's shape be (100, 30, 20).
If dot_axes is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in x's shape and y's shape:
x.shape[0] : 100 : append to output shape
x.shape[1] : 20 : do not append to output shape,
dimension 1 of x has been summed over. (dot_axes[0] = 1)
y.shape[0] : 100 : do not append to output shape,
always ignore first dimension of y
y.shape[1] : 30 : append to output shape
y.shape[2] : 20 : do not append to output shape,
dimension 2 of y has been summed over. (dot_axes[1] = 2)
output_shape = (100, 30)
"""
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
if y.ndim == 2:
axes = [x.ndim - 1, y.ndim - 1]
else:
axes = [x.ndim - 1, y.ndim - 2]
if py_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if isinstance(axes, tuple):
axes = list(axes)
if 0 in axes:
        raise ValueError('Cannot perform batch_dot over axis 0. '
                         'If your inputs are not batched, '
                         'add a dummy batch dimension to your '
                         'inputs using K.expand_dims(x, 0).')
out = T.batched_tensordot(x, y, axes=axes)
if ndim(out) == 1:
out = expand_dims(out, 1)
if hasattr(x, '_keras_shape') and hasattr(y, '_keras_shape'):
shape = []
for axis in range(len(x._keras_shape)):
if axis != axes[0]:
shape.append(x._keras_shape[axis])
for axis in range(1, len(y._keras_shape)):
if axis != axes[1]:
shape.append(y._keras_shape[axis])
if len(shape) == 1:
shape.append(1) # Expand dims if ndim == 1
out._keras_shape = tuple(shape)
return out
def dot_product(x, kernel):
"""Wrapper for dot product operation, in order to be compatible with both Theano and Tensorflow.
# Arguments:
x: input
kernel: weights
# Returns
A tensor.
"""
return dot(x, kernel)
def transpose(x):
"""Transposes a tensor and returns it.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
# Examples
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> inputs = K.placeholder((2, 3))
>>> inputs
>>> input_transposed = K.transpose(inputs)
>>> input_transposed
```
"""
y = T.transpose(x)
if hasattr(x, '_keras_shape'):
y._keras_shape = tuple(reversed(x._keras_shape))
return y
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
# Arguments
reference: A tensor.
indices: An integer tensor of indices.
# Returns
A tensor of same type as `reference`.
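    # Example
    ```python
        >>> var = K.variable([[1., 2.], [3., 4.], [5., 6.]])
        >>> idx = K.variable([0, 2], dtype='int32')
        >>> K.eval(K.gather(var, idx))
        array([[ 1.,  2.],
               [ 5.,  6.]], dtype=float32)
    ```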
"""
y = reference[indices]
if hasattr(reference, '_keras_shape') and hasattr(indices, '_keras_shape'):
y._keras_shape = indices._keras_shape + reference._keras_shape[1:]
return y
def fft(x, norm=None):
"""Fast fourier transform:
Compute an n-point fft of frames along given axis.
"""
return rfft(x, norm=norm)
def ifft(x, norm=None, is_odd=False):
"""Inverse fast fourier transform
"""
return irfft(x, norm=norm, is_odd=is_odd)
def real(x):
"""Gets the real part of a complex tensor
"""
return T.real(x)
# ELEMENT-WISE OPERATIONS
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with maximum values of `x`.
"""
return T.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
        A tensor with minimum values of `x`.
"""
return T.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with sum of `x`.
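    # Example
    ```python
        >>> x = K.variable([[1., 2.], [3., 4.]])
        >>> K.eval(K.sum(x, axis=1))  # -> [ 3.,  7.]
    ```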
"""
return T.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with the product of elements of `x`.
"""
return T.prod(x, axis=axis, keepdims=keepdims)
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
# Returns
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return T.extra_ops.cumsum(x, axis=axis)
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the product.
# Returns
A tensor of the cumulative product of values of `x` along `axis`.
"""
return T.extra_ops.cumprod(x, axis=axis)
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
        axis: An integer or list of integers. Axes over which to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
# Returns
A tensor with the mean of elements of `x`.
"""
dtype = None
# bool is available since theano v0.9dev
if 'int' in x.dtype or x.dtype == 'bool':
dtype = floatx()
return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype)
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with the standard deviation of elements of `x`.
"""
return T.std(x, axis=axis, keepdims=keepdims)
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with the variance of elements of `x`.
"""
return T.var(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
# Arguments
x: Tensor or variable.
axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.
# Returns
A uint8 tensor (0s and 1s).
"""
y = T.any(x, axis=axis, keepdims=keepdims)
y = _set_keras_shape_for_reduction(x, y, axis, keepdims)
return y
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
# Arguments
x: Tensor or variable.
axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.
# Returns
A uint8 tensor (0s and 1s).
"""
y = T.all(x, axis=axis, keepdims=keepdims)
y = _set_keras_shape_for_reduction(x, y, axis, keepdims)
return y
def _set_keras_shape_for_reduction(x, y, axis, keepdims):
if hasattr(x, '_keras_shape'):
if axis is None:
y._keras_shape = (1,) * len(x._keras_shape) if keepdims else (1,)
else:
if isinstance(axis, int):
axis_list = [axis]
else:
axis_list = list(set(int(a) for a in axis))
keras_shape_list = list(x._keras_shape)
if keepdims:
for a in axis_list:
keras_shape_list[a] = 1
else:
for a in axis_list[::-1]:
keras_shape_list.pop(a)
if not keras_shape_list:
keras_shape_list = (1,)
y._keras_shape = tuple(keras_shape_list)
return y
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
# Arguments
x: Tensor or variable.
axis: axis along which to perform the reduction.
# Returns
A tensor.
"""
return T.argmax(x, axis=axis, keepdims=False)
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
# Arguments
x: Tensor or variable.
axis: axis along which to perform the reduction.
# Returns
A tensor.
"""
return T.argmin(x, axis=axis, keepdims=False)
def square(x):
"""Element-wise square.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.sqr(x)
def abs(x):
"""Element-wise absolute value.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.abs_(x)
def sqrt(x):
"""Element-wise square root.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
x = T.clip(x, 0., np.inf)
return T.sqrt(x)
def exp(x):
"""Element-wise exponential.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.exp(x)
def log(x):
"""Element-wise log.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.log(x)
def log2(x):
"""log in base 2
"""
return T.log2(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
# Returns
The reduced tensor.
"""
# Theano has a built-in optimization for logsumexp
# (see https://github.com/Theano/Theano/pull/4736)
# so we can just write the expression directly:
return T.log(T.sum(T.exp(x), axis=axis, keepdims=keepdims))
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.round(x, mode='half_to_even')
def sign(x):
"""Element-wise sign.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.sgn(x)
def pow(x, a):
"""Element-wise exponentiation.
# Arguments
x: Tensor or variable.
a: Python integer.
# Returns
A tensor.
"""
return T.pow(x, a)
def clip(x, min_value, max_value):
"""Element-wise value clipping.
# Arguments
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
# Returns
A tensor.
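    # Example
    ```python
        >>> x = K.variable([-2., 0.5, 3.])
        >>> K.eval(K.clip(x, 0., 1.))  # -> [ 0. ,  0.5,  1. ]
    ```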
"""
if (isinstance(min_value, (int, float)) and
isinstance(max_value, (int, float))):
if max_value < min_value:
max_value = min_value
if min_value is None:
min_value = -np.inf
if max_value is None:
max_value = np.inf
return T.clip(x, min_value, max_value)
def equal(x, y):
"""Element-wise equality between two tensors.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
return T.eq(x, y)
def not_equal(x, y):
"""Element-wise inequality between two tensors.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
z = T.neq(x, y)
if hasattr(x, '_keras_shape'):
z._keras_shape = x._keras_shape
elif hasattr(y, '_keras_shape'):
z._keras_shape = y._keras_shape
return z
def greater(x, y):
"""Element-wise truth value of (x > y).
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
return T.gt(x, y)
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
return T.ge(x, y)
def less(x, y):
"""Element-wise truth value of (x < y).
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
return T.lt(x, y)
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A bool tensor.
"""
return T.le(x, y)
def maximum(x, y):
"""Element-wise maximum of two tensors.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A tensor.
"""
return T.maximum(x, y)
def minimum(x, y):
"""Element-wise minimum of two tensors.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
A tensor.
"""
return T.minimum(x, y)
def sin(x):
"""Computes sin of x element-wise.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.sin(x)
def cos(x):
"""Computes cos of x element-wise.
# Arguments
x: Tensor or variable.
# Returns
A tensor.
"""
return T.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
"""
# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
if not hasattr(T.nnet.bn, 'batch_normalization_train'):
return _old_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon)
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)
normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
x, gamma, beta, reduction_axes, epsilon)
return normed, mean, T.inv(stdinv ** 2)
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Apply batch normalization on x given mean, var, beta and gamma.
"""
# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
if not hasattr(T.nnet.bn, 'batch_normalization_test'):
return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)
if gamma is None:
gamma = ones_like(var)
if beta is None:
beta = zeros_like(mean)
if mean.ndim == 1:
# based on TensorFlow's default: normalize along rightmost dimension
reduction_axes = list(range(x.ndim - 1))
else:
reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]
return T.nnet.bn.batch_normalization_test(
x, gamma, beta, mean, var, reduction_axes, epsilon)
# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
def _old_normalize_batch_in_training(x, gamma, beta, reduction_axes,
epsilon=1e-3): # pragma: no cover
"""Computes mean and std for batch then apply batch_normalization on batch.
"""
if gamma is None:
gamma = ones_like(x)
if beta is None:
beta = zeros_like(x)
dev = theano.config.device
use_cudnn = (ndim(x) < 5 and
reduction_axes == [0, 2, 3] and
(dev.startswith('cuda') or dev.startswith('gpu')))
if use_cudnn:
broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
try:
trained = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
normed, mean, stdinv = trained
normed = theano.tensor.as_tensor_variable(normed)
mean = theano.tensor.as_tensor_variable(mean)
stdinv = theano.tensor.as_tensor_variable(stdinv)
var = T.inv(stdinv ** 2)
return normed, T.flatten(mean), T.flatten(var)
except AttributeError:
pass
var = x.var(reduction_axes)
mean = x.mean(reduction_axes)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(x.shape[axis])
target_shape = T.stack(*target_shape)
broadcast_mean = T.reshape(mean, target_shape)
broadcast_var = T.reshape(var, target_shape)
broadcast_beta = T.reshape(beta, target_shape)
broadcast_gamma = T.reshape(gamma, target_shape)
    # Pass `epsilon` by keyword so it does not bind to the `axis` parameter.
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon=epsilon)
return normed, mean, var
# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
def _old_batch_normalization(x, mean, var, beta, gamma,
epsilon=1e-3): # pragma: no cover
"""Apply batch normalization on x given mean, var, beta and gamma.
"""
if gamma is None:
gamma = ones_like(var)
if beta is None:
beta = zeros_like(mean)
if mean.ndim == 1 and x.ndim > 1:
# in TensorFlow's batch_normalization, if the parameters are vectors
# the batch normalization should be applied along the rightmost axis.
# Theano expects the parameters to always have x.ndim dimensions.
shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
mean = mean.dimshuffle(shuffle_pattern)
var = var.dimshuffle(shuffle_pattern)
beta = beta.dimshuffle(shuffle_pattern)
gamma = gamma.dimshuffle(shuffle_pattern)
ndim = x.ndim
dev = theano.config.device
use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
if use_cudnn:
try:
axis = mean.broadcastable.index(False)
if axis != 1:
shuffle_pattern = list(range(ndim))
shuffle_pattern[1] = shuffle_pattern[axis]
shuffle_pattern[axis] = 1
result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
x.dimshuffle(shuffle_pattern),
gamma.dimshuffle(shuffle_pattern),
beta.dimshuffle(shuffle_pattern),
mean.dimshuffle(shuffle_pattern),
var.dimshuffle(shuffle_pattern),
'spatial', epsilon).dimshuffle(shuffle_pattern)
else:
result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
x, gamma, beta, mean, var, 'spatial', epsilon)
return theano.tensor.as_tensor_variable(result)
except AttributeError:
pass
except ValueError:
pass
return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
mode='high_mem')
# SHAPE OPERATIONS
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
# Arguments
tensors: list of tensors to concatenate.
axis: concatenation axis.
# Returns
A tensor.
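    # Example
    ```python
        >>> a = K.placeholder(shape=(None, 3))
        >>> b = K.placeholder(shape=(None, 5))
        >>> K.int_shape(K.concatenate([a, b], axis=-1))
        (None, 8)
    ```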
"""
if py_all([is_sparse(x) for x in tensors]):
axis = axis % ndim(tensors[0])
if axis == 0:
output = th_sparse_module.basic.vstack(tensors, format='csr')
elif axis == 1:
output = th_sparse_module.basic.hstack(tensors, format='csr')
else:
raise ValueError('Invalid concat axis for sparse matrix:', axis)
else:
output = T.concatenate([to_dense(x) for x in tensors], axis=axis)
if py_all([hasattr(tensor, '_keras_shape') for tensor in tensors]):
input_shapes = [tensor._keras_shape for tensor in tensors]
output_shape = list(input_shapes[0])
for shape in input_shapes[1:]:
if output_shape[axis] is None or shape[axis] is None:
output_shape[axis] = None
break
output_shape[axis] += shape[axis]
output._keras_shape = tuple(output_shape)
return output
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
# Arguments
x: Tensor or variable.
shape: Target shape tuple.
# Returns
A tensor.
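    # Example
    ```python
        >>> x = K.placeholder(shape=(None, 4, 5))
        >>> K.int_shape(K.reshape(x, (-1, 20)))
        (None, 20)
    ```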
"""
y = T.reshape(x, shape)
    shape = tuple(dim if isinstance(dim, int) and dim > 0 else None for dim in shape)
y._keras_shape = shape
if hasattr(x, '_uses_learning_phase'):
y._uses_learning_phase = x._uses_learning_phase
else:
y._uses_learning_phase = False
return y
def permute_dimensions(x, pattern):
"""Transpose dimensions.
pattern should be a tuple or list of
dimension indices, e.g. [0, 2, 1].
"""
pattern = tuple(pattern)
y = x.dimshuffle(pattern)
if hasattr(x, '_keras_shape'):
y._keras_shape = tuple(np.asarray(x._keras_shape)[list(pattern)])
return y
def repeat_elements(x, rep, axis):
"""Repeat the elements of a tensor along an axis, like np.repeat.
If x has shape (s1, s2, s3) and axis=1, the output
will have shape (s1, s2 * rep, s3).
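    # Example
    ```python
        >>> x = K.variable([[1., 2.], [3., 4.]])
        >>> K.eval(K.repeat_elements(x, 2, axis=1))
        array([[ 1.,  1.,  2.,  2.],
               [ 3.,  3.,  4.,  4.]], dtype=float32)
    ```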
"""
y = T.repeat(x, rep, axis=axis)
if hasattr(x, '_keras_shape'):
y._keras_shape = list(x._keras_shape)
repeat_dim = x._keras_shape[axis]
if repeat_dim is not None:
y._keras_shape[axis] = repeat_dim * rep
y._keras_shape = tuple(y._keras_shape)
return y
def resize_images(x,
height_factor,
width_factor,
data_format,
interpolation='nearest'):
"""Resize the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'channels_first' data_format)
- [batch, height, width, channels] (for 'channels_last' data_format)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
"""
if data_format == 'channels_first':
axis_1 = 2
axis_2 = 3
elif data_format == 'channels_last':
axis_1 = 1
axis_2 = 2
else:
raise ValueError('Invalid data_format:', data_format)
if interpolation == 'nearest':
output = repeat_elements(x, height_factor, axis=axis_1)
output = repeat_elements(output, width_factor, axis=axis_2)
elif interpolation == 'bilinear':
if not (height_factor == width_factor == 2):
raise NotImplementedError(
                'Bilinear upscaling with factors other than (2, 2) '
'is not available when using the Theano backend.')
if data_format == 'channels_last':
output = permute_dimensions(x, [0, 3, 1, 2])
else:
output = x
output = T.nnet.abstract_conv.bilinear_upsampling(output,
ratio=height_factor)
if data_format == 'channels_last':
output = permute_dimensions(output, [0, 2, 3, 1])
if hasattr(x, '_keras_shape'):
output._keras_shape = list(x._keras_shape)
output._keras_shape[axis_1] *= height_factor
output._keras_shape[axis_2] *= width_factor
output._keras_shape = tuple(output._keras_shape)
else:
raise ValueError('interpolation should be one of "nearest" or "bilinear".')
return output
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resize the volume contained in a 5D tensor of shape
- [batch, channels, depth, height, width] (for 'channels_first' data_format)
- [batch, depth, height, width, channels] (for 'channels_last' data_format)
by a factor of (depth_factor, height_factor, width_factor).
Both factors should be positive integers.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format:', data_format)
def repeat(x, n):
"""Repeat a 2D tensor.
If x has shape (samples, dim) and n=2,
the output will have shape (samples, 2, dim).
"""
assert x.ndim == 2
y = x.dimshuffle((0, 'x', 1))
y = T.extra_ops.repeat(y, n, axis=1)
if hasattr(x, '_keras_shape'):
shape = list(x._keras_shape)
shape.insert(1, n)
y._keras_shape = tuple(shape)
return y
def repeatRdim(x, n, axis=1):
"""Repeats an RD tensor.
If x has shape (samples, dim1, dim2) and n=2 and axis=1,
the output will have shape (samples, 2, dim1, dim2).
"""
    # Wrap in list() so the concatenation also works under Python 3.
    new_dim = list(range(axis)) + ['x'] + list(range(axis, x.ndim))
x = x.dimshuffle(tuple(new_dim))
return T.extra_ops.repeat(x, n, axis=axis)
def set_subtensor(x, v):
"""Returns x with the given subtensor overwritten by v.
# Arguments
x: Tensor or variable.
v: Tensor or variable.
# Returns
The tensor `x` overwritten by `v`.
"""
return T.set_subtensor(x, v)
def inc_subtensor(x, v):
"""Returns x with the given subtensor incremented by v.
# Arguments
x: Tensor or variable.
v: Tensor or variable.
# Returns
The tensor `x` incremented by `v`.
"""
return T.inc_subtensor(x, v)
def equal_dimensions(x, y):
"""Checks if `x` has the same dimensions than `y`.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
        `y` if the spatial dimensions match; otherwise `y` padded via `funequal`.
"""
y_shape = int_shape(y)
x_shape = int_shape(x)
fun_comp = x_shape[2] == y_shape[2] and x_shape[3] == y_shape[3]
return ifelse(fun_comp, y, funequal(x, y))
def funequal(x, y):
"""Utility for `equal_dimensions`.
# Arguments
x: Tensor or variable.
y: Tensor or variable.
# Returns
        A tensor.
"""
new_y = zeros([1, 1, 1, 1])
new_y = set_subtensor(new_y[:, :, :-1, :-1], y)
return new_y
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1-D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument.
The default type of the returned tensor is 'int32' to
match TensorFlow's default.
# Arguments
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
# Returns
An integer tensor.
"""
return T.arange(start, stop=stop, step=step, dtype=dtype)
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
# Arguments
x: A tensor or variable
        n: An integer or a list of integers. The length must equal the number of
dimensions in `x`.
# Returns
A tiled tensor.
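    # Example
    ```python
        >>> x = K.variable(np.ones((2, 2)))
        >>> K.int_shape(K.tile(x, (2, 3)))
        (4, 6)
    ```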
"""
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
y = T.tile(x, n)
shape = int_shape(x)
if shape is None:
return y
elif len(n) < len(shape): # Padding the axis
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
elif len(n) != len(shape):
raise NotImplementedError
y._keras_shape = tuple([None if a is None else a * b
for (a, b) in zip(shape, n)])
return y
def flatten(x):
"""Flatten a tensor.
# Arguments
x: A tensor or variable.
# Returns
        A tensor, reshaped into 1-D.
"""
y = T.flatten(x)
if hasattr(x, '_keras_shape'):
if None in x._keras_shape:
y._keras_shape = (None,)
else:
y._keras_shape = (np.prod(x._keras_shape), )
return y
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
    In other words, it flattens each data sample of a batch.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
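    # Example
    ```python
        >>> x = K.placeholder(shape=(None, 4, 5))
        >>> K.int_shape(K.batch_flatten(x))
        (None, 20)
    ```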
"""
y = T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
if hasattr(x, '_keras_shape'):
if None in x._keras_shape[1:]:
y._keras_shape = (x._keras_shape[0], None)
else:
y._keras_shape = (x._keras_shape[0], np.prod(x._keras_shape[1:]))
return y
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
# Arguments
x: A tensor or variable.
axis: Position where to add a new axis.
# Returns
A tensor with expanded dimensions.
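    # Example
    ```python
        >>> x = K.placeholder(shape=(2, 3))
        >>> K.int_shape(K.expand_dims(x, axis=1))
        (2, 1, 3)
    ```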
"""
pattern = [i for i in range(x.type.ndim)]
if axis < 0:
if x.type.ndim == 0:
axis = 0
else:
axis = axis % x.type.ndim + 1
pattern.insert(axis, 'x')
y = x.dimshuffle(pattern)
if hasattr(x, '_keras_shape'):
shape = list(x._keras_shape)
shape.insert(axis, 1)
y._keras_shape = tuple(shape)
return y
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
# Arguments
x: A tensor or variable.
axis: Axis to drop.
# Returns
A tensor with the same data as `x` but reduced dimensions.
"""
shape = list(x.shape)
shape.pop(axis)
y = T.reshape(x, tuple(shape))
if hasattr(x, '_keras_shape'):
kshape = list(x._keras_shape)
kshape.pop(axis)
y._keras_shape = tuple(kshape)
return y
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
# Arguments
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
# Returns
A padded 3D tensor.
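    # Example
    ```python
        >>> x = K.placeholder(shape=(None, 5, 8))
        >>> K.int_shape(K.temporal_padding(x, (1, 2)))
        (None, 8, 8)
    ```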
"""
assert len(padding) == 2
input_shape = x.shape
output_shape = (input_shape[0],
input_shape[1] + padding[0] + padding[1],
input_shape[2])
output = T.zeros(output_shape)
result = T.set_subtensor(output[:, padding[0]:x.shape[1] + padding[0], :], x)
if hasattr(x, '_keras_shape'):
result._keras_shape = (x._keras_shape[0],
x._keras_shape[1] + py_sum(padding),
x._keras_shape[2])
return result
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
# Arguments
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A padded 4D tensor.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
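    # Example
    ```python
        >>> x = K.placeholder(shape=(None, 5, 5, 3))
        >>> padded = K.spatial_2d_padding(x, ((1, 1), (2, 2)), 'channels_last')
        >>> K.int_shape(padded)
        (None, 7, 9, 3)
    ```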
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
top_pad, bottom_pad = padding[0]
left_pad, right_pad = padding[1]
data_format = normalize_data_format(data_format)
input_shape = x.shape
if data_format == 'channels_first':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + top_pad + bottom_pad,
input_shape[3] + left_pad + right_pad)
output = T.zeros(output_shape)
indices = (py_slice(None),
py_slice(None),
py_slice(top_pad, input_shape[2] + top_pad),
py_slice(left_pad, input_shape[3] + left_pad))
else:
output_shape = (input_shape[0],
input_shape[1] + top_pad + bottom_pad,
input_shape[2] + left_pad + right_pad,
input_shape[3])
output = T.zeros(output_shape)
indices = (py_slice(None),
py_slice(top_pad, input_shape[1] + top_pad),
py_slice(left_pad, input_shape[2] + left_pad),
py_slice(None))
y = T.set_subtensor(output[indices], x)
if hasattr(x, '_keras_shape'):
if data_format == 'channels_first':
if x._keras_shape[2] is not None:
h = x._keras_shape[2] + top_pad + bottom_pad
else:
h = None
if x._keras_shape[3] is not None:
w = x._keras_shape[3] + left_pad + right_pad
else:
w = None
output_keras_shape = (x._keras_shape[0],
x._keras_shape[1],
h,
w)
else:
if x._keras_shape[1] is not None:
h = x._keras_shape[1] + top_pad + bottom_pad
else:
h = None
if x._keras_shape[2] is not None:
w = x._keras_shape[2] + left_pad + right_pad
else:
w = None
output_keras_shape = (x._keras_shape[0],
h,
w,
x._keras_shape[3])
y._keras_shape = output_keras_shape
return y
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
# Arguments
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A padded 5D tensor.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
"""
data_format = normalize_data_format(data_format)
input_shape = x.shape
if data_format == 'channels_first':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + padding[0][0] + padding[0][1],
input_shape[3] + padding[1][0] + padding[1][1],
input_shape[4] + padding[2][0] + padding[2][1])
output = T.zeros(output_shape)
indices = (py_slice(None),
py_slice(None),
py_slice(padding[0][0], input_shape[2] + padding[0][0]),
py_slice(padding[1][0], input_shape[3] + padding[1][0]),
py_slice(padding[2][0], input_shape[4] + padding[2][0]))
else:
output_shape = (input_shape[0],
input_shape[1] + padding[0][0] + padding[0][1],
input_shape[2] + padding[1][0] + padding[1][1],
input_shape[3] + padding[2][0] + padding[2][1],
input_shape[4])
output = T.zeros(output_shape)
indices = (py_slice(None),
py_slice(padding[0][0], input_shape[1] + padding[0][0]),
py_slice(padding[1][0], input_shape[2] + padding[1][0]),
py_slice(padding[2][0], input_shape[3] + padding[2][0]),
py_slice(None))
y = T.set_subtensor(output[indices], x)
if hasattr(x, '_keras_shape'):
if data_format == 'channels_first':
if x._keras_shape[2] is not None:
h = x._keras_shape[2] + padding[0][0] + padding[0][1]
else:
h = None
if x._keras_shape[3] is not None:
w = x._keras_shape[3] + padding[1][0] + padding[1][1]
else:
w = None
if x._keras_shape[4] is not None:
d = x._keras_shape[4] + padding[2][0] + padding[2][1]
else:
d = None
output_keras_shape = (x._keras_shape[0],
x._keras_shape[1],
h,
w,
d)
else:
if x._keras_shape[1] is not None:
h = x._keras_shape[1] + padding[0][0] + padding[0][1]
else:
h = None
if x._keras_shape[2] is not None:
w = x._keras_shape[2] + padding[1][0] + padding[1][1]
else:
w = None
if x._keras_shape[3] is not None:
d = x._keras_shape[3] + padding[2][0] + padding[2][1]
else:
d = None
output_keras_shape = (x._keras_shape[0],
h,
w,
d,
x._keras_shape[4])
y._keras_shape = output_keras_shape
return y
def tril(x):
""" Computes a [batch] square lower triangular matrix.
# Arguments
x: Tensor or variable.
# Returns
        The lower triangle of `x`, with entries above the diagonal zeroed.
"""
return T.tril(x)
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
# Arguments
x: List of tensors.
axis: Axis along which to perform stacking.
# Returns
A tensor.
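    # Example
    ```python
        >>> a = K.variable([1., 2.])
        >>> b = K.variable([3., 4.])
        >>> K.eval(K.stack([a, b], axis=0))  # -> [[ 1.,  2.], [ 3.,  4.]]
    ```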
"""
return T.stack(x, axis=axis)
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
# Arguments
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
# Returns
(n + 1)D one hot representation of the input
with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`
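    # Example
    ```python
        >>> labels = K.variable([0, 2], dtype='int32')
        >>> K.eval(K.one_hot(labels, num_classes=3))
        array([[ 1.,  0.,  0.],
               [ 0.,  0.,  1.]], dtype=float32)
    ```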
"""
input_shape = tuple((indices.shape[i] for i in range(indices.ndim)))
indices = T.flatten(indices)
oh = T.extra_ops.to_one_hot(indices, num_classes)
oh = T.reshape(oh, input_shape + (num_classes,))
return oh
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
# Arguments
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
# Returns
A tensor.
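    # Example
    ```python
        >>> x = K.variable([1., 2., 3.])
        >>> K.eval(K.reverse(x, 0))  # -> [ 3.,  2.,  1.]
    ```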
"""
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, tuple):
axes = list(axes)
for i in range(len(axes)):
if axes[i] == -1:
axes[i] = x.ndim - 1
slices = []
for i in range(x.ndim):
if i in axes:
slices.append(py_slice(None, None, -1))
else:
slices.append(py_slice(None, None, None))
return x[slices]
def slice(x, start, size):
    """Extracts the slice `x[start[d]:start[d] + size[d]]` along each dimension `d`.
    `start` and `size` must each have one entry per dimension of `x`.
    """
    if not (len(int_shape(x)) == len(start) == len(size)):
        raise ValueError('`start` and `size` must each have one entry '
                         'per dimension of `x`.')
out = x[tuple([py_slice(i, i + j) for (i, j) in zip(start, size)])]
out._keras_shape = tuple(size)
return out
def pattern_broadcast(x, broadcastable):
"""Make the input adopt a specific broadcasting pattern.
"""
return T.patternbroadcast(x, broadcastable)
# VALUE MANIPULATION
def get_value(x):
"""Returns the value of a variable.
# Arguments
x: input variable.
# Returns
A Numpy array.
"""
if isinstance(x, np.ndarray):
return x
if not hasattr(x, 'get_value'):
raise TypeError('`get_value` can only be called on a variable. '
'If you have an expression instead, use `eval()`.')
return x.get_value()
def batch_get_value(xs):
"""Returns the value of more than one tensor variable.
# Arguments
ops: list of ops to run.
# Returns
A list of Numpy arrays.
"""
return [get_value(x) for x in xs]
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
# Arguments
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
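    # Example
    ```python
        >>> v = K.variable([1., 2.])
        >>> K.set_value(v, np.array([3., 4.]))
        >>> K.get_value(v)  # -> array([ 3.,  4.], dtype=float32)
    ```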
"""
x.set_value(np.asarray(value, dtype=x.dtype))
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
# Arguments
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
for x, value in tuples:
x.set_value(np.asarray(value, dtype=x.dtype))
def get_variable_shape(x):
"""Returns the shape of a variable.
# Arguments
x: A variable.
# Returns
A tuple of integers.
"""
return x.get_value(borrow=True, return_internal_type=True).shape
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
# Example
```python
>>> x = K.print_tensor(x, message="x is: ")
```
# Arguments
x: Tensor to print.
message: Message to print jointly with the tensor.
# Returns
The same tensor `x`, unchanged.
"""
p_op = Print(message)
return p_op(x)
# GRAPH MANIPULATION
class Function(object):
"""Wrapper around Theano Function
"""
def __init__(self, inputs, outputs, updates=[], name=None, **kwargs):
unique_variables_to_update = {}
for v, nv in updates:
if v not in unique_variables_to_update:
unique_variables_to_update[v] = nv
updates = unique_variables_to_update.items()
self.function = theano.function(inputs, outputs, updates=updates,
allow_input_downcast=True,
on_unused_input='ignore',
name=name,
**kwargs)
self.name = name
def __call__(self, inputs):
assert isinstance(inputs, (list, tuple))
return self.function(*inputs)
def _raise_invalid_arg(key):
msg = 'Invalid argument "%s" passed to K.function with Theano backend' % key
raise ValueError(msg)
def function(inputs, outputs, updates=[], **kwargs):
"""Return a :class:`callable object <theano.compile.function_module.Function>`
that will calculate `outputs` from `inputs`.
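    # Example (a minimal sketch)
    ```python
        >>> x = K.placeholder(shape=(2,))
        >>> f = K.function([x], [x * 2])
        >>> f([np.array([1., 2.])])  # -> [array([ 2.,  4.], dtype=float32)]
    ```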
"""
if len(kwargs) > 0:
for key in kwargs.keys():
if not has_arg(theano.function, key, True):
_raise_invalid_arg(key)
return Function(inputs, outputs, updates=updates, **kwargs)
def gradients(loss, variables):
"""Return symbolic gradients of one cost with respect to one or more variables.
"""
return T.grad(loss, variables)
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
# Arguments
variables: tensor or list of tensors to consider constant with respect
to any other variable.
# Returns
A single tensor or a list of tensors (depending on the passed argument)
that has constant gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
        return list(map(theano.gradient.disconnected_grad, variables))
else:
return theano.gradient.disconnected_grad(variables)
# CONTROL FLOW
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None, pos_extra_outputs_states=None):
"""Iterates over the time dimension of a tensor.
# Arguments
step_function:
Parameters:
inputs: Tensor with shape (samples, ...) (no time dimension),
representing input for the batch of samples at a certain
time step.
states: List of tensors.
Returns:
outputs: Tensor with shape (samples, ...) (no time dimension),
new_states: List of tensors, same length and shapes
as 'states'.
inputs: Tensor of temporal data of shape (samples, time, ...)
(at least 3D).
        initial_states: List of tensors, each of shape (samples, ...)
            (no time dimension), containing the initial values
            for the states used in the step function.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape (samples, time),
with a zero for every element that is masked.
constants: A list of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic loop
(`while_loop` or `scan` depending on backend).
input_length: Static number of timesteps in the input.
Must be specified if using `unroll`.
pos_extra_outputs_states: Positions that extra_output_states will have.
# Returns
A tuple (last_output, outputs, new_states).
last_output: The latest output of the rnn, of shape `(samples, ...)`
outputs: Tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: List of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
"""
ndim = inputs.ndim
assert ndim >= 3, 'Input should be at least 3D.'
if unroll:
if input_length is None:
raise ValueError('When specifying `unroll=True`, '
'an `input_length` '
'must be provided to `rnn`.')
axes = [1, 0] + list(range(2, ndim))
inputs = inputs.dimshuffle(axes)
if constants is None:
constants = []
global uses_learning_phase
uses_learning_phase = False
if mask is not None:
if mask.ndim != 2:
raise ValueError(
'mask should have `shape=(samples, time)`, '
'got {}'.format(mask.shape))
mask = mask.dimshuffle([1, 0])
def get_matching_mask(mask_t, ref_tensor_t):
            # broadcast the mask to the same shape as the reference
            # tensor so it can gate it element-wise in `T.switch`
ndim = ref_tensor_t.ndim
for _ in range(ndim - 1):
mask_t = expand_dims(mask_t)
add_shape = ref_tensor_t.shape[1:]
reps = T.concatenate([[1], add_shape], 0)
return T.tile(mask_t, reps, ndim=ndim)
if unroll:
indices = list(range(input_length))
if go_backwards:
indices = indices[::-1]
successive_outputs = []
successive_states = []
states = initial_states
for i in indices:
output, new_states = step_function(inputs[i], states + constants)
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if len(successive_outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output_mask = get_matching_mask(mask[i], output)
output = T.switch(output_mask, output, prev_output)
kept_states = []
for state, new_state in zip(states, new_states):
state_mask = get_matching_mask(mask[i], state)
kept_states.append(T.switch(state_mask, new_state, state))
states = kept_states
successive_outputs.append(output)
successive_states.append(states)
outputs = T.stack(*successive_outputs)
states = []
for i in range(len(successive_states[-1])):
new_states = []
for states_at_step in successive_states:
new_states.append(states_at_step[i])
states.append(T.stack(*new_states))
else:
# build an all-zero tensor of shape (samples, output_dim)
initial_output = step_function(inputs[0], initial_states + constants)
initial_output = initial_output[0] * 0
# Theano gets confused by broadcasting patterns in the scan op
initial_output = T.unbroadcast(initial_output, 0, 1)
if len(initial_states) > 0:
initial_states[0] = T.unbroadcast(initial_states[0], 0, 1)
def _step(inputs, mask, output_tm1, *states):
outputs, new_states = step_function(inputs, states)
if getattr(outputs, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
# output previous output if masked.
output_mask = get_matching_mask(mask, outputs)
outputs = T.switch(output_mask, outputs, output_tm1)
return_states = []
for state, new_state in zip(states, new_states):
# TODO: Theano cannot optimize this and therefore, it shows the InconsistencyError (new backend)
state_mask = get_matching_mask(mask, state)
return_states.append(T.switch(state_mask, new_state, state))
return [outputs] + return_states
results, _ = theano.scan(
_step,
sequences=[inputs, mask],
outputs_info=[initial_output] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
# deal with Theano API inconsistency
if isinstance(results, list):
outputs = results[0]
states = results[1:]
else:
outputs = results
states = []
else:
if unroll:
indices = list(range(input_length))
if go_backwards:
indices = indices[::-1]
successive_outputs = []
successive_states = []
states = initial_states
for i in indices:
outputs, states = step_function(inputs[i], states + constants)
if getattr(outputs, '_uses_learning_phase', False):
uses_learning_phase = True
successive_outputs.append(outputs)
successive_states.append(states)
outputs = T.stack(*successive_outputs)
states = []
for i in range(len(successive_states[-1])):
states.append(T.stack(
*[states_at_step[i] for states_at_step in successive_states]))
else:
def _step(inputs, *states):
outputs, new_states = step_function(inputs, states)
if getattr(outputs, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
return [outputs] + new_states
# Theano likes to make shape==1 dimensions
# in the initial states (outputs_info) broadcastable
if len(initial_states) > 0:
initial_states[0] = T.unbroadcast(initial_states[0], 0, 1)
results, _ = theano.scan(
_step,
sequences=inputs,
outputs_info=[None] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
# deal with Theano API inconsistency
if isinstance(results, list):
outputs = results[0]
states = results[1:]
else:
outputs = results
states = []
outputs = T.squeeze(outputs)
last_output = outputs[-1]
axes = [1, 0] + list(range(2, outputs.ndim))
outputs = outputs.dimshuffle(axes)
if pos_extra_outputs_states is None:
states = [T.squeeze(state[-1]) for state in states]
else:
states = [state if i_s in pos_extra_outputs_states
else T.squeeze(state[-1]) for i_s, state in enumerate(states)]
last_output._uses_learning_phase = uses_learning_phase
return last_output, outputs, states
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
# Returns
The selected tensor.
"""
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
cond_ndim = ndim(condition)
expr_ndim = ndim(then_expression)
if cond_ndim < expr_ndim:
ndim_diff = expr_ndim - cond_ndim
for _ in range(ndim_diff):
condition = expand_dims(condition)
return T.switch(condition, then_expression, else_expression)
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
# Arguments
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
# Returns
        Either `x` or `alt` based on the `training` flag.
        The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
    if training is True or (isinstance(training, int) and training == 1):
        if callable(x):
            return x()
        else:
            return x
    elif training is False or (isinstance(training, int) and training == 0):
if callable(alt):
return alt()
else:
return alt
if callable(x):
x = x()
if callable(alt):
alt = alt()
# else: assume learning phase is a placeholder tensor.
x = ifelse(training, x, alt)
if uses_learning_phase:
x._uses_learning_phase = True
return x
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
# Arguments
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
# Returns
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
def _assert_has_capability(module, func):
if not hasattr(module, func):
raise EnvironmentError(
            'It looks like your version of '
'Theano is out of date. '
'Install the latest version with:\n'
'pip install git+git://github.com/Theano/Theano.git '
'--upgrade --no-deps')
def elu(x, alpha=1.0):
""" Exponential linear unit
# Arguments
x: Tensor to compute the activation function for.
alpha: scalar
"""
_assert_has_capability(T.nnet, 'elu')
return T.nnet.elu(x, alpha)
def relu(x, alpha=0., max_value=None, threshold=0.):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
# Arguments
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
        max_value: Saturation threshold.
        threshold: Float. Threshold value for thresholded activation.
# Returns
A tensor.
"""
_assert_has_capability(T.nnet, 'relu')
if alpha != 0.:
if threshold != 0.:
negative_part = T.nnet.relu(-x + threshold)
else:
negative_part = T.nnet.relu(-x)
if threshold != 0.:
x = x * T.cast(T.gt(x, threshold), floatx())
else:
x = T.nnet.relu(x)
if max_value is not None:
x = T.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def softmax(x, axis=-1):
    """Softmax of a tensor along the given axis.
    # Arguments
        x: A tensor or variable.
        axis: The axis along which to apply the softmax.
    # Returns
        A tensor.
    """
    if (axis == -1 or axis == x.ndim - 1) and x.ndim == 2:
return T.nnet.softmax(x)
xm = x.max(axis=axis, keepdims=True)
return T.exp(x - xm) / T.exp(
x - xm).sum(axis=axis, keepdims=True)
def softmax_3d(x):
""""Softmax on the last axis of a 2d or 3d tensor.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
# Raises
Exception: If the input tensor is not 2D or 3D.
"""
nd = ndim(x)
if nd == 2:
return softmax(x)
elif nd == 3:
e = exp(x - max(x, axis=-1, keepdims=True))
s = sum(e, axis=-1, keepdims=True)
return e / s
else:
raise Exception('Cannot apply softmax to a tensor that is not 2D or 3D. ' +
'Here, ndim=' + str(nd))
def softplus(x):
"""Softplus of a tensor.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
"""
return T.nnet.softplus(x)
def softsign(x):
"""Softsign of a tensor.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
"""
return T_softsign(x)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
# Arguments
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1`
            corresponds to data format `channels_last`.
# Returns
Output tensor.
"""
output_dimensions = list(range(len(int_shape(output))))
if axis != -1 and axis not in output_dimensions:
raise ValueError(
'{}{}{}'.format(
'Unexpected channels axis {}. '.format(axis),
'Expected to be -1 or one of the axes of `output`, ',
'which has {} dimensions.'.format(len(int_shape(output)))))
# If the channels are not in the last axis, move them to be there:
if axis != -1 and axis != output_dimensions[-1]:
permutation = output_dimensions[:axis]
permutation += output_dimensions[axis + 1:] + [axis]
output = permute_dimensions(output, permutation)
target = permute_dimensions(target, permutation)
if from_logits:
output = T.nnet.softmax(output)
else:
# scale preds so that the class probas of each sample sum to 1
output /= output.sum(axis=-1, keepdims=True)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, epsilon(), 1.0 - epsilon())
return T.nnet.categorical_crossentropy(output, target)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
# Arguments
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1`
            corresponds to data format `channels_last`.
# Returns
Output tensor.
"""
output_dimensions = list(range(len(int_shape(output))))
if axis != -1 and axis not in output_dimensions:
raise ValueError(
'{}{}{}'.format(
'Unexpected channels axis {}. '.format(axis),
'Expected to be -1 or one of the axes of `output`, ',
'which has {} dimensions.'.format(len(int_shape(output)))))
# If the channels are not in the last axis, move them to be there:
if axis != -1 and axis != output_dimensions[-1]:
permutation = output_dimensions[:axis]
permutation += output_dimensions[axis + 1:] + [axis]
output = permute_dimensions(output, permutation)
target = permute_dimensions(target, permutation)
target = T.cast(T.flatten(target), 'int32')
target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1])
target = reshape(target, shape(output))
return categorical_crossentropy(target, output, from_logits, axis=-1)
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
# Arguments
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
# Returns
A tensor.
"""
if from_logits:
output = T.nnet.sigmoid(output)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, epsilon(), 1.0 - epsilon())
return T.nnet.binary_crossentropy(output, target)
def weighted_binary_crossentropy(target, output, from_logits=False, lambda_w_rec=1.0, lambda_w_pre=1.0):
"""Weighted crossentropy of binary random variables.
# Arguments
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
lambda_w_rec: Float. First weight.
lambda_w_pre: Float. Second weight.
# Returns
A tensor.
"""
if from_logits:
output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, epsilon(), 1.0 - epsilon())
return -(lambda_w_rec * target * T.log(output) + lambda_w_pre * (1.0 - target) * T.log(1.0 - output))
def sigmoid(x):
"""Element-wise sigmoid.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
"""
return T.nnet.sigmoid(x)
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
"""
return T.nnet.hard_sigmoid(x)
def tanh(x):
"""Element-wise tanh.
# Arguments
x: A tensor or variable.
# Returns
A tensor.
"""
return T.tanh(x)
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
# Arguments
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
# Returns
A tensor.
"""
if level < 0. or level >= 1:
raise ValueError('Dropout level must be in interval [0, 1[.')
if seed is None:
seed = np.random.randint(1, 10e6)
if isinstance(noise_shape, list):
noise_shape = tuple(noise_shape)
rng = RandomStreams(seed=seed)
retain_prob = 1. - level
if noise_shape is None:
random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
else:
random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
random_tensor = T.patternbroadcast(random_tensor,
[dim == 1 for dim in noise_shape])
x *= random_tensor
x /= retain_prob
return x
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
# Arguments
x: Tensor or variable.
axis: axis along which to perform normalization.
# Returns
A tensor.
"""
square_sum = T.sum(T.square(x), axis=axis, keepdims=True)
norm = T.sqrt(T.maximum(square_sum, epsilon()))
return x / norm
def l1_normalize(x, axis):
"""Normalizes a tensor wrt the L1 norm alongside the specified axis.
# Arguments
x: Tensor or variable.
axis: axis along which to perform normalization.
# Returns
A tensor.
"""
norm = T.max(T.sum(abs(x), axis=axis, keepdims=True))
return x / norm
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
# Arguments
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
# Returns
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
# handle k < 1 and k >= predictions.shape[1] cases to match TF behavior
if k < 1:
# dtype='bool' is only available since Theano 0.9.0
try:
return T.zeros_like(targets, dtype='bool')
except TypeError:
return T.zeros_like(targets, dtype='int8')
if k >= int_shape(predictions)[1]:
try:
return T.ones_like(targets, dtype='bool')
except TypeError:
return T.ones_like(targets, dtype='int8')
predictions_k = T.sort(predictions)[:, -k]
targets_values = predictions[T.arange(targets.shape[0]), targets]
return T.ge(targets_values, predictions_k)
# CONVOLUTIONS
def _preprocess_conv2d_input(x, data_format):
"""Transpose and cast the input before the conv2d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = x.dimshuffle((0, 3, 1, 2))
return x
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols, slices)
# TF input shape: (samples, rows, cols, slices, input_depth)
x = x.dimshuffle((0, 4, 1, 2, 3))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# Theano expects `(depth, input_depth, rows, cols)`.
kernel = kernel.dimshuffle((3, 2, 0, 1))
return kernel
def _preprocess_conv2d_depthwise_kernel(kernel, kernel_shape, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# Theano expects `(input_depth * depth, 1, rows, cols)`
# for depthwise convolution.
kernel = kernel[::-1, ::-1, :, :]
kernel = kernel.dimshuffle((2, 3, 0, 1))
kernel = reshape(kernel, kernel_shape)
return kernel
def _preprocess_conv3d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(space, input_depth, depth)`,
# independently of `data_format`.
# Theano expects `(depth, input_depth, space)`.
kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
return kernel
def _preprocess_padding(padding):
"""Convert keras' padding to theano's padding.
# Arguments
padding: string, `"same"` or `"valid"`.
# Returns
a string, `"SAME"` or `"VALID"`.
# Raises
ValueError: if `padding` is invalid.
"""
if padding == 'same':
th_padding = 'half'
elif padding == 'valid':
th_padding = 'valid'
elif padding == 'full':
th_padding = 'full'
else:
raise ValueError('Border mode not supported:', str(padding))
return th_padding
def _preprocess_conv2d_image_shape(image_shape, data_format):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if data_format == 'channels_last':
if image_shape:
image_shape = transpose_shape(image_shape, 'channels_first',
spatial_axes=(1, 2))
if image_shape is not None:
image_shape = tuple(int_or_none(v) for v in image_shape)
return image_shape
def _preprocess_conv3d_volume_shape(volume_shape, data_format):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if data_format == 'channels_last':
if volume_shape:
volume_shape = (volume_shape[0], volume_shape[4],
volume_shape[1], volume_shape[2], volume_shape[3])
if volume_shape is not None:
volume_shape = tuple(int_or_none(v) for v in volume_shape)
return volume_shape
def _preprocess_conv2d_filter_shape(filter_shape, data_format):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if filter_shape:
filter_shape = (filter_shape[3], filter_shape[2],
filter_shape[0], filter_shape[1])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _preprocess_conv2d_depthwise_filter_shape(filter_shape, data_format):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if filter_shape:
filter_shape = (filter_shape[3] * filter_shape[2], 1,
filter_shape[0], filter_shape[1])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _preprocess_conv3d_filter_shape(filter_shape, data_format):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if filter_shape:
filter_shape = (filter_shape[4], filter_shape[3],
filter_shape[0], filter_shape[1], filter_shape[2])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _postprocess_conv2d_output(conv_out, x,
padding, kernel_shape,
strides, data_format):
if padding == 'same':
if kernel_shape[2] % 2 == 0:
i = (x.shape[2] + strides[0] - 1) // strides[0]
conv_out = conv_out[:, :, :i, :]
if kernel_shape[3] % 2 == 0:
i = (x.shape[3] + strides[1] - 1) // strides[1]
conv_out = conv_out[:, :, :, :i]
if data_format == 'channels_last':
conv_out = conv_out.dimshuffle((0, 2, 3, 1))
return conv_out
def _postprocess_conv3d_output(conv_out, x,
padding, kernel_shape,
strides, data_format):
if padding == 'same':
if kernel_shape[2] % 2 == 0:
i = (x.shape[2] + strides[0] - 1) // strides[0]
conv_out = conv_out[:, :, :i, :, :]
if kernel_shape[3] % 2 == 0:
i = (x.shape[3] + strides[1] - 1) // strides[1]
conv_out = conv_out[:, :, :, :i, :]
if kernel_shape[4] % 2 == 0:
i = (x.shape[4] + strides[2] - 1) // strides[2]
conv_out = conv_out[:, :, :, :, :i]
if data_format == 'channels_last':
conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
return conv_out
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
"""1D convolution.
# Arguments
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilate rate.
# Returns
A tensor, result of 1D convolution.
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
data_format = normalize_data_format(data_format)
kernel_shape = int_shape(kernel)
if padding == 'causal':
# causal (dilated) convolution:
if not kernel_shape:
raise AttributeError('Causal padding requires kernel._keras_shape set.')
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
shape = int_shape(x)
if data_format == 'channels_last':
# original shape: (batch, length, input_dim)
# add dim to x to have (batch, length, 1, input_dim)
x = expand_dims(x, 2)
# update x._keras_shape
if shape is not None:
x._keras_shape = (shape[0], shape[1], 1, shape[2])
else:
# original shape: (batch, input_dim, length)
# add dim to x to have (batch, input_dim, length, 1)
x = expand_dims(x, 3)
# update x._keras_shape
if shape is not None:
x._keras_shape = (shape[0], shape[1], shape[2], 1)
# update dilation rate, strides
dilation_rate = (dilation_rate, 1)
strides = (strides, 1)
# add dim to kernel (always same format independently of data_format)
# i.e. (rows, 1, input_depth, depth)
kernel = expand_dims(kernel, 1)
output = conv2d(x, kernel,
strides=strides, padding=padding,
data_format=data_format, dilation_rate=dilation_rate)
# remove added dim
if data_format == 'channels_last':
output = squeeze(output, 2)
else:
output = squeeze(output, 3)
return output
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
"""2D convolution.
# Arguments
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
# Returns
A tensor, result of 2D convolution.
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
data_format = normalize_data_format(data_format)
image_shape = _preprocess_conv2d_image_shape(int_shape(x), data_format)
kernel_shape = int_shape(kernel)
if kernel_shape is None:
kernel_shape = kernel.eval().shape # in case of a shared variable
kernel_shape = _preprocess_conv2d_filter_shape(kernel_shape, data_format)
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
th_padding = _preprocess_padding(padding)
conv_out = T.nnet.conv2d(x, kernel,
border_mode=th_padding,
subsample=strides,
input_shape=image_shape,
filter_shape=kernel_shape,
filter_dilation=dilation_rate)
conv_out = _postprocess_conv2d_output(conv_out, x, padding,
kernel_shape, strides, data_format)
return conv_out
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
"""2D deconvolution (transposed convolution).
# Arguments
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: "channels_last" or "channels_first".
Whether to use Theano or TensorFlow data format
in inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
flip_filters = False
data_format = normalize_data_format(data_format)
if data_format == 'channels_last':
output_shape = (output_shape[0],
output_shape[3],
output_shape[1],
output_shape[2])
kernel_shape = int_shape(kernel)
if kernel_shape is None:
kernel_shape = kernel.eval().shape # in case of a shared variable
if padding == 'same' and kernel_shape[0] % 2 == 0:
raise ValueError('In `Conv2DTranspose`, with padding mode `same`, '
'even kernel sizes are not supported with Theano. '
'You can set `kernel_size` to an odd number.')
kernel_shape = _preprocess_conv2d_filter_shape(kernel_shape, data_format)
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
th_padding = _preprocess_padding(padding)
op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
imshp=None,
kshp=kernel_shape,
subsample=strides,
border_mode=th_padding,
filter_flip=not flip_filters,
filter_dilation=dilation_rate)
conv_out = op(kernel, x, output_shape[2:])
conv_out = _postprocess_conv2d_output(conv_out, x, padding,
kernel_shape, strides, data_format)
return conv_out
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
"""1D convolution with separable filters.
# Arguments
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
# Returns
Output tensor.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor
            `"channels_first"`.
"""
data_format = normalize_data_format(data_format)
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
if data_format == 'channels_last':
spatial_start_dim = 2
else:
spatial_start_dim = 3
x = expand_dims(x, spatial_start_dim)
depthwise_kernel = expand_dims(depthwise_kernel, 1)
pointwise_kernel = expand_dims(pointwise_kernel, 1)
strides = strides + (1,)
dilation_rate = dilation_rate + (1,)
image_shape = _preprocess_conv2d_image_shape(int_shape(x), data_format)
depthwise_kernel_shape = int_shape(depthwise_kernel)
if depthwise_kernel_shape is None:
# in case of a shared variable
depthwise_kernel_shape = depthwise_kernel.eval().shape
depthwise_kernel_shape = _preprocess_conv2d_depthwise_filter_shape(
depthwise_kernel_shape, data_format)
pointwise_kernel_shape = int_shape(pointwise_kernel)
if pointwise_kernel_shape is None:
# in case of a shared variable
pointwise_kernel_shape = pointwise_kernel.eval().shape
pointwise_kernel_shape = _preprocess_conv2d_filter_shape(
pointwise_kernel_shape, data_format)
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_depthwise_kernel(
depthwise_kernel, depthwise_kernel_shape, data_format)
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
th_padding = _preprocess_padding(padding)
conv_out = T.nnet.conv2d(x, depthwise_kernel,
border_mode=th_padding,
subsample=strides,
input_shape=image_shape,
filter_shape=depthwise_kernel_shape,
filter_dilation=dilation_rate,
num_groups=image_shape[1])
conv_out = T.nnet.conv2d(conv_out, pointwise_kernel,
border_mode=th_padding,
subsample=(1, 1),
input_shape=None,
filter_shape=pointwise_kernel_shape,
filter_dilation=dilation_rate)
conv_out = _postprocess_conv2d_output(conv_out, x, padding,
pointwise_kernel_shape,
strides, data_format)
conv_out = squeeze(conv_out, spatial_start_dim)
return conv_out
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
"""2D convolution with separable filters.
# Arguments
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
# Returns
Output tensor.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor
            `"channels_first"`.
"""
data_format = normalize_data_format(data_format)
image_shape = _preprocess_conv2d_image_shape(int_shape(x), data_format)
depthwise_kernel_shape = int_shape(depthwise_kernel)
if depthwise_kernel_shape is None:
# in case of a shared variable
depthwise_kernel_shape = depthwise_kernel.eval().shape
depthwise_kernel_shape = _preprocess_conv2d_depthwise_filter_shape(
depthwise_kernel_shape, data_format)
pointwise_kernel_shape = int_shape(pointwise_kernel)
if pointwise_kernel_shape is None:
# in case of a shared variable
pointwise_kernel_shape = pointwise_kernel.eval().shape
pointwise_kernel_shape = _preprocess_conv2d_filter_shape(
pointwise_kernel_shape, data_format)
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_depthwise_kernel(
depthwise_kernel, depthwise_kernel_shape, data_format)
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
th_padding = _preprocess_padding(padding)
conv_out = T.nnet.conv2d(x, depthwise_kernel,
border_mode=th_padding,
subsample=strides,
input_shape=image_shape,
filter_shape=depthwise_kernel_shape,
filter_dilation=dilation_rate,
num_groups=image_shape[1])
conv_out = T.nnet.conv2d(conv_out, pointwise_kernel,
border_mode=th_padding,
subsample=(1, 1),
input_shape=None,
filter_shape=pointwise_kernel_shape,
filter_dilation=dilation_rate)
conv_out = _postprocess_conv2d_output(conv_out, x, padding,
pointwise_kernel_shape,
strides, data_format)
return conv_out
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
"""2D convolution with separable filters.
# Arguments
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
# Returns
Output tensor.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor
            `"channels_first"`.
"""
data_format = normalize_data_format(data_format)
image_shape = _preprocess_conv2d_image_shape(int_shape(x), data_format)
depthwise_kernel_shape = int_shape(depthwise_kernel)
if depthwise_kernel_shape is None:
# in case of a shared variable
depthwise_kernel_shape = depthwise_kernel.eval().shape
depthwise_kernel_shape = _preprocess_conv2d_depthwise_filter_shape(
depthwise_kernel_shape, data_format)
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_depthwise_kernel(
depthwise_kernel, depthwise_kernel_shape, data_format)
th_padding = _preprocess_padding(padding)
conv_out = T.nnet.conv2d(x, depthwise_kernel,
border_mode=th_padding,
subsample=strides,
input_shape=image_shape,
filter_shape=depthwise_kernel_shape,
filter_dilation=dilation_rate,
num_groups=image_shape[1])
conv_out = _postprocess_conv2d_output(
conv_out, x, padding, depthwise_kernel_shape, strides, data_format)
return conv_out
def conv3d(x, kernel, strides=(1, 1, 1),
padding='valid', data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
# Arguments
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 3 integers.
# Returns
A tensor, result of 3D convolution.
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
data_format = normalize_data_format(data_format)
volume_shape = _preprocess_conv3d_volume_shape(int_shape(x), data_format)
kernel_shape = int_shape(kernel)
if kernel_shape is None:
kernel_shape = kernel.eval().shape # in case of a shared variable
kernel_shape = _preprocess_conv3d_filter_shape(kernel_shape, data_format)
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
th_padding = _preprocess_padding(padding)
conv_out = T.nnet.conv3d(x, kernel,
border_mode=th_padding,
subsample=strides,
input_shape=volume_shape,
filter_shape=kernel_shape,
filter_dilation=dilation_rate)
conv_out = _postprocess_conv3d_output(conv_out, x, padding,
kernel_shape, strides, data_format)
return conv_out
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
"""3D deconvolution (i.e. transposed convolution).
# Arguments
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
# Returns
A tensor, result of transposed 3D convolution.
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
flip_filters = False
data_format = normalize_data_format(data_format)
if data_format == 'channels_last':
output_shape = (output_shape[0],
output_shape[4],
output_shape[1],
output_shape[2],
output_shape[3])
kernel_shape = int_shape(kernel)
if kernel_shape is None:
kernel_shape = kernel.eval().shape # in case of a shared variable
if padding == 'same' and kernel_shape[0] % 2 == 0:
raise ValueError('In `Conv3DTranspose`, with padding mode `same`, '
'even kernel sizes are not supported with Theano. '
'You can set `kernel_size` to an odd number.')
kernel_shape = _preprocess_conv3d_filter_shape(kernel_shape, data_format)
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
th_padding = _preprocess_padding(padding)
op = T.nnet.abstract_conv.AbstractConv3d_gradInputs(imshp=None,
kshp=kernel_shape,
subsample=strides,
border_mode=th_padding,
filter_flip=not flip_filters)
conv_out = op(kernel, x, output_shape[2:])
conv_out = _postprocess_conv3d_output(conv_out, x, padding,
kernel_shape, strides, data_format)
return conv_out
def pool2d(x, pool_size, strides=(1, 1), padding='valid',
data_format=None, pool_mode='max'):
"""2D Pooling.
# Arguments
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
# Returns
A tensor, result of 2D pooling.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
        ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
data_format = normalize_data_format(data_format)
assert pool_size[0] >= 1 and pool_size[1] >= 1
if padding == 'same':
odd_pad_w = pool_size[0] > 2 and pool_size[0] % 2 == 1
w_pad = pool_size[0] - 2 if odd_pad_w else pool_size[0] - 1
odd_pad_h = pool_size[1] > 2 and pool_size[1] % 2 == 1
h_pad = pool_size[1] - 2 if odd_pad_h else pool_size[1] - 1
pad = (w_pad, h_pad)
elif padding == 'valid':
pad = (0, 0)
else:
raise ValueError('Invalid border mode:', padding)
if data_format == 'channels_last':
x = x.dimshuffle((0, 3, 1, 2))
if pool_mode == 'max':
pool_out = pool.pool_2d(x, ws=pool_size, stride=strides,
ignore_border=True,
pad=pad,
mode='max')
elif pool_mode == 'avg':
pool_out = pool.pool_2d(x, ws=pool_size, stride=strides,
ignore_border=True,
pad=pad,
mode='average_exc_pad')
else:
raise ValueError('Invalid pooling mode:', pool_mode)
if padding == 'same':
expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
pool_out = pool_out[:, :,
: expected_width,
: expected_height]
if data_format == 'channels_last':
pool_out = pool_out.dimshuffle((0, 2, 3, 1))
return pool_out
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
"""3D Pooling.
# Arguments
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
# Returns
A tensor, result of 3D pooling.
# Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
        ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
data_format = normalize_data_format(data_format)
if padding == 'same':
w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
d_pad = pool_size[2] - 2 if pool_size[2] % 2 == 1 else pool_size[2] - 1
pad = (w_pad, h_pad, d_pad)
elif padding == 'valid':
pad = (0, 0, 0)
else:
raise ValueError('Invalid padding:', padding)
if data_format == 'channels_last':
x = x.dimshuffle((0, 4, 1, 2, 3))
if pool_mode == 'max':
pool_out = pool.pool_3d(x, ws=pool_size, stride=strides,
ignore_border=True,
pad=pad,
mode='max')
elif pool_mode == 'avg':
pool_out = pool.pool_3d(x, ws=pool_size, stride=strides,
ignore_border=True,
pad=pad,
mode='average_exc_pad')
else:
raise ValueError('Invalid pooling mode:', pool_mode)
if padding == 'same':
expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
expected_depth = (x.shape[4] + strides[2] - 1) // strides[2]
pool_out = pool_out[:, :,
: expected_width,
: expected_height,
: expected_depth]
if data_format == 'channels_last':
pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
return pool_out
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
# Arguments
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
Output tensor.
# Raises
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
            the bias should be either a vector or
            a tensor with `ndim(x) - 1` dimensions.
"""
data_format = normalize_data_format(data_format)
if ndim(bias) != 1 and ndim(bias) != ndim(x) - 1:
raise ValueError('Unexpected bias dimensions %d, '
'expect to be 1 or %d dimensions'
% (ndim(bias), ndim(x) - 1))
bias_shape = tuple(bias.shape)
if ndim(x) == 5:
if data_format == 'channels_first':
if ndim(bias) == 1:
x += reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x += reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if ndim(bias) == 1:
x += reshape(bias, (1, 1, 1, 1, bias_shape[0]))
else:
x += reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if ndim(bias) == 1:
x += reshape(bias, (1, bias_shape[0], 1, 1))
else:
x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if ndim(bias) == 1:
x += reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x += reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if ndim(bias) == 1:
x += reshape(bias, (1, bias_shape[0], 1))
else:
x += reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if ndim(bias) == 1:
x += reshape(bias, (1, 1, bias_shape[0]))
else:
x += reshape(bias, (1,) + bias_shape)
else:
x += bias
return x
# RANDOMNESS
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
# Arguments
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
# Returns
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
# Arguments
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
# Returns
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.uniform(shape, low=minval, high=maxval, dtype=dtype)
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
# Arguments
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
# Returns
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.binomial(shape, p=p, dtype=dtype)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
# Arguments
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
# Returns
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
try:
return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype,
truncate=True)
except TypeError:
normal_t = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
# Poor man's truncated normal: we literally clip the tensor
return T.clip(normal_t, mean - 2 * stddev, mean + 2 * stddev)
def random_multinomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random multinomial distribution of values.
# Arguments
shape: A tuple of integers, the shape of tensor to create.
        p: Probability vector (`pvals`) of the multinomial distribution;
            the entries of each row should sum to 1.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
# Returns
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
        seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.multinomial(shape, pvals=p, dtype=dtype)
# COUNT SKETCH
def count_sketch(h, s, x, d=16000):
"""Count sketch operator.
See https://arxiv.org/abs/1606.01847.
# Arguments
h: Count sketch vector h \in \{1, d\} ^n
s: Count sketch vector s \in \{-1, 1\} ^n
x: Count sketch input vector
d: Compact Bilinear dimension
"""
rval, updates = theano.scan(fn=__count_sketch,
sequences=[h, s, x.dimshuffle(1, 0)],
outputs_info=T.alloc(0., x.shape[0], d),
non_sequences=[], n_steps=x.shape[1])
return rval[-1] # We are interested only in the last value
def __count_sketch(h, s, v, # Sequences
y, # Outputs info
):
"""Count sketch utility.
See https://arxiv.org/abs/1606.01847.
# Arguments
h: Count sketch vector h \in \{1, d\} ^n
s: Count sketch vector s \in \{-1, 1\} ^n
v: Count sketch input vector
y: Projected output vector
"""
return T.cast(T.inc_subtensor(y[:, h], T.dot(s, v)), 'float32')
# 1d Convolution
def scan_conv1d(u, v):
"""1D convolution over a set of vectors. All inputs will be treated by pairs.
#x must be equal to #kernel
# Arguments
u: first set of vectors
v: second set of vectors
"""
def __vec_conv(u, v, # Sequences
w, # Outputs info
):
u = u.dimshuffle(('x', 0))
v = v.dimshuffle(('x', 0))
conv_out = vec_conv(u, v,
border_mode='full')
        init_cut = u.shape[1] // 2  # integer division: keep the central part
end_cut = init_cut + u.shape[1]
return conv_out[0, init_cut:end_cut]
conv_out, updates = theano.scan(__vec_conv,
sequences=[u, v],
outputs_info=T.alloc(0., u.shape[1]), # , d),
non_sequences=[], n_steps=u.shape[0])
return conv_out
# Theano implementation of CTC
# Used with permission from <NAME>
# https://github.com/shawntan/
# Note that TensorFlow's native CTC code is significantly
# faster than this
def ctc_interleave_blanks(Y):
Y_ = T.alloc(-1, Y.shape[0] * 2 + 1)
Y_ = T.set_subtensor(Y_[T.arange(Y.shape[0]) * 2 + 1], Y)
return Y_
def ctc_create_skip_idxs(Y):
skip_idxs = T.arange((Y.shape[0] - 3) // 2) * 2 + 1
non_repeats = T.neq(Y[skip_idxs], Y[skip_idxs + 2])
return skip_idxs[non_repeats.nonzero()]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
active_next = T.cast(T.minimum(
T.maximum(
active + 1,
T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
), log_p_curr.shape[0]), 'int32')
common_factor = T.max(log_p_prev[:active])
p_prev = T.exp(log_p_prev[:active] - common_factor)
_p_prev = zeros[:active_next]
# copy over
_p_prev = T.set_subtensor(_p_prev[:active], p_prev)
# previous transitions
_p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
# skip transitions
_p_prev = T.inc_subtensor(
_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
updated_log_p_prev = T.log(_p_prev) + common_factor
log_p_next = T.set_subtensor(
zeros[:active_next],
log_p_curr[:active_next] + updated_log_p_prev
)
return active_next, log_p_next
def ctc_path_probs(predict, Y, alpha=1e-4):
smoothed = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
L = T.log(smoothed)
zeros = T.zeros_like(L[0])
log_first = zeros
f_skip_idxs = ctc_create_skip_idxs(Y)
# there should be a shortcut to calculating this
b_skip_idxs = ctc_create_skip_idxs(Y[::-1])
def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
f_active_next, log_f_next = ctc_update_log_p(
f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
b_active_next, log_b_next = ctc_update_log_p(
b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
return f_active_next, log_f_next, b_active_next, log_b_next
[f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
step,
sequences=[L, L[::-1, ::-1]],
outputs_info=[np.int32(1), log_first, np.int32(1), log_first])
idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
mask = ((idxs < f_active.dimshuffle(0, 'x')) &
(idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1])
log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
return log_probs, mask
def ctc_cost(predict, Y):
log_probs, mask = ctc_path_probs(predict, ctc_interleave_blanks(Y))
common_factor = T.max(log_probs)
total_log_prob = T.log(T.sum(T.exp(log_probs - common_factor)[mask.nonzero()]))
total_log_prob = total_log_prob + common_factor
return -total_log_prob
# batchifies original CTC code
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
# Arguments
y_true: tensor (samples, max_string_length) containing the truth labels
y_pred: tensor (samples, time_steps, num_categories) containing the
prediction, or output of the softmax
input_length: tensor (samples,1) containing the sequence length for
each batch item in y_pred
label_length: tensor (samples,1) containing the sequence length for
each batch item in y_true
# Returns
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
def ctc_step(y_true_step, y_pred_step, input_length_step, label_length_step):
y_pred_step = y_pred_step[0: input_length_step[0]]
y_true_step = y_true_step[0:label_length_step[0]]
return ctc_cost(y_pred_step, y_true_step)
ret, _ = theano.scan(
fn=ctc_step,
outputs_info=None,
sequences=[y_true, y_pred, input_length, label_length]
)
ret = ret.dimshuffle('x', 0)
return ret
# HIGH ORDER FUNCTIONS
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph
        dtype: Output dtype (ignored by the Theano backend).
    # Returns
        Tensor whose first dimension matches `elems` and whose
        remaining dimensions depend on `fn`.
"""
return theano.map(fn, elems, name=name)[0]
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
# Arguments
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
# Returns
Tensor with same type and shape as `initializer`.
"""
if initializer is None:
initializer = elems[0]
elems = elems[1:]
# We need to change the order of the arguments because theano accepts x as
# first parameter and accumulator as second
return theano.foldl(lambda x, acc: fn(acc, x),
elems, initializer, name=name)[0]
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
# Arguments
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
# Returns
Tensor with same type and shape as `initializer`.
"""
if initializer is None:
initializer = elems[-1]
elems = elems[:-1]
# We need to change the order of the arguments because theano accepts x as
# first parameter and accumulator as second
return theano.foldr(lambda x, acc: fn(acc, x),
elems, initializer, name=name)[0]
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
# Arguments
inputs: 3D tensor with shape: (batch_size, steps, input_dim)
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters)
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window
strides: a tuple of a single integer,
specifying the stride length of the convolution
data_format: the data format, channels_first or channels_last
# Returns
the tensor after 1d conv with un-shared weights, with shape (batch_size, output_length, filters)
# Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
data_format = normalize_data_format(data_format)
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = py_slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
# Shape: `(output_length, batch_size, filters)`.
output = batch_dot(x_aggregate, kernel)
return permute_dimensions(output, (1, 0, 2))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
# Arguments
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters)
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col)
data_format: the data format, channels_first or channels_last
# Returns
A 4d tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
# Raises
        ValueError: if `data_format` is neither
            `channels_last` nor `channels_first`.
"""
data_format = normalize_data_format(data_format)
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
if data_format == 'channels_first':
output = []
for i in range(output_row):
for j in range(output_col):
slice_row = py_slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = py_slice(j * stride_col,
j * stride_col + kernel_size[1])
x_flatten = reshape(inputs[:, :, slice_row, slice_col],
(1, -1, feature_dim))
output.append(dot(x_flatten,
kernel[i * output_col + j, :, :]))
output = concatenate(output, axis=0)
output = reshape(output,
(output_row, output_col, -1, filters))
output = permute_dimensions(output, (2, 3, 0, 1))
else:
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = py_slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = py_slice(j * stride_col,
j * stride_col + kernel_size[1])
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output,
(output_row, output_col, -1, filters))
output = permute_dimensions(output, (2, 0, 1, 3))
return output
def ctc_label_dense_to_sparse(labels, label_lengths):
raise NotImplementedError
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1,
merge_repeated=False):
raise NotImplementedError
# modified from the one included in np_utils.py
def conv_input_length(output_length, filter_size, border_mode, stride):
if output_length is None:
return None
assert border_mode in {'same', 'valid', 'full'}
add_extra = 0
if border_mode == 'same':
pad = filter_size // 2
        add_extra = 1
elif border_mode == 'valid':
pad = 0
elif border_mode == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size + add_extra
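
# Editor's note -- worked example, not in the original source: the function
# inverts the usual conv output-length formula. For border_mode='valid',
# filter_size=3, stride=2 and output_length=4:
#     pad = 0, add_extra = 0  ->  (4 - 1) * 2 - 2 * 0 + 3 + 0 = 9
# and indeed a length-9 input yields floor((9 - 3) / 2) + 1 = 4 'valid'
# output positions. For border_mode='same' this variant adds one extra
# sample via add_extra = 1.
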
def as_tensor_variable(x, name=None, ndim=None):
return T.as_tensor_variable(x, name, ndim)
| [
"theano.tensor.zeros_like",
"theano.tensor.argmin",
"theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs",
"theano.tensor.alloc",
"theano.map",
"theano.tensor.tile",
"theano.tensor.squeeze",
"theano.tensor.stack",
"theano.tensor.inv",
"theano.tensor.std",
"theano.tensor.nnet.abstract_conv... | [((1191, 1243), 'theano.tensor.scalar', 'T.scalar', ([], {'dtype': '"""uint8"""', 'name': '"""keras_learning_phase"""'}), "(dtype='uint8', name='keras_learning_phase')\n", (1199, 1243), True, 'from theano import tensor as T\n'), ((1260, 1276), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1271, 1276), False, 'from collections import defaultdict\n'), ((2572, 2588), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2583, 2588), False, 'from collections import defaultdict\n'), ((16003, 16030), 'theano.tensor.ones_like', 'T.ones_like', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (16014, 16030), True, 'from theano import tensor as T\n'), ((16800, 16828), 'theano.tensor.zeros_like', 'T.zeros_like', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (16812, 16828), True, 'from theano import tensor as T\n'), ((19837, 19880), 'theano.function', 'theano.function', (['[]', 'x.shape'], {'profile': '(False)'}), '([], x.shape, profile=False)\n', (19852, 19880), False, 'import theano\n'), ((20742, 20758), 'theano.tensor.cast', 'T.cast', (['x', 'dtype'], {}), '(x, dtype)\n', (20748, 20758), True, 'from theano import tensor as T\n'), ((20975, 20984), 'theano.tensor.ceil', 'T.ceil', (['x'], {}), '(x)\n', (20981, 20984), True, 'from theano import tensor as T\n'), ((21194, 21204), 'theano.tensor.floor', 'T.floor', (['x'], {}), '(x)\n', (21201, 21204), True, 'from theano import tensor as T\n'), ((26484, 26520), 'theano.tensor.batched_tensordot', 'T.batched_tensordot', (['x', 'y'], {'axes': 'axes'}), '(x, y, axes=axes)\n', (26503, 26520), True, 'from theano import tensor as T\n'), ((28024, 28038), 'theano.tensor.transpose', 'T.transpose', (['x'], {}), '(x)\n', (28035, 28038), True, 'from theano import tensor as T\n'), ((28746, 28764), 'theano.tensor.fft.rfft', 'rfft', (['x'], {'norm': 'norm'}), '(x, norm=norm)\n', (28750, 28764), False, 'from theano.tensor.fft import rfft, irfft\n'), ((28862, 28896), 'theano.tensor.fft.irfft', 'irfft', (['x'], {'norm': 'norm', 'is_odd': 'is_odd'}), '(x, norm=norm, is_odd=is_odd)\n', (28867, 28896), False, 'from theano.tensor.fft import rfft, irfft\n'), ((28977, 28986), 'theano.tensor.real', 'T.real', (['x'], {}), '(x)\n', (28983, 28986), True, 'from theano import tensor as T\n'), ((29522, 29560), 'theano.tensor.max', 'T.max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (29527, 29560), True, 'from theano import tensor as T\n'), ((30068, 30106), 'theano.tensor.min', 'T.min', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (30073, 30106), True, 'from theano import tensor as T\n'), ((30626, 30664), 'theano.tensor.sum', 'T.sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (30631, 30664), True, 'from theano import tensor as T\n'), ((31220, 31259), 'theano.tensor.prod', 'T.prod', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (31226, 31259), True, 'from theano import tensor as T\n'), ((31573, 31605), 'theano.tensor.extra_ops.cumsum', 'T.extra_ops.cumsum', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (31591, 31605), True, 'from theano import tensor as T\n'), ((31932, 31965), 'theano.tensor.extra_ops.cumprod', 'T.extra_ops.cumprod', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (31951, 31965), True, 'from theano import tensor as T\n'), ((32661, 32713), 'theano.tensor.mean', 'T.mean', (['x'], {'axis': 'axis', 'keepdims': 
'keepdims', 'dtype': 'dtype'}), '(x, axis=axis, keepdims=keepdims, dtype=dtype)\n', (32667, 32713), True, 'from theano import tensor as T\n'), ((33287, 33325), 'theano.tensor.std', 'T.std', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (33292, 33325), True, 'from theano import tensor as T\n'), ((33869, 33907), 'theano.tensor.var', 'T.var', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (33874, 33907), True, 'from theano import tensor as T\n'), ((34228, 34266), 'theano.tensor.any', 'T.any', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (34233, 34266), True, 'from theano import tensor as T\n'), ((34662, 34700), 'theano.tensor.all', 'T.all', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (34667, 34700), True, 'from theano import tensor as T\n'), ((35794, 35832), 'theano.tensor.argmax', 'T.argmax', (['x'], {'axis': 'axis', 'keepdims': '(False)'}), '(x, axis=axis, keepdims=False)\n', (35802, 35832), True, 'from theano import tensor as T\n'), ((36077, 36115), 'theano.tensor.argmin', 'T.argmin', (['x'], {'axis': 'axis', 'keepdims': '(False)'}), '(x, axis=axis, keepdims=False)\n', (36085, 36115), True, 'from theano import tensor as T\n'), ((36261, 36269), 'theano.tensor.sqr', 'T.sqr', (['x'], {}), '(x)\n', (36266, 36269), True, 'from theano import tensor as T\n'), ((36420, 36429), 'theano.tensor.abs_', 'T.abs_', (['x'], {}), '(x)\n', (36426, 36429), True, 'from theano import tensor as T\n'), ((36575, 36597), 'theano.tensor.clip', 'T.clip', (['x', '(0.0)', 'np.inf'], {}), '(x, 0.0, np.inf)\n', (36581, 36597), True, 'from theano import tensor as T\n'), ((36608, 36617), 'theano.tensor.sqrt', 'T.sqrt', (['x'], {}), '(x)\n', (36614, 36617), True, 'from theano import tensor as T\n'), ((36765, 36773), 'theano.tensor.exp', 'T.exp', (['x'], {}), '(x)\n', (36770, 36773), True, 'from theano import tensor as T\n'), ((36913, 36921), 'theano.tensor.log', 'T.log', (['x'], {}), '(x)\n', (36918, 36921), True, 'from theano import tensor as T\n'), ((36977, 36986), 'theano.tensor.log2', 'T.log2', (['x'], {}), '(x)\n', (36983, 36986), True, 'from theano import tensor as T\n'), ((38154, 38185), 'theano.tensor.round', 'T.round', (['x'], {'mode': '"""half_to_even"""'}), "(x, mode='half_to_even')\n", (38161, 38185), True, 'from theano import tensor as T\n'), ((38327, 38335), 'theano.tensor.sgn', 'T.sgn', (['x'], {}), '(x)\n', (38332, 38335), True, 'from theano import tensor as T\n'), ((38516, 38527), 'theano.tensor.pow', 'T.pow', (['x', 'a'], {}), '(x, a)\n', (38521, 38527), True, 'from theano import tensor as T\n'), ((39062, 39093), 'theano.tensor.clip', 'T.clip', (['x', 'min_value', 'max_value'], {}), '(x, min_value, max_value)\n', (39068, 39093), True, 'from theano import tensor as T\n'), ((39299, 39309), 'theano.tensor.eq', 'T.eq', (['x', 'y'], {}), '(x, y)\n', (39303, 39309), True, 'from theano import tensor as T\n'), ((39518, 39529), 'theano.tensor.neq', 'T.neq', (['x', 'y'], {}), '(x, y)\n', (39523, 39529), True, 'from theano import tensor as T\n'), ((39896, 39906), 'theano.tensor.gt', 'T.gt', (['x', 'y'], {}), '(x, y)\n', (39900, 39906), True, 'from theano import tensor as T\n'), ((40115, 40125), 'theano.tensor.ge', 'T.ge', (['x', 'y'], {}), '(x, y)\n', (40119, 40125), True, 'from theano import tensor as T\n'), ((40324, 40334), 'theano.tensor.lt', 'T.lt', (['x', 'y'], {}), '(x, y)\n', (40328, 40334), True, 'from theano import tensor as T\n'), 
((40540, 40550), 'theano.tensor.le', 'T.le', (['x', 'y'], {}), '(x, y)\n', (40544, 40550), True, 'from theano import tensor as T\n'), ((40747, 40762), 'theano.tensor.maximum', 'T.maximum', (['x', 'y'], {}), '(x, y)\n', (40756, 40762), True, 'from theano import tensor as T\n'), ((40959, 40974), 'theano.tensor.minimum', 'T.minimum', (['x', 'y'], {}), '(x, y)\n', (40968, 40974), True, 'from theano import tensor as T\n'), ((41128, 41136), 'theano.tensor.sin', 'T.sin', (['x'], {}), '(x)\n', (41133, 41136), True, 'from theano import tensor as T\n'), ((41290, 41298), 'theano.tensor.cos', 'T.cos', (['x'], {}), '(x)\n', (41295, 41298), True, 'from theano import tensor as T\n'), ((42046, 42122), 'theano.tensor.nnet.bn.batch_normalization_train', 'T.nnet.bn.batch_normalization_train', (['x', 'gamma', 'beta', 'reduction_axes', 'epsilon'], {}), '(x, gamma, beta, reduction_axes, epsilon)\n', (42081, 42122), True, 'from theano import tensor as T\n'), ((42935, 43025), 'theano.tensor.nnet.bn.batch_normalization_test', 'T.nnet.bn.batch_normalization_test', (['x', 'gamma', 'beta', 'mean', 'var', 'reduction_axes', 'epsilon'], {}), '(x, gamma, beta, mean, var,\n reduction_axes, epsilon)\n', (42969, 43025), True, 'from theano import tensor as T\n'), ((44586, 44608), 'theano.tensor.stack', 'T.stack', (['*target_shape'], {}), '(*target_shape)\n', (44593, 44608), True, 'from theano import tensor as T\n'), ((44631, 44660), 'theano.tensor.reshape', 'T.reshape', (['mean', 'target_shape'], {}), '(mean, target_shape)\n', (44640, 44660), True, 'from theano import tensor as T\n'), ((44681, 44709), 'theano.tensor.reshape', 'T.reshape', (['var', 'target_shape'], {}), '(var, target_shape)\n', (44690, 44709), True, 'from theano import tensor as T\n'), ((44731, 44760), 'theano.tensor.reshape', 'T.reshape', (['beta', 'target_shape'], {}), '(beta, target_shape)\n', (44740, 44760), True, 'from theano import tensor as T\n'), ((44783, 44813), 'theano.tensor.reshape', 'T.reshape', (['gamma', 'target_shape'], {}), '(gamma, target_shape)\n', (44792, 44813), True, 'from theano import tensor as T\n'), ((48600, 48619), 'theano.tensor.reshape', 'T.reshape', (['x', 'shape'], {}), '(x, shape)\n', (48609, 48619), True, 'from theano import tensor as T\n'), ((49442, 49469), 'theano.tensor.repeat', 'T.repeat', (['x', 'rep'], {'axis': 'axis'}), '(x, rep, axis=axis)\n', (49450, 49469), True, 'from theano import tensor as T\n'), ((52847, 52879), 'theano.tensor.extra_ops.repeat', 'T.extra_ops.repeat', (['y', 'n'], {'axis': '(1)'}), '(y, n, axis=1)\n', (52865, 52879), True, 'from theano import tensor as T\n'), ((53323, 53358), 'theano.tensor.extra_ops.repeat', 'T.extra_ops.repeat', (['x', 'n'], {'axis': 'axis'}), '(x, n, axis=axis)\n', (53341, 53358), True, 'from theano import tensor as T\n'), ((53602, 53623), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['x', 'v'], {}), '(x, v)\n', (53617, 53623), True, 'from theano import tensor as T\n'), ((53867, 53888), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['x', 'v'], {}), '(x, v)\n', (53882, 53888), True, 'from theano import tensor as T\n'), ((55182, 55232), 'theano.tensor.arange', 'T.arange', (['start'], {'stop': 'stop', 'step': 'step', 'dtype': 'dtype'}), '(start, stop=stop, step=step, dtype=dtype)\n', (55190, 55232), True, 'from theano import tensor as T\n'), ((55603, 55615), 'theano.tensor.tile', 'T.tile', (['x', 'n'], {}), '(x, n)\n', (55609, 55615), True, 'from theano import tensor as T\n'), ((56147, 56159), 'theano.tensor.flatten', 'T.flatten', (['x'], {}), '(x)\n', (56156, 56159), 
True, 'from theano import tensor as T\n'), ((58492, 58513), 'theano.tensor.zeros', 'T.zeros', (['output_shape'], {}), '(output_shape)\n', (58499, 58513), True, 'from theano import tensor as T\n'), ((58527, 58595), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['output[:, padding[0]:x.shape[1] + padding[0], :]', 'x'], {}), '(output[:, padding[0]:x.shape[1] + padding[0], :], x)\n', (58542, 58595), True, 'from theano import tensor as T\n'), ((60466, 60501), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['output[indices]', 'x'], {}), '(output[indices], x)\n', (60481, 60501), True, 'from theano import tensor as T\n'), ((63793, 63828), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['output[indices]', 'x'], {}), '(output[indices], x)\n', (63808, 63828), True, 'from theano import tensor as T\n'), ((65555, 65564), 'theano.tensor.tril', 'T.tril', (['x'], {}), '(x)\n', (65561, 65564), True, 'from theano import tensor as T\n'), ((65805, 65826), 'theano.tensor.stack', 'T.stack', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (65812, 65826), True, 'from theano import tensor as T\n'), ((66340, 66358), 'theano.tensor.flatten', 'T.flatten', (['indices'], {}), '(indices)\n', (66349, 66358), True, 'from theano import tensor as T\n'), ((66368, 66412), 'theano.tensor.extra_ops.to_one_hot', 'T.extra_ops.to_one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (66390, 66412), True, 'from theano import tensor as T\n'), ((66422, 66465), 'theano.tensor.reshape', 'T.reshape', (['oh', '(input_shape + (num_classes,))'], {}), '(oh, input_shape + (num_classes,))\n', (66431, 66465), True, 'from theano import tensor as T\n'), ((67540, 67576), 'theano.tensor.patternbroadcast', 'T.patternbroadcast', (['x', 'broadcastable'], {}), '(x, broadcastable)\n', (67558, 67576), True, 'from theano import tensor as T\n'), ((69619, 69633), 'theano.printing.Print', 'Print', (['message'], {}), '(message)\n', (69624, 69633), False, 'from theano.printing import Print\n'), ((71189, 71212), 'theano.tensor.grad', 'T.grad', (['loss', 'variables'], {}), '(loss, variables)\n', (71195, 71212), True, 'from theano import tensor as T\n'), ((80331, 80349), 'theano.tensor.squeeze', 'T.squeeze', (['outputs'], {}), '(outputs)\n', (80340, 80349), True, 'from theano import tensor as T\n'), ((81728, 81781), 'theano.tensor.switch', 'T.switch', (['condition', 'then_expression', 'else_expression'], {}), '(condition, then_expression, else_expression)\n', (81736, 81781), True, 'from theano import tensor as T\n'), ((82975, 82999), 'theano.ifelse.ifelse', 'ifelse', (['training', 'x', 'alt'], {}), '(training, x, alt)\n', (82981, 82999), False, 'from theano.ifelse import ifelse\n'), ((84290, 84310), 'theano.tensor.nnet.elu', 'T.nnet.elu', (['x', 'alpha'], {}), '(x, alpha)\n', (84300, 84310), True, 'from theano import tensor as T\n'), ((86081, 86099), 'theano.tensor.nnet.softplus', 'T.nnet.softplus', (['x'], {}), '(x)\n', (86096, 86099), True, 'from theano import tensor as T\n'), ((86250, 86263), 'theano.sandbox.softsign.softsign', 'T_softsign', (['x'], {}), '(x)\n', (86260, 86263), True, 'from theano.sandbox.softsign import softsign as T_softsign\n'), ((87849, 87896), 'theano.tensor.nnet.categorical_crossentropy', 'T.nnet.categorical_crossentropy', (['output', 'target'], {}), '(output, target)\n', (87880, 87896), True, 'from theano import tensor as T\n'), ((89187, 89244), 'theano.tensor.extra_ops.to_one_hot', 'T.extra_ops.to_one_hot', (['target'], {'nb_class': 'output.shape[-1]'}), '(target, nb_class=output.shape[-1])\n', (89209, 
89244), True, 'from theano import tensor as T\n'), ((89995, 90037), 'theano.tensor.nnet.binary_crossentropy', 'T.nnet.binary_crossentropy', (['output', 'target'], {}), '(output, target)\n', (90021, 90037), True, 'from theano import tensor as T\n'), ((91032, 91049), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['x'], {}), '(x)\n', (91046, 91049), True, 'from theano import tensor as T\n'), ((91357, 91379), 'theano.tensor.nnet.hard_sigmoid', 'T.nnet.hard_sigmoid', (['x'], {}), '(x)\n', (91376, 91379), True, 'from theano import tensor as T\n'), ((91523, 91532), 'theano.tensor.tanh', 'T.tanh', (['x'], {}), '(x)\n', (91529, 91532), True, 'from theano import tensor as T\n'), ((92256, 92280), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (92269, 92280), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((94575, 94610), 'theano.tensor.ge', 'T.ge', (['targets_values', 'predictions_k'], {}), '(targets_values, predictions_k)\n', (94579, 94610), True, 'from theano import tensor as T\n'), ((104673, 104832), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['x', 'kernel'], {'border_mode': 'th_padding', 'subsample': 'strides', 'input_shape': 'image_shape', 'filter_shape': 'kernel_shape', 'filter_dilation': 'dilation_rate'}), '(x, kernel, border_mode=th_padding, subsample=strides,\n input_shape=image_shape, filter_shape=kernel_shape, filter_dilation=\n dilation_rate)\n', (104686, 104832), True, 'from theano import tensor as T\n'), ((106795, 106986), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs', 'T.nnet.abstract_conv.AbstractConv2d_gradInputs', ([], {'imshp': 'None', 'kshp': 'kernel_shape', 'subsample': 'strides', 'border_mode': 'th_padding', 'filter_flip': '(not flip_filters)', 'filter_dilation': 'dilation_rate'}), '(imshp=None, kshp=\n kernel_shape, subsample=strides, border_mode=th_padding, filter_flip=\n not flip_filters, filter_dilation=dilation_rate)\n', (106841, 106986), True, 'from theano import tensor as T\n'), ((109500, 109706), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['x', 'depthwise_kernel'], {'border_mode': 'th_padding', 'subsample': 'strides', 'input_shape': 'image_shape', 'filter_shape': 'depthwise_kernel_shape', 'filter_dilation': 'dilation_rate', 'num_groups': 'image_shape[1]'}), '(x, depthwise_kernel, border_mode=th_padding, subsample=\n strides, input_shape=image_shape, filter_shape=depthwise_kernel_shape,\n filter_dilation=dilation_rate, num_groups=image_shape[1])\n', (109513, 109706), True, 'from theano import tensor as T\n'), ((109887, 110065), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['conv_out', 'pointwise_kernel'], {'border_mode': 'th_padding', 'subsample': '(1, 1)', 'input_shape': 'None', 'filter_shape': 'pointwise_kernel_shape', 'filter_dilation': 'dilation_rate'}), '(conv_out, pointwise_kernel, border_mode=th_padding, subsample\n =(1, 1), input_shape=None, filter_shape=pointwise_kernel_shape,\n filter_dilation=dilation_rate)\n', (109900, 110065), True, 'from theano import tensor as T\n'), ((112329, 112535), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['x', 'depthwise_kernel'], {'border_mode': 'th_padding', 'subsample': 'strides', 'input_shape': 'image_shape', 'filter_shape': 'depthwise_kernel_shape', 'filter_dilation': 'dilation_rate', 'num_groups': 'image_shape[1]'}), '(x, depthwise_kernel, border_mode=th_padding, subsample=\n strides, input_shape=image_shape, filter_shape=depthwise_kernel_shape,\n filter_dilation=dilation_rate, num_groups=image_shape[1])\n', 
(112342, 112535), True, 'from theano import tensor as T\n'), ((112716, 112894), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['conv_out', 'pointwise_kernel'], {'border_mode': 'th_padding', 'subsample': '(1, 1)', 'input_shape': 'None', 'filter_shape': 'pointwise_kernel_shape', 'filter_dilation': 'dilation_rate'}), '(conv_out, pointwise_kernel, border_mode=th_padding, subsample\n =(1, 1), input_shape=None, filter_shape=pointwise_kernel_shape,\n filter_dilation=dilation_rate)\n', (112729, 112894), True, 'from theano import tensor as T\n'), ((114645, 114851), 'theano.tensor.nnet.conv2d', 'T.nnet.conv2d', (['x', 'depthwise_kernel'], {'border_mode': 'th_padding', 'subsample': 'strides', 'input_shape': 'image_shape', 'filter_shape': 'depthwise_kernel_shape', 'filter_dilation': 'dilation_rate', 'num_groups': 'image_shape[1]'}), '(x, depthwise_kernel, border_mode=th_padding, subsample=\n strides, input_shape=image_shape, filter_shape=depthwise_kernel_shape,\n filter_dilation=dilation_rate, num_groups=image_shape[1])\n', (114658, 114851), True, 'from theano import tensor as T\n'), ((116372, 116532), 'theano.tensor.nnet.conv3d', 'T.nnet.conv3d', (['x', 'kernel'], {'border_mode': 'th_padding', 'subsample': 'strides', 'input_shape': 'volume_shape', 'filter_shape': 'kernel_shape', 'filter_dilation': 'dilation_rate'}), '(x, kernel, border_mode=th_padding, subsample=strides,\n input_shape=volume_shape, filter_shape=kernel_shape, filter_dilation=\n dilation_rate)\n', (116385, 116532), True, 'from theano import tensor as T\n'), ((118560, 118720), 'theano.tensor.nnet.abstract_conv.AbstractConv3d_gradInputs', 'T.nnet.abstract_conv.AbstractConv3d_gradInputs', ([], {'imshp': 'None', 'kshp': 'kernel_shape', 'subsample': 'strides', 'border_mode': 'th_padding', 'filter_flip': '(not flip_filters)'}), '(imshp=None, kshp=\n kernel_shape, subsample=strides, border_mode=th_padding, filter_flip=\n not flip_filters)\n', (118606, 118720), True, 'from theano import tensor as T\n'), ((126582, 126606), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (126595, 126606), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((127337, 127361), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (127350, 127361), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((127963, 127987), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (127976, 127987), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((128828, 128852), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (128841, 128852), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((129753, 129777), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {'seed': 'seed'}), '(seed=seed)\n', (129766, 129777), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((132042, 132073), 'theano.tensor.alloc', 'T.alloc', (['(-1)', '(Y.shape[0] * 2 + 1)'], {}), '(-1, Y.shape[0] * 2 + 1)\n', (132049, 132073), True, 'from theano import tensor as T\n'), ((132255, 132292), 'theano.tensor.neq', 'T.neq', (['Y[skip_idxs]', 'Y[skip_idxs + 2]'], {}), '(Y[skip_idxs], Y[skip_idxs + 2])\n', (132260, 132292), True, 'from theano import tensor as T\n'), ((132685, 132711), 'theano.tensor.max', 'T.max', (['log_p_prev[:active]'], {}), 
'(log_p_prev[:active])\n', (132690, 132711), True, 'from theano import tensor as T\n'), ((132725, 132767), 'theano.tensor.exp', 'T.exp', (['(log_p_prev[:active] - common_factor)'], {}), '(log_p_prev[:active] - common_factor)\n', (132730, 132767), True, 'from theano import tensor as T\n'), ((132832, 132873), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['_p_prev[:active]', 'p_prev'], {}), '(_p_prev[:active], p_prev)\n', (132847, 132873), True, 'from theano import tensor as T\n'), ((132915, 132957), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['_p_prev[1:]', '_p_prev[:-1]'], {}), '(_p_prev[1:], _p_prev[:-1])\n', (132930, 132957), True, 'from theano import tensor as T\n'), ((132995, 133067), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['_p_prev[active_skip_idxs + 2]', 'p_prev[active_skip_idxs]'], {}), '(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])\n', (133010, 133067), True, 'from theano import tensor as T\n'), ((133151, 133238), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['zeros[:active_next]', '(log_p_curr[:active_next] + updated_log_p_prev)'], {}), '(zeros[:active_next], log_p_curr[:active_next] +\n updated_log_p_prev)\n', (133166, 133238), True, 'from theano import tensor as T\n'), ((133427, 133442), 'theano.tensor.log', 'T.log', (['smoothed'], {}), '(smoothed)\n', (133432, 133442), True, 'from theano import tensor as T\n'), ((133455, 133473), 'theano.tensor.zeros_like', 'T.zeros_like', (['L[0]'], {}), '(L[0])\n', (133467, 133473), True, 'from theano import tensor as T\n'), ((134594, 134610), 'theano.tensor.max', 'T.max', (['log_probs'], {}), '(log_probs)\n', (134599, 134610), True, 'from theano import tensor as T\n'), ((135765, 135868), 'theano.scan', 'theano.scan', ([], {'fn': 'ctc_step', 'outputs_info': 'None', 'sequences': '[y_true, y_pred, input_length, label_length]'}), '(fn=ctc_step, outputs_info=None, sequences=[y_true, y_pred,\n input_length, label_length])\n', (135776, 135868), False, 'import theano\n'), ((143579, 143614), 'theano.tensor.as_tensor_variable', 'T.as_tensor_variable', (['x', 'name', 'ndim'], {}), '(x, name, ndim)\n', (143599, 143614), True, 'from theano import tensor as T\n'), ((3861, 3903), 'theano.sparse.dense_from_sparse', 'th_sparse_module.dense_from_sparse', (['tensor'], {}), '(tensor)\n', (3895, 3903), True, 'import theano.sparse as th_sparse_module\n'), ((5646, 5676), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'dtype'}), '(value, dtype=dtype)\n', (5656, 5676), True, 'import numpy as np\n'), ((6457, 6471), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (6464, 6471), True, 'import numpy as np\n'), ((9802, 9853), 'theano.sparse.csr_matrix', 'th_sparse_module.csr_matrix', ([], {'name': 'name', 'dtype': 'dtype'}), '(name=name, dtype=dtype)\n', (9829, 9853), True, 'import theano.sparse as th_sparse_module\n'), ((14117, 14132), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (14125, 14132), True, 'import numpy as np\n'), ((15015, 15029), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (15022, 15029), True, 'import numpy as np\n'), ((15796, 15808), 'numpy.eye', 'np.eye', (['n', 'm'], {}), '(n, m)\n', (15802, 15808), True, 'import numpy as np\n'), ((18098, 18147), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': 'shape'}), '(low=low, high=high, size=shape)\n', (18115, 18147), True, 'import numpy as np\n'), ((19161, 19211), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'scale', 'size': 'shape'}), '(loc=0.0, scale=scale, 
size=shape)\n', (19177, 19211), True, 'import numpy as np\n'), ((23549, 23592), 'theano.sparse.basic.structured_dot', 'th_sparse_module.basic.structured_dot', (['x', 'y'], {}), '(x, y)\n', (23586, 23592), True, 'import theano.sparse as th_sparse_module\n'), ((23617, 23628), 'theano.tensor.dot', 'T.dot', (['x', 'y'], {}), '(x, y)\n', (23622, 23628), True, 'from theano import tensor as T\n'), ((42158, 42176), 'theano.tensor.inv', 'T.inv', (['(stdinv ** 2)'], {}), '(stdinv ** 2)\n', (42163, 42176), True, 'from theano import tensor as T\n'), ((59780, 59801), 'theano.tensor.zeros', 'T.zeros', (['output_shape'], {}), '(output_shape)\n', (59787, 59801), True, 'from theano import tensor as T\n'), ((60236, 60257), 'theano.tensor.zeros', 'T.zeros', (['output_shape'], {}), '(output_shape)\n', (60243, 60257), True, 'from theano import tensor as T\n'), ((62821, 62842), 'theano.tensor.zeros', 'T.zeros', (['output_shape'], {}), '(output_shape)\n', (62828, 62842), True, 'from theano import tensor as T\n'), ((63465, 63486), 'theano.tensor.zeros', 'T.zeros', (['output_shape'], {}), '(output_shape)\n', (63472, 63486), True, 'from theano import tensor as T\n'), ((68498, 68530), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'x.dtype'}), '(value, dtype=x.dtype)\n', (68508, 68530), True, 'import numpy as np\n'), ((70072, 70199), 'theano.function', 'theano.function', (['inputs', 'outputs'], {'updates': 'updates', 'allow_input_downcast': '(True)', 'on_unused_input': '"""ignore"""', 'name': 'name'}), "(inputs, outputs, updates=updates, allow_input_downcast=True,\n on_unused_input='ignore', name=name, **kwargs)\n", (70087, 70199), False, 'import theano\n'), ((71768, 71812), 'theano.gradient.disconnected_grad', 'theano.gradient.disconnected_grad', (['variables'], {}), '(variables)\n', (71801, 71812), False, 'import theano\n'), ((84964, 84978), 'theano.tensor.nnet.relu', 'T.nnet.relu', (['x'], {}), '(x)\n', (84975, 84978), True, 'from theano import tensor as T\n'), ((85022, 85047), 'theano.tensor.clip', 'T.clip', (['x', '(0.0)', 'max_value'], {}), '(x, 0.0, max_value)\n', (85028, 85047), True, 'from theano import tensor as T\n'), ((85219, 85236), 'theano.tensor.nnet.softmax', 'T.nnet.softmax', (['x'], {}), '(x)\n', (85233, 85236), True, 'from theano import tensor as T\n'), ((85289, 85302), 'theano.tensor.exp', 'T.exp', (['(x - xm)'], {}), '(x - xm)\n', (85294, 85302), True, 'from theano import tensor as T\n'), ((87568, 87590), 'theano.tensor.nnet.softmax', 'T.nnet.softmax', (['output'], {}), '(output)\n', (87582, 87590), True, 'from theano import tensor as T\n'), ((89146, 89163), 'theano.tensor.flatten', 'T.flatten', (['target'], {}), '(target)\n', (89155, 89163), True, 'from theano import tensor as T\n'), ((89848, 89870), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['output'], {}), '(output)\n', (89862, 89870), True, 'from theano import tensor as T\n'), ((90639, 90661), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['output'], {}), '(output)\n', (90653, 90661), True, 'from theano import tensor as T\n'), ((92139, 92171), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (92156, 92171), True, 'import numpy as np\n'), ((92529, 92599), 'theano.tensor.patternbroadcast', 'T.patternbroadcast', (['random_tensor', '[(dim == 1) for dim in noise_shape]'], {}), '(random_tensor, [(dim == 1) for dim in noise_shape])\n', (92547, 92599), True, 'from theano import tensor as T\n'), ((92974, 92985), 'theano.tensor.square', 'T.square', (['x'], {}), '(x)\n', (92982, 92985), 
True, 'from theano import tensor as T\n'), ((94467, 94486), 'theano.tensor.sort', 'T.sort', (['predictions'], {}), '(predictions)\n', (94473, 94486), True, 'from theano import tensor as T\n'), ((120460, 120550), 'theano.tensor.signal.pool.pool_2d', 'pool.pool_2d', (['x'], {'ws': 'pool_size', 'stride': 'strides', 'ignore_border': '(True)', 'pad': 'pad', 'mode': '"""max"""'}), "(x, ws=pool_size, stride=strides, ignore_border=True, pad=pad,\n mode='max')\n", (120472, 120550), False, 'from theano.tensor.signal import pool\n'), ((122616, 122706), 'theano.tensor.signal.pool.pool_3d', 'pool.pool_3d', (['x'], {'ws': 'pool_size', 'stride': 'strides', 'ignore_border': '(True)', 'pad': 'pad', 'mode': '"""max"""'}), "(x, ws=pool_size, stride=strides, ignore_border=True, pad=pad,\n mode='max')\n", (122628, 122706), False, 'from theano.tensor.signal import pool\n'), ((126545, 126577), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (126562, 126577), True, 'import numpy as np\n'), ((127300, 127332), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (127317, 127332), True, 'import numpy as np\n'), ((127926, 127958), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (127943, 127958), True, 'import numpy as np\n'), ((128791, 128823), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (128808, 128823), True, 'import numpy as np\n'), ((129719, 129748), 'numpy.random.randint', 'np.random.randint', (['(10000000.0)'], {}), '(10000000.0)\n', (129736, 129748), True, 'import numpy as np\n'), ((131362, 131396), 'theano.tensor.signal.conv.conv2d', 'vec_conv', (['u', 'v'], {'border_mode': '"""full"""'}), "(u, v, border_mode='full')\n", (131370, 131396), True, 'from theano.tensor.signal.conv import conv2d as vec_conv\n'), ((133102, 133116), 'theano.tensor.log', 'T.log', (['_p_prev'], {}), '(_p_prev)\n', (133107, 133116), True, 'from theano import tensor as T\n'), ((136404, 136436), 'theano.map', 'theano.map', (['fn', 'elems'], {'name': 'name'}), '(fn, elems, name=name)\n', (136414, 136436), False, 'import theano\n'), ((9876, 9906), 'theano.tensor.TensorType', 'T.TensorType', (['dtype', 'broadcast'], {}), '(dtype, broadcast)\n', (9888, 9906), True, 'from theano import tensor as T\n'), ((37881, 37889), 'theano.tensor.exp', 'T.exp', (['x'], {}), '(x)\n', (37886, 37889), True, 'from theano import tensor as T\n'), ((43818, 43931), 'theano.sandbox.cuda.dnn.dnn_batch_normalization_train', 'theano.sandbox.cuda.dnn.dnn_batch_normalization_train', (['x', 'broadcast_gamma', 'broadcast_beta', '"""spatial"""', 'epsilon'], {}), "(x, broadcast_gamma,\n broadcast_beta, 'spatial', epsilon)\n", (43871, 43931), False, 'import theano\n'), ((44009, 44049), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['normed'], {}), '(normed)\n', (44041, 44049), False, 'import theano\n'), ((44069, 44107), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['mean'], {}), '(mean)\n', (44101, 44107), False, 'import theano\n'), ((44129, 44169), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['stdinv'], {}), '(stdinv)\n', (44161, 44169), False, 'import theano\n'), ((44188, 44206), 'theano.tensor.inv', 'T.inv', (['(stdinv ** 2)'], {}), '(stdinv ** 2)\n', (44193, 44206), True, 'from theano import tensor as T\n'), ((46928, 46968), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', 
(['result'], {}), '(result)\n', (46960, 46968), False, 'import theano\n'), ((47599, 47651), 'theano.sparse.basic.vstack', 'th_sparse_module.basic.vstack', (['tensors'], {'format': '"""csr"""'}), "(tensors, format='csr')\n", (47628, 47651), True, 'import theano.sparse as th_sparse_module\n'), ((51033, 51102), 'theano.tensor.nnet.abstract_conv.bilinear_upsampling', 'T.nnet.abstract_conv.bilinear_upsampling', (['output'], {'ratio': 'height_factor'}), '(output, ratio=height_factor)\n', (51073, 51102), True, 'from theano import tensor as T\n'), ((56629, 56648), 'theano.tensor.prod', 'T.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (56635, 56648), True, 'from theano import tensor as T\n'), ((68790, 68822), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'x.dtype'}), '(value, dtype=x.dtype)\n', (68800, 68822), True, 'import numpy as np\n'), ((75000, 75034), 'theano.tensor.concatenate', 'T.concatenate', (['[[1], add_shape]', '(0)'], {}), '([[1], add_shape], 0)\n', (75013, 75034), True, 'from theano import tensor as T\n'), ((75054, 75085), 'theano.tensor.tile', 'T.tile', (['mask_t', 'reps'], {'ndim': 'ndim'}), '(mask_t, reps, ndim=ndim)\n', (75060, 75085), True, 'from theano import tensor as T\n'), ((76277, 76305), 'theano.tensor.stack', 'T.stack', (['*successive_outputs'], {}), '(*successive_outputs)\n', (76284, 76305), True, 'from theano import tensor as T\n'), ((76905, 76940), 'theano.tensor.unbroadcast', 'T.unbroadcast', (['initial_output', '(0)', '(1)'], {}), '(initial_output, 0, 1)\n', (76918, 76940), True, 'from theano import tensor as T\n'), ((77966, 78114), 'theano.scan', 'theano.scan', (['_step'], {'sequences': '[inputs, mask]', 'outputs_info': '([initial_output] + initial_states)', 'non_sequences': 'constants', 'go_backwards': 'go_backwards'}), '(_step, sequences=[inputs, mask], outputs_info=[initial_output] +\n initial_states, non_sequences=constants, go_backwards=go_backwards)\n', (77977, 78114), False, 'import theano\n'), ((79038, 79066), 'theano.tensor.stack', 'T.stack', (['*successive_outputs'], {}), '(*successive_outputs)\n', (79045, 79066), True, 'from theano import tensor as T\n'), ((79862, 79992), 'theano.scan', 'theano.scan', (['_step'], {'sequences': 'inputs', 'outputs_info': '([None] + initial_states)', 'non_sequences': 'constants', 'go_backwards': 'go_backwards'}), '(_step, sequences=inputs, outputs_info=[None] + initial_states,\n non_sequences=constants, go_backwards=go_backwards)\n', (79873, 79992), False, 'import theano\n'), ((80528, 80548), 'theano.tensor.squeeze', 'T.squeeze', (['state[-1]'], {}), '(state[-1])\n', (80537, 80548), True, 'from theano import tensor as T\n'), ((84778, 84805), 'theano.tensor.nnet.relu', 'T.nnet.relu', (['(-x + threshold)'], {}), '(-x + threshold)\n', (84789, 84805), True, 'from theano import tensor as T\n'), ((84848, 84863), 'theano.tensor.nnet.relu', 'T.nnet.relu', (['(-x)'], {}), '(-x)\n', (84859, 84863), True, 'from theano import tensor as T\n'), ((94142, 94177), 'theano.tensor.zeros_like', 'T.zeros_like', (['targets'], {'dtype': '"""bool"""'}), "(targets, dtype='bool')\n", (94154, 94177), True, 'from theano import tensor as T\n'), ((94331, 94365), 'theano.tensor.ones_like', 'T.ones_like', (['targets'], {'dtype': '"""bool"""'}), "(targets, dtype='bool')\n", (94342, 94365), True, 'from theano import tensor as T\n'), ((94527, 94553), 'theano.tensor.arange', 'T.arange', (['targets.shape[0]'], {}), '(targets.shape[0])\n', (94535, 94553), True, 'from theano import tensor as T\n'), ((120691, 120793), 'theano.tensor.signal.pool.pool_2d', 
'pool.pool_2d', (['x'], {'ws': 'pool_size', 'stride': 'strides', 'ignore_border': '(True)', 'pad': 'pad', 'mode': '"""average_exc_pad"""'}), "(x, ws=pool_size, stride=strides, ignore_border=True, pad=pad,\n mode='average_exc_pad')\n", (120703, 120793), False, 'from theano.tensor.signal import pool\n'), ((122847, 122949), 'theano.tensor.signal.pool.pool_3d', 'pool.pool_3d', (['x'], {'ws': 'pool_size', 'stride': 'strides', 'ignore_border': '(True)', 'pad': 'pad', 'mode': '"""average_exc_pad"""'}), "(x, ws=pool_size, stride=strides, ignore_border=True, pad=pad,\n mode='average_exc_pad')\n", (122859, 122949), False, 'from theano.tensor.signal import pool\n'), ((129159, 129213), 'theano.tensor.clip', 'T.clip', (['normal_t', '(mean - 2 * stddev)', '(mean + 2 * stddev)'], {}), '(normal_t, mean - 2 * stddev, mean + 2 * stddev)\n', (129165, 129213), True, 'from theano import tensor as T\n'), ((130324, 130351), 'theano.tensor.alloc', 'T.alloc', (['(0.0)', 'x.shape[0]', 'd'], {}), '(0.0, x.shape[0], d)\n', (130331, 130351), True, 'from theano import tensor as T\n'), ((130899, 130910), 'theano.tensor.dot', 'T.dot', (['s', 'v'], {}), '(s, v)\n', (130904, 130910), True, 'from theano import tensor as T\n'), ((131697, 131721), 'theano.tensor.alloc', 'T.alloc', (['(0.0)', 'u.shape[1]'], {}), '(0.0, u.shape[1])\n', (131704, 131721), True, 'from theano import tensor as T\n'), ((132197, 132228), 'theano.tensor.arange', 'T.arange', (['((Y.shape[0] - 3) // 2)'], {}), '((Y.shape[0] - 3) // 2)\n', (132205, 132228), True, 'from theano import tensor as T\n'), ((134236, 134256), 'theano.tensor.arange', 'T.arange', (['L.shape[1]'], {}), '(L.shape[1])\n', (134244, 134256), True, 'from theano import tensor as T\n'), ((44234, 44249), 'theano.tensor.flatten', 'T.flatten', (['mean'], {}), '(mean)\n', (44243, 44249), True, 'from theano import tensor as T\n'), ((44251, 44265), 'theano.tensor.flatten', 'T.flatten', (['var'], {}), '(var)\n', (44260, 44265), True, 'from theano import tensor as T\n'), ((46788, 46891), 'theano.sandbox.cuda.dnn.dnn_batch_normalization_test', 'theano.sandbox.cuda.dnn.dnn_batch_normalization_test', (['x', 'gamma', 'beta', 'mean', 'var', '"""spatial"""', 'epsilon'], {}), "(x, gamma, beta, mean,\n var, 'spatial', epsilon)\n", (46840, 46891), False, 'import theano\n'), ((47697, 47749), 'theano.sparse.basic.hstack', 'th_sparse_module.basic.hstack', (['tensors'], {'format': '"""csr"""'}), "(tensors, format='csr')\n", (47726, 47749), True, 'import theano.sparse as th_sparse_module\n'), ((49167, 49193), 'numpy.asarray', 'np.asarray', (['x._keras_shape'], {}), '(x._keras_shape)\n', (49177, 49193), True, 'import numpy as np\n'), ((56311, 56334), 'numpy.prod', 'np.prod', (['x._keras_shape'], {}), '(x._keras_shape)\n', (56318, 56334), True, 'import numpy as np\n'), ((56843, 56870), 'numpy.prod', 'np.prod', (['x._keras_shape[1:]'], {}), '(x._keras_shape[1:])\n', (56850, 56870), True, 'import numpy as np\n'), ((75830, 75872), 'theano.tensor.switch', 'T.switch', (['output_mask', 'output', 'prev_output'], {}), '(output_mask, output, prev_output)\n', (75838, 75872), True, 'from theano import tensor as T\n'), ((77017, 77055), 'theano.tensor.unbroadcast', 'T.unbroadcast', (['initial_states[0]', '(0)', '(1)'], {}), '(initial_states[0], 0, 1)\n', (77030, 77055), True, 'from theano import tensor as T\n'), ((77486, 77528), 'theano.tensor.switch', 'T.switch', (['output_mask', 'outputs', 'output_tm1'], {}), '(output_mask, outputs, output_tm1)\n', (77494, 77528), True, 'from theano import tensor as T\n'), ((79797, 79835), 
'theano.tensor.unbroadcast', 'T.unbroadcast', (['initial_states[0]', '(0)', '(1)'], {}), '(initial_states[0], 0, 1)\n', (79810, 79835), True, 'from theano import tensor as T\n'), ((80662, 80682), 'theano.tensor.squeeze', 'T.squeeze', (['state[-1]'], {}), '(state[-1])\n', (80671, 80682), True, 'from theano import tensor as T\n'), ((84912, 84930), 'theano.tensor.gt', 'T.gt', (['x', 'threshold'], {}), '(x, threshold)\n', (84916, 84930), True, 'from theano import tensor as T\n'), ((85305, 85318), 'theano.tensor.exp', 'T.exp', (['(x - xm)'], {}), '(x - xm)\n', (85310, 85318), True, 'from theano import tensor as T\n'), ((90814, 90827), 'theano.tensor.log', 'T.log', (['output'], {}), '(output)\n', (90819, 90827), True, 'from theano import tensor as T\n'), ((90862, 90881), 'theano.tensor.log', 'T.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (90867, 90881), True, 'from theano import tensor as T\n'), ((94223, 94258), 'theano.tensor.zeros_like', 'T.zeros_like', (['targets'], {'dtype': '"""int8"""'}), "(targets, dtype='int8')\n", (94235, 94258), True, 'from theano import tensor as T\n'), ((94411, 94445), 'theano.tensor.ones_like', 'T.ones_like', (['targets'], {'dtype': '"""int8"""'}), "(targets, dtype='int8')\n", (94422, 94445), True, 'from theano import tensor as T\n'), ((133391, 133406), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (133401, 133406), True, 'import numpy as np\n'), ((134175, 134186), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (134183, 134186), True, 'import numpy as np\n'), ((134199, 134210), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (134207, 134210), True, 'import numpy as np\n'), ((134644, 134676), 'theano.tensor.exp', 'T.exp', (['(log_probs - common_factor)'], {}), '(log_probs - common_factor)\n', (134649, 134676), True, 'from theano import tensor as T\n'), ((76562, 76582), 'theano.tensor.stack', 'T.stack', (['*new_states'], {}), '(*new_states)\n', (76569, 76582), True, 'from theano import tensor as T\n'), ((79177, 79246), 'theano.tensor.stack', 'T.stack', (['*[states_at_step[i] for states_at_step in successive_states]'], {}), '(*[states_at_step[i] for states_at_step in successive_states])\n', (79184, 79246), True, 'from theano import tensor as T\n'), ((132102, 132122), 'theano.tensor.arange', 'T.arange', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (132110, 132122), True, 'from theano import tensor as T\n'), ((76077, 76115), 'theano.tensor.switch', 'T.switch', (['state_mask', 'new_state', 'state'], {}), '(state_mask, new_state, state)\n', (76085, 76115), True, 'from theano import tensor as T\n'), ((77851, 77889), 'theano.tensor.switch', 'T.switch', (['state_mask', 'new_state', 'state'], {}), '(state_mask, new_state, state)\n', (77859, 77889), True, 'from theano import tensor as T\n'), ((132573, 132612), 'theano.tensor.concatenate', 'T.concatenate', (['[active_skip_idxs, [-1]]'], {}), '([active_skip_idxs, [-1]])\n', (132586, 132612), True, 'from theano import tensor as T\n')] |
try:
import unittest
from copy import copy
from numpy.testing import assert_allclose
import numpy as np
from spitfire.chemistry.mechanism import ChemicalMechanismSpec
from spitfire.chemistry.library import Library, Dimension
from spitfire.chemistry.flamelet import FlameletSpec
from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec
import cantera
import cantera as ct
import pytabprops
if int(cantera.__version__.replace('.', '')) >= 250:
class Test(unittest.TestCase):
def test(self):
gas = ct.Solution('h2o2.yaml', transport_model='Multi')
mech = ChemicalMechanismSpec.from_solution(gas)
fs = FlameletSpec(mech_spec=mech,
initial_condition='equilibrium',
oxy_stream=mech.stream('TPX', (300, 1.e5, 'O2:1, N2:3.76')),
fuel_stream=mech.stream('TPY', (300, 1.e5, 'H2:1')),
grid_points=16)
eq_lib1 = build_adiabatic_eq_library(fs, verbose=False)
z_dim = Dimension(eq_lib1.mixture_fraction_name, eq_lib1.mixture_fraction_values)
fuel_T_dim = Dimension('fuel_temperature', np.linspace(0.0, 1.0, 4))
air_T_dim = Dimension('air_temperature', np.linspace(0.0, 1.0, 3))
eq_lib2 = Library(z_dim, fuel_T_dim)
eq_lib2T = Library(fuel_T_dim, z_dim)
eq_lib3 = Library(z_dim, fuel_T_dim, air_T_dim)
eq_lib3T1 = Library(fuel_T_dim, z_dim, air_T_dim)
eq_lib3T2 = Library(fuel_T_dim, air_T_dim, z_dim)
for p in eq_lib1.props:
eq_lib2[p] = eq_lib2.get_empty_dataset()
eq_lib2T[p] = eq_lib2T.get_empty_dataset()
eq_lib3[p] = eq_lib3.get_empty_dataset()
eq_lib3T1[p] = eq_lib3T1.get_empty_dataset()
eq_lib3T2[p] = eq_lib3T2.get_empty_dataset()
for i, fuel_T_offset in enumerate(fuel_T_dim.values):
fuel_T = 300 + fuel_T_offset * 500.
fs2 = copy(fs)
fs2.fuel_stream.TP = fuel_T, 1.e5
eq_tmp = build_adiabatic_eq_library(fs2, verbose=False)
for p in eq_lib1.props:
eq_lib2[p][:, i] = eq_tmp[p]
eq_lib2T[p][i, :] = eq_tmp[p]
for j, air_T_offset in enumerate(air_T_dim.values):
air_T = 300 + air_T_offset * 500.
fs3 = copy(fs2)
fs3.oxy_stream.TP = air_T, 1.e5
eq_tmp = build_adiabatic_eq_library(fs3, verbose=False)
for p in eq_lib1.props:
eq_lib3[p][:, i, j] = eq_tmp[p]
eq_lib3T1[p][i, :, j] = eq_tmp[p]
eq_lib3T2[p][i, j, :] = eq_tmp[p]
nonT_props = list(eq_lib1.props)
nonT_props.remove('temperature')
eq_lib1.remove(*nonT_props)
eq_lib2.remove(*nonT_props)
eq_lib2T.remove(*nonT_props)
eq_lib3.remove(*nonT_props)
eq_lib3T1.remove(*nonT_props)
eq_lib3T2.remove(*nonT_props)
z_svv = np.linspace(0., 1., 6)
Tf_svv = np.linspace(0., 1., 5)
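                # z_svv / Tf_svv are the scaled-variance grids handed to the
                # presumed PDFs below: a clipped-Gaussian PDF in mixture
                # fraction and a beta PDF in the scaled fuel temperature.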
eq_lib1_t = apply_mixing_model(eq_lib1, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib2_t = apply_mixing_model(eq_lib2, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib3_t = apply_mixing_model(eq_lib3, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib2T_t = apply_mixing_model(eq_lib2T, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib3T1_t = apply_mixing_model(eq_lib3T1, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib3T2_t = apply_mixing_model(eq_lib3T2, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib2_tt = apply_mixing_model(eq_lib2_t, {'fuel_temperature_mean': PDFSpec('Beta', Tf_svv, variance_name='Tfvar')}, added_suffix='', num_procs=1, verbose=False)
eq_lib3_tt = apply_mixing_model(eq_lib3_t, {'fuel_temperature_mean': PDFSpec('Beta', Tf_svv, variance_name='Tfvar')}, added_suffix='', num_procs=1, verbose=False)
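                # The assertions below pin down the naming convention of
                # apply_mixing_model: every original dimension is renamed
                # with a '_mean' suffix and the variance dimension is appended
                # last ('scaled_scalar_variance_mean' by default, or the
                # variance_name given in PDFSpec, e.g. 'Tfvar');
                # added_suffix='' presumably keeps the names from being
                # suffixed a second time.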
def get_dim_names(lib):
return [d.name for d in lib.dims]
self.assertEqual(['mixture_fraction'], get_dim_names(eq_lib1))
self.assertEqual(['mixture_fraction_mean', 'scaled_scalar_variance_mean'], get_dim_names(eq_lib1_t))
self.assertEqual(['mixture_fraction', 'fuel_temperature'], get_dim_names(eq_lib2))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'scaled_scalar_variance_mean'],
get_dim_names(eq_lib2_t))
self.assertEqual(
['mixture_fraction_mean', 'fuel_temperature_mean', 'scaled_scalar_variance_mean', 'Tfvar'],
get_dim_names(eq_lib2_tt))
self.assertEqual(['mixture_fraction', 'fuel_temperature', 'air_temperature'],
get_dim_names(eq_lib3))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3_t))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean', 'Tfvar'],
get_dim_names(eq_lib3_tt))
self.assertEqual(['fuel_temperature', 'mixture_fraction'], get_dim_names(eq_lib2T))
self.assertEqual(['fuel_temperature_mean', 'mixture_fraction_mean', 'scaled_scalar_variance_mean'],
get_dim_names(eq_lib2T_t), eq_lib2T_t)
self.assertEqual(['fuel_temperature', 'mixture_fraction', 'air_temperature'],
get_dim_names(eq_lib3T1))
self.assertEqual(['fuel_temperature', 'air_temperature', 'mixture_fraction'],
get_dim_names(eq_lib3T2))
self.assertEqual(['fuel_temperature_mean', 'mixture_fraction_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3T1_t))
self.assertEqual(['fuel_temperature_mean', 'air_temperature_mean', 'mixture_fraction_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3T2_t))
self.assertFalse(np.any(np.isnan(eq_lib1['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib1_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2T['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2T_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T1['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T2['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3_tt['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T1_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T2_t['temperature'])))
self.assertIsNone(assert_allclose(eq_lib2T['temperature'].T, eq_lib2['temperature']))
self.assertIsNone(assert_allclose(np.swapaxes(eq_lib3T1['temperature'], 0, 1),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.swapaxes(np.swapaxes(eq_lib3T2['temperature'], 1, 2), 0, 1),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib1_t['temperature'][:, 0]),
eq_lib1['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib2_t['temperature'][:, :, 0]),
eq_lib2['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib3_t['temperature'][:, :, :, 0]),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib3_tt['temperature'][:, :, :, 0, 0]),
eq_lib3['temperature']))
if __name__ == '__main__':
unittest.main()
except ImportError:
pass
| [
"spitfire.chemistry.library.Library",
"spitfire.chemistry.library.Dimension",
"numpy.testing.assert_allclose",
"cantera.__version__.replace",
"numpy.squeeze",
"numpy.swapaxes",
"numpy.linspace",
"numpy.isnan",
"spitfire.chemistry.tabulation.build_adiabatic_eq_library",
"spitfire.chemistry.mechanis... | [((490, 526), 'cantera.__version__.replace', 'cantera.__version__.replace', (['"""."""', '""""""'], {}), "('.', '')\n", (517, 526), False, 'import cantera\n'), ((9336, 9351), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9349, 9351), False, 'import unittest\n'), ((625, 674), 'cantera.Solution', 'ct.Solution', (['"""h2o2.yaml"""'], {'transport_model': '"""Multi"""'}), "('h2o2.yaml', transport_model='Multi')\n", (636, 674), True, 'import cantera as ct\n'), ((698, 738), 'spitfire.chemistry.mechanism.ChemicalMechanismSpec.from_solution', 'ChemicalMechanismSpec.from_solution', (['gas'], {}), '(gas)\n', (733, 738), False, 'from spitfire.chemistry.mechanism import ChemicalMechanismSpec\n'), ((1115, 1160), 'spitfire.chemistry.tabulation.build_adiabatic_eq_library', 'build_adiabatic_eq_library', (['fs'], {'verbose': '(False)'}), '(fs, verbose=False)\n', (1141, 1160), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((1186, 1259), 'spitfire.chemistry.library.Dimension', 'Dimension', (['eq_lib1.mixture_fraction_name', 'eq_lib1.mixture_fraction_values'], {}), '(eq_lib1.mixture_fraction_name, eq_lib1.mixture_fraction_values)\n', (1195, 1259), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((1455, 1481), 'spitfire.chemistry.library.Library', 'Library', (['z_dim', 'fuel_T_dim'], {}), '(z_dim, fuel_T_dim)\n', (1462, 1481), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((1509, 1535), 'spitfire.chemistry.library.Library', 'Library', (['fuel_T_dim', 'z_dim'], {}), '(fuel_T_dim, z_dim)\n', (1516, 1535), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((1562, 1599), 'spitfire.chemistry.library.Library', 'Library', (['z_dim', 'fuel_T_dim', 'air_T_dim'], {}), '(z_dim, fuel_T_dim, air_T_dim)\n', (1569, 1599), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((1628, 1665), 'spitfire.chemistry.library.Library', 'Library', (['fuel_T_dim', 'z_dim', 'air_T_dim'], {}), '(fuel_T_dim, z_dim, air_T_dim)\n', (1635, 1665), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((1694, 1731), 'spitfire.chemistry.library.Library', 'Library', (['fuel_T_dim', 'air_T_dim', 'z_dim'], {}), '(fuel_T_dim, air_T_dim, z_dim)\n', (1701, 1731), False, 'from spitfire.chemistry.library import Library, Dimension\n'), ((3465, 3489), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(6)'], {}), '(0.0, 1.0, 6)\n', (3476, 3489), True, 'import numpy as np\n'), ((3513, 3537), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(5)'], {}), '(0.0, 1.0, 5)\n', (3524, 3537), True, 'import numpy as np\n'), ((1319, 1343), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(4)'], {}), '(0.0, 1.0, 4)\n', (1330, 1343), True, 'import numpy as np\n'), ((1402, 1426), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(3)'], {}), '(0.0, 1.0, 3)\n', (1413, 1426), True, 'import numpy as np\n'), ((2241, 2249), 'copy.copy', 'copy', (['fs'], {}), '(fs)\n', (2245, 2249), False, 'from copy import copy\n'), ((2334, 2380), 'spitfire.chemistry.tabulation.build_adiabatic_eq_library', 'build_adiabatic_eq_library', (['fs2'], {'verbose': '(False)'}), '(fs2, verbose=False)\n', (2360, 2380), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((8164, 8230), 'numpy.testing.assert_allclose', 'assert_allclose', (["eq_lib2T['temperature'].T", "eq_lib2['temperature']"], {}), 
"(eq_lib2T['temperature'].T, eq_lib2['temperature'])\n", (8179, 8230), False, 'from numpy.testing import assert_allclose\n'), ((2693, 2702), 'copy.copy', 'copy', (['fs2'], {}), '(fs2)\n', (2697, 2702), False, 'from copy import copy\n'), ((2793, 2839), 'spitfire.chemistry.tabulation.build_adiabatic_eq_library', 'build_adiabatic_eq_library', (['fs3'], {'verbose': '(False)'}), '(fs3, verbose=False)\n', (2819, 2839), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((3614, 3641), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (3621, 3641), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((3736, 3763), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (3743, 3763), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((3858, 3885), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (3865, 3885), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((3995, 4022), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (4002, 4022), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((4121, 4148), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (4128, 4148), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((4260, 4287), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""ClipGauss"""', 'z_svv'], {}), "('ClipGauss', z_svv)\n", (4267, 4287), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((4403, 4449), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""Beta"""', 'Tf_svv'], {'variance_name': '"""Tfvar"""'}), "('Beta', Tf_svv, variance_name='Tfvar')\n", (4410, 4449), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((4582, 4628), 'spitfire.chemistry.tabulation.PDFSpec', 'PDFSpec', (['"""Beta"""', 'Tf_svv'], {'variance_name': '"""Tfvar"""'}), "('Beta', Tf_svv, variance_name='Tfvar')\n", (4589, 4628), False, 'from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec\n'), ((7169, 7201), 'numpy.isnan', 'np.isnan', (["eq_lib1['temperature']"], {}), "(eq_lib1['temperature'])\n", (7177, 7201), True, 'import numpy as np\n'), ((7244, 7278), 'numpy.isnan', 'np.isnan', (["eq_lib1_t['temperature']"], {}), "(eq_lib1_t['temperature'])\n", (7252, 7278), True, 'import numpy as np\n'), ((7321, 7353), 'numpy.isnan', 'np.isnan', (["eq_lib2['temperature']"], {}), "(eq_lib2['temperature'])\n", (7329, 7353), True, 'import numpy as np\n'), ((7396, 7429), 'numpy.isnan', 'np.isnan', (["eq_lib2T['temperature']"], {}), "(eq_lib2T['temperature'])\n", (7404, 7429), True, 'import numpy as np\n'), ((7472, 7506), 'numpy.isnan', 'np.isnan', (["eq_lib2_t['temperature']"], {}), "(eq_lib2_t['temperature'])\n", (7480, 7506), True, 'import numpy as np\n'), ((7549, 7584), 'numpy.isnan', 'np.isnan', (["eq_lib2T_t['temperature']"], {}), "(eq_lib2T_t['temperature'])\n", (7557, 7584), True, 'import 
numpy as np\n'), ((7627, 7659), 'numpy.isnan', 'np.isnan', (["eq_lib3['temperature']"], {}), "(eq_lib3['temperature'])\n", (7635, 7659), True, 'import numpy as np\n'), ((7702, 7736), 'numpy.isnan', 'np.isnan', (["eq_lib3T1['temperature']"], {}), "(eq_lib3T1['temperature'])\n", (7710, 7736), True, 'import numpy as np\n'), ((7779, 7813), 'numpy.isnan', 'np.isnan', (["eq_lib3T2['temperature']"], {}), "(eq_lib3T2['temperature'])\n", (7787, 7813), True, 'import numpy as np\n'), ((7856, 7890), 'numpy.isnan', 'np.isnan', (["eq_lib3_t['temperature']"], {}), "(eq_lib3_t['temperature'])\n", (7864, 7890), True, 'import numpy as np\n'), ((7933, 7968), 'numpy.isnan', 'np.isnan', (["eq_lib3_tt['temperature']"], {}), "(eq_lib3_tt['temperature'])\n", (7941, 7968), True, 'import numpy as np\n'), ((8011, 8047), 'numpy.isnan', 'np.isnan', (["eq_lib3T1_t['temperature']"], {}), "(eq_lib3T1_t['temperature'])\n", (8019, 8047), True, 'import numpy as np\n'), ((8090, 8126), 'numpy.isnan', 'np.isnan', (["eq_lib3T2_t['temperature']"], {}), "(eq_lib3T2_t['temperature'])\n", (8098, 8126), True, 'import numpy as np\n'), ((8282, 8325), 'numpy.swapaxes', 'np.swapaxes', (["eq_lib3T1['temperature']", '(0)', '(1)'], {}), "(eq_lib3T1['temperature'], 0, 1)\n", (8293, 8325), True, 'import numpy as np\n'), ((8642, 8684), 'numpy.squeeze', 'np.squeeze', (["eq_lib1_t['temperature'][:, 0]"], {}), "(eq_lib1_t['temperature'][:, 0])\n", (8652, 8684), True, 'import numpy as np\n'), ((8811, 8856), 'numpy.squeeze', 'np.squeeze', (["eq_lib2_t['temperature'][:, :, 0]"], {}), "(eq_lib2_t['temperature'][:, :, 0])\n", (8821, 8856), True, 'import numpy as np\n'), ((8983, 9031), 'numpy.squeeze', 'np.squeeze', (["eq_lib3_t['temperature'][:, :, :, 0]"], {}), "(eq_lib3_t['temperature'][:, :, :, 0])\n", (8993, 9031), True, 'import numpy as np\n'), ((9158, 9210), 'numpy.squeeze', 'np.squeeze', (["eq_lib3_tt['temperature'][:, :, :, 0, 0]"], {}), "(eq_lib3_tt['temperature'][:, :, :, 0, 0])\n", (9168, 9210), True, 'import numpy as np\n'), ((8464, 8507), 'numpy.swapaxes', 'np.swapaxes', (["eq_lib3T2['temperature']", '(1)', '(2)'], {}), "(eq_lib3T2['temperature'], 1, 2)\n", (8475, 8507), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
from typing import Callable, Union
from tkinter import Toplevel
import tkinter as tk
import tkinter.ttk as ttk
from functools import partial
import numpy as np
from src.utils.MatplotlibTkinterIntegration import createPlot
from src.utils.State import State
from src.utils.constants import padding
from .hoyle import Hoyle_phi, Hoyle_theta, Hoyle_tot, Settings, fitting_Hoyle
from .utils import rotation
class Model:
"""
Gold Hoyle Model
===========
The model after Gold and Hoyle 1960. Improved to use a toroidal geometry by
Vandas and Romashets 2017.
"""
# this tells the application which variables are required so that it can
# disable the model when some variables are missing
# for a list of variables have a look at `src/utils/constants.py`
# "requiredVariables" and "optionalVariables"
requiredVariables = ["Magnetic Field"]
# the name of this model
name = "Gold Hoyle"
# set to True when this reconstruction has some options to configure
hasSettings = False
# set to True when this reconstruction can show some results
hasResults = False
def __init__(self):
self.canceled = False
def canRun(self) -> Union[bool, str]:
"""
If the reconstruction model depends on certain packages, check for them
here if they are installed. Return True only when the model can be run,
otherwise return a string that describes the problem
"""
return True
def showSettings(self, window: Toplevel, state: State):
"""Called when the user wants to change some options"""
print("showing settings")
window.title(self.name)
def run(
self,
state: State,
statusCallback: Callable[[float, str], None],
doneCallback: Callable[[], None],
errorCallback: Callable[[str], None]
):
"""
Called when the user wants to run the reconstruction. This function is
called in a new thread, so it doesn't block the tk mainloop.
NOTE: You can get the data via `state.getData("mag", "x")`. This data might
contain NaNs. The reconstruction should be able to handle them.
Parameters:
state -- The state object containing all the user inputs. Defined in
`src/utils/State.py`
statusCallback -- A function that updates the progress display of the
reconstruction. The function expects a float between 0 and 1 and a string
that describes the current operation. If you can't provide the float simply
pass None instead (not 0, since that would leave the progress bar empty the
whole time).
doneCallback -- A function that should be called when the reconstruction
has finished successfully
errorCallback -- A function that should be called when the reconstruction
threw an error. It expects a descriptive string which is directly presented
to the user.
"""
self.canceled = False
try:
statusCallback(None, "Reading the data")
# read the magnetic field
Bx = state.getData("mag", "x")
By = state.getData("mag", "y")
Bz = state.getData("mag", "z")
if Bx is None or By is None or Bz is None:
raise Exception("Not all magnetic field components found")
Btotal = state.getData("mag", "total")
# assume that they all have the same length
self.r = r = np.linspace(-1, 1, len(Bx))
self.Bx, self.By, self.Bz = Bx, By, Bz
statusCallback(None, "Calculating")
self.result = fitting_Hoyle(
Bx,
By,
Bz,
Btotal,
r,
Settings(),
statusCallback,
lambda: self.canceled
)
if self.canceled:
return
tor = np.pi / 180
# Define Br
Br = np.zeros_like(r)
# define Bphi
Bphi = self.result.B0 / (1 + self.result.b**2 * (r / self.result.R0)**2)
# Define Btheta
Btheta = self.result.B0 * self.result.b * r / (
(1 + self.result.b**2 * (r / self.result.R0)**2) * (
self.result.R0 +
(r / self.result.R0) * np.cos(self.result.theta * tor)
)
)
self.Br, self.Bphi, self.Btheta = rotation(
Br,
Bphi,
Btheta,
self.result.theta,
self.result.phi
)
self.hasResults = True
doneCallback()
except Exception as e:
self.hasResults = False
errorCallback(e)
raise
def cancel(self):
self.canceled = True
def showResults(self, window: Toplevel):
"""Called when the user wants to see the results"""
window.title(self.name)
self.resultsWindow = window
items = [
("Magnetic field error (quality)", "magFieldError", "%"),
("B0", "B0", "nT"),
"b",
("R0", "R0", "AU"),
"phi",
"theta"
]
for i, item in enumerate(items):
unit = None
if isinstance(item, tuple):
title = item[0]
key = item[1]
if len(item) > 2:
unit = item[2]
else:
title = item
key = item
label = ttk.Label(window, text=title)
label.grid(column=0, row=i, padx=padding, pady=padding)
var = tk.StringVar(window, str(getattr(self.result, key)))
entry = ttk.Entry(window, state="readonly", textvariable=var)
entry.var = var
entry.grid(column=1, row=i, padx=padding, pady=padding)
colMostRight = 1
if hasattr(self.result, key + "Error"):
label2 = ttk.Label(window, text="+-")
label2.grid(column=2, row=i)
var2 = tk.StringVar(window, str(getattr(self.result, key + "Error")))
entry2 = ttk.Entry(window, state="readonly", textvariable=var2)
entry2.var = var2
entry2.grid(column=3, row=i, padx=padding, pady=padding)
colMostRight = 3
if unit:
label3 = ttk.Label(window, text=unit)
label3.grid(column=colMostRight + 1, row=i, padx=padding)
btnFrame = ttk.Frame(window)
btnFrame.grid(
column=0,
row=len(items) + 1,
columnspan=4,
sticky="we",
padx=padding,
pady=padding
)
btn = ttk.Button(btnFrame, text="Show Fit", command=self.showFit)
btn.grid(column=1, row=1)
btn = ttk.Button(
btnFrame,
text="Show Bphi Plot",
command=partial(self.showPolar, Hoyle_phi)
)
btn.grid(column=2, row=1)
btn = ttk.Button(
btnFrame,
text="Show Btheta Plot",
command=partial(self.showPolar, Hoyle_theta)
)
btn.grid(column=3, row=1)
btn = ttk.Button(
btnFrame,
text="Show Btotal Plot",
command=partial(self.showPolar, Hoyle_tot)
)
btn.grid(column=4, row=1)
def showFit(self):
window = tk.Toplevel(self.resultsWindow)
fig = plt.figure(figsize=(6.4, 8))
r = self.r
Bx, By, Bz = self.Bx, self.By, self.Bz
Br, Bphi, Btheta = self.Br, self.Bphi, self.Btheta
plt.subplot(411)
plt.title('Bx')
plt.ylabel('Amplitude [nT]')
plt.xlabel('r (normalized)')
plt.plot(r, Bx, label='In-situ data', c='k')
plt.plot(r, Br, label='Gold_Hoyle Fit', c='b')
plt.subplot(412)
plt.title('By')
plt.ylabel('Amplitude [nT]')
plt.xlabel('r (normalized)')
plt.plot(r, By, label='In-situ data', c='k')
plt.plot(r, Bphi, label='Gold_Hoyle Fit', c='c')
plt.subplot(413)
plt.title('Bz')
plt.ylabel('Amplitude [nT]')
plt.xlabel('r (normalized)')
plt.plot(r, Bz, label='In-situ data', c='k')
plt.plot(r, Btheta, label='Gold_Hoyle Fit', c='m')
plt.subplot(414)
plt.title('Difference modeled - observed')
plt.ylabel('Amplitude [nT]')
plt.xlabel('r (normalized)')
plt.plot(r, Br - Bx, c='b', label="Br - Bx")
plt.plot(r, Bphi - By, c='c', label="Bphi - By")
plt.plot(r, Btheta - Bz, c='m', label="Btheta - Bz")
plt.legend()
plt.suptitle('Magnetic field')
plt.subplots_adjust(top=0.91, right=0.96, hspace=0.7)
self.plotFrame = ttk.Frame(window)
self.canvas, self.toolbar = createPlot(self.plotFrame, fig)
self.plotFrame.grid(row=1, column=1, rowspan=3)
self.toolbar.grid(row=1, column=1, sticky="we")
self.canvas.get_tk_widget().grid(row=2, column=1)
window.bind("<Destroy>", lambda x: plt.close(fig))
def showPolar(self, func):
window = tk.Toplevel(self.resultsWindow, takefocus=False)
window.title("Gold-Hoyle result")
fig = func(self.result.B0, self.result.R0, self.result.b, 200)
frame = ttk.Frame(window)
canvas, toolbar = createPlot(frame, fig)
frame.grid(row=1, column=1)
toolbar.grid(row=1, column=1, sticky="we")
canvas.get_tk_widget().grid(row=2, column=1)
window.bind("<Destroy>", lambda x: plt.close(fig))
| [
"tkinter.ttk.Button",
"matplotlib.pyplot.ylabel",
"tkinter.ttk.Entry",
"tkinter.ttk.Frame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tkinter.ttk.Label",
"matplotlib.pyplot.close",
"src.utils.MatplotlibTkinterIntegration.createPlot",
"tkinter.Toplevel",
"matplotlib.pyplot.subplot",
... | [((5844, 5861), 'tkinter.ttk.Frame', 'ttk.Frame', (['window'], {}), '(window)\n', (5853, 5861), True, 'import tkinter.ttk as ttk\n'), ((6017, 6076), 'tkinter.ttk.Button', 'ttk.Button', (['btnFrame'], {'text': '"""Show Fit"""', 'command': 'self.showFit'}), "(btnFrame, text='Show Fit', command=self.showFit)\n", (6027, 6076), True, 'import tkinter.ttk as ttk\n'), ((6604, 6635), 'tkinter.Toplevel', 'tk.Toplevel', (['self.resultsWindow'], {}), '(self.resultsWindow)\n', (6615, 6635), True, 'import tkinter as tk\n'), ((6646, 6674), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 8)'}), '(figsize=(6.4, 8))\n', (6656, 6674), True, 'from matplotlib import pyplot as plt\n'), ((6794, 6810), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(411)'], {}), '(411)\n', (6805, 6810), True, 'from matplotlib import pyplot as plt\n'), ((6815, 6830), 'matplotlib.pyplot.title', 'plt.title', (['"""Bx"""'], {}), "('Bx')\n", (6824, 6830), True, 'from matplotlib import pyplot as plt\n'), ((6835, 6863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [nT]"""'], {}), "('Amplitude [nT]')\n", (6845, 6863), True, 'from matplotlib import pyplot as plt\n'), ((6868, 6896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r (normalized)"""'], {}), "('r (normalized)')\n", (6878, 6896), True, 'from matplotlib import pyplot as plt\n'), ((6901, 6945), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'Bx'], {'label': '"""In-situ data"""', 'c': '"""k"""'}), "(r, Bx, label='In-situ data', c='k')\n", (6909, 6945), True, 'from matplotlib import pyplot as plt\n'), ((6950, 6996), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'Br'], {'label': '"""Gold_Hoyle Fit"""', 'c': '"""b"""'}), "(r, Br, label='Gold_Hoyle Fit', c='b')\n", (6958, 6996), True, 'from matplotlib import pyplot as plt\n'), ((7002, 7018), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (7013, 7018), True, 'from matplotlib import pyplot as plt\n'), ((7023, 7038), 'matplotlib.pyplot.title', 'plt.title', (['"""By"""'], {}), "('By')\n", (7032, 7038), True, 'from matplotlib import pyplot as plt\n'), ((7043, 7071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [nT]"""'], {}), "('Amplitude [nT]')\n", (7053, 7071), True, 'from matplotlib import pyplot as plt\n'), ((7076, 7104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r (normalized)"""'], {}), "('r (normalized)')\n", (7086, 7104), True, 'from matplotlib import pyplot as plt\n'), ((7109, 7153), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'By'], {'label': '"""In-situ data"""', 'c': '"""k"""'}), "(r, By, label='In-situ data', c='k')\n", (7117, 7153), True, 'from matplotlib import pyplot as plt\n'), ((7158, 7206), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'Bphi'], {'label': '"""Gold_Hoyle Fit"""', 'c': '"""c"""'}), "(r, Bphi, label='Gold_Hoyle Fit', c='c')\n", (7166, 7206), True, 'from matplotlib import pyplot as plt\n'), ((7212, 7228), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(413)'], {}), '(413)\n', (7223, 7228), True, 'from matplotlib import pyplot as plt\n'), ((7233, 7248), 'matplotlib.pyplot.title', 'plt.title', (['"""Bz"""'], {}), "('Bz')\n", (7242, 7248), True, 'from matplotlib import pyplot as plt\n'), ((7253, 7281), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [nT]"""'], {}), "('Amplitude [nT]')\n", (7263, 7281), True, 'from matplotlib import pyplot as plt\n'), ((7286, 7314), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r (normalized)"""'], {}), "('r (normalized)')\n", (7296, 7314), True, 'from matplotlib import pyplot as plt\n'), 
((7319, 7363), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'Bz'], {'label': '"""In-situ data"""', 'c': '"""k"""'}), "(r, Bz, label='In-situ data', c='k')\n", (7327, 7363), True, 'from matplotlib import pyplot as plt\n'), ((7368, 7418), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'Btheta'], {'label': '"""Gold_Hoyle Fit"""', 'c': '"""m"""'}), "(r, Btheta, label='Gold_Hoyle Fit', c='m')\n", (7376, 7418), True, 'from matplotlib import pyplot as plt\n'), ((7424, 7440), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(414)'], {}), '(414)\n', (7435, 7440), True, 'from matplotlib import pyplot as plt\n'), ((7445, 7487), 'matplotlib.pyplot.title', 'plt.title', (['"""Difference modeled - observed"""'], {}), "('Difference modeled - observed')\n", (7454, 7487), True, 'from matplotlib import pyplot as plt\n'), ((7492, 7520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [nT]"""'], {}), "('Amplitude [nT]')\n", (7502, 7520), True, 'from matplotlib import pyplot as plt\n'), ((7525, 7553), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r (normalized)"""'], {}), "('r (normalized)')\n", (7535, 7553), True, 'from matplotlib import pyplot as plt\n'), ((7558, 7602), 'matplotlib.pyplot.plot', 'plt.plot', (['r', '(Br - Bx)'], {'c': '"""b"""', 'label': '"""Br - Bx"""'}), "(r, Br - Bx, c='b', label='Br - Bx')\n", (7566, 7602), True, 'from matplotlib import pyplot as plt\n'), ((7607, 7655), 'matplotlib.pyplot.plot', 'plt.plot', (['r', '(Bphi - By)'], {'c': '"""c"""', 'label': '"""Bphi - By"""'}), "(r, Bphi - By, c='c', label='Bphi - By')\n", (7615, 7655), True, 'from matplotlib import pyplot as plt\n'), ((7660, 7712), 'matplotlib.pyplot.plot', 'plt.plot', (['r', '(Btheta - Bz)'], {'c': '"""m"""', 'label': '"""Btheta - Bz"""'}), "(r, Btheta - Bz, c='m', label='Btheta - Bz')\n", (7668, 7712), True, 'from matplotlib import pyplot as plt\n'), ((7717, 7729), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7727, 7729), True, 'from matplotlib import pyplot as plt\n'), ((7735, 7765), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Magnetic field"""'], {}), "('Magnetic field')\n", (7747, 7765), True, 'from matplotlib import pyplot as plt\n'), ((7770, 7823), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.91)', 'right': '(0.96)', 'hspace': '(0.7)'}), '(top=0.91, right=0.96, hspace=0.7)\n', (7789, 7823), True, 'from matplotlib import pyplot as plt\n'), ((7846, 7863), 'tkinter.ttk.Frame', 'ttk.Frame', (['window'], {}), '(window)\n', (7855, 7863), True, 'import tkinter.ttk as ttk\n'), ((7896, 7927), 'src.utils.MatplotlibTkinterIntegration.createPlot', 'createPlot', (['self.plotFrame', 'fig'], {}), '(self.plotFrame, fig)\n', (7906, 7927), False, 'from src.utils.MatplotlibTkinterIntegration import createPlot\n'), ((8185, 8233), 'tkinter.Toplevel', 'tk.Toplevel', (['self.resultsWindow'], {'takefocus': '(False)'}), '(self.resultsWindow, takefocus=False)\n', (8196, 8233), True, 'import tkinter as tk\n'), ((8351, 8368), 'tkinter.ttk.Frame', 'ttk.Frame', (['window'], {}), '(window)\n', (8360, 8368), True, 'import tkinter.ttk as ttk\n'), ((8391, 8413), 'src.utils.MatplotlibTkinterIntegration.createPlot', 'createPlot', (['frame', 'fig'], {}), '(frame, fig)\n', (8401, 8413), False, 'from src.utils.MatplotlibTkinterIntegration import createPlot\n'), ((3684, 3700), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (3697, 3700), True, 'import numpy as np\n'), ((4974, 5003), 'tkinter.ttk.Label', 'ttk.Label', (['window'], {'text': 'title'}), '(window, text=title)\n', (4983, 5003), True, 'import tkinter.ttk as ttk\n'), ((5145, 5198), 'tkinter.ttk.Entry', 'ttk.Entry', (['window'], {'state': '"""readonly"""', 'textvariable': 'var'}), "(window, state='readonly', textvariable=var)\n", (5154, 5198), True, 'import tkinter.ttk as ttk\n'), ((5369, 5397), 'tkinter.ttk.Label', 'ttk.Label', (['window'], {'text': '"""+-"""'}), "(window, text='+-')\n", (5378, 5397), True, 'import tkinter.ttk as ttk\n'), ((5530, 5584), 'tkinter.ttk.Entry', 'ttk.Entry', (['window'], {'state': '"""readonly"""', 'textvariable': 'var2'}), "(window, state='readonly', textvariable=var2)\n", (5539, 5584), True, 'import tkinter.ttk as ttk\n'), ((5733, 5761), 'tkinter.ttk.Label', 'ttk.Label', (['window'], {'text': 'unit'}), '(window, text=unit)\n', (5742, 5761), True, 'import tkinter.ttk as ttk\n'), ((6188, 6222), 'functools.partial', 'partial', (['self.showPolar', 'Hoyle_phi'], {}), '(self.showPolar, Hoyle_phi)\n', (6195, 6222), False, 'from functools import partial\n'), ((6342, 6378), 'functools.partial', 'partial', (['self.showPolar', 'Hoyle_theta'], {}), '(self.showPolar, Hoyle_theta)\n', (6349, 6378), False, 'from functools import partial\n'), ((6498, 6532), 'functools.partial', 'partial', (['self.showPolar', 'Hoyle_tot'], {}), '(self.showPolar, Hoyle_tot)\n', (6505, 6532), False, 'from functools import partial\n'), ((8126, 8140), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8135, 8140), True, 'from matplotlib import pyplot as plt\n'), ((8581, 8595), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8590, 8595), True, 'from matplotlib import pyplot as plt\n'), ((3997, 4028), 'numpy.cos', 'np.cos', (['(self.result.theta * tor)'], {}), '(self.result.theta * tor)\n', (4003, 4028), True, 'import numpy as np\n')] |
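Editor's note: the `run` method in the row above evaluates the Gold-Hoyle flux-rope field along a normalized radial coordinate. A minimal standalone sketch of that field profile follows; the parameter values (`B0`, `b`, `R0`, `theta_deg`) are made up for illustration and stand in for the fitted `self.result` attributes.

```python
import numpy as np

# Hypothetical fitted parameters, for illustration only.
B0, b, R0, theta_deg = 10.0, 1.5, 0.05, 30.0

r = np.linspace(-1, 1, 200)               # normalized radial coordinate
tor = np.pi / 180                         # degrees -> radians

Br = np.zeros_like(r)                      # the model has no radial component
Bphi = B0 / (1 + b**2 * (r / R0)**2)       # toroidal component
Btheta = B0 * b * r / (
    (1 + b**2 * (r / R0)**2)
    * (R0 + (r / R0) * np.cos(theta_deg * tor))
)
```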
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times"
P = np.array([[1, 0, 0, 0, 0, 0],
              [0.5, 0, 0.5, 0, 0, 0],
              [0, 0.5, 0, 0.5, 0, 0],
              [0, 0, 0.5, 0, 0.5, 0],
              [0, 0, 0, 0.5, 0, 0.5],
              [0, 0, 0, 0, 0, 1]])
state = np.array([[0, 0, 0, 0, 1, 0]])
stateHist = state
dfStateHist = pd.DataFrame(state)
distr_hist = [[0, 0, 0, 0, 0, 0]]
i = 0
tmp = state.dot(P)
while i < 3:
    tmp = np.dot(tmp, P)
    print(tmp)
    i += 1
for x in range(50):
    state = np.dot(state, P)
    print(state)
    stateHist = np.append(stateHist, state, axis=0)
dfDistrHist = pd.DataFrame(stateHist)
dfDistrHist.plot()
plt.title("Starting with 4 coins")
plt.show()
| [
"numpy.append",
"numpy.array",
"numpy.dot",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((115, 266), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0.5, 0, 0.5, 0, 0, 0], [0, 0.5, 0, 0.5, 0, 0], [0, 0,\n 0.5, 0, 0.5, 0], [0, 0, 0, 0.5, 0, 0.5], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0.5, 0, 0.5, 0, 0, 0], [0, 0.5, 0, 0.5, 0, 0\n ], [0, 0, 0.5, 0, 0.5, 0], [0, 0, 0, 0.5, 0, 0.5], [0, 0, 0, 0, 0, 1]])\n', (123, 266), True, 'import numpy as np\n'), ((340, 370), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 1, 0]]'], {}), '([[0, 0, 0, 0, 1, 0]])\n', (348, 370), True, 'import numpy as np\n'), ((404, 423), 'pandas.DataFrame', 'pd.DataFrame', (['state'], {}), '(state)\n', (416, 423), True, 'import pandas as pd\n'), ((681, 704), 'pandas.DataFrame', 'pd.DataFrame', (['stateHist'], {}), '(stateHist)\n', (693, 704), True, 'import pandas as pd\n'), ((724, 758), 'matplotlib.pyplot.title', 'plt.title', (['"""Starting with 4 coins"""'], {}), "('Starting with 4 coins')\n", (733, 758), True, 'import matplotlib.pyplot as plt\n'), ((759, 769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (767, 769), True, 'import matplotlib.pyplot as plt\n'), ((507, 521), 'numpy.dot', 'np.dot', (['tmp', 'P'], {}), '(tmp, P)\n', (513, 521), True, 'import numpy as np\n'), ((580, 596), 'numpy.dot', 'np.dot', (['state', 'P'], {}), '(state, P)\n', (586, 596), True, 'import numpy as np\n'), ((630, 665), 'numpy.append', 'np.append', (['stateHist', 'state'], {'axis': '(0)'}), '(stateHist, state, axis=0)\n', (639, 665), True, 'import numpy as np\n')] |
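Editor's note: the transition matrix in the row above is a fair gambler's-ruin walk on states 0..5 with absorbing barriers at 0 and 5, started from state 4 ("4 coins"). A quick sketch of where the probability mass ends up; for a fair walk starting with 4 of 5 coins, the classical result is absorption at 5 with probability 4/5.

```python
import numpy as np

# Same chain as above: fair gambler's ruin on states 0..5.
P = np.array([[1, 0, 0, 0, 0, 0],
              [0.5, 0, 0.5, 0, 0, 0],
              [0, 0.5, 0, 0.5, 0, 0],
              [0, 0, 0.5, 0, 0.5, 0],
              [0, 0, 0, 0.5, 0, 0.5],
              [0, 0, 0, 0, 0, 1]])
start = np.array([0, 0, 0, 0, 1, 0])           # start with 4 coins
limit = start @ np.linalg.matrix_power(P, 1000)
print(limit.round(3))                       # -> [0.2 0. 0. 0. 0. 0.8]
```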
"""Input/output"""
import os
import wave
import numpy as np
def read_wave(file: os.PathLike) -> tuple[int, np.ndarray]:
"""Read WAV file into numpy array
NOTE: only mono audio is supported. Multi-channel audio is interlaced,
and would need to be de-interlaced into a 2D array.
Args:
file (os.PathLike): input WAV file
Returns:
tuple[int, np.ndarray]: sample rate, data
Data type is determined from the file; for 16bit PCM (as in competition),
the output data type is int16. For mono audio, return shape is 1D array.
"""
with wave.open(str(file), "rb") as wav_file:
buffer = wav_file.readframes(wav_file.getnframes())
sample_width_bits = wav_file.getsampwidth() * 8
_dtype = "uint8" if sample_width_bits == 8 else f"int{sample_width_bits}"
data = np.frombuffer(buffer, dtype=_dtype)
if wav_file.getnchannels() > 1:
raise NotImplementedError(
"Cannot read WAV file with more than one channels, found: "
+ str(wav_file.getnchannels())
)
return wav_file.getframerate(), data
| [
"numpy.frombuffer"
] | [((846, 881), 'numpy.frombuffer', 'np.frombuffer', (['buffer'], {'dtype': '_dtype'}), '(buffer, dtype=_dtype)\n', (859, 881), True, 'import numpy as np\n')] |
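Editor's note: the docstring above points out that multi-channel audio is interleaved and would need de-interlacing. A sketch of what that could look like, assuming the standard `wave` buffer layout (samples interleaved frame by frame); `deinterlace` is a hypothetical helper, not part of the module above.

```python
import numpy as np

def deinterlace(data: np.ndarray, n_channels: int) -> np.ndarray:
    """Reshape an interleaved 1D PCM buffer into shape (n_frames, n_channels)."""
    if data.size % n_channels:
        raise ValueError("buffer length is not a multiple of the channel count")
    return data.reshape(-1, n_channels)
```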
import numpy as np
import xarray as xr
from xbitinfo import get_keepbits
from . import _skip_slow, ensure_loaded, parameterized, randn, requires_dask
class GetKeepbits:
"""
Benchmark time and peak memory of `get_keepbits`.
"""
# https://asv.readthedocs.io/en/stable/benchmarks.html
timeout = 30.0
repeat = 3
number = 5
def setup(self, *args, **kwargs):
self.info_per_bit = {
"air": np.array(
[
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
3.94447851e-01,
3.94447851e-01,
3.94447851e-01,
3.94447851e-01,
3.94447851e-01,
3.94310542e-01,
7.36739987e-01,
5.62682836e-01,
3.60511555e-01,
1.52471111e-01,
4.18818055e-02,
3.65276146e-03,
1.19975820e-05,
4.39366160e-05,
4.18329296e-05,
2.54572089e-05,
1.44121797e-04,
1.34144798e-03,
1.55468479e-06,
5.38601212e-04,
8.09862581e-04,
1.74893445e-04,
4.97915410e-05,
3.88027711e-04,
0.00000000e00,
3.95323228e-05,
6.88854435e-04,
]
)
}
def time_get_keepbits(self, **kwargs):
"""Take time for `get_keepbits`."""
get_keepbits(self.info_per_bit, **kwargs)
def peakmem_get_keepbits(self, **kwargs):
"""Take memory peak for `get_keepbits`."""
get_keepbits(self.info_per_bit, **kwargs)
| [
"numpy.array",
"xbitinfo.get_keepbits"
] | [((1752, 1793), 'xbitinfo.get_keepbits', 'get_keepbits', (['self.info_per_bit'], {}), '(self.info_per_bit, **kwargs)\n', (1764, 1793), False, 'from xbitinfo import get_keepbits\n'), ((1900, 1941), 'xbitinfo.get_keepbits', 'get_keepbits', (['self.info_per_bit'], {}), '(self.info_per_bit, **kwargs)\n', (1912, 1941), False, 'from xbitinfo import get_keepbits\n'), ((440, 889), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.394447851, 0.394447851, 0.394447851, \n 0.394447851, 0.394447851, 0.394310542, 0.736739987, 0.562682836, \n 0.360511555, 0.152471111, 0.0418818055, 0.00365276146, 1.1997582e-05, \n 4.3936616e-05, 4.18329296e-05, 2.54572089e-05, 0.000144121797, \n 0.00134144798, 1.55468479e-06, 0.000538601212, 0.000809862581, \n 0.000174893445, 4.9791541e-05, 0.000388027711, 0.0, 3.95323228e-05, \n 0.000688854435]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.394447851, 0.394447851, 0.394447851, \n 0.394447851, 0.394447851, 0.394310542, 0.736739987, 0.562682836, \n 0.360511555, 0.152471111, 0.0418818055, 0.00365276146, 1.1997582e-05, \n 4.3936616e-05, 4.18329296e-05, 2.54572089e-05, 0.000144121797, \n 0.00134144798, 1.55468479e-06, 0.000538601212, 0.000809862581, \n 0.000174893445, 4.9791541e-05, 0.000388027711, 0.0, 3.95323228e-05, \n 0.000688854435])\n', (448, 889), True, 'import numpy as np\n')] |
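Editor's note: asv discovers `time_*` and `peakmem_*` methods by name and calls `setup` before each measured repeat, so only the `get_keepbits` call itself is timed. A hedged sketch of exercising the benchmark manually, outside of asv:

```python
# Hypothetical manual run, for debugging the benchmark itself.
bench = GetKeepbits()
bench.setup()
bench.time_get_keepbits()      # one call to get_keepbits on the cached bit information
bench.peakmem_get_keepbits()   # same call, as asv would profile it for peak memory
```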
#!/usr/bin/env python
import numpy
from shogun import MSG_DEBUG
traindat = numpy.random.random_sample((10,10))
testdat = numpy.random.random_sample((10,10))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.4]]
def kernel_director_linear(fm_train_real=traindat, fm_test_real=testdat, scale=1.2):
    try:
        from shogun import DirectorKernel
    except ImportError:
        print("recompile shogun with --enable-swig-directors")
        return
    import shogun as sg

    class DirectorLinearKernel(DirectorKernel):
        def __init__(self):
            DirectorKernel.__init__(self, True)

        def kernel_function(self, idx_a, idx_b):
            seq1 = self.get_lhs().get_feature_vector(idx_a)
            seq2 = self.get_rhs().get_feature_vector(idx_b)
            return numpy.dot(seq1, seq2)

    from shogun import LinearKernel, AvgDiagKernelNormalizer
    from shogun import Time
    feats_train = sg.features(fm_train_real)
    # feats_train.io.set_loglevel(MSG_DEBUG)
    feats_train.parallel.set_num_threads(1)
    feats_test = sg.features(fm_test_real)
    kernel = LinearKernel()
    kernel.set_normalizer(AvgDiagKernelNormalizer(scale))
    kernel.init(feats_train, feats_train)
    dkernel = DirectorLinearKernel()
    dkernel.set_normalizer(AvgDiagKernelNormalizer(scale))
    dkernel.init(feats_train, feats_train)
    # print "km_train"
    t = Time()
    km_train = kernel.get_kernel_matrix()
    # t1=t.cur_time_diff(True)
    # print "dkm_train"
    t = Time()
    dkm_train = dkernel.get_kernel_matrix()
    # t2=t.cur_time_diff(True)
    # print "km_train", km_train
    # print "dkm_train", dkm_train
    return km_train, dkm_train

if __name__ == '__main__':
    print('DirectorLinear')
    kernel_director_linear(*parameter_list[0])
| [
"numpy.random.random_sample",
"shogun.LinearKernel",
"shogun.DirectorKernel.__init__",
"shogun.features",
"numpy.dot",
"shogun.Time",
"shogun.AvgDiagKernelNormalizer"
] | [((75, 111), 'numpy.random.random_sample', 'numpy.random.random_sample', (['(10, 10)'], {}), '((10, 10))\n', (101, 111), False, 'import numpy\n'), ((121, 157), 'numpy.random.random_sample', 'numpy.random.random_sample', (['(10, 10)'], {}), '((10, 10))\n', (147, 157), False, 'import numpy\n'), ((838, 864), 'shogun.features', 'sg.features', (['fm_train_real'], {}), '(fm_train_real)\n', (849, 864), True, 'import shogun as sg\n'), ((959, 984), 'shogun.features', 'sg.features', (['fm_test_real'], {}), '(fm_test_real)\n', (970, 984), True, 'import shogun as sg\n'), ((994, 1008), 'shogun.LinearKernel', 'LinearKernel', ([], {}), '()\n', (1006, 1008), False, 'from shogun import LinearKernel, AvgDiagKernelNormalizer\n'), ((1256, 1262), 'shogun.Time', 'Time', ([], {}), '()\n', (1260, 1262), False, 'from shogun import Time\n'), ((1352, 1358), 'shogun.Time', 'Time', ([], {}), '()\n', (1356, 1358), False, 'from shogun import Time\n'), ((1032, 1062), 'shogun.AvgDiagKernelNormalizer', 'AvgDiagKernelNormalizer', (['scale'], {}), '(scale)\n', (1055, 1062), False, 'from shogun import LinearKernel, AvgDiagKernelNormalizer\n'), ((1160, 1190), 'shogun.AvgDiagKernelNormalizer', 'AvgDiagKernelNormalizer', (['scale'], {}), '(scale)\n', (1183, 1190), False, 'from shogun import LinearKernel, AvgDiagKernelNormalizer\n'), ((526, 561), 'shogun.DirectorKernel.__init__', 'DirectorKernel.__init__', (['self', '(True)'], {}), '(self, True)\n', (549, 561), False, 'from shogun import DirectorKernel\n'), ((717, 738), 'numpy.dot', 'numpy.dot', (['seq1', 'seq2'], {}), '(seq1, seq2)\n', (726, 738), False, 'import numpy\n')] |
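Editor's note: the point of the director example above is that the Python-side `DirectorLinearKernel` should reproduce the native `LinearKernel` matrix up to floating-point precision. A small hedged check using the function as defined above:

```python
import numpy as np

km, dkm = kernel_director_linear(*parameter_list[0])
# The two matrices should agree up to float32 round-off.
assert np.allclose(km, dkm, atol=1e-05), "director kernel disagrees with native LinearKernel"
```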
import networkx as nx
from entity.entity import Graph, Node
from node2vec import Node2Vec
import numpy as np
from gensim.models.word2vec import Word2Vec
from flask import current_app
from sklearn.decomposition import PCA
from sklearn import manifold
from algorithm.search_community import PyLouvain
from algorithm.similar_structure import get_similar_structure
from algorithm.structure_correspond import find_structure_correspond
import config.task_mapping as TASK_ID
import logging
import os
import pickle
def node2vec(graph: nx.Graph, model_path: str, dimensions: int = 20, walk_length: int = 80, num_walks: int = 10, workers: int = 4):
    model = None
    if os.path.exists(model_path) is False:
        # The model was not saved before; fit the graph now and save the model to model_path.
        node2vec = Node2Vec(graph, dimensions=dimensions,
                            walk_length=walk_length, num_walks=num_walks, workers=workers, temp_folder="./temp")
        model = node2vec.fit(workers=workers)
        model.save(model_path)
        logging.info("Saved model to file '%s'." % model_path)
    else:
        # The model was saved before; load it directly.
        if os.path.isfile(model_path) is False:
            raise FileNotFoundError("The path %s is not a file !" % model_path)
        model = Word2Vec.load(model_path)
        logging.info("Loaded model from file '%s'. " % model_path)
    return model


def get_graph_by_name(name: str):
    nxGraph, model = get_graph_model_by_name(name)
    graph = Graph()
    X = []
    for i in range(len(nxGraph.nodes)):
        X.append(model.wv.get_vector(str(i + 1)))
    X_2 = sigmoid(decomposition(X))
    for node_id in nxGraph.nodes():
        node = Node(node_id)
        # node_id is indexed from 1 but X_2 is indexed from 0.
        node.vec = [float(x) for x in X_2[node_id - 1]]
        graph.add_node(node)
    graph.edges = list(nxGraph.edges())
    # print(graph.edges[0])
    return graph


def get_graph_model_by_name(graph_name: str):
    if "soc_blog_catalog" == graph_name:
        return current_app.config["soc_blog_graph"], current_app.config["soc_blog_model"]
    elif "mock_community_graph" == graph_name:
        return current_app.config["mock_community_graph"], current_app.config["mock_community_model"]
    return None, None


def get_similar_struc(name: str, nodes: list, k: int):
    graph, model = get_graph_model_by_name(name)
    compoments = get_similar_structure(name, nodes, k)
    correspond = find_structure_correspond(nodes, model, compoments)
    return {"compoments": compoments, "correspond": correspond}


def get_community_by_name(name: str):
    if 'soc_blog_catalog' == name:
        return get_partition_model_by_name(name, './dump/community/soc_blog_community')
    elif 'mock_community_graph' == name:
        return get_partition_model_by_name(name, './dump/community/mock_community_graph')
    return None


def get_partition_model_by_name(name: str, path: str):
    """
    If the partition was dumped before, load it directly from file;
    otherwise run the community-search algorithm first, then dump the
    result to the file given by `path`.
    """
    partition = None
    if os.path.exists(path):
        pickle_file = open(path, 'rb')
        partition = pickle.load(pickle_file)
        logging.info("Loaded partition from file '%s'." % path)
    else:
        graph, model = get_graph_model_by_name(name)
        alg = PyLouvain.from_graph(graph)
        res, q = alg.apply_method()
        partition = []
        for arr in res:
            partition.append([a + 1 for a in arr])
        dump_file = open(path, 'wb')
        pickle.dump(partition, dump_file)
        logging.info("Dumped partition to file '%s' " % path)
    return partition


def decomposition(X: list, n_component: int = 2):
    pca = PCA(n_component)
    pca.fit(X)
    res = pca.fit_transform(X)
    return res


def decomposition_by_tsne(X: list, n_component: int = 2):
    embeded = manifold.TSNE(n_components=n_component,
                           random_state=1).fit_transform(X)
    return embeded


def sigmoid(X):
    return 1.0 / (1 + np.exp(-X))


def doTask(task, container: dict = None):
    if container is None:
        raise AttributeError("container is None in 'doTask'!")
    # `task` is a JSON object describing a Task.
    taskId = task.taskId
    if taskId == TASK_ID.COMMUNITY_DETECT:
        __checkParam(task, "graph_name")
        container[taskId] = get_community_by_name(task["params"]["graph_name"])
        __doSubTasks(task, container)
    elif taskId == TASK_ID.NODE_EMBEDDING:
        __checkParam(task, "graph_name")
        container[taskId] = get_graph_by_name(task["params"]["graph_name"])
        __doSubTasks(task, container)
    elif taskId == TASK_ID.UNUSUAL_NODE_DETECT:
        pass
    elif taskId == TASK_ID.SIMILAR_STRUCTURE:
        __checkParam(task, "graph_name")
        __checkParam(task, "nodes")
        __checkParam(task, "k")
        container[taskId] = get_similar_struc(
            task["params"]["graph_name"], task["params"]["nodes"], task["params"]["k"])
        __doSubTasks(task, container)
    elif taskId == TASK_ID.NODE_CORESSPOND:
        pass
    else:
        pass


def __doSubTasks(task, container):
    if "subTasks" in task:
        for subTask in task["subTasks"]:
            doTask(subTask, container)


def __checkParam(task: dict, paramName: str):
    if "params" not in task:
        raise AttributeError(
            "params is None in '%s' !" % task.taskName)
    if paramName not in task["params"]:
        raise AttributeError(
            "params '%s' is None in 'doTask' of task '%s' !" % (paramName, task.taskName))
| [
"os.path.exists",
"gensim.models.word2vec.Word2Vec.load",
"algorithm.structure_correspond.find_structure_correspond",
"algorithm.search_community.PyLouvain.from_graph",
"pickle.dump",
"sklearn.decomposition.PCA",
"pickle.load",
"sklearn.manifold.TSNE",
"os.path.isfile",
"algorithm.similar_structur... | [((1520, 1527), 'entity.entity.Graph', 'Graph', ([], {}), '()\n', (1525, 1527), False, 'from entity.entity import Graph, Node\n'), ((2431, 2468), 'algorithm.similar_structure.get_similar_structure', 'get_similar_structure', (['name', 'nodes', 'k'], {}), '(name, nodes, k)\n', (2452, 2468), False, 'from algorithm.similar_structure import get_similar_structure\n'), ((2486, 2537), 'algorithm.structure_correspond.find_structure_correspond', 'find_structure_correspond', (['nodes', 'model', 'compoments'], {}), '(nodes, model, compoments)\n', (2511, 2537), False, 'from algorithm.structure_correspond import find_structure_correspond\n'), ((3184, 3204), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3198, 3204), False, 'import os\n'), ((3817, 3833), 'sklearn.decomposition.PCA', 'PCA', (['n_component'], {}), '(n_component)\n', (3820, 3833), False, 'from sklearn.decomposition import PCA\n'), ((665, 691), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (679, 691), False, 'import os\n'), ((810, 938), 'node2vec.Node2Vec', 'Node2Vec', (['graph'], {'dimensions': 'dimensions', 'walk_length': 'walk_length', 'num_walks': 'num_walks', 'workers': 'workers', 'temp_folder': '"""./temp"""'}), "(graph, dimensions=dimensions, walk_length=walk_length, num_walks=\n num_walks, workers=workers, temp_folder='./temp')\n", (818, 938), False, 'from node2vec import Node2Vec\n'), ((1047, 1101), 'logging.info', 'logging.info', (['("Saved model to file \'%s\'." % model_path)'], {}), '("Saved model to file \'%s\'." % model_path)\n', (1059, 1101), False, 'import logging\n'), ((1311, 1336), 'gensim.models.word2vec.Word2Vec.load', 'Word2Vec.load', (['model_path'], {}), '(model_path)\n', (1324, 1336), False, 'from gensim.models.word2vec import Word2Vec\n'), ((1345, 1403), 'logging.info', 'logging.info', (['("Loaded model from file \'%s\'. " % model_path)'], {}), '("Loaded model from file \'%s\'. " % model_path)\n', (1357, 1403), False, 'import logging\n'), ((1717, 1730), 'entity.entity.Node', 'Node', (['node_id'], {}), '(node_id)\n', (1721, 1730), False, 'from entity.entity import Graph, Node\n'), ((3265, 3289), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (3276, 3289), False, 'import pickle\n'), ((3298, 3353), 'logging.info', 'logging.info', (['("Loaded partition from file \'%s\'." % path)'], {}), '("Loaded partition from file \'%s\'." % path)\n', (3310, 3353), False, 'import logging\n'), ((3431, 3458), 'algorithm.search_community.PyLouvain.from_graph', 'PyLouvain.from_graph', (['graph'], {}), '(graph)\n', (3451, 3458), False, 'from algorithm.search_community import PyLouvain\n'), ((3638, 3671), 'pickle.dump', 'pickle.dump', (['partition', 'dump_file'], {}), '(partition, dump_file)\n', (3649, 3671), False, 'import pickle\n'), ((3680, 3733), 'logging.info', 'logging.info', (['("Dumped partition to file \'%s\' " % path)'], {}), '("Dumped partition to file \'%s\' " % path)\n', (3692, 3733), False, 'import logging\n'), ((1178, 1204), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (1192, 1204), False, 'import os\n'), ((3970, 4025), 'sklearn.manifold.TSNE', 'manifold.TSNE', ([], {'n_components': 'n_component', 'random_state': '(1)'}), '(n_components=n_component, random_state=1)\n', (3983, 4025), False, 'from sklearn import manifold\n'), ((4130, 4140), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (4136, 4140), True, 'import numpy as np\n')] |
# Copyright 2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import cv2
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from utils import general, tensor
from models import network_factory
from dataloader import augment
from torchvision import transforms
parser = argparse.ArgumentParser(description="Reversing the cycle: single shot")
# general
parser.add_argument("--gpu_ids", type=str, help="id(s) of the gpu to use", default="0")
parser.add_argument("--ckpt", help="path to checkpoint", required=True)
parser.add_argument("--maxdisp", type=int, default=192, help="maxium disparity")
parser.add_argument("--model", help="stereo network to use", required=True)
parser.add_argument(
    "--left", type=str, help="path to left image(s) [space separated]", required=True
)
parser.add_argument(
    "--right", type=str, help="path to right image(s) [space separated]", required=True
)
parser.add_argument(
    "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
# test
parser.add_argument("--final_h", type=int, default=384, help="height after pad in test")
parser.add_argument("--final_w", type=int, default=1280, help="width after pad in test")
parser.add_argument("--results", type=str, default="./artifacts", help="result folder")
parser.add_argument(
    "--qualitative", action="store_true", help="save colored maps instead of 16bit"
)
parser.add_argument(
    "--cmap",
    type=str,
    default="magma",
    help="colormap to use",
    choices=["magma", "gray", "jet", "kitti"],
)
parser.add_argument(
    "--maxval", type=int, default=-1, help="max value in kitti colormap"
)
args = parser.parse_args()
gpus = general.parse_gpu_ids(args.gpu_ids)
args.cuda = len(gpus) > 0 and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
def _parse(names):
    """Split a string with space-separated values.

    Args:
        names: a string of space-separated values
    Return:
        a list where each element is a non-empty value of the original list
    """
    imgs = names.split(" ")
    imgs = [x.strip() for x in imgs if x.strip()]
    return imgs
def run_single_shot(network):
    """Generate depth for a single example (or a list of examples).

    Args:
        network: pre-trained stereo model
    """
    test_params = {
        "results": args.results,
        "model": args.model,
        "lefts": _parse(args.left),
        "rights": _parse(args.right),
        "qualitative": args.qualitative,
        "maxval": args.maxval,
        "cmap": args.cmap,
    }
    padding_params = {
        "final_h": args.final_h,
        "final_w": args.final_w,
    }
    network.eval()
    transformation = augment.ComposeTransformation(
        [
            augment.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
        ]
    )
    with tqdm(total=len(test_params["lefts"])) as pbar:
        for (left_i, right_i) in zip(test_params["lefts"], test_params["rights"]):
            if not os.path.exists(left_i):
                print("missing left image:{}".format(left_i))
                continue
            if not os.path.exists(right_i):
                print("missing right image:{}".format(right_i))
                continue
            left_img = cv2.imread(left_i)
            right_img = cv2.imread(right_i)
            if left_img.shape != right_img.shape:
                raise ValueError("Left and right images have different shapes")
            h, w, _ = left_img.shape
            top_pad = padding_params["final_h"] - h
            left_pad = padding_params["final_w"] - w
            # add padding
            left_img = np.lib.pad(
                left_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode="edge"
            )
            right_img = np.lib.pad(
                right_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode="edge"
            )
            # transform to tensor
            left = transformation(left_img)
            right = transformation(right_img)
            # create batch
            left = torch.unsqueeze(left, 0)
            right = torch.unsqueeze(right, 0)
            name = "disp_" + os.path.basename(left_i)
            if args.cuda:
                # loading images on GPU
                left = torch.FloatTensor(left).cuda()
                right = torch.FloatTensor(right).cuda()
            left, right = Variable(left), Variable(right)
            # make prediction
            with torch.no_grad():
                output = network(left, right)
                output = torch.squeeze(output)
                output = torch.nn.functional.relu(output)
            output = output.data.cpu().numpy()
            extension = "." + name.split(".")[-1]
            name = name.replace(extension, "")
            # remove padding
            if left_pad == 0:
                final_output = output[top_pad:, :]
            else:
                final_output = output[top_pad:, :-left_pad]
            if final_output.shape[0] != h or final_output.shape[1] != w:
                raise ValueError("Problems in cropping final predictions")
            destination = os.path.join(
                test_params["results"], test_params["model"], "{}", name + ".png"
            )
            # saving predictions
            if test_params["qualitative"]:
                min_value = final_output.min()
                max_value = final_output.max()
                final_output = (final_output - min_value) / (max_value - min_value)
                final_output *= 255.0
                general.save_color(
                    destination.format("qualitative"),
                    final_output,
                    cmap=test_params["cmap"],
                    params={"maxval": test_params["maxval"]},
                )
            else:
                general.save_kitti_disp(destination.format("16bit"), final_output)
            pbar.update(1)
    print("Done! Predictions saved in {} folder".format(test_params["results"]))
if __name__ == "__main__":
print("=> model: {}".format(args.model))
print("=> checkpoint: {}".format(args.ckpt))
if not os.path.exists(args.ckpt):
raise ValueError("Checkpoint not found!")
model = network_factory.get_network(args.model)(
{"maxdisp": args.maxdisp, "imagenet_pt": False}
)
if args.cuda:
print("=> selected gpu(s) with ids {}".format(*gpus))
model = nn.DataParallel(model)
model.cuda()
print(
"=> Number of model parameters: {}".format(
sum(p.numel() for p in model.parameters() if p.requires_grad)
)
)
state_dict = torch.load(args.ckpt)
model.load_state_dict(state_dict["state_dict"], strict=True)
print("EPOCHS: {}".format(state_dict["epoch"]))
run_single_shot(model)
| [
"utils.general.parse_gpu_ids",
"numpy.lib.pad",
"torch.cuda.is_available",
"torch.squeeze",
"os.path.exists",
"argparse.ArgumentParser",
"torch.unsqueeze",
"models.network_factory.get_network",
"torch.autograd.Variable",
"torchvision.transforms.Normalize",
"torch.nn.functional.relu",
"cv2.imre... | [((1053, 1124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reversing the cycle: single shot"""'}), "(description='Reversing the cycle: single shot')\n", (1076, 1124), False, 'import argparse\n'), ((2435, 2470), 'utils.general.parse_gpu_ids', 'general.parse_gpu_ids', (['args.gpu_ids'], {}), '(args.gpu_ids)\n', (2456, 2470), False, 'from utils import general, tensor\n'), ((2528, 2556), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2545, 2556), False, 'import torch\n'), ((2501, 2526), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2524, 2526), False, 'import torch\n'), ((2575, 2608), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2597, 2608), False, 'import torch\n'), ((7416, 7437), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (7426, 7437), False, 'import torch\n'), ((6911, 6936), 'os.path.exists', 'os.path.exists', (['args.ckpt'], {}), '(args.ckpt)\n', (6925, 6936), False, 'import os\n'), ((7001, 7040), 'models.network_factory.get_network', 'network_factory.get_network', (['args.model'], {}), '(args.model)\n', (7028, 7040), False, 'from models import network_factory\n'), ((7201, 7223), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7216, 7223), True, 'import torch.nn as nn\n'), ((3484, 3507), 'dataloader.augment.ArrayToTensor', 'augment.ArrayToTensor', ([], {}), '()\n', (3505, 3507), False, 'from dataloader import augment\n'), ((3521, 3578), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0, 0, 0]', 'std': '[255, 255, 255]'}), '(mean=[0, 0, 0], std=[255, 255, 255])\n', (3541, 3578), False, 'from torchvision import transforms\n'), ((4022, 4040), 'cv2.imread', 'cv2.imread', (['left_i'], {}), '(left_i)\n', (4032, 4040), False, 'import cv2\n'), ((4065, 4084), 'cv2.imread', 'cv2.imread', (['right_i'], {}), '(right_i)\n', (4075, 4084), False, 'import cv2\n'), ((4409, 4481), 'numpy.lib.pad', 'np.lib.pad', (['left_img', '((top_pad, 0), (0, left_pad), (0, 0))'], {'mode': '"""edge"""'}), "(left_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode='edge')\n", (4419, 4481), True, 'import numpy as np\n'), ((4536, 4609), 'numpy.lib.pad', 'np.lib.pad', (['right_img', '((top_pad, 0), (0, left_pad), (0, 0))'], {'mode': '"""edge"""'}), "(right_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode='edge')\n", (4546, 4609), True, 'import numpy as np\n'), ((4811, 4835), 'torch.unsqueeze', 'torch.unsqueeze', (['left', '(0)'], {}), '(left, 0)\n', (4826, 4835), False, 'import torch\n'), ((4856, 4881), 'torch.unsqueeze', 'torch.unsqueeze', (['right', '(0)'], {}), '(right, 0)\n', (4871, 4881), False, 'import torch\n'), ((5913, 5992), 'os.path.join', 'os.path.join', (["test_params['results']", "test_params['model']", '"""{}"""', "(name + '.png')"], {}), "(test_params['results'], test_params['model'], '{}', name + '.png')\n", (5925, 5992), False, 'import os\n'), ((3754, 3776), 'os.path.exists', 'os.path.exists', (['left_i'], {}), '(left_i)\n', (3768, 3776), False, 'import os\n'), ((3884, 3907), 'os.path.exists', 'os.path.exists', (['right_i'], {}), '(right_i)\n', (3898, 3907), False, 'import os\n'), ((4912, 4936), 'os.path.basename', 'os.path.basename', (['left_i'], {}), '(left_i)\n', (4928, 4936), False, 'import os\n'), ((5224, 5239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5237, 5239), False, 'import torch\n'), ((5312, 5333), 'torch.squeeze', 'torch.squeeze', (['output'], {}), 
'(output)\n', (5325, 5333), False, 'import torch\n'), ((5359, 5391), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['output'], {}), '(output)\n', (5383, 5391), False, 'import torch\n'), ((5144, 5158), 'torch.autograd.Variable', 'Variable', (['left'], {}), '(left)\n', (5152, 5158), False, 'from torch.autograd import Variable\n'), ((5160, 5175), 'torch.autograd.Variable', 'Variable', (['right'], {}), '(right)\n', (5168, 5175), False, 'from torch.autograd import Variable\n'), ((5027, 5050), 'torch.FloatTensor', 'torch.FloatTensor', (['left'], {}), '(left)\n', (5044, 5050), False, 'import torch\n'), ((5082, 5106), 'torch.FloatTensor', 'torch.FloatTensor', (['right'], {}), '(right)\n', (5099, 5106), False, 'import torch\n')] |
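Editor's note: the part of `run_single_shot` above that is easiest to get wrong is the pad-then-crop dance: images are edge-padded up to a fixed network-friendly size, and the prediction is cropped back to the original resolution. A minimal sketch, with hypothetical image sizes:

```python
import numpy as np

h, w = 375, 1242                      # e.g. a KITTI-sized image (hypothetical)
final_h, final_w = 384, 1280          # network input size
top_pad, left_pad = final_h - h, final_w - w

img = np.random.rand(h, w, 3)
padded = np.lib.pad(img, ((top_pad, 0), (0, left_pad), (0, 0)), mode="edge")

pred = np.random.rand(final_h, final_w)   # stand-in for the network output
cropped = pred[top_pad:, :-left_pad] if left_pad else pred[top_pad:, :]
assert cropped.shape == (h, w)
```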
"""
Title :RegularizationCallback.py
Description :Callback for custom weight regularization
Author :<NAME>
Date Created :23-03-2020
Date Modified :11-05-2020
version :1.1
python_version :3.6.6
"""
import keras
import numpy as np
from keras import backend as K
from layers.MicroConv2D import MicroConv2D
class RegularizationCallback(keras.callbacks.Callback):
    def __init__(self, l1_penalty):
        super(RegularizationCallback, self).__init__()
        self.l1_penalty = l1_penalty

    def on_batch_end(self, batch, logs=None):
        # Revert L1 regularization on the diagonal of each kernel
        # The idea is to encourage large eigenvalues and small average weight per kernel
        revert_kernel = (np.ones((3, 3)) - np.identity(3)) * self.l1_penalty * 100
        for layer in self.model.layers:
            if isinstance(layer, MicroConv2D) and layer.filter_size == 3:
                current_weights = layer.get_weights()
                kernel_shape = current_weights[0].shape
                for neuron in range(kernel_shape[3]):
                    for depth in range(kernel_shape[2]):
                        # Get the 3x3 convolution matrix
                        sign = 1 if K.sum(current_weights[0][:, :, depth, neuron]) > 0 else -1
                        current_weights[0][:, :, depth, neuron] += sign * revert_kernel
                layer.set_weights(current_weights)
| [
"numpy.identity",
"numpy.ones",
"keras.backend.sum"
] | [((740, 755), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (747, 755), True, 'import numpy as np\n'), ((757, 771), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (768, 771), True, 'import numpy as np\n'), ((1133, 1179), 'keras.backend.sum', 'K.sum', (['current_weights[0][:, :, depth, neuron]'], {}), '(current_weights[0][:, :, depth, neuron])\n', (1138, 1179), True, 'from keras import backend as K\n')] |
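Editor's note: the `revert_kernel` in the callback above is zero on the diagonal and `l1_penalty * 100` off the diagonal, so each update nudges only the off-diagonal weights of every 3x3 kernel slice, with the sign of that slice's total weight. Printing it makes the shape of the update concrete:

```python
import numpy as np

l1_penalty = 1e-4  # hypothetical value, for illustration
revert_kernel = (np.ones((3, 3)) - np.identity(3)) * l1_penalty * 100
print(revert_kernel)
# [[0.   0.01 0.01]
#  [0.01 0.   0.01]
#  [0.01 0.01 0.  ]]
```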
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 <NAME> <<EMAIL>>
"""USAGE: %(program)s MATRIX.mm [CLIP_DOCS] [CLIP_TERMS]
Check truncated SVD error for the algo in gensim, using a given corpus. This script
runs the decomposition with several internal parameters (number of requested factors,
iterative chunk size) and reports error for each parameter combination.
The number of input documents is clipped to the first CLIP_DOCS. Similarly,
only the first CLIP_TERMS are considered (features with id >= CLIP_TERMS are
ignored, effectively restricting the vocabulary size). If you don't specify them,
the entire matrix will be used.
Example: ./svd_error.py ~/gensim/results/wiki_en_v10k.mm.bz2 100000 10000
"""
from __future__ import print_function, with_statement
import logging
import os
import sys
import time
import bz2
import itertools
import numpy as np
import scipy.linalg
import gensim
try:
    from sparsesvd import sparsesvd
except ImportError:
    # no SVDLIBC: install with `easy_install sparsesvd` if you want SVDLIBC results as well
    sparsesvd = None
sparsesvd = None # don't use SVDLIBC
FACTORS = [300] # which num_topics to try
CHUNKSIZE = [10000, 1000] # which chunksize to try
POWER_ITERS = [0, 1, 2, 4, 6] # extra power iterations for the randomized algo
# when reporting reconstruction error, also report spectral norm error? (very slow)
COMPUTE_NORM2 = False
def norm2(a):
    """Spectral norm ("norm 2") of a symmetric matrix `a`."""
    if COMPUTE_NORM2:
        logging.info("computing spectral norm of a %s matrix", str(a.shape))
        return scipy.linalg.eigvalsh(a).max()  # much faster than np.linalg.norm(2)
    else:
        return np.nan


def rmse(diff):
    return np.sqrt(1.0 * np.multiply(diff, diff).sum() / diff.size)


def print_error(name, aat, u, s, ideal_nf, ideal_n2):
    err = -np.dot(u, np.dot(np.diag(s), u.T))
    err += aat
    nf, n2 = np.linalg.norm(err), norm2(err)
    print(
        '%s error: norm_frobenius=%f (/ideal=%g), norm2=%f (/ideal=%g), RMSE=%g' %
        (name, nf, nf / ideal_nf, n2, n2 / ideal_n2, rmse(err))
    )
    sys.stdout.flush()


class ClippedCorpus(object):
    def __init__(self, corpus, max_docs, max_terms):
        self.corpus = corpus
        self.max_docs, self.max_terms = max_docs, max_terms

    def __iter__(self):
        for doc in itertools.islice(self.corpus, self.max_docs):
            yield [(f, w) for f, w in doc if f < self.max_terms]
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))
    program = os.path.basename(sys.argv[0])

    # do we have enough cmd line arguments?
    if len(sys.argv) < 2:
        print(globals()["__doc__"] % locals())
        sys.exit(1)

    fname = sys.argv[1]
    if fname.endswith('bz2'):
        mm = gensim.corpora.MmCorpus(bz2.BZ2File(fname))
    else:
        mm = gensim.corpora.MmCorpus(fname)

    # extra cmd parameters = use a subcorpus (fewer docs, smaller vocab)
    if len(sys.argv) > 2:
        n = int(sys.argv[2])
    else:
        n = mm.num_docs
    if len(sys.argv) > 3:
        m = int(sys.argv[3])
    else:
        m = mm.num_terms

    logging.info("using %i documents and %i features", n, m)
    corpus = ClippedCorpus(mm, n, m)
    id2word = gensim.utils.FakeDict(m)

    logging.info("computing corpus * corpus^T")  # eigenvalues of this matrix are singular values of `corpus`, squared
    aat = np.zeros((m, m), dtype=np.float64)
    for chunk in gensim.utils.grouper(corpus, chunksize=5000):
        num_nnz = sum(len(doc) for doc in chunk)
        chunk = gensim.matutils.corpus2csc(chunk, num_nnz=num_nnz, num_terms=m, num_docs=len(chunk), dtype=np.float32)
        chunk = chunk * chunk.T
        chunk = chunk.toarray()
        aat += chunk
        del chunk

    logging.info("computing full decomposition of corpus * corpus^t")
    aat = aat.astype(np.float32)
    spectrum_s, spectrum_u = scipy.linalg.eigh(aat)
    spectrum_s = spectrum_s[::-1]  # re-order to descending eigenvalue order
    spectrum_u = spectrum_u.T[::-1].T
    np.save(fname + '.spectrum.npy', spectrum_s)

    for factors in FACTORS:
        err = -np.dot(spectrum_u[:, :factors], np.dot(np.diag(spectrum_s[:factors]), spectrum_u[:, :factors].T))
        err += aat
        ideal_fro = np.linalg.norm(err)
        del err
        ideal_n2 = spectrum_s[factors + 1]
        print('*' * 40, "%i factors, ideal error norm_frobenius=%f, norm_2=%f" % (factors, ideal_fro, ideal_n2))
        print("*" * 30, end="")
        print_error("baseline", aat,
                    np.zeros((m, factors)), np.zeros(factors), ideal_fro, ideal_n2)
        if sparsesvd:
            logging.info("computing SVDLIBC SVD for %i factors", factors)
            taken = time.time()
            corpus_ram = gensim.matutils.corpus2csc(corpus, num_terms=m)
            ut, s, vt = sparsesvd(corpus_ram, factors)
            taken = time.time() - taken
            del corpus_ram
            del vt
            u, s = ut.T.astype(np.float32), s.astype(np.float32)**2  # convert singular values to eigenvalues
            del ut
            print("SVDLIBC SVD for %i factors took %s s (spectrum %f .. %f)"
                  % (factors, taken, s[0], s[-1]))
            print_error("SVDLIBC", aat, u, s, ideal_fro, ideal_n2)
            del u
        for power_iters in POWER_ITERS:
            for chunksize in CHUNKSIZE:
                logging.info(
                    "computing incremental SVD for %i factors, %i power iterations, chunksize %i",
                    factors, power_iters, chunksize
                )
                taken = time.time()
                gensim.models.lsimodel.P2_EXTRA_ITERS = power_iters
                model = gensim.models.LsiModel(
                    corpus, id2word=id2word, num_topics=factors,
                    chunksize=chunksize, power_iters=power_iters
                )
                taken = time.time() - taken
                u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
                del model
                print(
                    "incremental SVD for %i factors, %i power iterations, "
                    "chunksize %i took %s s (spectrum %f .. %f)" %
                    (factors, power_iters, chunksize, taken, s[0], s[-1])
                )
                print_error('incremental SVD', aat, u, s, ideal_fro, ideal_n2)
                del u
            logging.info("computing multipass SVD for %i factors, %i power iterations", factors, power_iters)
            taken = time.time()
            model = gensim.models.LsiModel(
                corpus, id2word=id2word, num_topics=factors, chunksize=2000,
                onepass=False, power_iters=power_iters
            )
            taken = time.time() - taken
            u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
            del model
            print(
                "multipass SVD for %i factors, "
                "%i power iterations took %s s (spectrum %f .. %f)" %
                (factors, power_iters, taken, s[0], s[-1])
            )
            print_error('multipass SVD', aat, u, s, ideal_fro, ideal_n2)
            del u

    logging.info("finished running %s", program)
| [
"gensim.models.LsiModel",
"gensim.utils.FakeDict",
"sys.exit",
"numpy.linalg.norm",
"logging.info",
"numpy.save",
"numpy.multiply",
"bz2.BZ2File",
"gensim.corpora.MmCorpus",
"sys.stdout.flush",
"gensim.utils.grouper",
"time.time",
"logging.basicConfig",
"itertools.islice",
"numpy.diag",
... | [((2128, 2146), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2144, 2146), False, 'import sys\n'), ((2508, 2603), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (2527, 2603), False, 'import logging\n'), ((2666, 2695), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2682, 2695), False, 'import os\n'), ((3256, 3312), 'logging.info', 'logging.info', (['"""using %i documents and %i features"""', 'n', 'm'], {}), "('using %i documents and %i features', n, m)\n", (3268, 3312), False, 'import logging\n'), ((3364, 3388), 'gensim.utils.FakeDict', 'gensim.utils.FakeDict', (['m'], {}), '(m)\n', (3385, 3388), False, 'import gensim\n'), ((3394, 3437), 'logging.info', 'logging.info', (['"""computing corpus * corpus^T"""'], {}), "('computing corpus * corpus^T')\n", (3406, 3437), False, 'import logging\n'), ((3519, 3553), 'numpy.zeros', 'np.zeros', (['(m, m)'], {'dtype': 'np.float64'}), '((m, m), dtype=np.float64)\n', (3527, 3553), True, 'import numpy as np\n'), ((3571, 3615), 'gensim.utils.grouper', 'gensim.utils.grouper', (['corpus'], {'chunksize': '(5000)'}), '(corpus, chunksize=5000)\n', (3591, 3615), False, 'import gensim\n'), ((3893, 3958), 'logging.info', 'logging.info', (['"""computing full decomposition of corpus * corpus^t"""'], {}), "('computing full decomposition of corpus * corpus^t')\n", (3905, 3958), False, 'import logging\n'), ((4163, 4207), 'numpy.save', 'np.save', (["(fname + '.spectrum.npy')", 'spectrum_s'], {}), "(fname + '.spectrum.npy', spectrum_s)\n", (4170, 4207), True, 'import numpy as np\n'), ((7331, 7375), 'logging.info', 'logging.info', (['"""finished running %s"""', 'program'], {}), "('finished running %s', program)\n", (7343, 7375), False, 'import logging\n'), ((1928, 1947), 'numpy.linalg.norm', 'np.linalg.norm', (['err'], {}), '(err)\n', (1942, 1947), True, 'import numpy as np\n'), ((2364, 2408), 'itertools.islice', 'itertools.islice', (['self.corpus', 'self.max_docs'], {}), '(self.corpus, self.max_docs)\n', (2380, 2408), False, 'import itertools\n'), ((2821, 2832), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2829, 2832), False, 'import sys\n'), ((2968, 2998), 'gensim.corpora.MmCorpus', 'gensim.corpora.MmCorpus', (['fname'], {}), '(fname)\n', (2991, 2998), False, 'import gensim\n'), ((4389, 4408), 'numpy.linalg.norm', 'np.linalg.norm', (['err'], {}), '(err)\n', (4403, 4408), True, 'import numpy as np\n'), ((2925, 2943), 'bz2.BZ2File', 'bz2.BZ2File', (['fname'], {}), '(fname)\n', (2936, 2943), False, 'import bz2\n'), ((4670, 4692), 'numpy.zeros', 'np.zeros', (['(m, factors)'], {}), '((m, factors))\n', (4678, 4692), True, 'import numpy as np\n'), ((4694, 4711), 'numpy.zeros', 'np.zeros', (['factors'], {}), '(factors)\n', (4702, 4711), True, 'import numpy as np\n'), ((4770, 4831), 'logging.info', 'logging.info', (['"""computing SVDLIBC SVD for %i factors"""', 'factors'], {}), "('computing SVDLIBC SVD for %i factors', factors)\n", (4782, 4831), False, 'import logging\n'), ((4852, 4863), 'time.time', 'time.time', ([], {}), '()\n', (4861, 4863), False, 'import time\n'), ((4889, 4936), 'gensim.matutils.corpus2csc', 'gensim.matutils.corpus2csc', (['corpus'], {'num_terms': 'm'}), '(corpus, num_terms=m)\n', (4915, 4936), False, 'import gensim\n'), ((4961, 4991), 'sparsesvd.sparsesvd', 'sparsesvd', (['corpus_ram', 'factors'], {}), '(corpus_ram, factors)\n', (4970, 4991), False, 'from sparsesvd import sparsesvd\n'), ((6543, 6644), 'logging.info', 'logging.info', (['"""computing multipass SVD for %i factors, %i power iterations"""', 'factors', 'power_iters'], {}), "('computing multipass SVD for %i factors, %i power iterations',\n factors, power_iters)\n", (6555, 6644), False, 'import logging\n'), ((6661, 6672), 'time.time', 'time.time', ([], {}), '()\n', (6670, 6672), False, 'import time\n'), ((6693, 6820), 'gensim.models.LsiModel', 'gensim.models.LsiModel', (['corpus'], {'id2word': 'id2word', 'num_topics': 'factors', 'chunksize': '(2000)', 'onepass': '(False)', 'power_iters': 'power_iters'}), '(corpus, id2word=id2word, num_topics=factors,\n chunksize=2000, onepass=False, power_iters=power_iters)\n', (6715, 6820), False, 'import gensim\n'), ((1882, 1892), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (1889, 1892), True, 'import numpy as np\n'), ((5012, 5023), 'time.time', 'time.time', ([], {}), '()\n', (5021, 5023), False, 'import time\n'), ((5516, 5650), 'logging.info', 'logging.info', (['"""computing incremental SVD for %i factors, %i power iterations, chunksize %i"""', 'factors', 'power_iters', 'chunksize'], {}), "(\n 'computing incremental SVD for %i factors, %i power iterations, chunksize %i'\n , factors, power_iters, chunksize)\n", (5528, 5650), False, 'import logging\n'), ((5723, 5734), 'time.time', 'time.time', ([], {}), '()\n', (5732, 5734), False, 'import time\n'), ((5827, 5944), 'gensim.models.LsiModel', 'gensim.models.LsiModel', (['corpus'], {'id2word': 'id2word', 'num_topics': 'factors', 'chunksize': 'chunksize', 'power_iters': 'power_iters'}), '(corpus, id2word=id2word, num_topics=factors,\n chunksize=chunksize, power_iters=power_iters)\n', (5849, 5944), False, 'import gensim\n'), ((6883, 6894), 'time.time', 'time.time', ([], {}), '()\n', (6892, 6894), False, 'import time\n'), ((4291, 4320), 'numpy.diag', 'np.diag', (['spectrum_s[:factors]'], {}), '(spectrum_s[:factors])\n', (4298, 4320), True, 'import numpy as np\n'), ((6023, 6034), 'time.time', 'time.time', ([], {}), '()\n', (6032, 6034), False, 'import time\n'), ((1755, 1778), 'numpy.multiply', 'np.multiply', (['diff', 'diff'], {}), '(diff, diff)\n', (1766, 1778), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract functions and classes used to build the QAOA circuit in cirq.
Quantum Approximate Optimization Algorithm (QAOA) provides a recipe on how to
parametrized a trial wavefunction of the ground state which can be solved
variationally.
Original paper:
A Quantum Approximate Optimization Algorithm
https://arxiv.org/abs/1411.4028
Different from the standard QAOA algorithm, we will allocate chunks of time to
apply either Hamiltonian, and optimize over these decisions.
"""
import abc
import ast
import enum
import itertools
from typing import Any, List, Tuple
import cirq
import numpy as np
def generate_x_hamiltonian_exponential(num_qubits, time):
r"""Returns the list of all operators for the exponential of the X Hamiltonian.
Applies exp(i*time*\sum_i X_i) as a Cirq circuit.
Args:
num_qubits: Integer, Apply XPowGate to each qubit in range(num_qubits).
time: Float, how long to apply the Hamiltonian.
Returns:
List of all cirq operators for the X Hamiltonian defined by the time.
"""
# Corrects for the fact that Rx performs e^(-i*X*t/2)
return list(cirq.rx(-2 * time).on_each(
*[cirq.LineQubit(i) for i in range(num_qubits)]
))
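# Illustrative sanity check (added note, not part of the original module):
# cirq.rx(theta) implements exp(-i*theta*X/2), so rx(-2*time) equals
# exp(i*time*X) on each qubit. A quick way to confirm this numerically:
#
#   import scipy.linalg
#   x = np.array([[0., 1.], [1., 0.]])
#   np.allclose(cirq.unitary(cirq.rx(-2 * 0.3)),
#               scipy.linalg.expm(1j * 0.3 * x))
#   # -> True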
class HamiltonianType(enum.IntEnum):
X = 0
CONSTRAINT = 1
def switch_hamiltonian_type(hamiltonian_type):
"""Converts to the other HamiltonianType value.
Args:
hamiltonian_type: HamiltonianType, the type to get flipped
Returns:
The opposite HamiltonianType
"""
if hamiltonian_type is HamiltonianType.CONSTRAINT:
return HamiltonianType.X
else:
return HamiltonianType.CONSTRAINT
def bangbang_compressor(bangbang_protocol):
"""Compresses the bang bang protocol.
Merges chunks of contiguous bangbang chunks into a Tuple of duration (in
number of chunks) and which Hamiltonian to apply.
Args:
bangbang_protocol: List of HamiltonianType values, determines which
Hamiltonian should be applied at the corresponding chunk.
Returns:
List of Tuples containing the Hamiltonian type and the number of chunks to
apply the Hamiltonian type for.
"""
current_mode = None
compressed_protocol = []
chunk_counter = 0
for protocol_mode in bangbang_protocol:
if current_mode is None:
current_mode = protocol_mode
chunk_counter = 1
elif current_mode == protocol_mode:
chunk_counter += 1
else:
compressed_protocol.append((chunk_counter, current_mode))
current_mode = protocol_mode
chunk_counter = 1
# Append what's left over
if chunk_counter > 0:
compressed_protocol.append((chunk_counter, current_mode))
return compressed_protocol
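# Example of the compression (added sketch, not part of the original module):
#
#   protocol = [HamiltonianType.X, HamiltonianType.X,
#               HamiltonianType.CONSTRAINT, HamiltonianType.X]
#   bangbang_compressor(protocol)
#   # -> [(2, HamiltonianType.X), (1, HamiltonianType.CONSTRAINT),
#   #     (1, HamiltonianType.X)]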
def protocol_to_string(bangbang_protocol):
"""Converts a bang-bang protocol into a string.
Args:
bangbang_protocol: List of circuit_lib.HamiltonianType
Returns:
String that represents the bangbang_protocol
"""
return str([int(hamiltonian_type) for hamiltonian_type in bangbang_protocol])
def string_to_protocol(protocol_string):
"""Converts a string into a bang-bang protocol.
Args:
protocol_string: String, represents the bangbang_protocol
Returns:
List of HamiltonianType, representing the bang-bang protocol.
"""
return ast.literal_eval(protocol_string)
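# Round-trip sketch (added for illustration): the string form stores the enum
# values as a plain list literal, so the conversion is lossless up to ints:
#
#   s = protocol_to_string([HamiltonianType.X, HamiltonianType.CONSTRAINT])
#   s                      # -> '[0, 1]'
#   string_to_protocol(s)  # -> [0, 1] (ints, castable back to HamiltonianType)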
class BangBangProtocolCircuit(abc.ABC):
"""QAOA circuit for bang-bang protocols.
This circuit will divide the given time into a series of chunks that will
be allocated to one of the two Hamiltonians.
Attributes:
chunk_time: Positive float, amount of time allocated to each chunk.
num_qubits: Positive int, the number of qubits in the simulation
hamiltonian_diagonal: List of floats, the diagonal of the target
Hamiltonian, that determines the evaluations of each measurement.
Note: Should be defined by child class using get_hamiltonian_diagonal().
simulator: cirq.Simulator, the simulator to run the circuit.
"""
def __init__(self, chunk_time, num_qubits):
"""Initializer.
    This class creates a QAOA circuit in cirq.
Args:
chunk_time: Positive float, amount of time allocated to each chunk.
num_qubits: Positive int, number of qubits in the simulation.
Raises:
ValueError: If num_qubits or chunk_time are not positive.
"""
    if chunk_time <= 0.0:
raise ValueError('chunk_time must be positive, not %f.' % chunk_time)
self.chunk_time = chunk_time
if num_qubits < 1:
raise ValueError('num_qubits must be positive, not %d' % num_qubits)
self.num_qubits = num_qubits
self.simulator = cirq.Simulator()
# This attribute must be defined by calling get_hamiltonian_diagonal().
self.hamiltonian_diagonal = None
def get_hamiltonian_diagonal(self):
"""Computes the diagonal elements of the target Hamiltonian.
Returns:
List of 2 ** self.num_qubits floats containing the diagonal elements of
the target Hamiltonian.
"""
return [
self.constraint_evaluation(measurement)
for measurement in itertools.product([0, 1], repeat=self.num_qubits)]
@abc.abstractmethod
  def generate_constraint_hamiltonian_exponential(self, time):
"""Generates the circuit to apply the constraint Hamiltonian.
Applies exp(i*time*Hamiltonian) as a Cirq circuit.
Args:
time: Float, the amount of time to apply the Hamiltonian.
Returns:
List of cirq operations.
"""
pass
  def qaoa_circuit(self, bangbang_protocol):
"""Generates the QAOA circuit for the problem given the bang bang protocol.
    Starts with a layer of Hadamard gates to put the qubits into uniform
    superposition. Then collapses contiguous chunks of the same Hamiltonian
    type into single durations and generates the corresponding exponentiated
    Hamiltonians.
Args:
bangbang_protocol: List of HamiltonianType values, determines which
Hamiltonian should be applied at the corresponding chunk.
Returns:
The circuit for the bang bang protocol in QAOA format.
"""
compressed_protocol = []
for duration, hamiltonian_type in bangbang_compressor(bangbang_protocol):
compressed_protocol.append((self.chunk_time * duration, hamiltonian_type))
circuit = cirq.Circuit()
circuit.append(cirq.H.on_each(
*[cirq.LineQubit(i) for i in range(self.num_qubits)]
))
for time, hamiltonian_type in compressed_protocol:
if time == 0.0:
continue
if hamiltonian_type == HamiltonianType.CONSTRAINT:
circuit.append(self.generate_constraint_hamiltonian_exponential(time))
else:
circuit.append(generate_x_hamiltonian_exponential(self.num_qubits,
time))
return circuit
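  # Worked example (added comment): with chunk_time = 0.5 and a protocol of
  # [X, X, CONSTRAINT], the compressed schedule is [(1.0, X), (0.5, CONSTRAINT)]:
  # H on every qubit, then exp(i*1.0*sum_i X_i), then exp(i*0.5*H_constraint).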
def get_wavefunction(self, bangbang_protocol):
"""Gets wavefunction from the circuit.
Adapted from:
biology.research.collaborations.xc.qaoa.
    Note that on real quantum hardware the wavefunction cannot be obtained
    directly; it can only be sampled from measurements.
Args:
bangbang_protocol: List of HamiltonianType values, determines which
Hamiltonian should be applied at the corresponding chunk.
Returns:
Complex numpy array with shape [2 ** self.num_qubits].
"""
circuit = self.qaoa_circuit(bangbang_protocol)
return self.simulator.simulate(circuit).final_state
def get_probabilities(self, wavefunction):
"""Gets the probabilities of states from wavefunction.
Adapted from:
biology.research.collaborations.xc.qaoa.
Args:
wavefunction: Complex numpy array with shape [n_dim].
Returns:
Float numpy array with shape [2 ** self.num_qubits].
Raises:
ValueError: If the shape of wavefunction is not
[2 ** self.num_qubits] or the wavefunction is not normalized.
"""
if wavefunction.shape != (2 ** self.num_qubits,):
raise ValueError(
'The shape of wavefunction should be (%d,) but got %s'
% (2 ** self.num_qubits, wavefunction.shape))
probabilities = np.abs(wavefunction) ** 2
norm = np.sum(probabilities)
if not np.isclose(norm, 1., atol=0.001):
raise ValueError(
'Wavefunction should be normalized to 1 but got %4.6f' % norm)
return probabilities
@abc.abstractmethod
def constraint_evaluation(self, measurement):
"""Gets the evaluation of that measurement on the given problem.
Args:
measurement: Numpy array of integers, corresponds to the measured value.
Returns:
Float.
"""
pass
def get_constraint_expectation(self, wavefunction):
"""Gets the energy of the wavefunction with the Hamiltonian.
Computes the expectation value of the wavefunction with the target
Hamiltonian. Assumes the Hamiltonian is diagonal in the computational basis.
Args:
wavefunction: Complex numpy array with shape [n_dim].
Returns:
Float.
"""
probabilities = self.get_probabilities(wavefunction)
return np.dot(probabilities, self.hamiltonian_diagonal)
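  # Added note: because the target Hamiltonian is diagonal in the
  # computational basis, <psi|H|psi> reduces to sum_i |psi_i|^2 * H_ii,
  # i.e. the dot product of the probability vector with
  # self.hamiltonian_diagonal.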
| [
"numpy.abs",
"numpy.isclose",
"cirq.rx",
"itertools.product",
"cirq.LineQubit",
"ast.literal_eval",
"cirq.Circuit",
"numpy.dot",
"numpy.sum",
"cirq.Simulator"
] | [((3897, 3930), 'ast.literal_eval', 'ast.literal_eval', (['protocol_string'], {}), '(protocol_string)\n', (3913, 3930), False, 'import ast\n'), ((5228, 5244), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (5242, 5244), False, 'import cirq\n'), ((6908, 6922), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (6920, 6922), False, 'import cirq\n'), ((8767, 8788), 'numpy.sum', 'np.sum', (['probabilities'], {}), '(probabilities)\n', (8773, 8788), True, 'import numpy as np\n'), ((9677, 9725), 'numpy.dot', 'np.dot', (['probabilities', 'self.hamiltonian_diagonal'], {}), '(probabilities, self.hamiltonian_diagonal)\n', (9683, 9725), True, 'import numpy as np\n'), ((8730, 8750), 'numpy.abs', 'np.abs', (['wavefunction'], {}), '(wavefunction)\n', (8736, 8750), True, 'import numpy as np\n'), ((8800, 8833), 'numpy.isclose', 'np.isclose', (['norm', '(1.0)'], {'atol': '(0.001)'}), '(norm, 1.0, atol=0.001)\n', (8810, 8833), True, 'import numpy as np\n'), ((1743, 1761), 'cirq.rx', 'cirq.rx', (['(-2 * time)'], {}), '(-2 * time)\n', (1750, 1761), False, 'import cirq\n'), ((5680, 5729), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': 'self.num_qubits'}), '([0, 1], repeat=self.num_qubits)\n', (5697, 5729), False, 'import itertools\n'), ((1779, 1796), 'cirq.LineQubit', 'cirq.LineQubit', (['i'], {}), '(i)\n', (1793, 1796), False, 'import cirq\n'), ((6968, 6985), 'cirq.LineQubit', 'cirq.LineQubit', (['i'], {}), '(i)\n', (6982, 6985), False, 'import cirq\n')] |
import json
import torch
import numpy as np
from typing import Optional
from datasets.arrow_dataset import Dataset
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from seq2seq.utils.dataset import DataTrainingArguments, normalize, serialize_schema
from seq2seq.utils.trainer import Seq2SeqTrainer, EvalPrediction
def spider_get_input(
question: str,
serialized_schema: str,
prefix: str,
) -> str:
return prefix + question.strip() + " " + serialized_schema.strip()
def spider_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
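# Added illustration (hypothetical values, not from the original file):
#
#   spider_get_target(query="SELECT count(*) FROM singer",
#                     db_id="concert_singer",
#                     normalize_query=False, target_with_db_id=True)
#   # -> "concert_singer | SELECT count(*) FROM singer"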
def spider_add_serialized_schema(ex: dict, data_training_args: DataTrainingArguments) -> dict:
serialized_schema = serialize_schema(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
schema_serialization_type=data_training_args.schema_serialization_type,
schema_serialization_randomized=data_training_args.schema_serialization_randomized,
schema_serialization_with_db_id=data_training_args.schema_serialization_with_db_id,
schema_serialization_with_db_content=data_training_args.schema_serialization_with_db_content,
normalize_query=data_training_args.normalize_query,
)
return {"serialized_schema": serialized_schema}
def spider_pre_process_function(
batch: dict,
max_source_length: Optional[int],
max_target_length: Optional[int],
data_training_args: DataTrainingArguments,
tokenizer: PreTrainedTokenizerBase,
) -> dict:
prefix = data_training_args.source_prefix if data_training_args.source_prefix is not None else ""
inputs = [
spider_get_input(question=question, serialized_schema=serialized_schema, prefix=prefix)
for question, serialized_schema in zip(batch["question"], batch["serialized_schema"])
]
model_inputs: dict = tokenizer(
inputs,
max_length=max_source_length,
padding=False,
truncation=True,
return_overflowing_tokens=False,
)
targets = [
spider_get_target(
query=query,
db_id=db_id,
normalize_query=data_training_args.normalize_query,
target_with_db_id=data_training_args.target_with_db_id,
)
for db_id, query in zip(batch["db_id"], batch["query"])
]
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
targets,
max_length=max_target_length,
padding=False,
truncation=True,
return_overflowing_tokens=False,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
class SpiderTrainer(Seq2SeqTrainer):
def _post_process_function(
self, examples: Dataset, features: Dataset, predictions: np.ndarray, stage: str
) -> EvalPrediction:
inputs = self.tokenizer.batch_decode([f["input_ids"] for f in features], skip_special_tokens=True)
label_ids = [f["labels"] for f in features]
if self.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
_label_ids = np.where(label_ids != -100, label_ids, self.tokenizer.pad_token_id)
decoded_label_ids = self.tokenizer.batch_decode(_label_ids, skip_special_tokens=True)
metas = [
{
"query": x["query"],
"question": x["question"],
"context": context,
"label": label,
"db_id": x["db_id"],
"db_path": x["db_path"],
"db_table_names": x["db_table_names"],
"db_column_names": x["db_column_names"],
"db_foreign_keys": x["db_foreign_keys"],
}
for x, context, label in zip(examples, inputs, decoded_label_ids)
]
predictions = self.tokenizer.batch_decode(predictions, skip_special_tokens=True)
assert len(metas) == len(predictions)
with open(f"{self.args.output_dir}/predictions_{stage}.json", "w") as f:
json.dump(
[dict(**{"prediction": prediction}, **meta) for prediction, meta in zip(predictions, metas)],
f,
indent=4,
)
return EvalPrediction(predictions=predictions, label_ids=label_ids, metas=metas)
def _compute_metrics(self, eval_prediction: EvalPrediction) -> dict:
predictions, label_ids, metas = eval_prediction
if self.target_with_db_id:
# Remove database id from all predictions
predictions = [pred.split("|", 1)[-1].strip() for pred in predictions]
# TODO: using the decoded reference labels causes a crash in the spider evaluator
# if self.ignore_pad_token_for_loss:
# # Replace -100 in the labels as we can't decode them.
# label_ids = np.where(label_ids != -100, label_ids, tokenizer.pad_token_id)
# decoded_references = self.tokenizer.batch_decode(label_ids, skip_special_tokens=True)
# references = [{**{"query": r}, **m} for r, m in zip(decoded_references, metas)]
references = metas
return self.metric.compute(predictions=predictions, references=references)
# # Change it to our custom loss
# def compute_loss(self, model, inputs, return_outputs=False):
# """
# How the loss is computed by Trainer. By default, all models return the loss in the first element.
# Subclass and override for custom behavior.
# """
# if self.label_smoother is not None and "labels" in inputs:
# labels = inputs.pop("labels")
# else:
# labels = None
# outputs = model(**inputs)
# # Save past state if it exists
# # TODO: this needs to be fixed and made cleaner later.
# if self.args.past_index >= 0:
# self._past = outputs[self.args.past_index]
# if labels is not None:
# loss = self.label_smoother(outputs, labels)
# else:
# # We don't use .loss here since the model may return tuples instead of ModelOutput.
# loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
# def get_relation_norm(model):
# '''
# get relation params norm
# '''
# norm_loss = 0
    #         for name, param in model.named_parameters():
# if 'relation' in name:
# norm_loss += 0.5 * torch.sum(param**2)
# return norm_loss
# loss += get_relation_norm(model)
# return (loss, outputs) if return_outputs else loss
| [
"numpy.where",
"seq2seq.utils.dataset.serialize_schema",
"seq2seq.utils.trainer.EvalPrediction"
] | [((909, 1492), 'seq2seq.utils.dataset.serialize_schema', 'serialize_schema', ([], {'question': "ex['question']", 'db_path': "ex['db_path']", 'db_id': "ex['db_id']", 'db_column_names': "ex['db_column_names']", 'db_table_names': "ex['db_table_names']", 'schema_serialization_type': 'data_training_args.schema_serialization_type', 'schema_serialization_randomized': 'data_training_args.schema_serialization_randomized', 'schema_serialization_with_db_id': 'data_training_args.schema_serialization_with_db_id', 'schema_serialization_with_db_content': 'data_training_args.schema_serialization_with_db_content', 'normalize_query': 'data_training_args.normalize_query'}), "(question=ex['question'], db_path=ex['db_path'], db_id=ex[\n 'db_id'], db_column_names=ex['db_column_names'], db_table_names=ex[\n 'db_table_names'], schema_serialization_type=data_training_args.\n schema_serialization_type, schema_serialization_randomized=\n data_training_args.schema_serialization_randomized,\n schema_serialization_with_db_id=data_training_args.\n schema_serialization_with_db_id, schema_serialization_with_db_content=\n data_training_args.schema_serialization_with_db_content,\n normalize_query=data_training_args.normalize_query)\n", (925, 1492), False, 'from seq2seq.utils.dataset import DataTrainingArguments, normalize, serialize_schema\n'), ((4574, 4647), 'seq2seq.utils.trainer.EvalPrediction', 'EvalPrediction', ([], {'predictions': 'predictions', 'label_ids': 'label_ids', 'metas': 'metas'}), '(predictions=predictions, label_ids=label_ids, metas=metas)\n', (4588, 4647), False, 'from seq2seq.utils.trainer import Seq2SeqTrainer, EvalPrediction\n'), ((3460, 3527), 'numpy.where', 'np.where', (['(label_ids != -100)', 'label_ids', 'self.tokenizer.pad_token_id'], {}), '(label_ids != -100, label_ids, self.tokenizer.pad_token_id)\n', (3468, 3527), True, 'import numpy as np\n')] |
# Port of "loss analysis v5.xlsx" by <NAME> to Python 3
import openpyxl
import numpy as np
import sys
import os
import re
from collections import OrderedDict
import matplotlib.pyplot as plt
import warnings
# modules for this package
import analysis
from scipy import constants
T = 300 # TODO: make optional input?
Vth = constants.k * T / constants.e
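# thermal voltage k*T/q, roughly 25.9 mV at T = 300 K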
def waterfall(ax, y, xlabels=None):
'''
Create a waterfall plot.
Assumes the first value is the starting point,
    all other values are made negative, creating a 'waterfall' downwards.
'''
y = abs(np.array(y))
y[1:] = -1 * y[1:]
x = np.arange(len(y))
y_bot = np.append(0, y[:-1].cumsum())
ax.bar(x, y, bottom=y_bot, align='center')
    ax.set_ylim(ymin=y_bot[-1] + y[-1])
if xlabels is not None:
ax.set_xticks(np.arange(len(xlabels)))
ax.set_xticklabels(xlabels, rotation=40, ha='right')
return ax
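# Usage sketch (added for illustration; the numbers are hypothetical):
#
#   fig, ax = plt.subplots()
#   waterfall(ax, [80.0, 2.0, 3.0, 1.5],
#             xlabels=['FF_0', 'R_s', 'R_sh', 'other'])
#   # the first bar starts at 80; each following bar steps the total down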
class Refl(object):
def __init__(self, fname):
self.load(fname)
def process(self, f_metal=None, wlbounds=(900, 1000), wljunc=600):
'''
Performs several calculations including:
- Average Reflection (AR)
- Weighted Average Reflection (WAR)
- Light lost from front surface escape
the results are loaded into attributes
'''
# xxx need upper bound for this?
self.AR = np.trapz(self.refl / 100, x=self.wl)
self.AM15G_Jph = analysis.AM15G_resample(self.wl)
i_upper = (self.wl <= 1000)
self.WAR = (np.dot(self.refl[i_upper], self.AM15G_Jph[i_upper])
/ np.sum(self.AM15G_Jph[i_upper]))
if f_metal is None:
index = (self.wl >= 400) * i_upper
refl_min = np.amin(self.refl[index])
self.f_metal = refl_min
else:
self.f_metal = f_metal
index_l = (self.wl >= wlbounds[0])
index = (self.wl <= wlbounds[1]) * index_l
# use numpys implementation for line fitting
popt, pcov = np.polyfit(self.wl[index], self.refl[index], 1, cov=True)
self.refl_wo_escape = np.copy(self.refl)
self.refl_wo_escape[index_l] = np.polyval(popt, self.wl[index_l])
# defined as area between 100% and the given curve, to simplify calculations
Jloss = OrderedDict()
Jloss['max_limit'] = np.sum(self.AM15G_Jph)
Jloss['metal_shading'] = np.dot(self.f_metal / 100 \
* np.ones(len(self.AM15G_Jph)),
self.AM15G_Jph)
Jloss['refl_wo_escape'] = np.dot(self.refl_wo_escape / 100 \
, self.AM15G_Jph) \
- Jloss['metal_shading']
Jloss['front_escape'] = np.dot(self.refl / 100, self.AM15G_Jph) \
- Jloss['metal_shading'] \
- Jloss['refl_wo_escape']
# this makes qe Jloss calculations easier
idx_junc = analysis.find_nearest(wljunc, self.wl)
Jloss['front_escape_blue'] = np.dot(self.refl[:idx_junc] / 100,
self.AM15G_Jph[:idx_junc])
Jloss['front_escape_red'] = np.dot(self.refl[idx_junc:] / 100,
self.AM15G_Jph[idx_junc:])
self.Jloss = Jloss
def plot(self, ax):
ax.plot(self.wl, self.refl, '-o')
ax.plot(self.wl, self.refl_wo_escape, '-o')
ax.plot(self.wl, np.ones(len(self.wl)) * self.f_metal, 'r-')
ax.set_ylabel('Reflectance [%]')
ax.grid(True)
def plot_QE(self, ax):
ax.fill_between(self.wl, 100 - self.refl,
100 - self.refl_wo_escape)
ax.legend(loc='best')
# ax.set_ylabel('Reflectance [%]')
# ax.grid(True)
def load(self, raw_data_file):
'''Loads Reflectance data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1), skip_header=1,
delimiter=',').transpose()
# is this needed?
if data_array[0, 0] > data_array[0, -1]:
data_array = data_array[:, ::-1]
self.wl = data_array[0, :]
self.refl = data_array[1, :]
class QE(object):
def __init__(self, fname):
self.load(fname)
def process(self, wl, refl, refl_wo_escape, Jloss, wljunc=600):
'''
Performs several calculations from QE and Reflectance data including:
- IQE
- Leff and SRV_rear
- Current loss from each region of the device
the results are saved into attributes
'''
self.IQE = self.EQE / (100 - refl)
self.output_Basore_fit, self.plot_Basore_fit = analysis.fit_Basore(
self.wl, self.IQE)
EQE_on_eta_c = self.EQE / self.output_Basore_fit['eta_c']
idx = analysis.find_nearest(750, wl)
total_min = np.minimum((100 - refl_wo_escape), EQE_on_eta_c)
self.EQE_xxx_unnamed = np.append(100 - refl_wo_escape[:idx],
total_min[idx:])
AM15G_Jph = analysis.AM15G_resample(self.wl)
Jloss_qe = Jloss.copy()
del Jloss_qe['front_escape_red']
del Jloss_qe['front_escape_blue']
idx_junc = analysis.find_nearest(wljunc, self.wl)
Jloss_qe['parasitic_absorption'] = np.dot((100 - self.EQE_xxx_unnamed[idx_junc:]) / 100,
AM15G_Jph[idx_junc:]) \
- Jloss['front_escape_red']
Jloss_qe['bulk_recomm'] = np.dot((100 - self.EQE[idx_junc:]) / 100,
AM15G_Jph[idx_junc:]) \
- Jloss['front_escape_red'] \
- Jloss_qe['parasitic_absorption']
Jloss_qe['blue_loss'] = np.dot((100 - self.EQE[:idx_junc]) / 100,
AM15G_Jph[:idx_junc]) \
- Jloss['front_escape_blue']
self.Jloss_qe = Jloss_qe
# print(Jloss_qe)
def plot_EQE(self, ax):
line_EQE = ax.plot(self.wl, self.EQE, '-o', label='EQE')
ax.set_xlabel('Wavelength [$nm$]')
ax.set_ylabel('QE [%]')
ax.legend(loc='best')
ax.grid(True)
return line_EQE # xxx currently not working
def plot_IQE(self, ax):
ax.plot(self.wl, self.IQE, '-o', label='IQE')
ax.set_xlabel('Wavelength [$nm$]')
ax.set_ylabel('QE [%]')
ax.legend(loc='best')
ax.grid(True)
def plot_Jloss(self, ax):
waterfall(ax, list(self.Jloss_qe.values()), list(self.Jloss_qe.keys()))
def load(self, raw_data_file):
'''Loads EQE data into attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
# the other columns are ignored
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1),
skip_header=1, skip_footer=8)
self.wl = data_array[:, 0]
self.EQE = data_array[:, 1]
f = open(raw_data_file, 'r')
d = {}
for line in f.readlines()[-7:-1]:
d.update(dict([line.strip('\n').split(':')]))
d['Jsc'] = round(float(d['Jsc']) / 1e3, 7)
self.output = d
class IVLight(object):
def __init__(self, fname):
self.load(fname)
def process(self, Rsh, Rs):
'''
Light IV calculations
        Calculates the ideal fill factors:
            FF_0
            FF_s
            FF_s_sh
        Then the fill factor losses:
            FF_Rs
            FF_Rsh
            FF_other
These are all stored within two dictionaries.
Inputs:
Rsh: The shunt resistance
Rs: The series resistance
Outputs:
None
'''
self.m = analysis.ideality_factor(
self.V, -1 * (self.J - self.output['Jsc']), Vth)
ideal_FF = OrderedDict()
ideal_FF['FF_0'] = analysis.ideal_FF(self.output['Voc'])
ideal_FF['FF_s'] = analysis.ideal_FF_series(self.output['Voc'],
self.output['Jsc'],
Rs)
ideal_FF['FF_s_sh'] = analysis.ideal_FF_series_shunt(self.output['Voc'],
self.output['Jsc'],
Rs, Rsh)
self.ideal_FF = ideal_FF
FF_loss = OrderedDict()
FF_loss['FF_0'] = analysis.ideal_FF(self.output['Voc'])
FF_loss['FF_Rs'] = analysis.FF_loss_series(self.output['Voc'],
self.output['Jsc'],
self.output['Jmp'],
Rs)
FF_loss['FF_Rsh'] = analysis.FF_loss_shunt(self.output['Voc'],
self.output['Jsc'],
self.output['Vmp'],
self.output['Jmp'],
Rs, Rsh)
# for waterfall plot
FF_loss['FF_other'] = (FF_loss['FF_0'] \
- self.output['FF'] \
- FF_loss['FF_Rs'] \
- FF_loss['FF_Rsh'])
self.FF_loss = FF_loss
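        # Added note: the entries are arranged so a waterfall plot reads
        # FF_0 -> -FF_Rs -> -FF_Rsh -> -FF_other, with FF_other defined as the
        # residual FF_0 - FF - FF_Rs - FF_Rsh so the bars sum to the measured
        # fill factor.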
def plot(self, ax):
'''
Plots the current voltage curve
inputs:
ax: A figure axes to which is plotted
'''
ax.plot(self.V, self.J, '-o', label='light IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
# ax.legend(loc='best')
def plot_m(self, ax):
# trims some noise at ends of array
ax.plot(self.V[10:-5], self.m[10:-5], '-o', label='Light IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_FF1(self, ax):
waterfall(ax, list(self.FF_loss.values()), list(self.FF_loss.keys()))
def load(self, raw_data_file):
'''Loads Light IV data into attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
f = open(raw_data_file, 'r')
d = OrderedDict()
# rows which contain floats in lightIV data file header
float_rows = [2]
float_rows.extend(list(range(6, 18)))
for i, line in enumerate(f.readlines()[1:19]):
# convert to float for future calculations
if i in float_rows:
key_temp, val = line.strip('\n').split(':\t')
key = key_temp.strip()
d[key] = float(val)
else:
d.update(dict([line.strip('\n').split(':\t')]))
data_array = np.genfromtxt(raw_data_file, skip_header=20)
self.V = data_array[:, 0]
self.J = data_array[:, 1] / d['Cell Area (sqr cm)']
self.output = d
class IVSuns(object):
filepath = None
filename = None
def __init__(self, fname):
self.load(fname)
def process(self):
'''Suns Voc calculations'''
self.m = analysis.ideality_factor(self.V, self.effsuns, Vth)
def plot_IV(self, ax):
ax.plot(self.V, self.J, '-o', label='suns Voc')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_tau(self, ax):
# TODO: trims off some noise, use better method?
ax.loglog(self.Dn[5:-5], self.tau_eff[5:-5], '-o',
label='Suns Voc')
        ax.set_xlabel(r'$\Delta n$ [$cm^{-3}$]')
ax.set_ylabel(r'$\tau_{eff}$ [s]')
ax.grid(True)
ax.legend(loc='best')
# ax.set_xlim(xmin=1e11)
def plot_m(self, ax):
# trims some noise at ends of array
ax.plot(self.V[10:-5], self.m[10:-5], '-o', label='suns Voc')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
ax.set_ylim(ymin=0)
def plot_log_IV(self, ax):
# trims some noise at ends of array
# TODO: Link this to Jsc rather than this manual index
# check for real values
index = np.isfinite(self.J)
# find the meaured Jsc
Jsc_index = abs(self.V[index]) == np.min(abs(self.V[index]))
ax.plot(self.V, -1 * (
self.J - self.J[index][Jsc_index]), '-o', label='Suns Voc')
ax.set_xlabel('Voltage [$V$]')
        ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
ax.legend(loc='best')
def load(self, raw_data_file, text_format=False):
'''Loads Suns Voc data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
if text_format:
data_array = np.genfromtxt(raw_data_file, usecols=(0, 1, 2, 3, 4),
skip_header=1)
else:
# suppress annoying warning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
wb = openpyxl.load_workbook(raw_data_file, read_only=True,
data_only=True)
ws_RawData = wb.get_sheet_by_name('RawData')
ws_User = wb.get_sheet_by_name('User')
last_cell = 'J' + str(ws_RawData.max_row)
data_array = np.array([[i.value for i in j] for j in
ws_RawData['E2':last_cell]])
# try: ??
# np.asarray(xlSheet.Range("A9:I133").Value, dtype=np.float64)
params = [i.value for i in ws_User['A5':'F5'][0]]
vals = [i.value for i in ws_User['A6':'F6'][0]]
self.params = dict(zip(params, vals))
params = [i.value for i in ws_User['A8':'L8'][0]]
# Reduce 13 significant figures in .xlsx file to 6 (default of .format())
# vals = [float('{:f}'.format(i.value)) for i in
# ws_User['A6':'F6'][0]]
vals = [float('{:e}'.format(i.value))
for i in ws_User['A9':'L9'][0]]
self.output = dict(zip(params, vals))
self.effsuns = data_array[:, 0] # Effective Suns
self.V = data_array[:, 1]
self.J = data_array[:, 2]
self.P = data_array[:, 3]
self.Dn = data_array[:, 4]
self.tau_eff = data_array[:, 5]
class IVDark(object):
def __init__(self, fname):
self.load(fname)
def process(self):
'''
        This performs the Dark IV calculations for loss analysis.
        It currently calculates:
            the ideality factor as a function of voltage
'''
# Ideality factor
self.m = analysis.ideality_factor(self.V, self.J, Vth)
# Shunt resistance, at 30mV
# TODO: do linear fit with zero intercept?
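        # Added note: near V = 0 the diode currents are negligible, so the
        # measured J is dominated by the shunt term J = V / Rsh; evaluating
        # at 30 mV gives Rsh = 0.03 / J(0.03 V).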
Rsh = 0.03 / analysis.find_nearest(0.03, self.V, self.J)
return Rsh
def plot_log_IV(self, ax):
ax.semilogy(self.V, self.J, '-o', label='Dark IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Current Density [$A cm^{-2}$]')
ax.grid(True)
ax.legend(loc='best')
def plot_m(self, ax):
ax.plot(self.V, self.m, '-o', label='dark IV')
ax.set_xlabel('Voltage [$V$]')
ax.set_ylabel('Ideality Factor []')
ax.grid(True)
ax.legend(loc='best')
def load(self, raw_data_file):
'''Loads Dark IV data in attributes'''
self.filepath = raw_data_file
self.filename = os.path.basename(raw_data_file)
f = open(raw_data_file, 'r')
d = OrderedDict()
# rows which contain floats in lightIV data file header
float_rows = [1, 6, 7, 8]
for i, line in enumerate(f.readlines()[1:10]):
# convert to float for future calculations
key, val = line.strip('\n').split(':\t')
if i in float_rows:
d[key] = float(val)
else:
d[key] = val
# d.update(dict(re.findall(r'([\s\S]+)\s*:\t([^\n]+)', line)))
# d.update(dict([line.strip('\n').split(':\t')]))
# for line in f.readlines()[1:10]:
# d.update(dict(re.findall(r'([\s\S]+)\s*:\t([^\n]+)', line)))
# d['Cell Area in sqr cm'] = float(d['Cell Area in sqr cm'])
self.output = d
data_array = np.genfromtxt(
raw_data_file, usecols=(0, 1), skip_header=11)
self.V = data_array[:, 0]
self.J = data_array[:, 1] / d['Cell Area in sqr cm']
class Cell(object):
def __init__(self, thickness=None, **kwargs):
self.thickness = thickness # [cm]
self.sample_names = {}
self.input_errors = {}
self.refl = Refl(kwargs['reflectance_fname'])
self.qe = QE(kwargs['EQE_fname'])
self.sunsVoc = IVSuns(kwargs['suns Voc_fname'])
self.div = IVDark(kwargs['dark IV_fname'])
self.liv = IVLight(kwargs['light IV_fname'])
self.example_dir = os.path.join(os.pardir, 'example_cell')
self.check_input_vals()
def check_input_vals(self):
'''
Check the input cell parameters are consistent between measurements.
Gives the error as a percentage.
'''
# sample names
self.sample_names['Light IV'] = self.liv.output['Cell Name ']
self.sample_names['Suns Voc'] = self.sunsVoc.params['Sample Name']
self.sample_names['Dark IV'] = self.div.output['Cell Name']
# Cell area
# tolerance = 1e-3
area_liv = self.liv.output['Cell Area (sqr cm)']
area_div = self.div.output['Cell Area in sqr cm']
delta = (area_div - area_liv) / area_liv
self.input_errors['Cell Area'] = delta
        # thickness
        # keep the user-supplied thickness when given, so the consistency
        # check against the Suns Voc file is meaningful; otherwise fall back
        if self.thickness is None:
            self.thickness = self.sunsVoc.params['Wafer Thickness (cm)']
        tck_user_input = self.thickness
        tck_sunsVoc = self.sunsVoc.params['Wafer Thickness (cm)']
        delta = (tck_sunsVoc - tck_user_input) / tck_user_input
        self.input_errors['Cell thickness'] = delta
# Voc
Voc_liv = self.liv.output['Voc']
Voc_div = self.sunsVoc.output['Voc (V)']
delta = (Voc_div - Voc_liv) / Voc_liv
self.input_errors['Voc'] = delta
# Jsc
Jsc_liv = self.liv.output['Jsc']
Jsc_iqe = self.qe.output['Jsc']
delta = (Jsc_iqe - Jsc_liv) / Jsc_liv
self.input_errors['Jsc'] = delta
# some checks on the data
        assert abs(self.input_errors['Cell Area']
                   ) < 0.01, "Provided sample areas disagree: {0:.1f} cm^2 {1:.1f} cm^2".format(area_liv, area_div)
        assert abs(self.input_errors['Cell thickness']
                   ) < 0.01, "Provided sample thicknesses disagree: {0:.4f} cm {1:.4f} cm".format(tck_user_input, tck_sunsVoc)
        assert abs(self.input_errors['Voc']
                   ) < 0.01, "Provided Voc values disagree: {0:.0f} mV {1:.0f} mV".format(Voc_liv * 1000, Voc_div * 1000)
        assert abs(self.input_errors['Jsc']
                   ) < 0.1, "Provided Jsc values disagree: {0:.0f} mA {1:.0f} mA".format(Jsc_liv * 1000, Jsc_iqe * 1000)
def collect_outputs(self):
'''Collects input and output parameters into self.output_list'''
output_list = []
def quick_print(key, val):
output_list.append('{:>30}, {:<20}'.format(key, val))
output_list.append('\n')
quick_print('##### Inputs check: Percentage difference', '',)
for key, val in self.sample_names.items():
quick_print(key, val)
for key, val in self.input_errors.items():
quick_print(key, '{:.3e}%'.format(val * 100))
output_list.append('\n')
quick_print('##### Reflectance', '')
quick_print('filename', self.refl.filename)
output_list.append('\n')
quick_print('##### QE', '')
quick_print('filename', self.qe.filename)
for key, val in self.qe.output.items():
quick_print(key, val)
quick_print('Basore fit Leff', '{:.3e}'.format(
self.qe.output_Basore_fit['Leff']))
quick_print('Basore fit eta_c', '{:.3f}'.format(
self.qe.output_Basore_fit['eta_c']))
output_list.append('\n')
quick_print('##### Light IV', '')
quick_print('filename', self.liv.filename)
for key, val in self.liv.output.items():
quick_print(key, val)
output_list.append('\n')
quick_print('##### Suns Voc', '')
quick_print('filename', self.sunsVoc.filename)
for key, val in self.sunsVoc.params.items():
quick_print(key, val)
for key, val in self.sunsVoc.output.items():
quick_print(key, val)
output_list.append('\n')
quick_print('##### Dark IV', '')
quick_print('filename', self.div.filename)
for key, val in self.div.output.items():
quick_print(key, val)
output_list.append('\n')
        quick_print('##### Calculated', '')
quick_print('### Reflectance', '')
quick_print('AR', '{:.3f}'.format(self.refl.AR))
quick_print('WAR', '{:.3f}'.format(self.refl.WAR))
quick_print('f_metal', '{:.3f}'.format(self.refl.f_metal))
quick_print('### Parasitic resistances', '')
quick_print('Rsh (Ohm cm2)', '{:.3e}'.format(self.Rsh))
quick_print('Rs1 (Ohm cm2)', '{:.3e}'.format(self.Rs_1))
quick_print('Rs2 (Ohm cm2)', '{:.3e}'.format(self.Rs_2))
quick_print('### Current losses', '')
for key, val in self.qe.Jloss_qe.items():
quick_print(key + ' (mA)', '{:.3f}'.format(val))
quick_print('### Fill Factor', '')
for key, val in self.liv.ideal_FF.items():
quick_print(key, '{:.3f}'.format(val))
self.output_list = output_list
def print_output_to_file(self):
filename = self.cell_name + '_loss_analysis_summary.csv'
output_file = open(os.path.join(self.output_dir, filename), 'w')
for item in self.output_list:
output_file.write(item + '\r\n')
output_file.close()
def plot_all(self, save_fig_bool):
'''Plot the output of previous calculations'''
# for reflectance
fig_QE = plt.figure('QE', figsize=(30 / 2.54, 15 / 2.54))
fig_QE.clf()
ax_refl = fig_QE.add_subplot(2, 2, 1)
ax_QE = fig_QE.add_subplot(2, 2, 2)
ax_QE_fit = fig_QE.add_subplot(2, 2, 3)
ax_QE_layered = fig_QE.add_subplot(2, 2, 4)
self.refl.plot(ax_refl)
self.refl.plot(ax_QE)
self.qe.plot_EQE(ax_QE)
self.qe.plot_IQE(ax_QE)
# for light and dark IV
fig_IV = plt.figure('IV', figsize=(30 / 2.54, 15 / 2.54))
fig_IV.clf()
# get the plotting axes
ax_logIV = fig_IV.add_subplot(2, 2, 1)
ax_ideality = fig_IV.add_subplot(2, 2, 3)
ax_lightIV = fig_IV.add_subplot(2, 2, 2)
ax_tau = fig_IV.add_subplot(2, 2, 4)
# plot light IV first, as is typically the noisest
self.liv.plot_m(ax_ideality)
self.liv.plot(ax_lightIV)
# plot suns Voc
self.sunsVoc.plot_m(ax_ideality)
self.sunsVoc.plot_IV(ax_lightIV)
self.sunsVoc.plot_tau(ax_tau)
self.sunsVoc.plot_log_IV(ax_logIV)
# plot dark IV as least noisest
self.div.plot_log_IV(ax_logIV)
self.div.plot_m(ax_ideality)
# plot the EQE fitted data
self.qe.plot_Basore_fit(ax_QE_fit)
# this is doing some loss analysis filling
dummy_ones = np.ones(len(self.refl.wl))
ax_QE_layered.fill_between(self.refl.wl, dummy_ones * 100,
100 - dummy_ones * self.refl.f_metal, color='blue')
ax_QE_layered.fill_between(self.refl.wl,
100 - dummy_ones * self.refl.f_metal,
100 - self.refl.refl_wo_escape, color='green')
ax_QE_layered.fill_between(self.refl.wl, 100 - self.refl.refl_wo_escape,
100 - self.refl.refl, color='red')
ax_QE_layered.fill_between(self.refl.wl, 100 - self.refl.refl,
self.qe.EQE_xxx_unnamed, color='cyan')
# ax_QE_layered.plot(self.refl.wl, self.qe.EQE_xxx_unnamed)
ax_QE_layered.fill_between(self.refl.wl, self.qe.EQE_xxx_unnamed,
self.qe.EQE, color='magenta')
# line_EQE, = self.qe.plot_EQE(ax_QE_layered)
# line_EQE.set_marker('x')
# self.refl.plot_QE(ax_QE_layered)
# for loss analysis summary
fig_LA = plt.figure('LA', figsize=(30 / 2.54, 15 / 2.54))
fig_LA.clf()
ax_FF = fig_LA.add_subplot(2, 2, 1)
ax_Jloss = fig_LA.add_subplot(2, 2, 2)
self.liv.plot_FF1(ax_FF)
self.qe.plot_Jloss(ax_Jloss)
fig_QE.set_tight_layout(True)
fig_IV.set_tight_layout(True)
if save_fig_bool:
fig_QE.savefig(os.path.join(self.output_dir,
self.cell_name + '_QE.png'))
fig_IV.savefig(os.path.join(self.output_dir,
self.cell_name + '_IV.png'))
plt.show()
def process_all(self, save_fig_bool, output_dir, cell_name):
'''
A function that calls all the processing functions.
'''
if cell_name == '':
self.cell_name = self.liv.output['Cell Name ']
else:
self.cell_name = cell_name
self.output_dir = output_dir
self.sunsVoc.process()
self.refl.process()
self.qe.process(self.refl.wl, self.refl.refl, self.refl.refl_wo_escape,
self.refl.Jloss)
self.Rsh = self.div.process()
self.Rs_1 = analysis.Rs_calc_1(self.liv.output['Vmp'],
self.liv.output['Jmp'],
self.sunsVoc.V, self.sunsVoc.J)
self.Rs_2 = analysis.Rs_calc_2(self.liv.output['Voc'],
self.liv.output['Jsc'],
self.liv.output['FF'],
self.sunsVoc.output['PFF'])
self.liv.process(self.Rsh, self.Rs_1)
self.collect_outputs()
self.print_output_to_file()
self.plot_all(save_fig_bool)
if __name__ == "__main__":
example_dir = os.path.join(os.pardir, 'example_cell')
files = {
'reflectance_fname': os.path.join(example_dir, 'example_reflectance.csv'),
'EQE_fname': os.path.join(example_dir, 'example_EQE.txt'),
'light IV_fname': os.path.join(example_dir, 'example_lightIV.lgt'),
'suns Voc_fname': os.path.join(example_dir, 'example_sunsVoc.xlsm'),
'dark IV_fname': os.path.join(example_dir, 'example_darkIV.drk')}
    cell1 = Cell(**files)
    # process_all requires these arguments; example values chosen here
    cell1.process_all(save_fig_bool=False, output_dir=example_dir,
                      cell_name='')
| [
"numpy.polyfit",
"analysis.Rs_calc_2",
"numpy.array",
"numpy.isfinite",
"numpy.genfromtxt",
"analysis.find_nearest",
"analysis.ideal_FF",
"analysis.ideal_FF_series_shunt",
"numpy.dot",
"numpy.polyval",
"analysis.FF_loss_series",
"warnings.simplefilter",
"analysis.ideality_factor",
"collect... | [((27015, 27054), 'os.path.join', 'os.path.join', (['os.pardir', '"""example_cell"""'], {}), "(os.pardir, 'example_cell')\n", (27027, 27054), False, 'import os\n'), ((571, 582), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (579, 582), True, 'import numpy as np\n'), ((1371, 1407), 'numpy.trapz', 'np.trapz', (['(self.refl / 100)'], {'x': 'self.wl'}), '(self.refl / 100, x=self.wl)\n', (1379, 1407), True, 'import numpy as np\n'), ((1434, 1466), 'analysis.AM15G_resample', 'analysis.AM15G_resample', (['self.wl'], {}), '(self.wl)\n', (1457, 1466), False, 'import analysis\n'), ((2010, 2067), 'numpy.polyfit', 'np.polyfit', (['self.wl[index]', 'self.refl[index]', '(1)'], {'cov': '(True)'}), '(self.wl[index], self.refl[index], 1, cov=True)\n', (2020, 2067), True, 'import numpy as np\n'), ((2099, 2117), 'numpy.copy', 'np.copy', (['self.refl'], {}), '(self.refl)\n', (2106, 2117), True, 'import numpy as np\n'), ((2157, 2191), 'numpy.polyval', 'np.polyval', (['popt', 'self.wl[index_l]'], {}), '(popt, self.wl[index_l])\n', (2167, 2191), True, 'import numpy as np\n'), ((2294, 2307), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2305, 2307), False, 'from collections import OrderedDict\n'), ((2337, 2359), 'numpy.sum', 'np.sum', (['self.AM15G_Jph'], {}), '(self.AM15G_Jph)\n', (2343, 2359), True, 'import numpy as np\n'), ((3013, 3051), 'analysis.find_nearest', 'analysis.find_nearest', (['wljunc', 'self.wl'], {}), '(wljunc, self.wl)\n', (3034, 3051), False, 'import analysis\n'), ((3089, 3150), 'numpy.dot', 'np.dot', (['(self.refl[:idx_junc] / 100)', 'self.AM15G_Jph[:idx_junc]'], {}), '(self.refl[:idx_junc] / 100, self.AM15G_Jph[:idx_junc])\n', (3095, 3150), True, 'import numpy as np\n'), ((3231, 3292), 'numpy.dot', 'np.dot', (['(self.refl[idx_junc:] / 100)', 'self.AM15G_Jph[idx_junc:]'], {}), '(self.refl[idx_junc:] / 100, self.AM15G_Jph[idx_junc:])\n', (3237, 3292), True, 'import numpy as np\n'), ((3990, 4021), 'os.path.basename', 'os.path.basename', (['raw_data_file'], {}), '(raw_data_file)\n', (4006, 4021), False, 'import os\n'), ((4849, 4887), 'analysis.fit_Basore', 'analysis.fit_Basore', (['self.wl', 'self.IQE'], {}), '(self.wl, self.IQE)\n', (4868, 4887), False, 'import analysis\n'), ((4982, 5012), 'analysis.find_nearest', 'analysis.find_nearest', (['(750)', 'wl'], {}), '(750, wl)\n', (5003, 5012), False, 'import analysis\n'), ((5033, 5079), 'numpy.minimum', 'np.minimum', (['(100 - refl_wo_escape)', 'EQE_on_eta_c'], {}), '(100 - refl_wo_escape, EQE_on_eta_c)\n', (5043, 5079), True, 'import numpy as np\n'), ((5113, 5167), 'numpy.append', 'np.append', (['(100 - refl_wo_escape[:idx])', 'total_min[idx:]'], {}), '(100 - refl_wo_escape[:idx], total_min[idx:])\n', (5122, 5167), True, 'import numpy as np\n'), ((5230, 5262), 'analysis.AM15G_resample', 'analysis.AM15G_resample', (['self.wl'], {}), '(self.wl)\n', (5253, 5262), False, 'import analysis\n'), ((5397, 5435), 'analysis.find_nearest', 'analysis.find_nearest', (['wljunc', 'self.wl'], {}), '(wljunc, self.wl)\n', (5418, 5435), False, 'import analysis\n'), ((6952, 6983), 'os.path.basename', 'os.path.basename', (['raw_data_file'], {}), '(raw_data_file)\n', (6968, 6983), False, 'import os\n'), ((7046, 7120), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data_file'], {'usecols': '(0, 1)', 'skip_header': '(1)', 'skip_footer': '(8)'}), '(raw_data_file, usecols=(0, 1), skip_header=1, skip_footer=8)\n', (7059, 7120), True, 'import numpy as np\n'), ((7991, 8064), 'analysis.ideality_factor', 'analysis.ideality_factor', (['self.V', "(-1 * 
(self.J - self.output['Jsc']))", 'Vth'], {}), "(self.V, -1 * (self.J - self.output['Jsc']), Vth)\n", (8015, 8064), False, 'import analysis\n'), ((8098, 8111), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8109, 8111), False, 'from collections import OrderedDict\n'), ((8139, 8176), 'analysis.ideal_FF', 'analysis.ideal_FF', (["self.output['Voc']"], {}), "(self.output['Voc'])\n", (8156, 8176), False, 'import analysis\n'), ((8204, 8272), 'analysis.ideal_FF_series', 'analysis.ideal_FF_series', (["self.output['Voc']", "self.output['Jsc']", 'Rs'], {}), "(self.output['Voc'], self.output['Jsc'], Rs)\n", (8228, 8272), False, 'import analysis\n'), ((8407, 8486), 'analysis.ideal_FF_series_shunt', 'analysis.ideal_FF_series_shunt', (["self.output['Voc']", "self.output['Jsc']", 'Rs', 'Rsh'], {}), "(self.output['Voc'], self.output['Jsc'], Rs, Rsh)\n", (8437, 8486), False, 'import analysis\n'), ((8661, 8674), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8672, 8674), False, 'from collections import OrderedDict\n'), ((8701, 8738), 'analysis.ideal_FF', 'analysis.ideal_FF', (["self.output['Voc']"], {}), "(self.output['Voc'])\n", (8718, 8738), False, 'import analysis\n'), ((8766, 8858), 'analysis.FF_loss_series', 'analysis.FF_loss_series', (["self.output['Voc']", "self.output['Jsc']", "self.output['Jmp']", 'Rs'], {}), "(self.output['Voc'], self.output['Jsc'], self.output\n ['Jmp'], Rs)\n", (8789, 8858), False, 'import analysis\n'), ((9050, 9166), 'analysis.FF_loss_shunt', 'analysis.FF_loss_shunt', (["self.output['Voc']", "self.output['Jsc']", "self.output['Vmp']", "self.output['Jmp']", 'Rs', 'Rsh'], {}), "(self.output['Voc'], self.output['Jsc'], self.output[\n 'Vmp'], self.output['Jmp'], Rs, Rsh)\n", (9072, 9166), False, 'import analysis\n'), ((10594, 10625), 'os.path.basename', 'os.path.basename', (['raw_data_file'], {}), '(raw_data_file)\n', (10610, 10625), False, 'import os\n'), ((10676, 10689), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10687, 10689), False, 'from collections import OrderedDict\n'), ((11208, 11252), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data_file'], {'skip_header': '(20)'}), '(raw_data_file, skip_header=20)\n', (11221, 11252), True, 'import numpy as np\n'), ((11571, 11622), 'analysis.ideality_factor', 'analysis.ideality_factor', (['self.V', 'self.effsuns', 'Vth'], {}), '(self.V, self.effsuns, Vth)\n', (11595, 11622), False, 'import analysis\n'), ((12730, 12749), 'numpy.isfinite', 'np.isfinite', (['self.J'], {}), '(self.J)\n', (12741, 12749), True, 'import numpy as np\n'), ((13255, 13286), 'os.path.basename', 'os.path.basename', (['raw_data_file'], {}), '(raw_data_file)\n', (13271, 13286), False, 'import os\n'), ((15253, 15298), 'analysis.ideality_factor', 'analysis.ideality_factor', (['self.V', 'self.J', 'Vth'], {}), '(self.V, self.J, Vth)\n', (15277, 15298), False, 'import analysis\n'), ((16071, 16102), 'os.path.basename', 'os.path.basename', (['raw_data_file'], {}), '(raw_data_file)\n', (16087, 16102), False, 'import os\n'), ((16153, 16166), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16164, 16166), False, 'from collections import OrderedDict\n'), ((16923, 16983), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data_file'], {'usecols': '(0, 1)', 'skip_header': '(11)'}), '(raw_data_file, usecols=(0, 1), skip_header=11)\n', (16936, 16983), True, 'import numpy as np\n'), ((17554, 17593), 'os.path.join', 'os.path.join', (['os.pardir', '"""example_cell"""'], {}), "(os.pardir, 'example_cell')\n", (17566, 17593), False, 'import 
os\n'), ((22808, 22856), 'matplotlib.pyplot.figure', 'plt.figure', (['"""QE"""'], {'figsize': '(30 / 2.54, 15 / 2.54)'}), "('QE', figsize=(30 / 2.54, 15 / 2.54))\n", (22818, 22856), True, 'import matplotlib.pyplot as plt\n'), ((23246, 23294), 'matplotlib.pyplot.figure', 'plt.figure', (['"""IV"""'], {'figsize': '(30 / 2.54, 15 / 2.54)'}), "('IV', figsize=(30 / 2.54, 15 / 2.54))\n", (23256, 23294), True, 'import matplotlib.pyplot as plt\n'), ((25203, 25251), 'matplotlib.pyplot.figure', 'plt.figure', (['"""LA"""'], {'figsize': '(30 / 2.54, 15 / 2.54)'}), "('LA', figsize=(30 / 2.54, 15 / 2.54))\n", (25213, 25251), True, 'import matplotlib.pyplot as plt\n'), ((25802, 25812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25810, 25812), True, 'import matplotlib.pyplot as plt\n'), ((26382, 26485), 'analysis.Rs_calc_1', 'analysis.Rs_calc_1', (["self.liv.output['Vmp']", "self.liv.output['Jmp']", 'self.sunsVoc.V', 'self.sunsVoc.J'], {}), "(self.liv.output['Vmp'], self.liv.output['Jmp'], self.\n sunsVoc.V, self.sunsVoc.J)\n", (26400, 26485), False, 'import analysis\n'), ((26580, 26702), 'analysis.Rs_calc_2', 'analysis.Rs_calc_2', (["self.liv.output['Voc']", "self.liv.output['Jsc']", "self.liv.output['FF']", "self.sunsVoc.output['PFF']"], {}), "(self.liv.output['Voc'], self.liv.output['Jsc'], self.liv\n .output['FF'], self.sunsVoc.output['PFF'])\n", (26598, 26702), False, 'import analysis\n'), ((27098, 27150), 'os.path.join', 'os.path.join', (['example_dir', '"""example_reflectance.csv"""'], {}), "(example_dir, 'example_reflectance.csv')\n", (27110, 27150), False, 'import os\n'), ((27173, 27217), 'os.path.join', 'os.path.join', (['example_dir', '"""example_EQE.txt"""'], {}), "(example_dir, 'example_EQE.txt')\n", (27185, 27217), False, 'import os\n'), ((27245, 27293), 'os.path.join', 'os.path.join', (['example_dir', '"""example_lightIV.lgt"""'], {}), "(example_dir, 'example_lightIV.lgt')\n", (27257, 27293), False, 'import os\n'), ((27321, 27370), 'os.path.join', 'os.path.join', (['example_dir', '"""example_sunsVoc.xlsm"""'], {}), "(example_dir, 'example_sunsVoc.xlsm')\n", (27333, 27370), False, 'import os\n'), ((27397, 27444), 'os.path.join', 'os.path.join', (['example_dir', '"""example_darkIV.drk"""'], {}), "(example_dir, 'example_darkIV.drk')\n", (27409, 27444), False, 'import os\n'), ((1523, 1574), 'numpy.dot', 'np.dot', (['self.refl[i_upper]', 'self.AM15G_Jph[i_upper]'], {}), '(self.refl[i_upper], self.AM15G_Jph[i_upper])\n', (1529, 1574), True, 'import numpy as np\n'), ((1597, 1628), 'numpy.sum', 'np.sum', (['self.AM15G_Jph[i_upper]'], {}), '(self.AM15G_Jph[i_upper])\n', (1603, 1628), True, 'import numpy as np\n'), ((1729, 1754), 'numpy.amin', 'np.amin', (['self.refl[index]'], {}), '(self.refl[index])\n', (1736, 1754), True, 'import numpy as np\n'), ((2583, 2632), 'numpy.dot', 'np.dot', (['(self.refl_wo_escape / 100)', 'self.AM15G_Jph'], {}), '(self.refl_wo_escape / 100, self.AM15G_Jph)\n', (2589, 2632), True, 'import numpy as np\n'), ((5479, 5554), 'numpy.dot', 'np.dot', (['((100 - self.EQE_xxx_unnamed[idx_junc:]) / 100)', 'AM15G_Jph[idx_junc:]'], {}), '((100 - self.EQE_xxx_unnamed[idx_junc:]) / 100, AM15G_Jph[idx_junc:])\n', (5485, 5554), True, 'import numpy as np\n'), ((5977, 6040), 'numpy.dot', 'np.dot', (['((100 - self.EQE[:idx_junc]) / 100)', 'AM15G_Jph[:idx_junc]'], {}), '((100 - self.EQE[:idx_junc]) / 100, AM15G_Jph[:idx_junc])\n', (5983, 6040), True, 'import numpy as np\n'), ((13337, 13405), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data_file'], {'usecols': '(0, 1, 2, 3, 4)', 
'skip_header': '(1)'}), '(raw_data_file, usecols=(0, 1, 2, 3, 4), skip_header=1)\n', (13350, 13405), True, 'import numpy as np\n'), ((13914, 13982), 'numpy.array', 'np.array', (["[[i.value for i in j] for j in ws_RawData['E2':last_cell]]"], {}), "([[i.value for i in j] for j in ws_RawData['E2':last_cell]])\n", (13922, 13982), True, 'import numpy as np\n'), ((15408, 15451), 'analysis.find_nearest', 'analysis.find_nearest', (['(0.03)', 'self.V', 'self.J'], {}), '(0.03, self.V, self.J)\n', (15429, 15451), False, 'import analysis\n'), ((22510, 22549), 'os.path.join', 'os.path.join', (['self.output_dir', 'filename'], {}), '(self.output_dir, filename)\n', (22522, 22549), False, 'import os\n'), ((2777, 2816), 'numpy.dot', 'np.dot', (['(self.refl / 100)', 'self.AM15G_Jph'], {}), '(self.refl / 100, self.AM15G_Jph)\n', (2783, 2816), True, 'import numpy as np\n'), ((4044, 4118), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data_file'], {'usecols': '(0, 1)', 'skip_header': '(1)', 'delimiter': '""","""'}), "(raw_data_file, usecols=(0, 1), skip_header=1, delimiter=',')\n", (4057, 4118), True, 'import numpy as np\n'), ((5705, 5768), 'numpy.dot', 'np.dot', (['((100 - self.EQE[idx_junc:]) / 100)', 'AM15G_Jph[idx_junc:]'], {}), '((100 - self.EQE[idx_junc:]) / 100, AM15G_Jph[idx_junc:])\n', (5711, 5768), True, 'import numpy as np\n'), ((13516, 13541), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13539, 13541), False, 'import warnings\n'), ((13559, 13590), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (13580, 13590), False, 'import warnings\n'), ((13612, 13681), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['raw_data_file'], {'read_only': '(True)', 'data_only': '(True)'}), '(raw_data_file, read_only=True, data_only=True)\n', (13634, 13681), False, 'import openpyxl\n'), ((25568, 25625), 'os.path.join', 'os.path.join', (['self.output_dir', "(self.cell_name + '_QE.png')"], {}), "(self.output_dir, self.cell_name + '_QE.png')\n", (25580, 25625), False, 'import os\n'), ((25694, 25751), 'os.path.join', 'os.path.join', (['self.output_dir', "(self.cell_name + '_IV.png')"], {}), "(self.output_dir, self.cell_name + '_IV.png')\n", (25706, 25751), False, 'import os\n')] |
import os
import numpy as np
import pkg_resources
from sklearn.pipeline import make_pipeline
import bob.io.base
import bob.io.image
from bob.pipelines.sample_loaders import AnnotationsLoader, CSVToSampleLoader
def test_sample_loader():
path = pkg_resources.resource_filename(
__name__, os.path.join("data", "samples")
)
sample_loader = CSVToSampleLoader(
data_loader=bob.io.base.load,
dataset_original_directory=path,
extension=".pgm",
)
f = open(os.path.join(path, "samples.csv"))
samples = sample_loader.transform(f)
assert len(samples) == 2
assert np.alltrue([s.data.shape == (112, 92) for s in samples])
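# Added note: CSVToSampleLoader turns each row of samples.csv into a sample
# whose `.data` attribute holds the image loaded via `data_loader` from
# `dataset_original_directory` + key + `extension` (hence the (112, 92)
# pgm shapes asserted above).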
def test_annotations_loader():
path = pkg_resources.resource_filename(
__name__, os.path.join("data", "samples")
)
csv_sample_loader = CSVToSampleLoader(
data_loader=bob.io.base.load,
dataset_original_directory=path,
extension=".pgm",
)
annotation_loader = AnnotationsLoader(
annotation_directory=path,
annotation_extension=".pos",
annotation_type="eyecenter",
)
sample_loader = make_pipeline(csv_sample_loader, annotation_loader)
f = open(os.path.join(path, "samples.csv"))
samples = sample_loader.transform(f)
assert len(samples) == 2
assert np.alltrue([s.data.shape == (112, 92) for s in samples])
assert np.alltrue([isinstance(s.annotations, dict) for s in samples])
| [
"bob.pipelines.sample_loaders.CSVToSampleLoader",
"numpy.alltrue",
"os.path.join",
"sklearn.pipeline.make_pipeline",
"bob.pipelines.sample_loaders.AnnotationsLoader"
] | [((363, 466), 'bob.pipelines.sample_loaders.CSVToSampleLoader', 'CSVToSampleLoader', ([], {'data_loader': 'bob.io.base.load', 'dataset_original_directory': 'path', 'extension': '""".pgm"""'}), "(data_loader=bob.io.base.load, dataset_original_directory=\n path, extension='.pgm')\n", (380, 466), False, 'from bob.pipelines.sample_loaders import AnnotationsLoader, CSVToSampleLoader\n'), ((624, 682), 'numpy.alltrue', 'np.alltrue', (['[(s.data.shape == (112, 92)) for s in samples]'], {}), '([(s.data.shape == (112, 92)) for s in samples])\n', (634, 682), True, 'import numpy as np\n'), ((839, 942), 'bob.pipelines.sample_loaders.CSVToSampleLoader', 'CSVToSampleLoader', ([], {'data_loader': 'bob.io.base.load', 'dataset_original_directory': 'path', 'extension': '""".pgm"""'}), "(data_loader=bob.io.base.load, dataset_original_directory=\n path, extension='.pgm')\n", (856, 942), False, 'from bob.pipelines.sample_loaders import AnnotationsLoader, CSVToSampleLoader\n'), ((993, 1099), 'bob.pipelines.sample_loaders.AnnotationsLoader', 'AnnotationsLoader', ([], {'annotation_directory': 'path', 'annotation_extension': '""".pos"""', 'annotation_type': '"""eyecenter"""'}), "(annotation_directory=path, annotation_extension='.pos',\n annotation_type='eyecenter')\n", (1010, 1099), False, 'from bob.pipelines.sample_loaders import AnnotationsLoader, CSVToSampleLoader\n'), ((1148, 1199), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['csv_sample_loader', 'annotation_loader'], {}), '(csv_sample_loader, annotation_loader)\n', (1161, 1199), False, 'from sklearn.pipeline import make_pipeline\n'), ((1331, 1389), 'numpy.alltrue', 'np.alltrue', (['[(s.data.shape == (112, 92)) for s in samples]'], {}), '([(s.data.shape == (112, 92)) for s in samples])\n', (1341, 1389), True, 'import numpy as np\n'), ((304, 335), 'os.path.join', 'os.path.join', (['"""data"""', '"""samples"""'], {}), "('data', 'samples')\n", (316, 335), False, 'import os\n'), ((507, 540), 'os.path.join', 'os.path.join', (['path', '"""samples.csv"""'], {}), "(path, 'samples.csv')\n", (519, 540), False, 'import os\n'), ((776, 807), 'os.path.join', 'os.path.join', (['"""data"""', '"""samples"""'], {}), "('data', 'samples')\n", (788, 807), False, 'import os\n'), ((1214, 1247), 'os.path.join', 'os.path.join', (['path', '"""samples.csv"""'], {}), "(path, 'samples.csv')\n", (1226, 1247), False, 'import os\n')] |
import contextlib
import math
from collections import defaultdict
from time import perf_counter
from warnings import filterwarnings
import numpy
import dask
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.distributed import Client, performance_report, wait
from dask.utils import format_bytes, format_time, parse_bytes
from dask_cuda.benchmarks.utils import (
get_cluster_options,
get_scheduler_workers,
parse_benchmark_args,
plot_benchmark,
setup_memory_pool,
)
from dask_cuda.utils import all_to_all
# Benchmarking cuDF merge operation based on
# <https://gist.github.com/rjzamora/0ffc35c19b5180ab04bbf7c793c45955>
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match, gpu):
    # Set a seed that triggers the maximum amount of communication in the two-GPU case.
if gpu:
import cupy as xp
import cudf as xdf
else:
import numpy as xp
import pandas as xdf
xp.random.seed(2 ** 32 - 1)
chunk_type = chunk_type or "build"
frac_match = frac_match or 1.0
if chunk_type == "build":
# Build dataframe
#
# "key" column is a unique sample within [0, local_size * num_chunks)
#
# "shuffle" column is a random selection of partitions (used for shuffle)
#
# "payload" column is a random permutation of the chunk_size
start = local_size * i_chunk
stop = start + local_size
parts_array = xp.arange(num_chunks, dtype="int64")
        shuffle_array = xp.repeat(parts_array, math.ceil(local_size / num_chunks))
df = xdf.DataFrame(
{
"key": xp.arange(start, stop=stop, dtype="int64"),
"shuffle": xp.random.permutation(suffle_array)[:local_size],
"payload": xp.random.permutation(xp.arange(local_size, dtype="int64")),
}
)
else:
# Other dataframe
#
# "key" column matches values from the build dataframe
# for a fraction (`frac_match`) of the entries. The matching
# entries are perfectly balanced across each partition of the
# "base" dataframe.
#
# "payload" column is a random permutation of the chunk_size
# Step 1. Choose values that DO match
sub_local_size = local_size // num_chunks
sub_local_size_use = max(int(sub_local_size * frac_match), 1)
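        # Worked example (assumed parameters): local_size=1_000_000,
        # num_chunks=2, frac_match=0.3 give sub_local_size=500_000 and
        # sub_local_size_use=150_000, so the loop below collects
        # 2 * 150_000 = 300_000 matching keys and Step 2 fills in the
        # remaining 700_000 non-matching ones.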
arrays = []
for i in range(num_chunks):
bgn = (local_size * i) + (sub_local_size * i_chunk)
end = bgn + sub_local_size
ar = xp.arange(bgn, stop=end, dtype="int64")
arrays.append(xp.random.permutation(ar)[:sub_local_size_use])
key_array_match = xp.concatenate(tuple(arrays), axis=0)
# Step 2. Add values that DON'T match
missing_size = local_size - key_array_match.shape[0]
start = local_size * num_chunks + local_size * i_chunk
stop = start + missing_size
key_array_no_match = xp.arange(start, stop=stop, dtype="int64")
# Step 3. Combine and create the final dataframe chunk (dask_cudf partition)
key_array_combine = xp.concatenate(
(key_array_match, key_array_no_match), axis=0
)
df = xdf.DataFrame(
{
"key": xp.random.permutation(key_array_combine),
"payload": xp.random.permutation(xp.arange(local_size, dtype="int64")),
}
)
return df
def get_random_ddf(chunk_size, num_chunks, frac_match, chunk_type, args):
    parts = [chunk_size] * num_chunks
    device_type = args.type == "gpu"
meta = generate_chunk(0, 4, 1, chunk_type, None, device_type)
divisions = [None] * (len(parts) + 1)
name = "generate-data-" + tokenize(chunk_size, num_chunks, frac_match, chunk_type)
graph = {
(name, i): (
generate_chunk,
i,
part,
len(parts),
chunk_type,
frac_match,
device_type,
)
for i, part in enumerate(parts)
}
ddf = new_dd_object(graph, name, meta, divisions)
if chunk_type == "build":
if not args.no_shuffle:
            divisions = list(range(num_chunks + 1))
return ddf.set_index("shuffle", divisions=tuple(divisions))
else:
del ddf["shuffle"]
return ddf
def merge(args, ddf1, ddf2):
# Allow default broadcast behavior, unless
# "--shuffle-join" or "--broadcast-join" was
# specified (with "--shuffle-join" taking
# precedence)
broadcast = False if args.shuffle_join else (True if args.broadcast_join else None)
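    # Resulting mapping (spelled out from the expression above):
    #   --shuffle-join            -> broadcast=False (force a shuffle join)
    #   --broadcast-join (alone)  -> broadcast=True  (force a broadcast join)
    #   neither flag              -> broadcast=None  (let the library decide)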
# The merge/join operation
ddf_join = ddf1.merge(ddf2, on=["key"], how="inner", broadcast=broadcast)
if args.set_index:
ddf_join = ddf_join.set_index("key")
wait(ddf_join.persist())
def run(client, args, n_workers, write_profile=None):
# Generate random Dask dataframes
ddf_base = get_random_ddf(
args.chunk_size, args.base_chunks, args.frac_match, "build", args
).persist()
ddf_other = get_random_ddf(
args.chunk_size, args.other_chunks, args.frac_match, "other", args
).persist()
wait(ddf_base)
wait(ddf_other)
assert len(ddf_base.dtypes) == 2
assert len(ddf_other.dtypes) == 2
data_processed = len(ddf_base) * sum([t.itemsize for t in ddf_base.dtypes])
data_processed += len(ddf_other) * sum([t.itemsize for t in ddf_other.dtypes])
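    # Note: data_processed counts input bytes only (row count times the
    # per-row byte width of each frame); the merge result is not included.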
    # Get contexts to use (defaults to null contexts that don't do anything)
ctx1, ctx2 = contextlib.nullcontext(), contextlib.nullcontext()
if args.backend == "explicit-comms":
ctx1 = dask.config.set(explicit_comms=True)
if write_profile is not None:
ctx2 = performance_report(filename=args.profile)
with ctx1:
with ctx2:
t1 = perf_counter()
merge(args, ddf_base, ddf_other)
t2 = perf_counter()
return (data_processed, t2 - t1)
def main(args):
cluster_options = get_cluster_options(args)
Cluster = cluster_options["class"]
cluster_args = cluster_options["args"]
cluster_kwargs = cluster_options["kwargs"]
scheduler_addr = cluster_options["scheduler_addr"]
if args.sched_addr:
client = Client(args.sched_addr)
else:
filterwarnings(
"ignore", message=".*NVLink.*rmm_pool_size.*", category=UserWarning
)
cluster = Cluster(*cluster_args, **cluster_kwargs)
if args.multi_node:
import time
# Allow some time for workers to start and connect to scheduler
# TODO: make this a command-line argument?
time.sleep(15)
client = Client(scheduler_addr if args.multi_node else cluster)
if args.type == "gpu":
client.run(
setup_memory_pool,
pool_size=args.rmm_pool_size,
disable_pool=args.disable_rmm_pool,
log_directory=args.rmm_log_directory,
)
# Create an RMM pool on the scheduler due to occasional deserialization
# of CUDA objects. May cause issues with InfiniBand otherwise.
client.run_on_scheduler(
setup_memory_pool,
pool_size=1e9,
disable_pool=args.disable_rmm_pool,
log_directory=args.rmm_log_directory,
)
scheduler_workers = client.run_on_scheduler(get_scheduler_workers)
n_workers = len(scheduler_workers)
client.wait_for_workers(n_workers)
# Allow the number of chunks to vary between
# the "base" and "other" DataFrames
args.base_chunks = args.base_chunks or n_workers
args.other_chunks = args.other_chunks or n_workers
if args.all_to_all:
all_to_all(client)
took_list = []
for _ in range(args.runs - 1):
took_list.append(run(client, args, n_workers, write_profile=None))
took_list.append(
run(client, args, n_workers, write_profile=args.profile)
) # Only profiling the last run
# Collect, aggregate, and print peer-to-peer bandwidths
incoming_logs = client.run(lambda dask_worker: dask_worker.incoming_transfer_log)
bandwidths = defaultdict(list)
total_nbytes = defaultdict(list)
for k, L in incoming_logs.items():
for d in L:
if d["total"] >= args.ignore_size:
bandwidths[k, d["who"]].append(d["bandwidth"])
total_nbytes[k, d["who"]].append(d["total"])
bandwidths = {
(scheduler_workers[w1].name, scheduler_workers[w2].name): [
"%s/s" % format_bytes(x) for x in numpy.quantile(v, [0.25, 0.50, 0.75])
]
for (w1, w2), v in bandwidths.items()
}
total_nbytes = {
(scheduler_workers[w1].name, scheduler_workers[w2].name,): format_bytes(sum(nb))
for (w1, w2), nb in total_nbytes.items()
}
broadcast = (
False if args.shuffle_join else (True if args.broadcast_join else "default")
)
t_runs = numpy.empty(len(took_list))
if args.markdown:
print("```")
print("Merge benchmark")
print("-------------------------------")
print(f"backend | {args.backend}")
print(f"merge type | {args.type}")
print(f"rows-per-chunk | {args.chunk_size}")
print(f"base-chunks | {args.base_chunks}")
print(f"other-chunks | {args.other_chunks}")
print(f"broadcast | {broadcast}")
print(f"protocol | {args.protocol}")
print(f"device(s) | {args.devs}")
print(f"rmm-pool | {(not args.disable_rmm_pool)}")
print(f"frac-match | {args.frac_match}")
if args.protocol == "ucx":
print(f"tcp | {args.enable_tcp_over_ucx}")
print(f"ib | {args.enable_infiniband}")
print(f"nvlink | {args.enable_nvlink}")
print(f"data-processed | {format_bytes(took_list[0][0])}")
print("===============================")
print("Wall-clock | Throughput")
print("-------------------------------")
for idx, (data_processed, took) in enumerate(took_list):
throughput = int(data_processed / took)
m = format_time(took)
m += " " * (15 - len(m))
print(f"{m}| {format_bytes(throughput)}/s")
t_runs[idx] = float(format_bytes(throughput).split(" ")[0])
print("===============================")
if args.markdown:
print("\n```")
if args.plot is not None:
plot_benchmark(t_runs, args.plot, historical=True)
if args.backend == "dask":
if args.markdown:
print("<details>\n<summary>Worker-Worker Transfer Rates</summary>\n\n```")
print("(w1,w2) | 25% 50% 75% (total nbytes)")
print("-------------------------------")
for (d1, d2), bw in sorted(bandwidths.items()):
fmt = (
"(%s,%s) | %s %s %s (%s)"
if args.multi_node or args.sched_addr
else "(%02d,%02d) | %s %s %s (%s)"
)
print(fmt % (d1, d2, bw[0], bw[1], bw[2], total_nbytes[(d1, d2)]))
if args.markdown:
print("```\n</details>\n")
if args.multi_node:
client.shutdown()
client.close()
def parse_args():
special_args = [
{
"name": ["-b", "--backend",],
"choices": ["dask", "explicit-comms"],
"default": "dask",
"type": str,
"help": "The backend to use.",
},
{
"name": ["-t", "--type",],
"choices": ["cpu", "gpu"],
"default": "gpu",
"type": str,
"help": "Do merge with GPU or CPU dataframes",
},
{
"name": ["-c", "--chunk-size",],
"default": 1_000_000,
"metavar": "n",
"type": int,
"help": "Chunk size (default 1_000_000)",
},
{
"name": "--base-chunks",
"default": None,
"type": int,
"help": "Number of base-DataFrame partitions (default: n_workers)",
},
{
"name": "--other-chunks",
"default": None,
"type": int,
"help": "Number of other-DataFrame partitions (default: n_workers)",
},
{
"name": "--broadcast-join",
"action": "store_true",
"help": "Use broadcast join when possible.",
},
{
"name": "--shuffle-join",
"action": "store_true",
"help": "Use shuffle join (takes precedence over '--broadcast-join').",
},
{
"name": "--ignore-size",
"default": "1 MiB",
"metavar": "nbytes",
"type": parse_bytes,
"help": "Ignore messages smaller than this (default '1 MB')",
},
{
"name": "--frac-match",
"default": 0.3,
"type": float,
"help": "Fraction of rows that matches (default 0.3)",
},
{
"name": "--no-shuffle",
"action": "store_true",
"help": "Don't shuffle the keys of the left (base) dataframe.",
},
{
"name": "--markdown",
"action": "store_true",
"help": "Write output as markdown",
},
{"name": "--runs", "default": 3, "type": int, "help": "Number of runs",},
{
"name": ["-s", "--set-index",],
"action": "store_true",
"help": "Call set_index on the key column to sort the joined dataframe.",
},
]
return parse_benchmark_args(
description="Distributed merge (dask/cudf) benchmark", args_list=special_args
)
if __name__ == "__main__":
main(parse_args())
| [
"time.sleep",
"dask.distributed.wait",
"dask.base.tokenize",
"numpy.arange",
"time.perf_counter",
"dask.utils.format_bytes",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.permutation",
"dask.config.set",
"dask_cuda.utils.all_to_all",
"dask.distributed.performance_report",
"dask.uti... | [((976, 1003), 'numpy.random.seed', 'xp.random.seed', (['(2 ** 32 - 1)'], {}), '(2 ** 32 - 1)\n', (990, 1003), True, 'import numpy as xp\n'), ((4135, 4178), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['graph', 'name', 'meta', 'divisions'], {}), '(graph, name, meta, divisions)\n', (4148, 4178), False, 'from dask.dataframe.core import new_dd_object\n'), ((5274, 5288), 'dask.distributed.wait', 'wait', (['ddf_base'], {}), '(ddf_base)\n', (5278, 5288), False, 'from dask.distributed import Client, performance_report, wait\n'), ((5293, 5308), 'dask.distributed.wait', 'wait', (['ddf_other'], {}), '(ddf_other)\n', (5297, 5308), False, 'from dask.distributed import Client, performance_report, wait\n'), ((6102, 6127), 'dask_cuda.benchmarks.utils.get_cluster_options', 'get_cluster_options', (['args'], {}), '(args)\n', (6121, 6127), False, 'from dask_cuda.benchmarks.utils import get_cluster_options, get_scheduler_workers, parse_benchmark_args, plot_benchmark, setup_memory_pool\n'), ((8243, 8260), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8254, 8260), False, 'from collections import defaultdict\n'), ((8280, 8297), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8291, 8297), False, 'from collections import defaultdict\n'), ((13664, 13767), 'dask_cuda.benchmarks.utils.parse_benchmark_args', 'parse_benchmark_args', ([], {'description': '"""Distributed merge (dask/cudf) benchmark"""', 'args_list': 'special_args'}), "(description='Distributed merge (dask/cudf) benchmark',\n args_list=special_args)\n", (13684, 13767), False, 'from dask_cuda.benchmarks.utils import get_cluster_options, get_scheduler_workers, parse_benchmark_args, plot_benchmark, setup_memory_pool\n'), ((1489, 1525), 'numpy.arange', 'xp.arange', (['num_chunks'], {'dtype': '"""int64"""'}), "(num_chunks, dtype='int64')\n", (1498, 1525), True, 'import numpy as xp\n'), ((3019, 3061), 'numpy.arange', 'xp.arange', (['start'], {'stop': 'stop', 'dtype': '"""int64"""'}), "(start, stop=stop, dtype='int64')\n", (3028, 3061), True, 'import numpy as xp\n'), ((3176, 3237), 'numpy.concatenate', 'xp.concatenate', (['(key_array_match, key_array_no_match)'], {'axis': '(0)'}), '((key_array_match, key_array_no_match), axis=0)\n', (3190, 3237), True, 'import numpy as xp\n'), ((3817, 3873), 'dask.base.tokenize', 'tokenize', (['chunk_size', 'num_chunks', 'frac_match', 'chunk_type'], {}), '(chunk_size, num_chunks, frac_match, chunk_type)\n', (3825, 3873), False, 'from dask.base import tokenize\n'), ((5645, 5669), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (5667, 5669), False, 'import contextlib\n'), ((5671, 5695), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (5693, 5695), False, 'import contextlib\n'), ((5752, 5788), 'dask.config.set', 'dask.config.set', ([], {'explicit_comms': '(True)'}), '(explicit_comms=True)\n', (5767, 5788), False, 'import dask\n'), ((5838, 5879), 'dask.distributed.performance_report', 'performance_report', ([], {'filename': 'args.profile'}), '(filename=args.profile)\n', (5856, 5879), False, 'from dask.distributed import Client, performance_report, wait\n'), ((6354, 6377), 'dask.distributed.Client', 'Client', (['args.sched_addr'], {}), '(args.sched_addr)\n', (6360, 6377), False, 'from dask.distributed import Client, performance_report, wait\n'), ((6396, 6484), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'message': '""".*NVLink.*rmm_pool_size.*"""', 'category': 'UserWarning'}), "('ignore', 
message='.*NVLink.*rmm_pool_size.*', category=\n UserWarning)\n", (6410, 6484), False, 'from warnings import filterwarnings\n'), ((6791, 6845), 'dask.distributed.Client', 'Client', (['(scheduler_addr if args.multi_node else cluster)'], {}), '(scheduler_addr if args.multi_node else cluster)\n', (6797, 6845), False, 'from dask.distributed import Client, performance_report, wait\n'), ((7806, 7824), 'dask_cuda.utils.all_to_all', 'all_to_all', (['client'], {}), '(client)\n', (7816, 7824), False, 'from dask_cuda.utils import all_to_all\n'), ((10201, 10218), 'dask.utils.format_time', 'format_time', (['took'], {}), '(took)\n', (10212, 10218), False, 'from dask.utils import format_bytes, format_time, parse_bytes\n'), ((10501, 10551), 'dask_cuda.benchmarks.utils.plot_benchmark', 'plot_benchmark', (['t_runs', 'args.plot'], {'historical': '(True)'}), '(t_runs, args.plot, historical=True)\n', (10515, 10551), False, 'from dask_cuda.benchmarks.utils import get_cluster_options, get_scheduler_workers, parse_benchmark_args, plot_benchmark, setup_memory_pool\n'), ((1572, 1606), 'math.ceil', 'math.ceil', (['(local_size / num_chunks)'], {}), '(local_size / num_chunks)\n', (1581, 1606), False, 'import math\n'), ((2605, 2644), 'numpy.arange', 'xp.arange', (['bgn'], {'stop': 'end', 'dtype': '"""int64"""'}), "(bgn, stop=end, dtype='int64')\n", (2614, 2644), True, 'import numpy as xp\n'), ((5932, 5946), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5944, 5946), False, 'from time import perf_counter\n'), ((6009, 6023), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6021, 6023), False, 'from time import perf_counter\n'), ((6758, 6772), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (6768, 6772), False, 'import time\n'), ((1674, 1716), 'numpy.arange', 'xp.arange', (['start'], {'stop': 'stop', 'dtype': '"""int64"""'}), "(start, stop=stop, dtype='int64')\n", (1683, 1716), True, 'import numpy as xp\n'), ((3325, 3365), 'numpy.random.permutation', 'xp.random.permutation', (['key_array_combine'], {}), '(key_array_combine)\n', (3346, 3365), True, 'import numpy as xp\n'), ((8636, 8651), 'dask.utils.format_bytes', 'format_bytes', (['x'], {}), '(x)\n', (8648, 8651), False, 'from dask.utils import format_bytes, format_time, parse_bytes\n'), ((8661, 8697), 'numpy.quantile', 'numpy.quantile', (['v', '[0.25, 0.5, 0.75]'], {}), '(v, [0.25, 0.5, 0.75])\n', (8675, 8697), False, 'import numpy\n'), ((9916, 9945), 'dask.utils.format_bytes', 'format_bytes', (['took_list[0][0]'], {}), '(took_list[0][0])\n', (9928, 9945), False, 'from dask.utils import format_bytes, format_time, parse_bytes\n'), ((1745, 1780), 'numpy.random.permutation', 'xp.random.permutation', (['suffle_array'], {}), '(suffle_array)\n', (1766, 1780), True, 'import numpy as xp\n'), ((1844, 1880), 'numpy.arange', 'xp.arange', (['local_size'], {'dtype': '"""int64"""'}), "(local_size, dtype='int64')\n", (1853, 1880), True, 'import numpy as xp\n'), ((2671, 2696), 'numpy.random.permutation', 'xp.random.permutation', (['ar'], {}), '(ar)\n', (2692, 2696), True, 'import numpy as xp\n'), ((3416, 3452), 'numpy.arange', 'xp.arange', (['local_size'], {'dtype': '"""int64"""'}), "(local_size, dtype='int64')\n", (3425, 3452), True, 'import numpy as xp\n'), ((10274, 10298), 'dask.utils.format_bytes', 'format_bytes', (['throughput'], {}), '(throughput)\n', (10286, 10298), False, 'from dask.utils import format_bytes, format_time, parse_bytes\n'), ((10332, 10356), 'dask.utils.format_bytes', 'format_bytes', (['throughput'], {}), '(throughput)\n', (10344, 10356), 
False, 'from dask.utils import format_bytes, format_time, parse_bytes\n')] |
#!/usr/bin/env python3
"""
transparent_images.py
Converts an RGB image to an RGBA image with transparency,
depending on colors in each of the four corners of the image.
"""
from collections import Counter
from glob import glob
from os import chdir, makedirs, path
import sys
from matplotlib.image import imsave
from numpy import array
from PIL import Image
import PySimpleGUI as sg
if len(sys.argv) == 1:
event, (fname,) = sg.Window('My Script').Layout([[sg.Text('Folder to open')],
[sg.In(), sg.FolderBrowse()],
[sg.CloseButton('Open'), sg.CloseButton('Cancel')]]).Read()
else:
    fname = sys.argv[1]
    event = "Open"  # no dialog was shown, so behave as if "Open" was clicked
if not fname:
sg.Popup("Cancel", "No filename supplied")
raise SystemExit("Cancelling: no filename supplied")
# print(event, fname)
if event == "Open":
chdir(fname)
png_file_list = glob("*.png")
# print(len(png_file_list), png_file_list)
new_file_folder = fname + "/transparent"
# print(new_file_folder)
if png_file_list and not path.exists(new_file_folder):
makedirs(new_file_folder)
for file_name in png_file_list:
old_file_path = fname + "/" + file_name
filename_list = path.splitext(file_name)
new_file_name = filename_list[0] + "_trans" + filename_list[1]
# print(file_name, new_file_name)
img = Image.open(old_file_path).convert('RGBA')
corner_pix = []
corner_pix.append((img.getpixel((0, 0))))
corner_pix.append((img.getpixel((img.width-1, 0))))
corner_pix.append((img.getpixel((0, img.height-1))))
corner_pix.append((img.getpixel((img.width-1, img.height-1))))
color = array(Counter(corner_pix).most_common(1)[0][0])
# print(f'corner_pix: {corner_pix}')
corner_pix_set = set(corner_pix)
corner_pix_count = len(corner_pix_set)
# print(f'corner color count: {corner_pix_count}, corner color set: {corner_pix_set}')
if corner_pix_count < 4: # if a color appears more than once in the corners
alpha_color = array([255, 255, 255, 0])
# print(corner_pix[0], color, alpha_color)
pixdata = array(img, dtype='i')
# old_pix_data = imread(old_file_path)
# print('Pillow Data: ', pixdata[15], 'MatPlotLib Data: ', old_pix_data)
width, height = img.size
# print(width, height)
# print(color[3], color[3] < alpha_color[3])
# exit()
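        # Pixels equal to the dominant corner color become fully transparent;
        # the first pixel after such a run gets alpha 127 (half transparent)
        # to soften the cut-out edge.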
if color[3] > alpha_color[3]:
for y in range(height):
leading_alpha = 0
for x in range(width):
# print(x, y, pixdata[y, x], color)
if (pixdata[y, x] == color).all():
leading_alpha = 127
# print(x, y, color, alpha_color, pixdata[y,x])
pixdata[y, x] = alpha_color
# print(color, alpha_color, pixdata[y,x])
elif leading_alpha == 127:
pixdata[y, x][3] = leading_alpha
leading_alpha = 0
new_file_path = new_file_folder + "/" + new_file_name
# print(new_file_path)
# pixdata = pixdata.reshape(pixdata.shape[0]*pixdata.shape[1], pixdata.shape[2])
# print(pixdata.shape)
# new_img = Image.fromarray(pixdata, mode='RGBA') # RGBA?
# new_pix_data = array(new_img, dtype = 'i') # i?
# print(pixdata[15], new_pix_data[15])
imsave(new_file_path, pixdata)
# new_img.show()
# new_img.save(new_file_path)
# exit()
| [
"os.path.exists",
"PIL.Image.open",
"PySimpleGUI.Popup",
"os.makedirs",
"PySimpleGUI.FolderBrowse",
"PySimpleGUI.In",
"os.path.splitext",
"matplotlib.image.imsave",
"PySimpleGUI.Text",
"os.chdir",
"numpy.array",
"PySimpleGUI.CloseButton",
"collections.Counter",
"PySimpleGUI.Window",
"glo... | [((722, 764), 'PySimpleGUI.Popup', 'sg.Popup', (['"""Cancel"""', '"""No filename supplied"""'], {}), "('Cancel', 'No filename supplied')\n", (730, 764), True, 'import PySimpleGUI as sg\n'), ((874, 886), 'os.chdir', 'chdir', (['fname'], {}), '(fname)\n', (879, 886), False, 'from os import chdir, makedirs, path\n'), ((908, 921), 'glob.glob', 'glob', (['"""*.png"""'], {}), "('*.png')\n", (912, 921), False, 'from glob import glob\n'), ((1117, 1142), 'os.makedirs', 'makedirs', (['new_file_folder'], {}), '(new_file_folder)\n', (1125, 1142), False, 'from os import chdir, makedirs, path\n'), ((1256, 1280), 'os.path.splitext', 'path.splitext', (['file_name'], {}), '(file_name)\n', (1269, 1280), False, 'from os import chdir, makedirs, path\n'), ((1078, 1106), 'os.path.exists', 'path.exists', (['new_file_folder'], {}), '(new_file_folder)\n', (1089, 1106), False, 'from os import chdir, makedirs, path\n'), ((2141, 2166), 'numpy.array', 'array', (['[255, 255, 255, 0]'], {}), '([255, 255, 255, 0])\n', (2146, 2166), False, 'from numpy import array\n'), ((2248, 2269), 'numpy.array', 'array', (['img'], {'dtype': '"""i"""'}), "(img, dtype='i')\n", (2253, 2269), False, 'from numpy import array\n'), ((3709, 3739), 'matplotlib.image.imsave', 'imsave', (['new_file_path', 'pixdata'], {}), '(new_file_path, pixdata)\n', (3715, 3739), False, 'from matplotlib.image import imsave\n'), ((1413, 1438), 'PIL.Image.open', 'Image.open', (['old_file_path'], {}), '(old_file_path)\n', (1423, 1438), False, 'from PIL import Image\n'), ((460, 482), 'PySimpleGUI.Window', 'sg.Window', (['"""My Script"""'], {}), "('My Script')\n", (469, 482), True, 'import PySimpleGUI as sg\n'), ((492, 517), 'PySimpleGUI.Text', 'sg.Text', (['"""Folder to open"""'], {}), "('Folder to open')\n", (499, 517), True, 'import PySimpleGUI as sg\n'), ((550, 557), 'PySimpleGUI.In', 'sg.In', ([], {}), '()\n', (555, 557), True, 'import PySimpleGUI as sg\n'), ((559, 576), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', ([], {}), '()\n', (574, 576), True, 'import PySimpleGUI as sg\n'), ((609, 631), 'PySimpleGUI.CloseButton', 'sg.CloseButton', (['"""Open"""'], {}), "('Open')\n", (623, 631), True, 'import PySimpleGUI as sg\n'), ((633, 657), 'PySimpleGUI.CloseButton', 'sg.CloseButton', (['"""Cancel"""'], {}), "('Cancel')\n", (647, 657), True, 'import PySimpleGUI as sg\n'), ((1749, 1768), 'collections.Counter', 'Counter', (['corner_pix'], {}), '(corner_pix)\n', (1756, 1768), False, 'from collections import Counter\n')] |
# Copyright (c) 2021, Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import (AutoMinorLocator, FuncFormatter, MaxNLocator,
FormatStrFormatter, LogLocator)
class BiHistogram():
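    """Back-to-back log-log histograms of two latency series.

    `values_a` and `values_b` are (label, data) pairs; plot() stacks two
    histograms that share an x axis, with the lower panel's y axis inverted.
    """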
def __init__(self, values_a, values_b):
self.values_a = values_a
self.values_b = values_b
self.xlabel = 'Latency (us)'
def _plot_hist(self, axis, label, data, colour):
hist, edges = np.histogram(data, bins='sqrt')
axis.fill_between(edges[:-1], hist, color=colour, antialiased=False,
rasterized=True)
axis.set_yscale('log')
axis.set_xscale('log')
axis.tick_params(axis='y', which='both', labelsize='xx-small')
axis.set_ylabel(label)
axis.grid(True, which='both', axis='x', linewidth=0.3)
def plot(self, title, filename):
fig, axes = plt.subplots(2, 1, gridspec_kw={'hspace': 0.01},
sharex=True)
self._plot_hist(axes[0], self.values_a[0], self.values_a[1], 'orange')
self._plot_hist(axes[1], self.values_b[0], self.values_b[1],
'cornflowerblue')
axes[1].set_xlabel(self.xlabel, fontsize='x-small')
axes[1].tick_params(axis='x', which='both', labelsize='xx-small',
labelrotation=45)
axes[1].invert_yaxis()
ax = axes[1].get_xaxis()
ax.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.set_major_formatter(FormatStrFormatter("%.2f"))
for ax in fig.get_axes():
ax.label_outer()
fig.suptitle(title)
plt.savefig(filename, dpi=300)
plt.close()
class StackedBarChart():
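    """Stacked bar chart of named latency components; one bar per add_bar() call."""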
def __init__(self):
self.colours = ['gold', 'lightgreen', 'lightsalmon', 'violet',
'cornflowerblue', 'lightcoral']
self.bars = []
self.bar_distance = 2
self.ylabel = 'Latency (us)'
def add_bar(self, legend, values):
self.bars.append((legend, values))
def __attribute_colours(self):
values_names = sorted({value[0]
for bar in self.bars
for value in bar[1]})
if len(values_names) > len(self.colours):
raise Exception('Add more self.colours for stacked bar chart!')
return dict(zip(values_names, self.colours))
def plot(self, title, filename):
values_colours = self.__attribute_colours()
indices = []
index = 0
for bar in self.bars:
i = 0
cumu_col = 0
for value in bar[1]:
height = value[1]
plt.bar(index, height, label=value[0],
color=values_colours[value[0]], bottom=cumu_col)
plt.text(index, cumu_col + height / 2, "%.3f" % height,
ha='center', va='center', fontsize=7)
cumu_col = height + cumu_col
i = i + 1
indices.append(index)
            # Step by more than 1 so the bars are spaced apart
index = index + self.bar_distance
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
# Avoid legend repetition by using the label as a key to dict
labels, handles = zip(*dict(zip(labels, handles)).items())
plt.subplots_adjust(right=0.8)
plt.legend(reversed(handles), reversed(labels), loc='upper left',
fontsize='x-small', ncol=1, bbox_to_anchor=(1.01, 1.))
ax.set_xbound(-1, 4)
ax.set_xticks(indices)
ax.set_xticklabels([bar[0] for bar in self.bars])
plt.title(title)
plt.xticks(fontsize='x-small')
plt.ylabel(self.ylabel)
plt.savefig(filename, dpi=300)
plt.close()
class HistogramGroupPlot():
def __init__(self, group_size, min_latency, max_latency, iterations):
self.min_latency = min_latency
self.max_latency = max_latency
self.iterations = iterations
self.fig, self.plots = plt.subplots(group_size, 1, sharex=True)
if not isinstance(self.plots, np.ndarray):
self.plots = np.array([self.plots])
self.plot_idx = 0
self.xlabel = 'Latency (us)'
def add_histogram(self, data, edges, mean, stdev, ylabel):
if self.plot_idx >= len(self.plots):
raise Exception("Can't add more histograms: group_size too small")
plot = self.plots[self.plot_idx]
self.plot_idx += 1
plot.fill_between(edges[1:], data, antialiased=False, rasterized=True)
plot.set_xscale('log', subsx=[2, 4, 6, 8])
plot.set_yscale('log')
# Set the labels
plot.text(0.8, 0.8, f'Mean {mean:.2f} us', fontsize=5,
transform=plot.transAxes)
plot.text(0.8, 0.7, f'STDEV {stdev:.2f} us', fontsize=5,
transform=plot.transAxes)
plot.set_ylabel(ylabel)
# Set limits and locations of ticks
        # Set ylim a bit larger than strictly needed so the plots have some
        # headroom above the tallest bin
plot.set_ylim(0.5, self.iterations * 2)
ax = plot.get_xaxis()
ax.limit_range_for_scale(self.min_latency, self.max_latency)
        # There is no one-size-fits-all way to place ticks, so choose the
        # tick locator based on the range of the data.
if self.max_latency - self.min_latency < 100:
ax.set_major_locator(MaxNLocator(nbins=5, steps=[1, 2, 3, 4, 5],
min_n_ticks=4))
plot.minorticks_off()
else:
ax.set_major_locator(LogLocator())
# Format the ticks and enable grid
ax.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.set_major_formatter(FormatStrFormatter("%.2f"))
plot.tick_params(axis='x', which='both', labelsize='xx-small',
labelrotation=45)
plot.grid(b=True, which='both', axis='x', linewidth=0.3)
def plot(self, title, filename):
for ax in self.fig.get_axes():
ax.label_outer()
self.plots[-1].set_xlabel(self.xlabel)
plt.tight_layout(pad=1.5)
self.fig.suptitle(title, y=0.99)
self.fig.savefig(filename, dpi=300)
plt.close()
class RunSequencePlot:
def __init__(self, data):
self.data = data
self.colour_masks = [
{'mask': np.full_like(self.data[1], True, dtype=bool),
'colour': 'C0'}]
def _format_xtick(self, x, pos):
return x / 1000000
def _plot_x_label(self, axis):
xaxis = axis.get_xaxis()
if isinstance(self.data[0], np.ndarray):
xaxis.set_major_formatter(FuncFormatter(self._format_xtick))
axis.set_xlabel('Iterations (millions)', fontsize='x-small')
elif np.issubdtype(self.data[0], np.datetime64):
xaxis_fmt = mdates.DateFormatter("%H:%M:%S")
xaxis.set_major_formatter(xaxis_fmt)
axis.set_xlabel('Time (hh:mm:ss)', fontsize='x-small')
axis.tick_params('x', labelrotation=90)
plt.subplots_adjust(bottom=0.2)
else:
raise Exception('Unknown indices type')
xaxis.set_minor_locator(AutoMinorLocator())
xaxis.set_major_locator(MaxNLocator(nbins='auto', prune='upper'))
def _plot_y_label(self, axis):
axis.set_ylabel('Latency (us)', fontsize='x-small')
axis.tick_params(labelsize='xx-small')
axis.margins(x=0)
def _plot_scatter(self, axis, indices, values):
for mask in self.colour_masks:
axis.plot(indices[mask['mask']], values[mask['mask']], marker='.',
markersize=1, linestyle='', c=mask['colour'],
rasterized=True)
def _plot_histogram(self, axis, values):
hist, edges = np.histogram(values, bins='sqrt')
axis.fill_betweenx(edges[:-1], hist, color='#9ec0ff',
antialiased=False, rasterized=True)
axis.set_yticks([])
axis.set_xscale('log')
axis.set_xlim(left=0.9, right=len(values))
axis.minorticks_on()
axis.tick_params(labelsize='xx-small')
axis.grid(True, which='both', axis='x', linewidth=0.3)
axis.set_xlabel('Frequency', fontsize='x-small')
def plot(self, title, filename):
fig, axes = plt.subplots(1, 2, gridspec_kw={'width_ratios': [2, 1],
'wspace': 0.01})
indices = self.data[0]
values = self.data[1]
self._plot_x_label(axes[0])
self._plot_y_label(axes[0])
self._plot_scatter(axes[0], indices, values)
self._plot_histogram(axes[1], values)
fig.suptitle(title, fontsize=8)
plt.savefig(filename, dpi=300)
plt.close()
class RunSequenceGroupPlot(RunSequencePlot):
def __init__(self, data_list):
self.data_list = data_list
self.colour_masks = dict()
for data in self.data_list:
self.colour_masks[data[0]] = [
{'mask': np.full_like(data[2], True, dtype=bool),
'colour': 'C0'}]
def plot(self, title, filename):
fig, axes = plt.subplots(len(self.data_list), 2,
gridspec_kw={'width_ratios': [2, 1],
'wspace': 0.01})
if not isinstance(axes[0], np.ndarray):
axes = np.array([axes])
# We'll lie to parent that colour_masks is a simple array of masks
all_colour_masks = self.colour_masks
for (ax, data) in zip(axes, self.data_list):
self.data = data[1:]
name = data[0]
indices = data[1]
values = data[2]
# Here we lie
self.colour_masks = all_colour_masks[data[0]]
self._plot_x_label(ax[0])
self._plot_y_label(ax[0], f'{name} (us)')
self._plot_scatter(ax[0], indices, values)
self._plot_histogram(ax[1], values)
# Undo the lie
self.colour_masks = all_colour_masks
for ax in fig.get_axes():
ax.label_outer()
fig.suptitle(title, fontsize=8)
plt.savefig(filename, dpi=300)
plt.close()
def _plot_y_label(self, axis, label):
axis.set_ylabel(label, fontsize='x-small')
axis.tick_params(labelsize='xx-small')
axis.margins(x=0)
| [
"matplotlib.ticker.LogLocator",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"matplotlib.ticker.AutoMinorLocator",
"numpy.histogram",
"numpy.full_like",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.close",
"numpy.issubdtype",
"matplotlib.pyplot.savefig",
... | [((108, 122), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (115, 122), True, 'import matplotlib as mpl\n'), ((595, 626), 'numpy.histogram', 'np.histogram', (['data'], {'bins': '"""sqrt"""'}), "(data, bins='sqrt')\n", (607, 626), True, 'import numpy as np\n'), ((1032, 1093), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'gridspec_kw': "{'hspace': 0.01}", 'sharex': '(True)'}), "(2, 1, gridspec_kw={'hspace': 0.01}, sharex=True)\n", (1044, 1093), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1812), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (1793, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1832), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1830, 1832), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3308), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3306, 3308), True, 'import matplotlib.pyplot as plt\n'), ((3511, 3541), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.8)'}), '(right=0.8)\n', (3530, 3541), True, 'import matplotlib.pyplot as plt\n'), ((3816, 3832), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3825, 3832), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3871), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-small"""'}), "(fontsize='x-small')\n", (3851, 3871), True, 'import matplotlib.pyplot as plt\n'), ((3880, 3903), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.ylabel'], {}), '(self.ylabel)\n', (3890, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3912, 3942), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (3923, 3942), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3962), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3960, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4253), 'matplotlib.pyplot.subplots', 'plt.subplots', (['group_size', '(1)'], {'sharex': '(True)'}), '(group_size, 1, sharex=True)\n', (4225, 4253), True, 'import matplotlib.pyplot as plt\n'), ((6319, 6344), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.5)'}), '(pad=1.5)\n', (6335, 6344), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6449), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6447, 6449), True, 'import matplotlib.pyplot as plt\n'), ((8032, 8065), 'numpy.histogram', 'np.histogram', (['values'], {'bins': '"""sqrt"""'}), "(values, bins='sqrt')\n", (8044, 8065), True, 'import numpy as np\n'), ((8555, 8627), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'gridspec_kw': "{'width_ratios': [2, 1], 'wspace': 0.01}"}), "(1, 2, gridspec_kw={'width_ratios': [2, 1], 'wspace': 0.01})\n", (8567, 8627), True, 'import matplotlib.pyplot as plt\n'), ((8963, 8993), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (8974, 8993), True, 'import matplotlib.pyplot as plt\n'), ((9002, 9013), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9011, 9013), True, 'import matplotlib.pyplot as plt\n'), ((10425, 10455), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (10436, 10455), True, 'import matplotlib.pyplot as plt\n'), ((10464, 10475), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10473, 10475), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1621), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', 
(['"""%.2f"""'], {}), "('%.2f')\n", (1613, 1621), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((1654, 1680), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (1672, 1680), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((4330, 4352), 'numpy.array', 'np.array', (['[self.plots]'], {}), '([self.plots])\n', (4338, 4352), True, 'import numpy as np\n'), ((5891, 5917), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (5909, 5917), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((5950, 5976), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (5968, 5976), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((7011, 7053), 'numpy.issubdtype', 'np.issubdtype', (['self.data[0]', 'np.datetime64'], {}), '(self.data[0], np.datetime64)\n', (7024, 7053), True, 'import numpy as np\n'), ((7423, 7441), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (7439, 7441), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((7475, 7515), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'nbins': '"""auto"""', 'prune': '"""upper"""'}), "(nbins='auto', prune='upper')\n", (7486, 7515), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((9651, 9667), 'numpy.array', 'np.array', (['[axes]'], {}), '([axes])\n', (9659, 9667), True, 'import numpy as np\n'), ((2830, 2921), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'height'], {'label': 'value[0]', 'color': 'values_colours[value[0]]', 'bottom': 'cumu_col'}), '(index, height, label=value[0], color=values_colours[value[0]],\n bottom=cumu_col)\n', (2837, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3056), 'matplotlib.pyplot.text', 'plt.text', (['index', '(cumu_col + height / 2)', "('%.3f' % height)"], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': '(7)'}), "(index, cumu_col + height / 2, '%.3f' % height, ha='center', va=\n 'center', fontsize=7)\n", (2966, 3056), True, 'import matplotlib.pyplot as plt\n'), ((5616, 5674), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'nbins': '(5)', 'steps': '[1, 2, 3, 4, 5]', 'min_n_ticks': '(4)'}), '(nbins=5, steps=[1, 2, 3, 4, 5], min_n_ticks=4)\n', (5627, 5674), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((5802, 5814), 'matplotlib.ticker.LogLocator', 'LogLocator', ([], {}), '()\n', (5812, 5814), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((6585, 6629), 'numpy.full_like', 'np.full_like', (['self.data[1]', '(True)'], {'dtype': 'bool'}), '(self.data[1], True, dtype=bool)\n', (6597, 6629), True, 'import numpy as np\n'), ((6890, 6923), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['self._format_xtick'], {}), '(self._format_xtick)\n', (6903, 6923), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, MaxNLocator, FormatStrFormatter, LogLocator\n'), ((7079, 7111), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M:%S"""'], {}), 
"('%H:%M:%S')\n", (7099, 7111), True, 'import matplotlib.dates as mdates\n'), ((7292, 7323), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (7311, 7323), True, 'import matplotlib.pyplot as plt\n'), ((9274, 9313), 'numpy.full_like', 'np.full_like', (['data[2]', '(True)'], {'dtype': 'bool'}), '(data[2], True, dtype=bool)\n', (9286, 9313), True, 'import numpy as np\n')] |
import gc
from functools import reduce
from typing import Callable, Iterable, List, Optional, Tuple, TypeVar
import numpy as np
import pandas as pd
from .graph import AttrMap, Graph
from .trace import AddOp, TraceKey
from .utils import filter_not_null
from .utils.fs import IOAction
from .utils.ray import ray_iter
__all__ = [
"calc_iou",
"calc_iou_compact",
"calc_trace_side_overlap",
"calc_weighted_iou",
"calc_class_trace_side_overlap",
"calc_class_trace_side_overlap_norm",
"calc_trace_side_overlap_compact",
"calc_class_trace_side_overlap_compact",
"calc_trace_side_overlap_both_compact",
"calc_density",
"calc_density_compact",
"calc_space",
"calc_skip_ratio",
"calc_trace_size",
"calc_density_compact_per_layer",
"self_similarity_matrix",
"self_similarity_matrix_ray",
]
T = TypeVar("T")
def calc_iou(trace1: AttrMap, trace2: AttrMap, key: str = TraceKey.EDGE) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = TraceKey.to_array(node_trace1[key])
trace_set2 = TraceKey.to_array(node_trace2[key])
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
return len(intersect), len(union)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
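# In other words, calc_iou is a Jaccard index aggregated across layers:
#   sum_n |trace1_n ∩ trace2_n|  /  sum_n |trace1_n ∪ trace2_n|
# where layers that do not carry `key` are skipped via filter_not_null.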
def calc_iou_frequency(
trace1: AttrMap, trace2: AttrMap, frequency: int, key: str = TraceKey.EDGE
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = (
node_trace1[key].index[node_trace1[key]["count"] > frequency].values
)
trace_set2 = (
node_trace2[key].index[node_trace2[key]["count"] > frequency].values
)
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
return len(intersect), len(union)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
def calc_iou_frequency_per_layer(
trace1: AttrMap,
trace2: AttrMap,
node_name: str,
frequency: int,
key: str = TraceKey.EDGE,
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = (
node_trace1[key].index[node_trace1[key]["count"] > frequency].values
)
trace_set2 = (
node_trace2[key].index[node_trace2[key]["count"] > frequency].values
)
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
if len(union) != 0:
return len(intersect) / len(union)
else:
return 0
else:
return None
def calc_iou_per_layer(
trace1: AttrMap, trace2: AttrMap, node_name: str, key: str = TraceKey.EDGE
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = TraceKey.to_array(node_trace1[key])
trace_set2 = TraceKey.to_array(node_trace2[key])
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
return len(intersect) / len(union)
else:
return None
def calc_class_trace_side_overlap(
class_trace: AttrMap, trace: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect(node_name: str) -> Optional[int]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect)
else:
return None
iou = (
sum(filter_not_null([intersect(node_name) for node_name in class_trace.nodes]))
/ class_trace.attrs[TraceKey.max_of(TraceKey.num_of(key))]
)
return iou
def calc_class_trace_side_overlap_norm(
class_trace: AttrMap, trace: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect(node_name: str) -> Optional[int]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect)
else:
return None
iou = (
sum(filter_not_null([intersect(node_name) for node_name in class_trace.nodes]))
- class_trace.attrs[TraceKey.min_of(TraceKey.num_of(key))]
) / class_trace.attrs[TraceKey.max_of(TraceKey.num_of(key))]
return iou
def calc_trace_side_overlap(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
# if key == TraceKey.EDGE and node_name.startswith("max"):
# return None
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
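# Unlike calc_iou, this overlap is asymmetric: it measures what fraction of
# the per-input trace is covered by the class trace, i.e.
#   sum_n |class_n ∩ trace_n|  /  sum_n |trace_n|.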
def calc_trace_size(
trace: AttrMap, key: str = TraceKey.EDGE, compact: bool = False
) -> Optional[int]:
def trace_size(node_name: str) -> Optional[int]:
node_trace = trace.nodes[node_name]
if key in node_trace:
if compact:
return np.count_nonzero(np.unpackbits(node_trace[key]))
else:
return TraceKey.to_array(node_trace[key]).size
else:
return None
return sum(filter_not_null([trace_size(node_name) for node_name in trace.nodes]))
def calc_trace_size_per_layer(
trace: AttrMap, layer_name: str, key: str = TraceKey.EDGE, compact: bool = False
) -> Optional[int]:
def trace_size(node_name: str) -> Optional[int]:
node_trace = trace.nodes[node_name]
if key in node_trace:
if compact:
return np.count_nonzero(np.unpackbits(node_trace[key]))
else:
return TraceKey.to_array(node_trace[key]).size
else:
return None
return trace_size(layer_name)
def calc_trace_path_num(trace: AttrMap, layer: str) -> int:
return trace.tensors[layer][TraceKey.PATH]["count"].sum()
def calc_trace_side_overlap_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(
np.argwhere(np.unpackbits(node_class_trace[key]))
)
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_trace_side_overlap_both_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
return_size: bool = False,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = node_class_trace[key]
trace_set = node_trace[key]
intersect = np.bitwise_and(class_trace_set, trace_set)
return (
np.count_nonzero(np.unpackbits(intersect)),
np.count_nonzero(np.unpackbits(trace_set)),
)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
intersect_size, union_size = reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
iou = get_iou((intersect_size, union_size))
if return_size:
return iou, intersect_size
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_class_trace_side_overlap_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(
np.argwhere(np.unpackbits(node_class_trace[key]))
)
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(class_trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_weighted_iou(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[float, float]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
frame_class_trace = node_class_trace[key]
intersect_mask = np.isin(
TraceKey.to_array(frame_class_trace),
node_trace[key],
assume_unique=True,
)
return (
frame_class_trace["count"].values[intersect_mask].sum()
/ class_trace.attrs[TraceKey.COUNT],
frame_class_trace["count"].values.sum()
/ class_trace.attrs[TraceKey.COUNT],
)
else:
return None
def get_iou(args: Tuple[float, float]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_iou_compact(
trace1: AttrMap, trace2: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = node_trace1[key]
trace_set2 = node_trace2[key]
intersect = np.bitwise_and(trace_set1, trace_set2)
union = np.bitwise_or(trace_set1, trace_set2)
return (
np.count_nonzero(np.unpackbits(intersect)),
np.count_nonzero(np.unpackbits(union)),
)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
def calc_iou_compact_per_layer(
trace1: AttrMap, trace2: AttrMap, node_name: str, key: str = TraceKey.EDGE
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = node_trace1[key]
trace_set2 = node_trace2[key]
intersect = np.bitwise_and(trace_set1, trace_set2)
union = np.bitwise_or(trace_set1, trace_set2)
return np.count_nonzero(np.unpackbits(intersect)) / np.count_nonzero(
np.unpackbits(union)
)
else:
return None
def self_similarity_matrix(
iterable: Iterable[T],
trace_fn: Callable[[T], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
matrix = np.eye(size, dtype=float)
for i in range(0, size):
for j in range(i + 1, size):
trace_i = trace_fn(iterable[i])
trace_j = trace_fn(iterable[j])
similarity = similarity_fn(trace_i, trace_j)
matrix[i][j] = similarity
matrix[j][i] = similarity
return matrix
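# Usage sketch (hypothetical loader name; any callables with matching
# signatures work):
#   matrix = self_similarity_matrix(class_ids, trace_fn=load_class_trace,
#                                   similarity_fn=calc_iou)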
def self_similarity_matrix_ray(
partial_path: str,
iterable: Iterable[T],
trace_fn: Callable[[T], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
key: str = TraceKey.EDGE,
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
def calc_similarity(iter_i, iter_j):
trace_i = trace_fn(iter_i)
trace_j = trace_fn(iter_j)
if trace_i is None or trace_j is None:
return 0.0
else:
similarity = similarity_fn(trace_i, trace_j, key=key)
return similarity
def save_and_load_similarity(i, j):
# tr.print_diff()
iter_i = iterable[i]
iter_j = iterable[j]
action = IOAction(
f"{partial_path}/{iter_i}_{iter_j}.pkl",
init_fn=lambda: calc_similarity(iter_i, iter_j),
cache=True,
)
action.save()
return i, j, action.load()
# tr = tracker.SummaryTracker()
similarity_list = ray_iter(
save_and_load_similarity,
[(i, j) for i in range(0, size) for j in range(i + 1, size)],
out_of_order=True,
chunksize=1,
)
matrix = np.eye(size, dtype=float)
for i, j, similarity in similarity_list:
matrix[i][j] = similarity
matrix[j][i] = similarity
print(f"finish i={i}, j={j}")
return matrix
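# Design note (assuming IOAction's cache semantics): each pairwise similarity
# is persisted to its own pickle, so a rerun can reload pairs that were
# already computed instead of recomputing them.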
def inter_class_similarity_matrix_ray(
partial_path: str,
iterable: Iterable[T],
trace_fn: Callable[[T, str], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
key: str = TraceKey.EDGE,
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
def calc_similarity(iter_i, iter_j):
trace_i = trace_fn(iter_i, "left")
trace_j = trace_fn(iter_j, "right")
if trace_i is None or trace_j is None:
return 0.0
else:
similarity = similarity_fn(trace_i, trace_j, key=key)
del trace_i
del trace_j
gc.collect()
return similarity
def save_and_load_similarity(i, j):
# tr.print_diff()
iter_i = iterable[i]
iter_j = iterable[j]
action = IOAction(
f"{partial_path}/{iter_i}_{iter_j}.pkl",
init_fn=lambda: calc_similarity(iter_i, iter_j),
cache=True,
)
action.save()
return i, j, action.load()
# tr = tracker.SummaryTracker()
similarity_list = ray_iter(
save_and_load_similarity,
[(i, j) for i in range(0, size) for j in range(i, size)],
out_of_order=True,
chunksize=1,
)
matrix = np.zeros((size, size), dtype=float)
for i, j, similarity in similarity_list:
matrix[i][j] = similarity
matrix[j][i] = similarity
print(f"finish i={i}, j={j}")
return matrix
def calc_density(trace: AttrMap, key: str) -> float:
density = sum(
node[key].size for name, node in trace.nodes.items() if key in node
) / sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
return density
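# Density is the fraction of traced elements in the tensors that carry `key`:
#   sum_n |trace_n| / sum_n prod(shape_n)
# calc_density_compact below computes the same ratio from bit-packed traces.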
def calc_density_compact(trace: AttrMap, key: str) -> float:
density = sum(
np.count_nonzero(np.unpackbits(node[key]))
for name, node in trace.nodes.items()
if key in node
) / sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
return density
def calc_density_compact_per_layer(
trace: AttrMap, layers: List[str], key: str
) -> pd.DataFrame:
result_layers = []
densities = []
for layer_name in layers:
node = trace.nodes[layer_name]
if key in node:
result_layers.append(layer_name)
densities.append(
np.count_nonzero(np.unpackbits(node[key]))
/ np.prod(node[key + "_shape"])
)
return pd.DataFrame(dict(density=densities), index=result_layers).rename_axis(
"layer"
)
def calc_metrics_compact_per_layer(trace: AttrMap, layers: List[str]) -> pd.DataFrame:
result_layers = []
metrics = []
for layer_name in layers:
node = trace.nodes[layer_name]
for metric_name in TraceKey.METRICS:
result_layers.append(f"{layer_name}/{metric_name}")
metrics.append(node[metric_name])
return pd.DataFrame(dict(value=metrics), index=result_layers).rename_axis(
"layer_metric"
)
def calc_skip_ratio(graph: Graph, layers: List[str]) -> pd.DataFrame:
result_layers = []
skip_ratios = []
for node_name in layers:
node = graph.node(graph.id(node_name))
if isinstance(node, AddOp):
traced_edges = np.unpackbits(node.attrs[TraceKey.EDGE]).reshape(
node.attrs[TraceKey.EDGE_SHAPE]
)
result_layers.append(node.name)
skip_ratios.append(
np.count_nonzero(traced_edges[1]) / np.count_nonzero(traced_edges)
)
return pd.DataFrame(dict(skip_ratio=skip_ratios), index=result_layers).rename_axis(
"layer"
)
def calc_space(trace: AttrMap, key: str) -> int:
return sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
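# Hedged note (not part of the original file): calc_space is the total element
# count across traced nodes (e.g. stored shapes (2, 3) and (4,) give
# 6 + 4 = 10), the same denominator the density functions above divide the
# traced counts by.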
| [
"numpy.intersect1d",
"numpy.eye",
"numpy.prod",
"numpy.bitwise_or",
"numpy.union1d",
"numpy.unpackbits",
"numpy.bitwise_and",
"numpy.count_nonzero",
"numpy.zeros",
"gc.collect",
"typing.TypeVar"
] | [((854, 866), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (861, 866), False, 'from typing import Callable, Iterable, List, Optional, Tuple, TypeVar\n'), ((16024, 16049), 'numpy.eye', 'np.eye', (['size'], {'dtype': 'float'}), '(size, dtype=float)\n', (16030, 16049), True, 'import numpy as np\n'), ((17567, 17592), 'numpy.eye', 'np.eye', (['size'], {'dtype': 'float'}), '(size, dtype=float)\n', (17573, 17592), True, 'import numpy as np\n'), ((19072, 19107), 'numpy.zeros', 'np.zeros', (['(size, size)'], {'dtype': 'float'}), '((size, size), dtype=float)\n', (19080, 19107), True, 'import numpy as np\n'), ((3595, 3633), 'numpy.intersect1d', 'np.intersect1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (3609, 3633), True, 'import numpy as np\n'), ((3650, 3684), 'numpy.union1d', 'np.union1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (3660, 3684), True, 'import numpy as np\n'), ((4191, 4229), 'numpy.intersect1d', 'np.intersect1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (4205, 4229), True, 'import numpy as np\n'), ((4246, 4280), 'numpy.union1d', 'np.union1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (4256, 4280), True, 'import numpy as np\n'), ((15501, 15539), 'numpy.bitwise_and', 'np.bitwise_and', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (15515, 15539), True, 'import numpy as np\n'), ((15556, 15593), 'numpy.bitwise_or', 'np.bitwise_or', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (15569, 15593), True, 'import numpy as np\n'), ((1299, 1337), 'numpy.intersect1d', 'np.intersect1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (1313, 1337), True, 'import numpy as np\n'), ((1358, 1392), 'numpy.union1d', 'np.union1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (1368, 1392), True, 'import numpy as np\n'), ((2449, 2487), 'numpy.intersect1d', 'np.intersect1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (2463, 2487), True, 'import numpy as np\n'), ((2508, 2542), 'numpy.union1d', 'np.union1d', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (2518, 2542), True, 'import numpy as np\n'), ((4816, 4858), 'numpy.intersect1d', 'np.intersect1d', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (4830, 4858), True, 'import numpy as np\n'), ((5587, 5629), 'numpy.intersect1d', 'np.intersect1d', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (5601, 5629), True, 'import numpy as np\n'), ((6565, 6607), 'numpy.intersect1d', 'np.intersect1d', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (6579, 6607), True, 'import numpy as np\n'), ((9010, 9052), 'numpy.intersect1d', 'np.intersect1d', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (9024, 9052), True, 'import numpy as np\n'), ((10283, 10325), 'numpy.bitwise_and', 'np.bitwise_and', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (10297, 10325), True, 'import numpy as np\n'), ((11814, 11856), 'numpy.intersect1d', 'np.intersect1d', (['class_trace_set', 'trace_set'], {}), '(class_trace_set, trace_set)\n', (11828, 11856), True, 'import numpy as np\n'), ((14434, 14472), 'numpy.bitwise_and', 'np.bitwise_and', (['trace_set1', 'trace_set2'], {}), '(trace_set1, trace_set2)\n', (14448, 14472), True, 'import numpy as np\n'), ((14493, 14530), 'numpy.bitwise_or', 'np.bitwise_or', (['trace_set1', 'trace_set2'], {}), 
'(trace_set1, trace_set2)\n', (14506, 14530), True, 'import numpy as np\n'), ((18436, 18448), 'gc.collect', 'gc.collect', ([], {}), '()\n', (18446, 18448), False, 'import gc\n'), ((21646, 21675), 'numpy.prod', 'np.prod', (["node[key + '_shape']"], {}), "(node[key + '_shape'])\n", (21653, 21675), True, 'import numpy as np\n'), ((15626, 15650), 'numpy.unpackbits', 'np.unpackbits', (['intersect'], {}), '(intersect)\n', (15639, 15650), True, 'import numpy as np\n'), ((15684, 15704), 'numpy.unpackbits', 'np.unpackbits', (['union'], {}), '(union)\n', (15697, 15704), True, 'import numpy as np\n'), ((19448, 19477), 'numpy.prod', 'np.prod', (["node[key + '_shape']"], {}), "(node[key + '_shape'])\n", (19455, 19477), True, 'import numpy as np\n'), ((19795, 19824), 'numpy.prod', 'np.prod', (["node[key + '_shape']"], {}), "(node[key + '_shape'])\n", (19802, 19824), True, 'import numpy as np\n'), ((7553, 7583), 'numpy.unpackbits', 'np.unpackbits', (['node_trace[key]'], {}), '(node_trace[key])\n', (7566, 7583), True, 'import numpy as np\n'), ((8120, 8150), 'numpy.unpackbits', 'np.unpackbits', (['node_trace[key]'], {}), '(node_trace[key])\n', (8133, 8150), True, 'import numpy as np\n'), ((8875, 8911), 'numpy.unpackbits', 'np.unpackbits', (['node_class_trace[key]'], {}), '(node_class_trace[key])\n', (8888, 8911), True, 'import numpy as np\n'), ((10380, 10404), 'numpy.unpackbits', 'np.unpackbits', (['intersect'], {}), '(intersect)\n', (10393, 10404), True, 'import numpy as np\n'), ((10440, 10464), 'numpy.unpackbits', 'np.unpackbits', (['trace_set'], {}), '(trace_set)\n', (10453, 10464), True, 'import numpy as np\n'), ((11679, 11715), 'numpy.unpackbits', 'np.unpackbits', (['node_class_trace[key]'], {}), '(node_class_trace[key])\n', (11692, 11715), True, 'import numpy as np\n'), ((14585, 14609), 'numpy.unpackbits', 'np.unpackbits', (['intersect'], {}), '(intersect)\n', (14598, 14609), True, 'import numpy as np\n'), ((14645, 14665), 'numpy.unpackbits', 'np.unpackbits', (['union'], {}), '(union)\n', (14658, 14665), True, 'import numpy as np\n'), ((19679, 19703), 'numpy.unpackbits', 'np.unpackbits', (['node[key]'], {}), '(node[key])\n', (19692, 19703), True, 'import numpy as np\n'), ((20311, 20340), 'numpy.prod', 'np.prod', (["node[key + '_shape']"], {}), "(node[key + '_shape'])\n", (20318, 20340), True, 'import numpy as np\n'), ((21176, 21216), 'numpy.unpackbits', 'np.unpackbits', (['node.attrs[TraceKey.EDGE]'], {}), '(node.attrs[TraceKey.EDGE])\n', (21189, 21216), True, 'import numpy as np\n'), ((21380, 21413), 'numpy.count_nonzero', 'np.count_nonzero', (['traced_edges[1]'], {}), '(traced_edges[1])\n', (21396, 21413), True, 'import numpy as np\n'), ((21416, 21446), 'numpy.count_nonzero', 'np.count_nonzero', (['traced_edges'], {}), '(traced_edges)\n', (21432, 21446), True, 'import numpy as np\n'), ((20267, 20291), 'numpy.unpackbits', 'np.unpackbits', (['node[key]'], {}), '(node[key])\n', (20280, 20291), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def filterClusters(clusters):
# Remove things with angle far from median
for i in range(len(clusters)):
cluster = clusters[i]
if len(cluster) > 9:
median = np.median([facelet[2] for facelet in cluster])
clusters[i] = [facelet for facelet in cluster if abs(facelet[2] - median) < 10]
    # Repeatedly drop the facelet whose removal maximizes the solidity of the
    # remaining contours, until only 9 objects are left
    for cluster in clusters:
        while len(cluster) > 9:
            maxSolidity = 0
            removeIndex = 0
            for index in range(len(cluster)):
                # stack every contour except the candidate into one point set
                c = np.vstack([cluster[i][0] for i in range(len(cluster)) if i != index])
                area = cv2.contourArea(c)
                hull = cv2.convexHull(c)
                hull_area = cv2.contourArea(hull)
                solidity = float(area) / hull_area
                if solidity > maxSolidity:
                    maxSolidity = solidity
                    removeIndex = index
            del cluster[removeIndex]
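def soliditySketch():
    # Hedged sketch (not part of the original file): "solidity" above is the
    # contour area divided by the convex-hull area, so a convex point set
    # scores exactly 1.0.
    square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
    area = cv2.contourArea(square)                      # 100.0
    hull_area = cv2.contourArea(cv2.convexHull(square))  # 100.0
    return float(area) / hull_area                      # 1.0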
"cv2.convexHull",
"numpy.median",
"cv2.contourArea"
] | [((227, 273), 'numpy.median', 'np.median', (['[facelet[2] for facelet in cluster]'], {}), '([facelet[2] for facelet in cluster])\n', (236, 273), True, 'import numpy as np\n'), ((742, 760), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (757, 760), False, 'import cv2\n'), ((784, 801), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (798, 801), False, 'import cv2\n'), ((830, 851), 'cv2.contourArea', 'cv2.contourArea', (['hull'], {}), '(hull)\n', (845, 851), False, 'import cv2\n')] |
import numpy as np
from gym import spaces
from gym import Env
class ObservedPointEnv(Env):
"""
point mass on a 2-D plane
four tasks: move to (-10, -10), (-10, 10), (10, -10), (10, 10)
Problem 1: augment the observation with a one-hot vector encoding the task ID
- change the dimension of the observation space
- augment the observation with a one-hot vector that encodes the task ID
"""
# ======================================================================== #
# ----------PROBLEM 1----------
# ======================================================================== #
# YOUR CODE SOMEWHERE HERE
def __init__(self, num_tasks=1, checkerboard_size=None):
self.tasks = [0, 1, 2, 3][:num_tasks]
self.task_idx = -1
self.reset_task()
self.reset()
self.observation_space = spaces.Box(low=-np.inf,
high=np.inf,
shape=(2 + num_tasks,),
dtype=np.float32)
self.action_space = spaces.Box(low=-0.1,
high=0.1,
shape=(2,),
dtype=np.float32)
def reset_task(self, is_evaluation=False):
# for evaluation, cycle deterministically through all tasks
if is_evaluation:
self.task_idx = (self.task_idx + 1) % len(self.tasks)
# during training, sample tasks randomly
else:
self.task_idx = np.random.randint(len(self.tasks))
self._task = self.tasks[self.task_idx]
goals = [[-1, -1], [-1, 1], [1, -1], [1, 1]]
self._goal = np.array(goals[self.task_idx])*10
def reset(self):
self._state = np.array([0, 0], dtype=np.float32)
return self._get_obs()
def _get_obs(self):
return np.hstack([self._state, np.eye(len(self.tasks))[self.task_idx]])
def step(self, action):
x, y = self._state
# compute reward, add penalty for large actions instead of clipping them
x -= self._goal[0]
y -= self._goal[1]
reward = - (x ** 2 + y ** 2) ** 0.5
# check if task is complete
done = abs(x) < 0.01 and abs(y) < 0.01
# move to next state
self._state = self._state + action
ob = self._get_obs()
return ob, reward, done, dict()
def viewer_setup(self):
print('no viewer')
pass
def render(self, mode='ansi'):
assert mode == 'ansi', 'Error: Human and rgb_array rendering is not supported yet.'
print('current state:', self._state)
def seed(self, seed=None):
assert seed is not None, 'Error: A seed has to be provided.'
        np.random.seed(seed)  # call the seeder; assignment would overwrite it
return [seed]
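# Hedged usage sketch (not part of the original file): a short random rollout
# showing the one-hot task ID appended to the 2-D state in each observation.
if __name__ == '__main__':
    env = ObservedPointEnv(num_tasks=4)
    ob = env.reset()  # shape (2 + num_tasks,): x, y, then the one-hot task ID
    for _ in range(5):
        ob, reward, done, _ = env.step(env.action_space.sample())
        if done:
            break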
| [
"numpy.array",
"gym.spaces.Box"
] | [((886, 964), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(2 + num_tasks,)', 'dtype': 'np.float32'}), '(low=-np.inf, high=np.inf, shape=(2 + num_tasks,), dtype=np.float32)\n', (896, 964), False, 'from gym import spaces\n'), ((1125, 1185), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-0.1)', 'high': '(0.1)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-0.1, high=0.1, shape=(2,), dtype=np.float32)\n', (1135, 1185), False, 'from gym import spaces\n'), ((1836, 1870), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (1844, 1870), True, 'import numpy as np\n'), ((1758, 1788), 'numpy.array', 'np.array', (['goals[self.task_idx]'], {}), '(goals[self.task_idx])\n', (1766, 1788), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name: mnist_softmax
   Description : softmax implementation for MNIST prediction,
                 using sklearn's one-hot encoding
                 Reference: https://ask.hellobi.com/blog/DataMiner/4897
Email : <EMAIL>
Date:18-1-13
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
def one_hot(x, dim):
temp = np.zeros(dim)
for index, value in enumerate(x):
temp[index, value] = 1
return temp
def model_train_test(x_data, y_target, lr=0.5, epoch_num=1000):
# model
X = tf.placeholder(data_type, shape=[None, n_features])
y = tf.placeholder(data_type, shape=[None, n_class])
W = tf.Variable(initial_value=tf.zeros([n_features, n_class]))
b = tf.Variable(initial_value=tf.zeros([n_class]))
y1 = tf.matmul(X, W) + b
y_pred = tf.nn.softmax(y1)
    # note the direction of the summation here: sum over the class axis
    # (axis=1), then average over the batch axis (axis=0)
loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred), axis=1), axis=0)
    # softmax_cross_entropy_with_logits already includes the softmax, so logits = y1
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y1))
goal = tf.train.GradientDescentOptimizer(lr).minimize(loss)
feed_dict = {X: x_data[0], y: y_target[0]}
feed_dict1 = {X: x_data[1], y: y_target[1]}
# accuracy
    # either y1 or y_pred works here: softmax is only a normalization and
    # does not change the ordering of the scores
    # argmax returns the index of the max entry
y_pred_label = tf.argmax(y_pred, axis=1)
y_label = tf.argmax(y, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y_pred_label, y_label), data_type))
# train
init = tf.global_variables_initializer()
loss_trace, acc_trace = [], []
with tf.Session() as sess:
sess.run(init)
for epoch in range(epoch_num):
sess.run(goal, feed_dict)
loss_t = sess.run(loss, feed_dict)
acc_t = sess.run(accuracy, feed_dict1)
loss_trace.append(loss_t)
acc_trace.append(acc_t)
print('{0:4d}, loss = {1:6f}, acc = {2:6f}'.format(epoch, loss_t, acc_t))
print('mean acc =', np.mean(acc_trace))
return loss_trace, acc_trace
if __name__ == '__main__':
    # load the data
data, target = load_digits(return_X_y=True)
n_samples, n_features = data.shape
n_class = 10
enc = OneHotEncoder(n_values=n_class, sparse=False)
    # the input must be a 2-D array
target = enc.fit_transform(target.reshape(-1, 1))
# or
# enc = OneHotEncoder(n_values=n_class)
    # # the input must be a 2-D array
# target = enc.fit_transform(target.reshape(-1, 1)).toarray()
print(target)
# or
# target = one_hot(target, dim=[n_samples, n_class])
data_type = tf.float32
scaler = StandardScaler().fit(data)
data = scaler.transform(data)
data_train, data_test, target_train, target_test = train_test_split(data, target, test_size=0.3, random_state=5)
loss_tc, acc_tc = model_train_test([data_train, data_test], [target_train, target_test], epoch_num=1500)
    # visualization
plt.style.use('seaborn-whitegrid')
fig, ax = plt.subplots()
ax.plot(loss_tc, label='train loss')
ax.plot(acc_tc, label='test accuracy')
ax.set(title='train loss and test accuracy', xlabel='epoch', ylabel='scale')
ax.legend(loc=0).set_visible(True)
plt.show()
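    # hedged sanity check (not part of the original script): the loss above
    # sums the cross-entropy over the class axis (axis=1), then averages it
    # over the batch axis; the toy values below are illustrative only
    y_true_demo = np.array([[1., 0.], [0., 1.]])
    y_prob_demo = np.array([[0.8, 0.2], [0.3, 0.7]])
    print(np.mean(-np.sum(y_true_demo * np.log(y_prob_demo), axis=1)))  # ~0.29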
| [
"tensorflow.equal",
"tensorflow.nn.softmax",
"tensorflow.log",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.Session",
"matplotlib.pyplot.style.use",
"tensorflow.matmul",
"tensorflow.zeros",
"sklearn.model_selection.train_test_split",
"tensorflow.train.GradientDescentOptimizer",
"matplot... | [((551, 564), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (559, 564), True, 'import numpy as np\n'), ((736, 787), 'tensorflow.placeholder', 'tf.placeholder', (['data_type'], {'shape': '[None, n_features]'}), '(data_type, shape=[None, n_features])\n', (750, 787), True, 'import tensorflow as tf\n'), ((796, 844), 'tensorflow.placeholder', 'tf.placeholder', (['data_type'], {'shape': '[None, n_class]'}), '(data_type, shape=[None, n_class])\n', (810, 844), True, 'import tensorflow as tf\n'), ((1009, 1026), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y1'], {}), '(y1)\n', (1022, 1026), True, 'import tensorflow as tf\n'), ((1563, 1588), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (1572, 1588), True, 'import tensorflow as tf\n'), ((1603, 1623), 'tensorflow.argmax', 'tf.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1612, 1623), True, 'import tensorflow as tf\n'), ((1731, 1764), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1762, 1764), True, 'import tensorflow as tf\n'), ((2329, 2357), 'sklearn.datasets.load_digits', 'load_digits', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (2340, 2357), False, 'from sklearn.datasets import load_digits\n'), ((2424, 2469), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'n_values': 'n_class', 'sparse': '(False)'}), '(n_values=n_class, sparse=False)\n', (2437, 2469), False, 'from sklearn.preprocessing import OneHotEncoder, StandardScaler\n'), ((2917, 2978), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.3)', 'random_state': '(5)'}), '(data, target, test_size=0.3, random_state=5)\n', (2933, 2978), False, 'from sklearn.model_selection import train_test_split\n'), ((3103, 3137), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (3116, 3137), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3166), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3164, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3375, 3385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3383, 3385), True, 'import matplotlib.pyplot as plt\n'), ((976, 991), 'tensorflow.matmul', 'tf.matmul', (['X', 'W'], {}), '(X, W)\n', (985, 991), True, 'import tensorflow as tf\n'), ((1809, 1821), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1819, 1821), True, 'import tensorflow as tf\n'), ((879, 910), 'tensorflow.zeros', 'tf.zeros', (['[n_features, n_class]'], {}), '([n_features, n_class])\n', (887, 910), True, 'import tensorflow as tf\n'), ((946, 965), 'tensorflow.zeros', 'tf.zeros', (['[n_class]'], {}), '([n_class])\n', (954, 965), True, 'import tensorflow as tf\n'), ((1304, 1341), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (1337, 1341), True, 'import tensorflow as tf\n'), ((1662, 1693), 'tensorflow.equal', 'tf.equal', (['y_pred_label', 'y_label'], {}), '(y_pred_label, y_label)\n', (1670, 1693), True, 'import tensorflow as tf\n'), ((2217, 2235), 'numpy.mean', 'np.mean', (['acc_trace'], {}), '(acc_trace)\n', (2224, 2235), True, 'import numpy as np\n'), ((2801, 2817), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2815, 2817), False, 'from sklearn.preprocessing import OneHotEncoder, StandardScaler\n'), ((1103, 1117), 'tensorflow.log', 'tf.log', (['y_pred'], {}), '(y_pred)\n', (1109, 1117), True, 'import tensorflow as 
tf\n')] |
#!/usr/bin/env python
import sys, os
import gdal
import numpy as np
from gdalconst import GDT_Float32
from osgeo import osr
from netCDF4 import Dataset
def createImgCAMS(inputPath):
driver = gdal.GetDriverByName('GTiff')
ncfile = Dataset(inputPath, 'r')
data = ncfile.variables['so2_conc'][:]
xSize = data.shape[3]
ySize = data.shape[2]
numBands = data.shape[1]
timestamps = data.shape[0]
lat = ncfile.variables['latitude'][:]
lon = ncfile.variables['longitude'][:]
day = ncfile.variables['time'].long_name.replace('ANALYSIS time from ', '')
ncfile.close()
outFileList = []
for t in range(timestamps):
filename = 'CAMS_SO2_' + day + '.' + str(t).zfill(2) + '0000.tif'
dst_ds = driver.Create(filename, xSize, ySize, numBands, GDT_Float32)
        # list comprehensions keep lat/lon indexable (map() is lazy in Python 3)
        lat = [x - 180 if x > 90 else x for x in lat]
        lon = [x - 360 if x > 180 else x for x in lon]
lower_left = [lon[0], lat[-1]]
upper_right = [lon[-1], lat[0]]
for i in range(numBands):
band = data[t,i,:,:]
dst_ds.GetRasterBand(i+1).WriteArray(band)
dst_ds.GetRasterBand(i+1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(i+1).ComputeStatistics(False)
pixelWidth = abs((lower_left[0]-upper_right[0]) / xSize)
pixelHeight = abs((lower_left[1]-upper_right[1]) / ySize)
dst_ds.SetGeoTransform([lower_left[0], pixelWidth, 0, upper_right[1], 0, -pixelHeight])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
time = day[:4] + '-' + day[4:6] + '-' + day[6:8] + 'T' + str(t).zfill(2) + ':00:00Z'
dst_ds.SetMetadataItem('TIME_START', str(time))
dst_ds.SetMetadataItem('TIME_END', str(time))
dst_ds.SetMetadataItem('GLOBAL_MIN', str(np.amin(data[:])))
dst_ds.SetMetadataItem('GLOBAL_MAX', str(np.amax(data[:])))
dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(1))
dst_ds = None
outFileList.append(filename)
return outFileList
if __name__ == '__main__':
    if len(sys.argv) < 2:
sys.exit('\nUsage: %s CAMS_file(s) \n' % sys.argv[0])
else:
if not os.path.exists(sys.argv[1]):
            sys.exit('\nERROR: Path %s was not found\n' % sys.argv[1])
inputPaths = sys.argv[1:]
for inputPath in inputPaths:
createImgCAMS(inputPath)
exit(0)
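# Hedged note (not part of the original script): the six-element geotransform
# set above maps pixel indices to map coordinates as
#   x = originX + col * pixelWidth
#   y = originY - row * pixelHeight
# which is why the fourth entry is the upper-right latitude (the top edge) and
# the pixel height is negated: row indices grow downward while latitude grows
# upward.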
| [
"os.path.exists",
"numpy.amin",
"gdal.GetDriverByName",
"osgeo.osr.SpatialReference",
"netCDF4.Dataset",
"sys.exit",
"numpy.amax"
] | [((197, 226), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (217, 226), False, 'import gdal\n'), ((245, 268), 'netCDF4.Dataset', 'Dataset', (['inputPath', '"""r"""'], {}), "(inputPath, 'r')\n", (252, 268), False, 'from netCDF4 import Dataset\n'), ((2320, 2375), 'sys.exit', 'sys.exit', (['("""\nUsage: %s CAMS_file(s) \n""" % sys.argv[0])'], {}), '("""\nUsage: %s CAMS_file(s) \n""" % sys.argv[0])\n', (2328, 2375), False, 'import sys, os\n'), ((1573, 1595), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (1593, 1595), False, 'from osgeo import osr\n'), ((2399, 2426), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2413, 2426), False, 'import sys, os\n'), ((2440, 2500), 'sys.exit', 'sys.exit', (['("""\nERROR: Path %s was not found\n""" % sys.argv[0])'], {}), '("""\nERROR: Path %s was not found\n""" % sys.argv[0])\n', (2448, 2500), False, 'import sys, os\n'), ((1982, 1998), 'numpy.amin', 'np.amin', (['data[:]'], {}), '(data[:])\n', (1989, 1998), True, 'import numpy as np\n'), ((2054, 2070), 'numpy.amax', 'np.amax', (['data[:]'], {}), '(data[:])\n', (2061, 2070), True, 'import numpy as np\n')] |
"""Methods for geodetic calculations."""
import os
import numpy
import srtm
import geopy
from geopy.distance import GeodesicDistance
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
RADIANS_TO_DEGREES = 180. / numpy.pi
DEGREES_TO_RADIANS = numpy.pi / 180
MIN_LATITUDE_DEG = -90.
MAX_LATITUDE_DEG = 90.
MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG = -180.
MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG = 180.
MIN_LONGITUDE_POSITIVE_IN_WEST_DEG = 0.
MAX_LONGITUDE_POSITIVE_IN_WEST_DEG = 360.
POSITIVE_LONGITUDE_ARG = 'positive'
NEGATIVE_LONGITUDE_ARG = 'negative'
EITHER_SIGN_LONGITUDE_ARG = 'either'
VALID_LONGITUDE_SIGN_ARGS = [
POSITIVE_LONGITUDE_ARG, NEGATIVE_LONGITUDE_ARG, EITHER_SIGN_LONGITUDE_ARG]
class ElevationFileHandler:
"""File-handler for elevation data.
This class mimics the class `FileHandler` in main.py of the `srtm` package.
"""
working_dir_name = ''
def __init__(self, working_dir_name=None):
"""Creates new instance.
:param working_dir_name: Path to working directory. Elevation files
will be read from here and, if necessary, downloaded to here. If
`working_dir_name is None`, will try to create subdirectory
".cache/srtm" in the home directory.
:raises: ValueError: if `working_dir_name is None` and this method
cannot create ".cache/srtm" in the home directory.
"""
if working_dir_name is None:
if 'HOME' in os.environ:
top_working_dir_name = os.environ['HOME']
elif 'HOMEPATH' in os.environ:
top_working_dir_name = os.environ['HOMEPATH']
else:
raise ValueError('Cannot find home directory.')
working_dir_name = '{0:s}/.cache/srtm'.format(top_working_dir_name)
file_system_utils.mkdir_recursive_if_necessary(
directory_name=working_dir_name)
self.working_dir_name = working_dir_name
def get_srtm_dir(self):
"""Returns path to working directory.
:return: working_dir_name: See doc for `__init__`.
"""
return self.working_dir_name
def exists(self, file_name):
"""Returns flag, indicating whether or not a file exists.
:param file_name: Pathless file name.
:return: does_file_exist: Boolean flag.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
return os.path.isfile(full_file_name)
def write(self, file_name, contents):
"""Writes elevation file to working directory.
:param file_name: Pathless file name.
:param contents: Stuff to be written.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
with open(full_file_name, 'wb') as f:
f.write(contents)
def read(self, file_name):
"""Reads elevation file from working directory.
:param file_name: Pathless file name.
:return: contents: Stuff contained in file.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
with open(full_file_name, 'rb') as f:
return f.read()
def _get_elevation(
latitude_deg, longitude_deg, srtm_data_object=None,
working_dir_name=None):
"""Gets elevation at a single point.
WARNING: Input longitudes in western hemisphere must be negative.
If `srtm_data_object is None`, it will be created on the fly.
:param latitude_deg: Latitude (deg N).
:param longitude_deg: Longitude (deg E).
:param srtm_data_object: Instance of `srtm.data.GeoElevationData`.
:param working_dir_name: See doc for `__init__` in class
`ElevationFileHandler`.
:return: elevation_m_asl: Elevation (metres above sea level).
:return: srtm_data_object: Instance of `srtm.data.GeoElevationData`.
"""
if srtm_data_object is None:
srtm_data_object = srtm.get_data(
file_handler=ElevationFileHandler(working_dir_name))
elevation_m_asl = srtm_data_object.get_elevation(
latitude=latitude_deg, longitude=longitude_deg)
# TODO(thunderhoser): I am concerned about this hack.
if elevation_m_asl is None:
elevation_m_asl = 0.
return elevation_m_asl, srtm_data_object
def find_invalid_latitudes(latitudes_deg):
"""Returns array indices of invalid latitudes.
:param latitudes_deg: 1-D numpy array of latitudes (deg N).
:return: invalid_indices: 1-D numpy array with array indices of invalid
latitudes.
"""
error_checking.assert_is_real_numpy_array(latitudes_deg)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
valid_flags = numpy.logical_and(
latitudes_deg >= MIN_LATITUDE_DEG, latitudes_deg <= MAX_LATITUDE_DEG)
return numpy.where(numpy.invert(valid_flags))[0]
def find_invalid_longitudes(
longitudes_deg, sign_in_western_hemisphere=POSITIVE_LONGITUDE_ARG):
"""Returns array indices of invalid longitudes.
:param longitudes_deg: 1-D numpy array of longitudes (deg E).
:param sign_in_western_hemisphere: Required sign in western hemisphere. May
be "positive", "negative", or "either".
:return: invalid_indices: 1-D numpy array with array indices of invalid
longitudes.
:raises: ValueError: if `sign_in_western_hemisphere` is not one of the 3
aforelisted options.
"""
error_checking.assert_is_real_numpy_array(longitudes_deg)
error_checking.assert_is_numpy_array(longitudes_deg, num_dimensions=1)
error_checking.assert_is_string(sign_in_western_hemisphere)
if sign_in_western_hemisphere == POSITIVE_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_POSITIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)
elif sign_in_western_hemisphere == NEGATIVE_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG)
elif sign_in_western_hemisphere == EITHER_SIGN_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)
else:
error_string = (
'\n\n{0:s}Valid options for `sign_in_western_hemisphere` are listed'
' above and do not include "{1:s}".'
).format(str(VALID_LONGITUDE_SIGN_ARGS), sign_in_western_hemisphere)
raise ValueError(error_string)
return numpy.where(numpy.invert(valid_flags))[0]
def get_latlng_centroid(latitudes_deg, longitudes_deg, allow_nan=True):
"""Finds centroid of lat-long points.
P = number of points
:param latitudes_deg: length-P numpy array of latitudes (deg N).
:param longitudes_deg: length-P numpy array of longitudes (deg E).
:param allow_nan: Boolean flag. If True, input arrays may contain NaN's
(however, NaN's must occur at the exact same positions in the two
arrays).
:return: centroid_lat_deg: Centroid latitude (deg N).
:return: centroid_lng_deg: Centroid longitude (deg E).
:raises: ValueError: if allow_nan = True but NaN's do not occur at the same
positions in the two arrays.
"""
error_checking.assert_is_boolean(allow_nan)
error_checking.assert_is_valid_lat_numpy_array(latitudes_deg, allow_nan)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
num_points = len(latitudes_deg)
longitudes_deg = lng_conversion.convert_lng_positive_in_west(
longitudes_deg, allow_nan)
error_checking.assert_is_numpy_array(
longitudes_deg, exact_dimensions=numpy.array([num_points]))
nan_latitude_indices = numpy.where(numpy.isnan(latitudes_deg))[0]
nan_longitude_indices = numpy.where(numpy.isnan(longitudes_deg))[0]
if not numpy.array_equal(nan_latitude_indices, nan_longitude_indices):
error_string = (
'\nNaN''s occur at the following positions in `latitudes_deg`:\n' +
str(nan_latitude_indices) +
'\nand the following positions in `longitudes_deg`:\n' +
str(nan_longitude_indices) +
'\nNaN''s should occur at the same positions in the two arrays.')
raise ValueError(error_string)
return numpy.nanmean(latitudes_deg), numpy.nanmean(longitudes_deg)
def get_elevations(latitudes_deg, longitudes_deg, working_dir_name=None):
"""Returns elevation of each point.
N = number of points
:param latitudes_deg: length-N numpy array of latitudes (deg N).
:param longitudes_deg: length-N numpy array of longitudes (deg E).
:param working_dir_name: See doc for `__init__` in class
`ElevationFileHandler`.
:return: elevations_m_asl: length-N numpy array of elevations (metres above
sea level).
"""
error_checking.assert_is_valid_lat_numpy_array(latitudes_deg)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
num_points = len(latitudes_deg)
longitudes_deg = lng_conversion.convert_lng_negative_in_west(
longitudes_deg, allow_nan=False)
error_checking.assert_is_numpy_array(
longitudes_deg, exact_dimensions=numpy.array([num_points]))
srtm_data_object = None
elevations_m_asl = numpy.full(num_points, numpy.nan)
for i in range(num_points):
elevations_m_asl[i], srtm_data_object = _get_elevation(
latitude_deg=latitudes_deg[i], longitude_deg=longitudes_deg[i],
srtm_data_object=srtm_data_object,
working_dir_name=working_dir_name)
return elevations_m_asl
def start_points_and_displacements_to_endpoints(
start_latitudes_deg, start_longitudes_deg, scalar_displacements_metres,
geodetic_bearings_deg):
"""Computes endpoint from each start point and displacement.
:param start_latitudes_deg: numpy array with latitudes (deg N) of start
points.
:param start_longitudes_deg: equivalent-size numpy array with longitudes
(deg E) of start points.
:param scalar_displacements_metres: equivalent-size numpy array of scalar
displacements.
:param geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
:return: end_latitudes_deg: equivalent-size numpy array with latitudes
(deg N) of endpoints.
:return: end_longitudes_deg: equivalent-size numpy array with longitudes
(deg E) of endpoints.
"""
error_checking.assert_is_valid_lat_numpy_array(
start_latitudes_deg, allow_nan=False)
start_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
start_longitudes_deg, allow_nan=False)
error_checking.assert_is_numpy_array(
start_longitudes_deg,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
error_checking.assert_is_geq_numpy_array(scalar_displacements_metres, 0.)
error_checking.assert_is_numpy_array(
scalar_displacements_metres,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
error_checking.assert_is_geq_numpy_array(geodetic_bearings_deg, 0.)
error_checking.assert_is_leq_numpy_array(geodetic_bearings_deg, 360.)
error_checking.assert_is_numpy_array(
geodetic_bearings_deg,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
end_latitudes_deg = numpy.full(start_latitudes_deg.shape, numpy.nan)
end_longitudes_deg = numpy.full(start_latitudes_deg.shape, numpy.nan)
num_points = start_latitudes_deg.size
for i in range(num_points):
this_start_point_object = geopy.Point(
start_latitudes_deg.flat[i], start_longitudes_deg.flat[i])
this_end_point_object = GeodesicDistance(
meters=scalar_displacements_metres.flat[i]).destination(
this_start_point_object, geodetic_bearings_deg.flat[i])
end_latitudes_deg.flat[i] = this_end_point_object.latitude
end_longitudes_deg.flat[i] = this_end_point_object.longitude
end_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
end_longitudes_deg, allow_nan=False)
return end_latitudes_deg, end_longitudes_deg
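def _destination_example_sketch():
    """Hedged example (not part of the original module): a single displacement
    of the kind used above; ~110.6 km due north of the origin lands near
    1 deg N.
    """
    start_point_object = geopy.Point(0., 0.)
    end_point_object = GeodesicDistance(meters=110574.).destination(
        start_point_object, 0.)
    return end_point_object.latitude, end_point_object.longitude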
def xy_to_scalar_displacements_and_bearings(
x_displacements_metres, y_displacements_metres):
"""For each displacement vector, converts x-y to magnitude and direction.
:param x_displacements_metres: numpy array of eastward displacements.
:param y_displacements_metres: equivalent-size numpy array of northward
displacements.
:return: scalar_displacements_metres: equivalent-size numpy array of total
displacements.
:return: geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
"""
error_checking.assert_is_numpy_array_without_nan(x_displacements_metres)
error_checking.assert_is_numpy_array_without_nan(y_displacements_metres)
error_checking.assert_is_numpy_array(
y_displacements_metres,
        exact_dimensions=numpy.array(x_displacements_metres.shape))
scalar_displacements_metres = numpy.sqrt(
x_displacements_metres ** 2 + y_displacements_metres ** 2)
standard_bearings_deg = RADIANS_TO_DEGREES * numpy.arctan2(
y_displacements_metres, x_displacements_metres)
return scalar_displacements_metres, standard_to_geodetic_angles(
standard_bearings_deg)
def scalar_displacements_and_bearings_to_xy(
scalar_displacements_metres, geodetic_bearings_deg):
"""For each displacement vector, converts magnitude and direction to x-y.
:param scalar_displacements_metres: numpy array of total displacements.
:param geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
:return: x_displacements_metres: equivalent-size numpy array of eastward
displacements.
:return: y_displacements_metres: equivalent-size numpy array of northward
displacements.
"""
error_checking.assert_is_geq_numpy_array(scalar_displacements_metres, 0.)
error_checking.assert_is_geq_numpy_array(geodetic_bearings_deg, 0.)
error_checking.assert_is_leq_numpy_array(geodetic_bearings_deg, 360.)
error_checking.assert_is_numpy_array(
geodetic_bearings_deg,
exact_dimensions=numpy.array(scalar_displacements_metres.shape))
standard_angles_radians = DEGREES_TO_RADIANS * geodetic_to_standard_angles(
geodetic_bearings_deg)
return (scalar_displacements_metres * numpy.cos(standard_angles_radians),
scalar_displacements_metres * numpy.sin(standard_angles_radians))
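def _bearing_round_trip_sketch():
    """Hedged check (not part of the original module): x = 3 and y = 4 give a
    5-metre displacement at geodetic bearing ~36.87 deg, and converting back
    recovers x = 3 and y = 4.
    """
    magnitudes_metres, bearings_deg = xy_to_scalar_displacements_and_bearings(
        numpy.array([3.]), numpy.array([4.]))
    return scalar_displacements_and_bearings_to_xy(
        magnitudes_metres, bearings_deg)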
def rotate_displacement_vectors(
x_displacements_metres, y_displacements_metres, ccw_rotation_angle_deg):
"""Rotates each displacement vector by a certain angle.
:param x_displacements_metres: numpy array of eastward displacements.
:param y_displacements_metres: equivalent-size numpy array of northward
displacements.
:param ccw_rotation_angle_deg: Rotation angle (degrees). Each displacement
vector will be rotated counterclockwise by this amount.
:return: x_prime_displacements_metres: equivalent-size numpy array of
"eastward" displacements (in the rotated coordinate system).
:return: y_prime_displacements_metres: equivalent-size numpy array of
"northward" displacements (in the rotated coordinate system).
"""
error_checking.assert_is_numpy_array_without_nan(x_displacements_metres)
error_checking.assert_is_numpy_array_without_nan(y_displacements_metres)
error_checking.assert_is_numpy_array(
y_displacements_metres,
        exact_dimensions=numpy.array(x_displacements_metres.shape))
error_checking.assert_is_greater(ccw_rotation_angle_deg, -360.)
error_checking.assert_is_less_than(ccw_rotation_angle_deg, 360.)
ccw_rotation_angle_rad = DEGREES_TO_RADIANS * ccw_rotation_angle_deg
rotation_matrix = numpy.array([
[numpy.cos(ccw_rotation_angle_rad), -numpy.sin(ccw_rotation_angle_rad)],
[numpy.sin(ccw_rotation_angle_rad), numpy.cos(ccw_rotation_angle_rad)]
])
x_prime_displacements_metres = numpy.full(
x_displacements_metres.shape, numpy.nan)
y_prime_displacements_metres = numpy.full(
x_displacements_metres.shape, numpy.nan)
num_points = x_prime_displacements_metres.size
for i in range(num_points):
this_vector = numpy.transpose(numpy.array(
[x_displacements_metres.flat[i], y_displacements_metres.flat[i]]))
this_vector = numpy.matmul(rotation_matrix, this_vector)
x_prime_displacements_metres.flat[i] = this_vector[0]
y_prime_displacements_metres.flat[i] = this_vector[1]
return x_prime_displacements_metres, y_prime_displacements_metres
def standard_to_geodetic_angles(standard_angles_deg):
"""Converts angles from standard to geodetic format.
"Standard format" = measured counterclockwise from due east
"Geodetic format" = measured clockwise from due north
:param standard_angles_deg: numpy array of standard angles (degrees).
:return: geodetic_angles_deg: equivalent-size numpy array of geodetic
angles.
"""
error_checking.assert_is_numpy_array_without_nan(standard_angles_deg)
return numpy.mod((450. - standard_angles_deg), 360.)
def geodetic_to_standard_angles(geodetic_angles_deg):
"""Converts angles from geodetic to standard format.
For the definitions of "geodetic format" and "standard format," see doc for
`standard_to_geodetic_angles`.
:param geodetic_angles_deg: numpy array of geodetic angles (degrees).
:return: standard_angles_deg: equivalent-size numpy array of standard
angles.
"""
error_checking.assert_is_numpy_array_without_nan(geodetic_angles_deg)
return numpy.mod((450. - geodetic_angles_deg), 360.)
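def _angle_convention_sketch():
    """Hedged check (not part of the original module): due east is 0 deg in
    standard form and 90 deg in geodetic form, and the conversion is its own
    inverse.
    """
    geodetic_angles_deg = standard_to_geodetic_angles(numpy.array([0., 90.]))
    return geodetic_to_standard_angles(geodetic_angles_deg)  # [0., 90.] again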
| [
"gewittergefahr.gg_utils.error_checking.assert_is_valid_lat_numpy_array",
"numpy.sqrt",
"gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan",
"gewittergefahr.gg_utils.error_checking.assert_is_real_numpy_array",
"numpy.invert",
"numpy.nanmean",
"numpy.array",
"numpy.arctan2",
"n... | [((4674, 4730), 'gewittergefahr.gg_utils.error_checking.assert_is_real_numpy_array', 'error_checking.assert_is_real_numpy_array', (['latitudes_deg'], {}), '(latitudes_deg)\n', (4715, 4730), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((4735, 4804), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array', 'error_checking.assert_is_numpy_array', (['latitudes_deg'], {'num_dimensions': '(1)'}), '(latitudes_deg, num_dimensions=1)\n', (4771, 4804), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((4824, 4915), 'numpy.logical_and', 'numpy.logical_and', (['(latitudes_deg >= MIN_LATITUDE_DEG)', '(latitudes_deg <= MAX_LATITUDE_DEG)'], {}), '(latitudes_deg >= MIN_LATITUDE_DEG, latitudes_deg <=\n MAX_LATITUDE_DEG)\n', (4841, 4915), False, 'import numpy\n'), ((5544, 5601), 'gewittergefahr.gg_utils.error_checking.assert_is_real_numpy_array', 'error_checking.assert_is_real_numpy_array', (['longitudes_deg'], {}), '(longitudes_deg)\n', (5585, 5601), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((5606, 5676), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array', 'error_checking.assert_is_numpy_array', (['longitudes_deg'], {'num_dimensions': '(1)'}), '(longitudes_deg, num_dimensions=1)\n', (5642, 5676), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((5681, 5740), 'gewittergefahr.gg_utils.error_checking.assert_is_string', 'error_checking.assert_is_string', (['sign_in_western_hemisphere'], {}), '(sign_in_western_hemisphere)\n', (5712, 5740), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((7485, 7528), 'gewittergefahr.gg_utils.error_checking.assert_is_boolean', 'error_checking.assert_is_boolean', (['allow_nan'], {}), '(allow_nan)\n', (7517, 7528), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((7533, 7605), 'gewittergefahr.gg_utils.error_checking.assert_is_valid_lat_numpy_array', 'error_checking.assert_is_valid_lat_numpy_array', (['latitudes_deg', 'allow_nan'], {}), '(latitudes_deg, allow_nan)\n', (7579, 7605), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((7610, 7679), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array', 'error_checking.assert_is_numpy_array', (['latitudes_deg'], {'num_dimensions': '(1)'}), '(latitudes_deg, num_dimensions=1)\n', (7646, 7679), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((7738, 7808), 'gewittergefahr.gg_utils.longitude_conversion.convert_lng_positive_in_west', 'lng_conversion.convert_lng_positive_in_west', (['longitudes_deg', 'allow_nan'], {}), '(longitudes_deg, allow_nan)\n', (7781, 7808), True, 'from gewittergefahr.gg_utils import longitude_conversion as lng_conversion\n'), ((9079, 9140), 'gewittergefahr.gg_utils.error_checking.assert_is_valid_lat_numpy_array', 'error_checking.assert_is_valid_lat_numpy_array', (['latitudes_deg'], {}), '(latitudes_deg)\n', (9125, 9140), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((9145, 9214), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array', 'error_checking.assert_is_numpy_array', (['latitudes_deg'], {'num_dimensions': '(1)'}), '(latitudes_deg, num_dimensions=1)\n', (9181, 9214), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((9273, 9349), 'gewittergefahr.gg_utils.longitude_conversion.convert_lng_negative_in_west', 'lng_conversion.convert_lng_negative_in_west', (['longitudes_deg'], {'allow_nan': '(False)'}), '(longitudes_deg, allow_nan=False)\n', (9316, 9349), True, 'from gewittergefahr.gg_utils import 
longitude_conversion as lng_conversion\n'), ((9521, 9554), 'numpy.full', 'numpy.full', (['num_points', 'numpy.nan'], {}), '(num_points, numpy.nan)\n', (9531, 9554), False, 'import numpy\n'), ((10775, 10863), 'gewittergefahr.gg_utils.error_checking.assert_is_valid_lat_numpy_array', 'error_checking.assert_is_valid_lat_numpy_array', (['start_latitudes_deg'], {'allow_nan': '(False)'}), '(start_latitudes_deg,\n allow_nan=False)\n', (10821, 10863), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((10897, 10984), 'gewittergefahr.gg_utils.longitude_conversion.convert_lng_positive_in_west', 'lng_conversion.convert_lng_positive_in_west', (['start_longitudes_deg'], {'allow_nan': '(False)'}), '(start_longitudes_deg, allow_nan\n =False)\n', (10940, 10984), True, 'from gewittergefahr.gg_utils import longitude_conversion as lng_conversion\n'), ((11131, 11205), 'gewittergefahr.gg_utils.error_checking.assert_is_geq_numpy_array', 'error_checking.assert_is_geq_numpy_array', (['scalar_displacements_metres', '(0.0)'], {}), '(scalar_displacements_metres, 0.0)\n', (11171, 11205), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((11354, 11422), 'gewittergefahr.gg_utils.error_checking.assert_is_geq_numpy_array', 'error_checking.assert_is_geq_numpy_array', (['geodetic_bearings_deg', '(0.0)'], {}), '(geodetic_bearings_deg, 0.0)\n', (11394, 11422), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((11426, 11496), 'gewittergefahr.gg_utils.error_checking.assert_is_leq_numpy_array', 'error_checking.assert_is_leq_numpy_array', (['geodetic_bearings_deg', '(360.0)'], {}), '(geodetic_bearings_deg, 360.0)\n', (11466, 11496), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((11659, 11707), 'numpy.full', 'numpy.full', (['start_latitudes_deg.shape', 'numpy.nan'], {}), '(start_latitudes_deg.shape, numpy.nan)\n', (11669, 11707), False, 'import numpy\n'), ((11733, 11781), 'numpy.full', 'numpy.full', (['start_latitudes_deg.shape', 'numpy.nan'], {}), '(start_latitudes_deg.shape, numpy.nan)\n', (11743, 11781), False, 'import numpy\n'), ((12329, 12414), 'gewittergefahr.gg_utils.longitude_conversion.convert_lng_positive_in_west', 'lng_conversion.convert_lng_positive_in_west', (['end_longitudes_deg'], {'allow_nan': '(False)'}), '(end_longitudes_deg, allow_nan=False\n )\n', (12372, 12414), True, 'from gewittergefahr.gg_utils import longitude_conversion as lng_conversion\n'), ((13108, 13180), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['x_displacements_metres'], {}), '(x_displacements_metres)\n', (13156, 13180), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((13185, 13257), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['y_displacements_metres'], {}), '(y_displacements_metres)\n', (13233, 13257), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((13435, 13504), 'numpy.sqrt', 'numpy.sqrt', (['(x_displacements_metres ** 2 + y_displacements_metres ** 2)'], {}), '(x_displacements_metres ** 2 + y_displacements_metres ** 2)\n', (13445, 13504), False, 'import numpy\n'), ((14379, 14453), 'gewittergefahr.gg_utils.error_checking.assert_is_geq_numpy_array', 'error_checking.assert_is_geq_numpy_array', (['scalar_displacements_metres', '(0.0)'], {}), '(scalar_displacements_metres, 0.0)\n', (14419, 14453), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((14457, 14525), 
'gewittergefahr.gg_utils.error_checking.assert_is_geq_numpy_array', 'error_checking.assert_is_geq_numpy_array', (['geodetic_bearings_deg', '(0.0)'], {}), '(geodetic_bearings_deg, 0.0)\n', (14497, 14525), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((14529, 14599), 'gewittergefahr.gg_utils.error_checking.assert_is_leq_numpy_array', 'error_checking.assert_is_leq_numpy_array', (['geodetic_bearings_deg', '(360.0)'], {}), '(geodetic_bearings_deg, 360.0)\n', (14569, 14599), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((15807, 15879), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['x_displacements_metres'], {}), '(x_displacements_metres)\n', (15855, 15879), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((15884, 15956), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['y_displacements_metres'], {}), '(y_displacements_metres)\n', (15932, 15956), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((16103, 16167), 'gewittergefahr.gg_utils.error_checking.assert_is_greater', 'error_checking.assert_is_greater', (['ccw_rotation_angle_deg', '(-360.0)'], {}), '(ccw_rotation_angle_deg, -360.0)\n', (16135, 16167), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((16171, 16236), 'gewittergefahr.gg_utils.error_checking.assert_is_less_than', 'error_checking.assert_is_less_than', (['ccw_rotation_angle_deg', '(360.0)'], {}), '(ccw_rotation_angle_deg, 360.0)\n', (16205, 16236), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((16549, 16600), 'numpy.full', 'numpy.full', (['x_displacements_metres.shape', 'numpy.nan'], {}), '(x_displacements_metres.shape, numpy.nan)\n', (16559, 16600), False, 'import numpy\n'), ((16645, 16696), 'numpy.full', 'numpy.full', (['x_displacements_metres.shape', 'numpy.nan'], {}), '(x_displacements_metres.shape, numpy.nan)\n', (16655, 16696), False, 'import numpy\n'), ((17594, 17663), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['standard_angles_deg'], {}), '(standard_angles_deg)\n', (17642, 17663), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((17675, 17720), 'numpy.mod', 'numpy.mod', (['(450.0 - standard_angles_deg)', '(360.0)'], {}), '(450.0 - standard_angles_deg, 360.0)\n', (17684, 17720), False, 'import numpy\n'), ((18128, 18197), 'gewittergefahr.gg_utils.error_checking.assert_is_numpy_array_without_nan', 'error_checking.assert_is_numpy_array_without_nan', (['geodetic_angles_deg'], {}), '(geodetic_angles_deg)\n', (18176, 18197), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((18209, 18254), 'numpy.mod', 'numpy.mod', (['(450.0 - geodetic_angles_deg)', '(360.0)'], {}), '(450.0 - geodetic_angles_deg, 360.0)\n', (18218, 18254), False, 'import numpy\n'), ((1926, 2005), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'working_dir_name'}), '(directory_name=working_dir_name)\n', (1972, 2005), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((2554, 2584), 'os.path.isfile', 'os.path.isfile', (['full_file_name'], {}), '(full_file_name)\n', (2568, 2584), False, 'import os\n'), ((5825, 5955), 'numpy.logical_and', 'numpy.logical_and', (['(longitudes_deg >= MIN_LONGITUDE_POSITIVE_IN_WEST_DEG)', 
'(longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)'], {}), '(longitudes_deg >= MIN_LONGITUDE_POSITIVE_IN_WEST_DEG, \n longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)\n', (5842, 5955), False, 'import numpy\n'), ((8082, 8144), 'numpy.array_equal', 'numpy.array_equal', (['nan_latitude_indices', 'nan_longitude_indices'], {}), '(nan_latitude_indices, nan_longitude_indices)\n', (8099, 8144), False, 'import numpy\n'), ((8530, 8558), 'numpy.nanmean', 'numpy.nanmean', (['latitudes_deg'], {}), '(latitudes_deg)\n', (8543, 8558), False, 'import numpy\n'), ((8560, 8589), 'numpy.nanmean', 'numpy.nanmean', (['longitudes_deg'], {}), '(longitudes_deg)\n', (8573, 8589), False, 'import numpy\n'), ((11891, 11961), 'geopy.Point', 'geopy.Point', (['start_latitudes_deg.flat[i]', 'start_longitudes_deg.flat[i]'], {}), '(start_latitudes_deg.flat[i], start_longitudes_deg.flat[i])\n', (11902, 11961), False, 'import geopy\n'), ((13563, 13624), 'numpy.arctan2', 'numpy.arctan2', (['y_displacements_metres', 'x_displacements_metres'], {}), '(y_displacements_metres, x_displacements_metres)\n', (13576, 13624), False, 'import numpy\n'), ((16942, 16984), 'numpy.matmul', 'numpy.matmul', (['rotation_matrix', 'this_vector'], {}), '(rotation_matrix, this_vector)\n', (16954, 16984), False, 'import numpy\n'), ((4944, 4969), 'numpy.invert', 'numpy.invert', (['valid_flags'], {}), '(valid_flags)\n', (4956, 4969), False, 'import numpy\n'), ((6061, 6191), 'numpy.logical_and', 'numpy.logical_and', (['(longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG)', '(longitudes_deg <= MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG)'], {}), '(longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG, \n longitudes_deg <= MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG)\n', (6078, 6191), False, 'import numpy\n'), ((6757, 6782), 'numpy.invert', 'numpy.invert', (['valid_flags'], {}), '(valid_flags)\n', (6769, 6782), False, 'import numpy\n'), ((7901, 7926), 'numpy.array', 'numpy.array', (['[num_points]'], {}), '([num_points])\n', (7912, 7926), False, 'import numpy\n'), ((7968, 7994), 'numpy.isnan', 'numpy.isnan', (['latitudes_deg'], {}), '(latitudes_deg)\n', (7979, 7994), False, 'import numpy\n'), ((8039, 8066), 'numpy.isnan', 'numpy.isnan', (['longitudes_deg'], {}), '(longitudes_deg)\n', (8050, 8066), False, 'import numpy\n'), ((9442, 9467), 'numpy.array', 'numpy.array', (['[num_points]'], {}), '([num_points])\n', (9453, 9467), False, 'import numpy\n'), ((11086, 11124), 'numpy.array', 'numpy.array', (['start_latitudes_deg.shape'], {}), '(start_latitudes_deg.shape)\n', (11097, 11124), False, 'import numpy\n'), ((11309, 11347), 'numpy.array', 'numpy.array', (['start_latitudes_deg.shape'], {}), '(start_latitudes_deg.shape)\n', (11320, 11347), False, 'import numpy\n'), ((11594, 11632), 'numpy.array', 'numpy.array', (['start_latitudes_deg.shape'], {}), '(start_latitudes_deg.shape)\n', (11605, 11632), False, 'import numpy\n'), ((13357, 13398), 'numpy.array', 'numpy.array', (['y_displacements_metres.shape'], {}), '(y_displacements_metres.shape)\n', (13368, 13398), False, 'import numpy\n'), ((14697, 14743), 'numpy.array', 'numpy.array', (['scalar_displacements_metres.shape'], {}), '(scalar_displacements_metres.shape)\n', (14708, 14743), False, 'import numpy\n'), ((14899, 14933), 'numpy.cos', 'numpy.cos', (['standard_angles_radians'], {}), '(standard_angles_radians)\n', (14908, 14933), False, 'import numpy\n'), ((14977, 15011), 'numpy.sin', 'numpy.sin', (['standard_angles_radians'], {}), '(standard_angles_radians)\n', (14986, 15011), False, 'import numpy\n'), ((16056, 16097), 
'numpy.array', 'numpy.array', (['y_displacements_metres.shape'], {}), '(y_displacements_metres.shape)\n', (16067, 16097), False, 'import numpy\n'), ((16828, 16905), 'numpy.array', 'numpy.array', (['[x_displacements_metres.flat[i], y_displacements_metres.flat[i]]'], {}), '([x_displacements_metres.flat[i], y_displacements_metres.flat[i]])\n', (16839, 16905), False, 'import numpy\n'), ((6300, 6430), 'numpy.logical_and', 'numpy.logical_and', (['(longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG)', '(longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)'], {}), '(longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG, \n longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)\n', (6317, 6430), False, 'import numpy\n'), ((12007, 12067), 'geopy.distance.GeodesicDistance', 'GeodesicDistance', ([], {'meters': 'scalar_displacements_metres.flat[i]'}), '(meters=scalar_displacements_metres.flat[i])\n', (12023, 12067), False, 'from geopy.distance import GeodesicDistance\n'), ((16355, 16388), 'numpy.cos', 'numpy.cos', (['ccw_rotation_angle_rad'], {}), '(ccw_rotation_angle_rad)\n', (16364, 16388), False, 'import numpy\n'), ((16436, 16469), 'numpy.sin', 'numpy.sin', (['ccw_rotation_angle_rad'], {}), '(ccw_rotation_angle_rad)\n', (16445, 16469), False, 'import numpy\n'), ((16471, 16504), 'numpy.cos', 'numpy.cos', (['ccw_rotation_angle_rad'], {}), '(ccw_rotation_angle_rad)\n', (16480, 16504), False, 'import numpy\n'), ((16391, 16424), 'numpy.sin', 'numpy.sin', (['ccw_rotation_angle_rad'], {}), '(ccw_rotation_angle_rad)\n', (16400, 16424), False, 'import numpy\n')] |
""" This file defines the main object that runs experiments. """
import logging
import imp
import os
import os.path
import sys
import copy
import argparse
import threading
import time
import traceback
import matplotlib as mpl
sys.path.append('/'.join(str.split(__file__, '/')[:-2]))
# Add gps/python to path so that imports work.
from gps.utility.data_logger import DataLogger
from gps.sample.sample_list import SampleList
from gps.gui.gps_training_gui import GPSTrainingGUI
mpl.use('Qt4Agg')
class GPSMain(object):
""" Main class to run algorithms and experiments. """
def __init__(self, config, quit_on_end=False):
"""
Initialize GPSMain
Args:
config: Hyperparameters for experiment
quit_on_end: When true, quit automatically on completion
"""
self._quit_on_end = quit_on_end
self._hyperparams = config
self._conditions = config['common']['conditions']
#self._condition = 1
if 'train_conditions' in config['common']:
#False
self._train_idx = config['common']['train_conditions']
self._test_idx = config['common']['test_conditions']
else:
self._train_idx = range(self._conditions)
config['common']['train_conditions'] = config['common']['conditions']
            #create a new 'train_conditions' key in the common dict from 'conditions'
            self._hyperparams = config
            #reassign the hyperparameters because the config was changed
            self._test_idx = self._train_idx
            #reuse the train indices as the test indices
self._data_files_dir = config['common']['data_files_dir']
        #get the data-file path stored in the common dict
        self.agent = config['agent']['type'](config['agent'])
        #instantiate the agent object from the agent config
        #print(self.agent,'self.agent')
        self.data_logger = DataLogger()
        #the GUI is built from the common config when gui_on is set
        self.gui = GPSTrainingGUI(config['common']) if config['gui_on'] else None
        #the config is mutated again: the agent object is added to the dict
config['algorithm']['agent'] = self.agent
self.algorithm = config['algorithm']['type'](config['algorithm'])
#print(config['algorithm']['type'](config['algorithm']),'self.algo')
# gps.algorithm.algorithm_traj_opt.AlgorithmTrajOpt is the algorithm which is used
def run(self, itr_load=None):
"""
Run training by iteratively sampling and taking an iteration.
Args:
itr_load: If specified, loads algorithm state from that
iteration, and resumes training at the next iteration.
Returns: None
"""
        #this is the entry point invoked from the main script.
        try:
            #self._initialize opens the GUI (if enabled) and resolves the
            #starting iteration: the user-supplied itr_load becomes itr_start
itr_start = self._initialize(itr_load)
#print(itr_start,'iteration start',self._initialize,'this is to initialize some')
for itr in range(itr_start, self._hyperparams['iterations']):
                #the loop starts from the iteration resolved above and ends at
                #the 'iterations' value in the hyperparams config (5 in this
                #case)
for cond in self._train_idx:
                    # Loop over the conditions in the training index
                    # (e.g. a single condition, 0, in this case).
for i in range(self._hyperparams['num_samples']):
                    # num_samples rollouts per condition (5 here).
print('wow wow wow wow wow wow wow wow')
self._take_sample(itr, cond, i)
traj_sample_lists = [
                # get_samples is defined on the Agent superclass and returns a SampleList.
self.agent.get_samples(cond, -self._hyperparams['num_samples'])
for cond in self._train_idx
]
            print(traj_sample_lists, 'traj_sample_lists')
# Clear agent samples.
#this function is in the agent superclass.
self.agent.clear_samples()
self._take_iteration(itr, traj_sample_lists)
pol_sample_lists = self._take_policy_samples()
self._log_data(itr, traj_sample_lists, pol_sample_lists)
except Exception as e:
traceback.print_exception(*sys.exc_info())
finally:
self._end()
def test_policy(self, itr, N):
"""
Take N policy samples of the algorithm state at iteration itr,
for testing the policy to see how it is behaving.
(Called directly from the command line --policy flag).
Args:
itr: the iteration from which to take policy samples
N: the number of policy samples to take
Returns: None
"""
algorithm_file = self._data_files_dir + 'algorithm_itr_%02d.pkl' % itr
self.algorithm = self.data_logger.unpickle(algorithm_file)
if self.algorithm is None:
print("Error: cannot find '%s.'" % algorithm_file)
            os._exit(1) # called instead of sys.exit(), since this is in a thread
traj_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('traj_sample_itr_%02d.pkl' % itr))
pol_sample_lists = self._take_policy_samples(N)
self.data_logger.pickle(
self._data_files_dir + ('pol_sample_itr_%02d.pkl' % itr),
copy.copy(pol_sample_lists)
)
if self.gui:
self.gui.update(itr, self.algorithm, self.agent,
traj_sample_lists, pol_sample_lists)
self.gui.set_status_text(('Took %d policy sample(s) from ' +
'algorithm state at iteration %d.\n' +
'Saved to: data_files/pol_sample_itr_%02d.pkl.\n') % (N, itr, itr))
def _initialize(self, itr_load):
"""
Initialize from the specified iteration.
Args:
itr_load: If specified, loads algorithm state from that
iteration, and resumes training at the next iteration.
Returns:
itr_start: Iteration to start from.
"""
if itr_load is None:
if self.gui:
self.gui.set_status_text('Press \'go\' to begin.')
return 0
else:
algorithm_file = self._data_files_dir + 'algorithm_itr_%02d.pkl' % itr_load
self.algorithm = self.data_logger.unpickle(algorithm_file)
if self.algorithm is None:
print("Error: cannot find '%s.'" % algorithm_file)
os._exit(1) # called instead of sys.exit(), since this is in a thread
if self.gui:
traj_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('traj_sample_itr_%02d.pkl' % itr_load))
if self.algorithm.cur[0].pol_info:
pol_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('pol_sample_itr_%02d.pkl' % itr_load))
else:
pol_sample_lists = None
self.gui.set_status_text(
('Resuming training from algorithm state at iteration %d.\n' +
'Press \'go\' to begin.') % itr_load)
return itr_load + 1
def _take_sample(self, itr, cond, i):
"""
Collect a sample from the agent.
Args:
itr: Iteration number.
cond: Condition number.
i: Sample number.
Returns: None
"""
if self.algorithm._hyperparams['sample_on_policy'] \
and self.algorithm.iteration_count > 0:
print(self.algorithm.iteration_count)
pol = self.algorithm.policy_opt.policy
else:
#print(self.algorithm.iteration_count)
pol = self.algorithm.cur[cond].traj_distr
            print(self.algorithm.cur[cond].traj_distr, 'traj_distr for this condition')
#print(self.algorithm.cur,'this is the pol',cond,'cond')
if self.gui:
self.gui.set_image_overlays(cond) # Must call for each new cond.
redo = True
while redo:
while self.gui.mode in ('wait', 'request', 'process'):
if self.gui.mode in ('wait', 'process'):
time.sleep(0.01)
continue
# 'request' mode.
if self.gui.request == 'reset':
try:
self.agent.reset(cond)
except NotImplementedError:
self.gui.err_msg = 'Agent reset unimplemented.'
elif self.gui.request == 'fail':
self.gui.err_msg = 'Cannot fail before sampling.'
self.gui.process_mode() # Complete request.
self.gui.set_status_text(
'Sampling: iteration %d, condition %d, sample %d.' %
(itr, cond, i)
)
            # Sampling is delegated to the agent (e.g. AgentBox2D) via the Agent superclass.
self.agent.sample(
pol, cond,
verbose=(i < self._hyperparams['verbose_trials'])
)
if self.gui.mode == 'request' and self.gui.request == 'fail':
redo = True
self.gui.process_mode()
self.agent.delete_last_sample(cond)
else:
redo = False
else:
self.agent.sample(
pol, cond,
verbose=(i < self._hyperparams['verbose_trials'])
)
def _take_iteration(self, itr, sample_lists):
"""
Take an iteration of the algorithm.
Args:
itr: Iteration number.
Returns: None
"""
if self.gui:
self.gui.set_status_text('Calculating.')
self.gui.start_display_calculating()
        # iteration() is defined on the Algorithm parent class in the gps
        # directory; sample_lists is the data collected from the rollouts.
self.algorithm.iteration(sample_lists)
if self.gui:
self.gui.stop_display_calculating()
def _take_policy_samples(self, N=None):
"""
Take samples from the policy to see how it's doing.
Args:
N : number of policy samples to take per condition
Returns: None
"""
if 'verbose_policy_trials' not in self._hyperparams:
# AlgorithmTrajOpt
return None
verbose = self._hyperparams['verbose_policy_trials']
if self.gui:
self.gui.set_status_text('Taking policy samples.')
pol_samples = [[None] for _ in range(len(self._test_idx))]
# Since this isn't noisy, just take one sample.
# TODO: Make this noisy? Add hyperparam?
# TODO: Take at all conditions for GUI?
for cond in range(len(self._test_idx)):
pol_samples[cond][0] = self.agent.sample(
self.algorithm.policy_opt.policy, self._test_idx[cond],
verbose=verbose, save=False, noisy=False)
return [SampleList(samples) for samples in pol_samples]
def _log_data(self, itr, traj_sample_lists, pol_sample_lists=None):
"""
Log data and algorithm, and update the GUI.
Args:
itr: Iteration number.
traj_sample_lists: trajectory samples as SampleList object
pol_sample_lists: policy samples as SampleList object
Returns: None
"""
if self.gui:
self.gui.set_status_text('Logging data and updating GUI.')
self.gui.update(itr, self.algorithm, self.agent,
traj_sample_lists, pol_sample_lists)
self.gui.save_figure(
self._data_files_dir + ('figure_itr_%02d.png' % itr)
)
if 'no_sample_logging' in self._hyperparams['common']:
return
self.data_logger.pickle(
self._data_files_dir + ('algorithm_itr_%02d.pkl' % itr),
copy.copy(self.algorithm)
)
self.data_logger.pickle(
self._data_files_dir + ('traj_sample_itr_%02d.pkl' % itr),
copy.copy(traj_sample_lists)
)
if pol_sample_lists:
self.data_logger.pickle(
self._data_files_dir + ('pol_sample_itr_%02d.pkl' % itr),
copy.copy(pol_sample_lists)
)
def _end(self):
""" Finish running and exit. """
if self.gui:
self.gui.set_status_text('Training complete.')
self.gui.end_mode()
if self._quit_on_end:
# Quit automatically (for running sequential expts)
os._exit(1)
def main():
""" Main function to be run. """
parser = argparse.ArgumentParser(description='Run the Guided Policy Search algorithm.')
parser.add_argument('experiment', type=str,
help='experiment name')
parser.add_argument('-n', '--new', action='store_true',
help='create new experiment')
parser.add_argument('-t', '--targetsetup', action='store_true',
help='run target setup')
parser.add_argument('-r', '--resume', metavar='N', type=int,
help='resume training from iter N')
parser.add_argument('-p', '--policy', metavar='N', type=int,
help='take N policy samples (for BADMM/MDGPS only)')
parser.add_argument('-s', '--silent', action='store_true',
help='silent debug print outs')
parser.add_argument('-q', '--quit', action='store_true',
help='quit GUI automatically when finished')
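    # Illustrative invocations (script and experiment names are hypothetical):
    #   python gps_main.py box2d_example          # train from scratch
    #   python gps_main.py box2d_example -r 4     # resume from iteration 4
    #   python gps_main.py box2d_example -p 3     # take 3 policy samples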
args = parser.parse_args()
    # args holds the options parsed from the terminal.
exp_name = args.experiment
resume_training_itr = args.resume
test_policy_N = args.policy
from gps import __file__ as gps_filepath
    # __file__ of the gps package (its __init__.py).
gps_filepath = os.path.abspath(gps_filepath)
gps_dir = '/'.join(str.split(gps_filepath, '/')[:-3]) + '/'
    # Root directory of the repository.
exp_dir = gps_dir + 'experiments/' + exp_name + '/'
hyperparams_file = exp_dir + 'hyperparams.py'
    # Hyperparameters live in experiments/<exp_name>/hyperparams.py.
if args.silent:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
else:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
if args.new:
from shutil import copy
if os.path.exists(exp_dir):
sys.exit("Experiment '%s' already exists.\nPlease remove '%s'." %
(exp_name, exp_dir))
os.makedirs(exp_dir)
prev_exp_file = '.previous_experiment'
prev_exp_dir = None
try:
with open(prev_exp_file, 'r') as f:
prev_exp_dir = f.readline()
copy(prev_exp_dir + 'hyperparams.py', exp_dir)
if os.path.exists(prev_exp_dir + 'targets.npz'):
copy(prev_exp_dir + 'targets.npz', exp_dir)
except IOError as e:
with open(hyperparams_file, 'w') as f:
f.write('# To get started, copy over hyperparams from another experiment.\n' +
'# Visit rll.berkeley.edu/gps/hyperparams.html for documentation.')
with open(prev_exp_file, 'w') as f:
f.write(exp_dir)
exit_msg = ("Experiment '%s' created.\nhyperparams file: '%s'" %
(exp_name, hyperparams_file))
if prev_exp_dir and os.path.exists(prev_exp_dir):
exit_msg += "\ncopied from : '%shyperparams.py'" % prev_exp_dir
sys.exit(exit_msg)
if not os.path.exists(hyperparams_file):
sys.exit("Experiment '%s' does not exist.\nDid you create '%s'?" %
(exp_name, hyperparams_file))
hyperparams = imp.load_source('hyperparams', hyperparams_file)
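    # imp.load_source executes hyperparams.py and returns it as a module,
    # so hyperparams.config below is the experiment's config dict.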
if args.targetsetup:
try:
import matplotlib.pyplot as plt
from gps.agent.ros.agent_ros import AgentROS
from gps.gui.target_setup_gui import TargetSetupGUI
agent = AgentROS(hyperparams.config['agent'])
TargetSetupGUI(hyperparams.config['common'], agent)
plt.ioff()
plt.show()
except ImportError:
sys.exit('ROS required for target setup.')
elif test_policy_N:
import random
import numpy as np
import matplotlib.pyplot as plt
seed = hyperparams.config.get('random_seed', 0)
random.seed(seed)
np.random.seed(seed)
data_files_dir = exp_dir + 'data_files/'
data_filenames = os.listdir(data_files_dir)
algorithm_prefix = 'algorithm_itr_'
algorithm_filenames = [f for f in data_filenames if f.startswith(algorithm_prefix)]
current_algorithm = sorted(algorithm_filenames, reverse=True)[0]
current_itr = int(current_algorithm[len(algorithm_prefix):len(algorithm_prefix)+2])
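        # e.g. 'algorithm_itr_07.pkl' -> 7 (the two zero-padded digits after the prefix).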
gps = GPSMain(hyperparams.config)
if hyperparams.config['gui_on']:
test_policy = threading.Thread(
target=lambda: gps.test_policy(itr=current_itr, N=test_policy_N)
)
test_policy.daemon = True
test_policy.start()
plt.ioff()
plt.show()
else:
gps.test_policy(itr=current_itr, N=test_policy_N)
else:
import random
import numpy as np
import matplotlib.pyplot as plt
seed = hyperparams.config.get('random_seed', 0)
random.seed(seed)
np.random.seed(seed)
        # GPSMain receives the hyperparameter dictionary from the experiment's
        # hyperparams file; see the docs for the available settings:
        # https://github.com/cbfinn/gps/blob/master/docs/hyperparams.md
gps = GPSMain(hyperparams.config, args.quit)
if hyperparams.config['gui_on']:
run_gps = threading.Thread(
target=lambda: gps.run(itr_load=resume_training_itr)
)
run_gps.daemon = True
run_gps.start()
plt.ioff()
plt.show()
else:
gps.run(itr_load=resume_training_itr)
if __name__ == "__main__":
main()
| [
"imp.load_source",
"time.sleep",
"sys.exc_info",
"sys.exit",
"os.path.exists",
"gps.utility.data_logger.DataLogger",
"os.listdir",
"argparse.ArgumentParser",
"gps.agent.ros.agent_ros.AgentROS",
"numpy.random.seed",
"gps.gui.gps_training_gui.GPSTrainingGUI",
"matplotlib.use",
"matplotlib.pypl... | [((481, 498), 'matplotlib.use', 'mpl.use', (['"""Qt4Agg"""'], {}), "('Qt4Agg')\n", (488, 498), True, 'import matplotlib as mpl\n'), ((13238, 13316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the Guided Policy Search algorithm."""'}), "(description='Run the Guided Policy Search algorithm.')\n", (13261, 13316), False, 'import argparse\n'), ((14465, 14494), 'os.path.abspath', 'os.path.abspath', (['gps_filepath'], {}), '(gps_filepath)\n', (14480, 14494), False, 'import os\n'), ((16551, 16599), 'imp.load_source', 'imp.load_source', (['"""hyperparams"""', 'hyperparams_file'], {}), "('hyperparams', hyperparams_file)\n", (16566, 16599), False, 'import imp\n'), ((1944, 1956), 'gps.utility.data_logger.DataLogger', 'DataLogger', ([], {}), '()\n', (1954, 1956), False, 'from gps.utility.data_logger import DataLogger\n'), ((14967, 15042), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(message)s', level=logging.INFO)\n", (14986, 15042), False, 'import logging\n'), ((15061, 15137), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n", (15080, 15137), False, 'import logging\n'), ((15200, 15223), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (15214, 15223), False, 'import os\n'), ((15353, 15373), 'os.makedirs', 'os.makedirs', (['exp_dir'], {}), '(exp_dir)\n', (15364, 15373), False, 'import os\n'), ((16345, 16363), 'sys.exit', 'sys.exit', (['exit_msg'], {}), '(exit_msg)\n', (16353, 16363), False, 'import sys\n'), ((16376, 16408), 'os.path.exists', 'os.path.exists', (['hyperparams_file'], {}), '(hyperparams_file)\n', (16390, 16408), False, 'import os\n'), ((16418, 16522), 'sys.exit', 'sys.exit', (['("""Experiment \'%s\' does not exist.\nDid you create \'%s\'?""" % (exp_name,\n hyperparams_file))'], {}), '("""Experiment \'%s\' does not exist.\nDid you create \'%s\'?""" % (\n exp_name, hyperparams_file))\n', (16426, 16522), False, 'import sys\n'), ((2017, 2049), 'gps.gui.gps_training_gui.GPSTrainingGUI', 'GPSTrainingGUI', (["config['common']"], {}), "(config['common'])\n", (2031, 2049), False, 'from gps.gui.gps_training_gui import GPSTrainingGUI\n'), ((5317, 5328), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (5325, 5328), False, 'import os\n'), ((5666, 5693), 'shutil.copy.copy', 'copy.copy', (['pol_sample_lists'], {}), '(pol_sample_lists)\n', (5675, 5693), False, 'from shutil import copy\n'), ((11558, 11577), 'gps.sample.sample_list.SampleList', 'SampleList', (['samples'], {}), '(samples)\n', (11568, 11577), False, 'from gps.sample.sample_list import SampleList\n'), ((12482, 12507), 'shutil.copy.copy', 'copy.copy', (['self.algorithm'], {}), '(self.algorithm)\n', (12491, 12507), False, 'from shutil import copy\n'), ((12634, 12662), 'shutil.copy.copy', 'copy.copy', (['traj_sample_lists'], {}), '(traj_sample_lists)\n', (12643, 12662), False, 'from shutil import copy\n'), ((15237, 15331), 'sys.exit', 'sys.exit', (['("""Experiment \'%s\' already exists.\nPlease remove \'%s\'.""" % (exp_name,\n exp_dir))'], {}), '("""Experiment \'%s\' already exists.\nPlease remove \'%s\'.""" % (\n exp_name, exp_dir))\n', (15245, 15331), False, 'import sys\n'), ((15567, 15613), 'shutil.copy', 'copy', (["(prev_exp_dir + 'hyperparams.py')", 'exp_dir'], {}), "(prev_exp_dir + 'hyperparams.py', exp_dir)\n", (15571, 
15613), False, 'from shutil import copy\n'), ((15629, 15673), 'os.path.exists', 'os.path.exists', (["(prev_exp_dir + 'targets.npz')"], {}), "(prev_exp_dir + 'targets.npz')\n", (15643, 15673), False, 'import os\n'), ((16227, 16255), 'os.path.exists', 'os.path.exists', (['prev_exp_dir'], {}), '(prev_exp_dir)\n', (16241, 16255), False, 'import os\n'), ((16824, 16861), 'gps.agent.ros.agent_ros.AgentROS', 'AgentROS', (["hyperparams.config['agent']"], {}), "(hyperparams.config['agent'])\n", (16832, 16861), False, 'from gps.agent.ros.agent_ros import AgentROS\n'), ((16874, 16925), 'gps.gui.target_setup_gui.TargetSetupGUI', 'TargetSetupGUI', (["hyperparams.config['common']", 'agent'], {}), "(hyperparams.config['common'], agent)\n", (16888, 16925), False, 'from gps.gui.target_setup_gui import TargetSetupGUI\n'), ((16939, 16949), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (16947, 16949), True, 'import matplotlib.pyplot as plt\n'), ((16962, 16972), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16970, 16972), True, 'import matplotlib.pyplot as plt\n'), ((17234, 17251), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (17245, 17251), False, 'import random\n'), ((17260, 17280), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (17274, 17280), True, 'import numpy as np\n'), ((17356, 17382), 'os.listdir', 'os.listdir', (['data_files_dir'], {}), '(data_files_dir)\n', (17366, 17382), False, 'import os\n'), ((18263, 18280), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (18274, 18280), False, 'import random\n'), ((18289, 18309), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (18303, 18309), True, 'import numpy as np\n'), ((6818, 6829), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (6826, 6829), False, 'import os\n'), ((12829, 12856), 'shutil.copy.copy', 'copy.copy', (['pol_sample_lists'], {}), '(pol_sample_lists)\n', (12838, 12856), False, 'from shutil import copy\n'), ((13163, 13174), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (13171, 13174), False, 'import os\n'), ((15691, 15734), 'shutil.copy', 'copy', (["(prev_exp_dir + 'targets.npz')", 'exp_dir'], {}), "(prev_exp_dir + 'targets.npz', exp_dir)\n", (15695, 15734), False, 'from shutil import copy\n'), ((17013, 17055), 'sys.exit', 'sys.exit', (['"""ROS required for target setup."""'], {}), "('ROS required for target setup.')\n", (17021, 17055), False, 'import sys\n'), ((17990, 18000), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (17998, 18000), True, 'import matplotlib.pyplot as plt\n'), ((18013, 18023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18021, 18023), True, 'import matplotlib.pyplot as plt\n'), ((18890, 18900), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (18898, 18900), True, 'import matplotlib.pyplot as plt\n'), ((18913, 18923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18921, 18923), True, 'import matplotlib.pyplot as plt\n'), ((4599, 4613), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4611, 4613), False, 'import sys\n'), ((8583, 8599), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (8593, 8599), False, 'import time\n')] |
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import unittest
import numpy as np
from p3iv_utils.coordinate_transformation import CoordinateTransform
from p3iv_utils_probability.distributions import BivariateNormalDistributionSequence
from p3iv_visualization.motion.plot_array2d import PlotArray2D
from p3iv_visualization.motion.plot_motion_components import PlotMotionComponents
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
class PlotArray2dTest(unittest.TestCase):
def test_array2d(self):
fig, ax = plt.subplots(1, 1)
p = PlotArray2D(ax, "Timesteps")
timesteps_ = np.arange(12) * 0.5
p.initialize(timesteps_)
p.set_x_axis()
p.set_y_axis(-2, 20, increment=3)
motion_array2d = np.zeros(24).reshape(-1, 2)
motion_array2d[:, 0] = 5 * np.ones(12) + np.arange(12)
motion_array2d[:, 1] = 4 * np.ones(12)
p.update_motion_array2d(motion_array2d, index4pin2free=5)
plt.show()
class PlotMotionComponentsTest(unittest.TestCase):
def setUp(self):
self.position = np.array(
[
[4.92560704e01, 1.41206491e-15],
[5.00560704e01, -1.81126815e-15],
[5.08560704e01, 2.98372438e-16],
[5.17232704e01, -1.14838694e-15],
[5.27024704e01, 2.06952511e-15],
[5.37936704e01, -1.87350135e-15],
[5.49999075e01, -1.08940634e-15],
[5.63190916e01, 2.49800181e-15],
[5.77454961e01, -1.29236899e-15],
[5.92709730e01, -2.76861867e-15],
[6.08859243e01, -4.51881314e-16],
[6.25800593e01, -4.71844785e-16],
[6.43429694e01, -4.57966998e-16],
[6.61645485e01, 8.67361738e-16],
]
)
self.velocity = np.array(
[
[0.00000000e00, 0.00000000e00],
[4.00000000e00, -1.61166653e-14],
[4.00000000e00, 1.05482029e-14],
[4.33600000e00, -7.23379689e-15],
[4.89600000e00, 1.60895602e-14],
[5.45600000e00, -1.97151323e-14],
[6.03118564e00, 3.92047506e-15],
[6.59592080e00, 1.79370407e-14],
[7.13202224e00, -1.89518540e-14],
[7.62738442e00, -7.38124839e-15],
[8.07475648e00, 1.15836868e-14],
[8.47067513e00, -9.98173551e-17],
[8.81455030e00, 6.93889390e-17],
[9.10789555e00, 6.62664368e-15],
]
)
self.acceleration = np.array(
[
[0.00000000e00, 0.00000000e00],
[0.00000000e00, 0.00000000e00],
[-1.77635684e-13, 1.33324341e-13],
[1.68000000e00, -8.89099992e-14],
[2.80000000e00, 1.16616786e-13],
[2.80000000e00, -1.79023463e-13],
[2.87592818e00, 1.18178037e-13],
[2.82367581e00, 7.00828284e-14],
[2.68050723e00, -1.84444474e-13],
[2.47681090e00, 5.78530279e-14],
[2.23686028e00, 9.48246758e-14],
[1.97959324e00, -5.84175206e-14],
[1.71937588e00, 8.46031470e-16],
[1.46672621e00, 3.27862737e-14],
]
)
self.jerk = np.array(
[
[0.00000000e00, 0.00000000e00],
[0.00000000e00, 0.00000000e00],
[0.00000000e00, 0.00000000e00],
[8.40000000e00, -1.11117170e-12],
[5.60000000e00, 1.02763392e-12],
[-9.76996262e-12, -1.47820124e-12],
[3.79640891e-01, 1.48600750e-12],
[-2.61261842e-01, -2.40476042e-13],
[-7.15842895e-01, -1.27263651e-12],
[-1.01848163e00, 1.21148751e-12],
[-1.19975314e00, 1.84858239e-13],
[-1.28633516e00, -7.66210982e-13],
[-1.30108684e00, 2.96317760e-13],
[-1.26324832e00, 1.59701211e-13],
]
)
self.timesteps = np.arange(len(self.position)) * 0.5
def test_plot_motion_components(self):
fig = plt.figure(1)
gs = gridspec.GridSpec(3, 1)
gs.update(hspace=0.00)
ax0 = plt.subplot(gs[0, 0])
ax1 = plt.subplot(gs[1, 0])
ax2 = plt.subplot(gs[2, 0])
p = PlotMotionComponents(ax0, ax1, ax2)
p.initialize(self.timesteps)
p.set_labels()
p.update_profile(self.velocity, self.acceleration, self.jerk, index4pin2free=6)
plt.show()
if __name__ == "__main__":
unittest.main()
| [
"numpy.ones",
"p3iv_visualization.motion.plot_array2d.PlotArray2D",
"p3iv_visualization.motion.plot_motion_components.PlotMotionComponents",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.zeros",
"unittest.main",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.su... | [((4869, 4884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4882, 4884), False, 'import unittest\n'), ((717, 735), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (729, 735), True, 'import matplotlib.pyplot as plt\n'), ((748, 776), 'p3iv_visualization.motion.plot_array2d.PlotArray2D', 'PlotArray2D', (['ax', '"""Timesteps"""'], {}), "(ax, 'Timesteps')\n", (759, 776), False, 'from p3iv_visualization.motion.plot_array2d import PlotArray2D\n'), ((1156, 1166), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1164, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1732), 'numpy.array', 'np.array', (['[[49.2560704, 1.41206491e-15], [50.0560704, -1.81126815e-15], [50.8560704, \n 2.98372438e-16], [51.7232704, -1.14838694e-15], [52.7024704, \n 2.06952511e-15], [53.7936704, -1.87350135e-15], [54.9999075, -\n 1.08940634e-15], [56.3190916, 2.49800181e-15], [57.7454961, -\n 1.29236899e-15], [59.270973, -2.76861867e-15], [60.8859243, -\n 4.51881314e-16], [62.5800593, -4.71844785e-16], [64.3429694, -\n 4.57966998e-16], [66.1645485, 8.67361738e-16]]'], {}), '([[49.2560704, 1.41206491e-15], [50.0560704, -1.81126815e-15], [\n 50.8560704, 2.98372438e-16], [51.7232704, -1.14838694e-15], [52.7024704,\n 2.06952511e-15], [53.7936704, -1.87350135e-15], [54.9999075, -\n 1.08940634e-15], [56.3190916, 2.49800181e-15], [57.7454961, -\n 1.29236899e-15], [59.270973, -2.76861867e-15], [60.8859243, -\n 4.51881314e-16], [62.5800593, -4.71844785e-16], [64.3429694, -\n 4.57966998e-16], [66.1645485, 8.67361738e-16]])\n', (1273, 1732), True, 'import numpy as np\n'), ((2033, 2442), 'numpy.array', 'np.array', (['[[0.0, 0.0], [4.0, -1.61166653e-14], [4.0, 1.05482029e-14], [4.336, -\n 7.23379689e-15], [4.896, 1.60895602e-14], [5.456, -1.97151323e-14], [\n 6.03118564, 3.92047506e-15], [6.5959208, 1.79370407e-14], [7.13202224, \n -1.8951854e-14], [7.62738442, -7.38124839e-15], [8.07475648, \n 1.15836868e-14], [8.47067513, -9.98173551e-17], [8.8145503, \n 6.9388939e-17], [9.10789555, 6.62664368e-15]]'], {}), '([[0.0, 0.0], [4.0, -1.61166653e-14], [4.0, 1.05482029e-14], [4.336,\n -7.23379689e-15], [4.896, 1.60895602e-14], [5.456, -1.97151323e-14], [\n 6.03118564, 3.92047506e-15], [6.5959208, 1.79370407e-14], [7.13202224, \n -1.8951854e-14], [7.62738442, -7.38124839e-15], [8.07475648, \n 1.15836868e-14], [8.47067513, -9.98173551e-17], [8.8145503, \n 6.9388939e-17], [9.10789555, 6.62664368e-15]])\n', (2041, 2442), True, 'import numpy as np\n'), ((2801, 3205), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 0.0], [-1.77635684e-13, 1.33324341e-13], [1.68, -\n 8.89099992e-14], [2.8, 1.16616786e-13], [2.8, -1.79023463e-13], [\n 2.87592818, 1.18178037e-13], [2.82367581, 7.00828284e-14], [2.68050723,\n -1.84444474e-13], [2.4768109, 5.78530279e-14], [2.23686028, \n 9.48246758e-14], [1.97959324, -5.84175206e-14], [1.71937588, \n 8.4603147e-16], [1.46672621, 3.27862737e-14]]'], {}), '([[0.0, 0.0], [0.0, 0.0], [-1.77635684e-13, 1.33324341e-13], [1.68,\n -8.89099992e-14], [2.8, 1.16616786e-13], [2.8, -1.79023463e-13], [\n 2.87592818, 1.18178037e-13], [2.82367581, 7.00828284e-14], [2.68050723,\n -1.84444474e-13], [2.4768109, 5.78530279e-14], [2.23686028, \n 9.48246758e-14], [1.97959324, -5.84175206e-14], [1.71937588, \n 8.4603147e-16], [1.46672621, 3.27862737e-14]])\n', (2809, 3205), True, 'import numpy as np\n'), ((3560, 3964), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [8.4, -1.1111717e-12], [5.6, \n 1.02763392e-12], [-9.76996262e-12, 
-1.47820124e-12], [0.379640891, \n 1.4860075e-12], [-0.261261842, -2.40476042e-13], [-0.715842895, -\n 1.27263651e-12], [-1.01848163, 1.21148751e-12], [-1.19975314, \n 1.84858239e-13], [-1.28633516, -7.66210982e-13], [-1.30108684, \n 2.9631776e-13], [-1.26324832, 1.59701211e-13]]'], {}), '([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [8.4, -1.1111717e-12], [5.6, \n 1.02763392e-12], [-9.76996262e-12, -1.47820124e-12], [0.379640891, \n 1.4860075e-12], [-0.261261842, -2.40476042e-13], [-0.715842895, -\n 1.27263651e-12], [-1.01848163, 1.21148751e-12], [-1.19975314, \n 1.84858239e-13], [-1.28633516, -7.66210982e-13], [-1.30108684, \n 2.9631776e-13], [-1.26324832, 1.59701211e-13]])\n', (3568, 3964), True, 'import numpy as np\n'), ((4429, 4442), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4439, 4442), True, 'import matplotlib.pyplot as plt\n'), ((4457, 4480), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (4474, 4480), True, 'import matplotlib.gridspec as gridspec\n'), ((4526, 4547), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (4537, 4547), True, 'import matplotlib.pyplot as plt\n'), ((4562, 4583), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (4573, 4583), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4619), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2, 0]'], {}), '(gs[2, 0])\n', (4609, 4619), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4668), 'p3iv_visualization.motion.plot_motion_components.PlotMotionComponents', 'PlotMotionComponents', (['ax0', 'ax1', 'ax2'], {}), '(ax0, ax1, ax2)\n', (4653, 4668), False, 'from p3iv_visualization.motion.plot_motion_components import PlotMotionComponents\n'), ((4825, 4835), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4833, 4835), True, 'import matplotlib.pyplot as plt\n'), ((799, 812), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (808, 812), True, 'import numpy as np\n'), ((1020, 1033), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1029, 1033), True, 'import numpy as np\n'), ((1069, 1080), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1076, 1080), True, 'import numpy as np\n'), ((943, 955), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (951, 955), True, 'import numpy as np\n'), ((1006, 1017), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1013, 1017), True, 'import numpy as np\n')] |
"""
Class :py:class:`FWViewImage` is a FWView for interactive image
===============================================================
FWView <- QGraphicsView <- ... <- QWidget
Usage ::
# Test
#-----
import sys
from psana.graphqt.FWViewImage import *
import psana.graphqt.ColorTable as ct
app = QApplication(sys.argv)
ctab = ct.color_table_monochr256()
w = FWViewImage(None, arr, origin='UL', scale_ctl='HV', coltab=ctab)
w.show()
app.exec_()
# Main methods in addition to FWView
#------------------------------------
w.set_pixmap_from_arr(arr, set_def=True)
w.set_coltab(self, coltab=ct.color_table_rainbow(ncolors=1000, hang1=250, hang2=-20))
w.connect_mouse_press_event_to(w.test_mouse_press_event_reception)
w.connect_mouse_move_event_to(w.test_mouse_move_event_reception)
w.connect_scene_rect_changed_to(w.test_scene_rect_changed_reception)
# Methods
#--------
w.set_style()
ix, iy, v = w.cursor_on_image_pixcoords_and_value(p)
# Call-back slots
#----------------
w.mousePressEvent(e)
# w.mouseMoveEvent(e)
# w.closeEvent(e)
w.key_usage()
w.keyPressEvent(e)
# Overrides method from FWView
#-----------------------------
w.test_mouse_move_event_reception(e) # signature differs from FWView
# Global methods for test
#------------------------
img = image_with_random_peaks(shape=(500, 500))
See:
- :class:`FWView`
- :class:`FWViewImage`
- :class:`QWSpectrum`
- `lcls2 on github <https://github.com/slac-lcls/lcls2>`_.
This software was developed for the LCLS2 project.
If you use all or part of it, please give an appropriate acknowledgment.
Created on 2016-09-09 by <NAME>
Adopted for LCLS2 on 2018-02-16
"""
from math import floor
from psana.graphqt import ColorTable as ct
from psana.graphqt.FWView import *
from PyQt5.QtGui import QImage, QPixmap
class FWViewImage(FWView):
def __init__(self, parent=None, arr=None,\
coltab=ct.color_table_rainbow(ncolors=1000, hang1=250, hang2=-20),\
origin='UL', scale_ctl='HV'):
h, w = arr.shape
rscene = QRectF(0, 0, w, h)
FWView.__init__(self, parent, rscene, origin, scale_ctl)
self._name = self.__class__.__name__
self.set_coltab(coltab)
self.pmi = None
self.set_pixmap_from_arr(arr)
def set_coltab(self, coltab=ct.color_table_rainbow(ncolors=1000, hang1=250, hang2=-20)):
self.coltab = coltab
def set_style(self):
FWView.set_style(self)
self.setWindowTitle('FWViewImage%s' %(30*' '))
self.setAttribute(Qt.WA_TranslucentBackground)
#self.layout().setContentsMargins(0,0,0,0)
#self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
# def mousePressEvent(self, e):
# FWView.mousePressEvent(self, e)
# #print('XXX FWViewImage.mousePressEvent')
# def mouseMoveEvent(self, e):
# FWView.mouseMoveEvent(self, e)
# def closeEvent(self, e):
# FWView.closeEvent(self, e)
# print('%s.closeEvent' % self._name)
def add_pixmap_to_scene(self, pixmap):
if self.pmi is None: self.pmi = self.scene().addPixmap(pixmap)
else : self.pmi.setPixmap(pixmap)
def set_pixmap_from_arr(self, arr, set_def=True):
"""Input array is scailed by color table. If color table is None arr set as is.
"""
self.arr = arr
anorm = arr if self.coltab is None else\
ct.apply_color_table(arr, ctable=self.coltab)
h, w = arr.shape
image = QImage(anorm, w, h, QImage.Format_ARGB32)
pixmap = QPixmap.fromImage(image)
self.add_pixmap_to_scene(pixmap)
if set_def:
rs = QRectF(0, 0, w, h)
self.set_rect_scene(rs, set_def)
def cursor_on_image_pixcoords_and_value(self, p):
"""Returns cursor pointing pixel coordinates and value,
- p (QPoint) - cursor on scene position
"""
#p = self.mapToScene(e.pos())
ix, iy = int(floor(p.x())), int(floor(p.y()))
v = None
arr = self.arr
        if 0 <= ix < arr.shape[1] and 0 <= iy < arr.shape[0]:
            v = arr[iy, ix]
return ix, iy, v
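    # Example (assumed 500x500 array): a scene point QPointF(10.7, 3.2)
    # maps to ix=10, iy=3 and v = self.arr[3, 10]; points outside the
    # array bounds return v = None.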
if __name__ == "__main__":
def test_mouse_move_event_reception(self, e):
"""Overrides method from FWView"""
p = self.mapToScene(e.pos())
ix, iy, v = self.cursor_on_image_pixcoords_and_value(p)
fv = 0 if v is None else v
self.setWindowTitle('FWViewImage x=%d y=%d v=%s%s' % (ix, iy, '%.1f'%fv, 25*' '))
def key_usage(self):
return 'Keys:'\
'\n ESC - exit'\
'\n R - reset original size'\
'\n N - set new pixmap'\
'\n W - set new pixmap of random shape, do not change default scene rect'\
'\n D - set new pixmap of random shape and change default scene rect'\
'\n'
def keyPressEvent(self, e):
#print('keyPressEvent, key=', e.key())
if e.key() == Qt.Key_Escape:
print('Close app')
self.close()
elif e.key() == Qt.Key_R:
print('Reset original size')
self.reset_original_size()
elif e.key() == Qt.Key_N:
print('Set new pixel map')
s = self.pmi.pixmap().size()
img = image_with_random_peaks((s.height(), s.width()))
self.set_pixmap_from_arr(img, set_def=False)
elif e.key() in (Qt.Key_W, Qt.Key_D):
change_def = e.key()==Qt.Key_D
print('%s: change scene rect %s' % (self._name, 'set new default' if change_def else ''))
v = ag.random_standard((4,), mu=0, sigma=200, dtype=np.int)
rs = QRectF(v[0], v[1], v[2]+1000, v[3]+1000)
print('Set scene rect: %s' % str(rs))
#self.set_rect_scene(rs, set_def=change_def)
img = image_with_random_peaks((int(rs.height()), int(rs.width())))
self.set_pixmap_from_arr(img, set_def=change_def)
else:
print(self.key_usage())
if __name__ == "__main__":
import sys
sys.path.append('..') # use relative path from parent dir
import psana.pyalgos.generic.NDArrGenerators as ag
import numpy as np
def image_with_random_peaks(shape=(500, 500)):
from psana.pyalgos.generic.NDArrUtils import print_ndarr
print('XXX1 shape:', shape)
img = ag.random_standard(shape, mu=0, sigma=10)
print_ndarr(img, 'XXX ag.random_standard')
peaks = ag.add_random_peaks(img, npeaks=50, amean=100, arms=50, wmean=1.5, wrms=0.3)
ag.add_ring(img, amp=20, row=500, col=500, rad=300, sigma=50)
return img
def test_wfviewimage(tname):
print('%s:' % sys._getframe().f_code.co_name)
#arr = np.random.random((1000, 1000))
arr = image_with_random_peaks((1000, 1000))
#ctab = ct.color_table_rainbow(ncolors=1000, hang1=250, hang2=-20)
ctab = ct.color_table_monochr256()
#ctab = ct.color_table_interpolated()
app = QApplication(sys.argv)
w = None
if tname == '0': w = FWViewImage(None, arr, coltab=ctab, origin='UL', scale_ctl='HV')
elif tname == '1': w = FWViewImage(None, arr, coltab=ctab, origin='UL', scale_ctl='H')
elif tname == '2': w = FWViewImage(None, arr, coltab=ctab, origin='UL', scale_ctl='V')
elif tname == '3': w = FWViewImage(None, arr, coltab=ctab, origin='UL', scale_ctl='')
elif tname == '4':
arrct = ct.array_for_color_bar(orient='H')
w = FWViewImage(None, arrct, coltab=None, origin='UL', scale_ctl='H')
w.setGeometry(50, 50, 500, 40)
elif tname == '5':
arrct = ct.array_for_color_bar(orient='V')
w = FWViewImage(None, arrct, coltab=None, origin='UL', scale_ctl='V')
w.setGeometry(50, 50, 40, 500)
elif tname == '6':
#ctab= ct.color_table_rainbow(ncolors=1000, hang1=0, hang2=360)
#ctab = ct.color_table_rainbow(ncolors=1000, hang1=250, hang2=-20)
#ctab = ct.color_table_monochr256()
ctab = ct.color_table_interpolated()
arrct = ct.array_for_color_bar(ctab, orient='H')
w = FWViewImage(None, arrct, coltab=None, origin='UL', scale_ctl='H')
w.setGeometry(50, 50, 500, 40)
elif tname == '7':
a = np.arange(15).reshape((5, 3))
w = FWViewImage(None, a, coltab=ctab, origin='UL', scale_ctl='HV')
else:
print('test %s is not implemented' % tname)
return
w.connect_mouse_press_event_to(w.test_mouse_press_event_reception)
w.connect_mouse_move_event_to(w.test_mouse_move_event_reception)
w.connect_scene_rect_changed_to(w.test_scene_rect_changed_reception)
w.show()
app.exec_()
del w
del app
if __name__ == "__main__":
tname = sys.argv[1] if len(sys.argv) > 1 else '0'
print(50*'_', '\nTest %s' % tname)
test_wfviewimage(tname)
sys.exit('End of Test %s' % tname)
# EOF
| [
"psana.graphqt.ColorTable.color_table_rainbow",
"PyQt5.QtGui.QPixmap.fromImage",
"psana.graphqt.ColorTable.color_table_interpolated",
"psana.graphqt.ColorTable.color_table_monochr256",
"psana.pyalgos.generic.NDArrGenerators.add_ring",
"PyQt5.QtGui.QImage",
"sys._getframe",
"psana.graphqt.ColorTable.ar... | [((6261, 6282), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (6276, 6282), False, 'import sys\n'), ((8997, 9031), 'sys.exit', 'sys.exit', (["('End of Test %s' % tname)"], {}), "('End of Test %s' % tname)\n", (9005, 9031), False, 'import sys\n'), ((2021, 2079), 'psana.graphqt.ColorTable.color_table_rainbow', 'ct.color_table_rainbow', ([], {'ncolors': '(1000)', 'hang1': '(250)', 'hang2': '(-20)'}), '(ncolors=1000, hang1=250, hang2=-20)\n', (2043, 2079), True, 'from psana.graphqt import ColorTable as ct\n'), ((2429, 2487), 'psana.graphqt.ColorTable.color_table_rainbow', 'ct.color_table_rainbow', ([], {'ncolors': '(1000)', 'hang1': '(250)', 'hang2': '(-20)'}), '(ncolors=1000, hang1=250, hang2=-20)\n', (2451, 2487), True, 'from psana.graphqt import ColorTable as ct\n'), ((3636, 3677), 'PyQt5.QtGui.QImage', 'QImage', (['anorm', 'w', 'h', 'QImage.Format_ARGB32'], {}), '(anorm, w, h, QImage.Format_ARGB32)\n', (3642, 3677), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((3695, 3719), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (3712, 3719), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((6548, 6589), 'psana.pyalgos.generic.NDArrGenerators.random_standard', 'ag.random_standard', (['shape'], {'mu': '(0)', 'sigma': '(10)'}), '(shape, mu=0, sigma=10)\n', (6566, 6589), True, 'import psana.pyalgos.generic.NDArrGenerators as ag\n'), ((6594, 6636), 'psana.pyalgos.generic.NDArrUtils.print_ndarr', 'print_ndarr', (['img', '"""XXX ag.random_standard"""'], {}), "(img, 'XXX ag.random_standard')\n", (6605, 6636), False, 'from psana.pyalgos.generic.NDArrUtils import print_ndarr\n'), ((6650, 6726), 'psana.pyalgos.generic.NDArrGenerators.add_random_peaks', 'ag.add_random_peaks', (['img'], {'npeaks': '(50)', 'amean': '(100)', 'arms': '(50)', 'wmean': '(1.5)', 'wrms': '(0.3)'}), '(img, npeaks=50, amean=100, arms=50, wmean=1.5, wrms=0.3)\n', (6669, 6726), True, 'import psana.pyalgos.generic.NDArrGenerators as ag\n'), ((6731, 6792), 'psana.pyalgos.generic.NDArrGenerators.add_ring', 'ag.add_ring', (['img'], {'amp': '(20)', 'row': '(500)', 'col': '(500)', 'rad': '(300)', 'sigma': '(50)'}), '(img, amp=20, row=500, col=500, rad=300, sigma=50)\n', (6742, 6792), True, 'import psana.pyalgos.generic.NDArrGenerators as ag\n'), ((7063, 7090), 'psana.graphqt.ColorTable.color_table_monochr256', 'ct.color_table_monochr256', ([], {}), '()\n', (7088, 7090), True, 'from psana.graphqt import ColorTable as ct\n'), ((3547, 3592), 'psana.graphqt.ColorTable.apply_color_table', 'ct.apply_color_table', (['arr'], {'ctable': 'self.coltab'}), '(arr, ctable=self.coltab)\n', (3567, 3592), True, 'from psana.graphqt import ColorTable as ct\n'), ((6859, 6874), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (6872, 6874), False, 'import sys\n'), ((5803, 5858), 'psana.pyalgos.generic.NDArrGenerators.random_standard', 'ag.random_standard', (['(4,)'], {'mu': '(0)', 'sigma': '(200)', 'dtype': 'np.int'}), '((4,), mu=0, sigma=200, dtype=np.int)\n', (5821, 5858), True, 'import psana.pyalgos.generic.NDArrGenerators as ag\n'), ((7583, 7617), 'psana.graphqt.ColorTable.array_for_color_bar', 'ct.array_for_color_bar', ([], {'orient': '"""H"""'}), "(orient='H')\n", (7605, 7617), True, 'from psana.graphqt import ColorTable as ct\n'), ((7774, 7808), 'psana.graphqt.ColorTable.array_for_color_bar', 'ct.array_for_color_bar', ([], {'orient': '"""V"""'}), "(orient='V')\n", (7796, 7808), True, 'from psana.graphqt import ColorTable as ct\n'), ((8155, 8184), 
'psana.graphqt.ColorTable.color_table_interpolated', 'ct.color_table_interpolated', ([], {}), '()\n', (8182, 8184), True, 'from psana.graphqt import ColorTable as ct\n'), ((8201, 8241), 'psana.graphqt.ColorTable.array_for_color_bar', 'ct.array_for_color_bar', (['ctab'], {'orient': '"""H"""'}), "(ctab, orient='H')\n", (8223, 8241), True, 'from psana.graphqt import ColorTable as ct\n'), ((8394, 8407), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (8403, 8407), True, 'import numpy as np\n')] |
import math
import tensorflow as tf
from .model import Model
from .builder import MODELS
from .common import ConvNormActBlock
from core.layers import build_activation
def bottle2neckx(inputs,
filters,
cardinality,
strides=1,
scale=4,
base_width=26,
dilation_rate=1,
data_format="channels_last",
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-5, axis=-1, trainable=True),
activation=dict(activation="relu"),
downsample=False,
trainable=True,
dropblock=None,
expansion=4,
stype="normal",
name="Bottle2neck"):
width = int(math.floor(filters * (base_width / 64.)))
channel_axis = -1 if data_format == "channels_last" else 1
x = ConvNormActBlock(filters=width * cardinality * scale,
kernel_size=1,
trainable=trainable,
data_format=data_format,
normalization=normalization,
activation=activation,
dropblock=dropblock,
name=name + "/conv1")(inputs)
num_convs = scale if scale == 1 else scale - 1
spx = tf.keras.layers.Lambda(lambda inp: tf.split(inp, scale, channel_axis), name=name + "/split")(x)
for i in range(num_convs):
if i == 0 or stype == "stage":
sp = spx[i]
else:
sp = tf.keras.layers.Add(name=name + "/add%d" % i)([sp, spx[i]])
sp = ConvNormActBlock(filters=width * cardinality,
kernel_size=3,
strides=strides,
data_format=data_format,
dilation_rate=dilation_rate if strides == 1 else 1,
trainable=trainable,
groups=cardinality,
normalization=normalization,
activation=activation,
dropblock=dropblock,
name=name + "/convs/%d" % i)(sp)
if i == 0:
x = sp
else:
x = tf.keras.layers.Concatenate(channel_axis, name=name + "/cat%d" % i)([x, sp])
if scale != 1 and stype == "normal":
x = tf.keras.layers.Concatenate(channel_axis, name=name + "/cat%d" % num_convs)([x, spx[num_convs]])
elif scale != 1 and stype == "stage":
padding = "same"
sp_ = spx[num_convs]
if strides != 1:
sp_ = tf.keras.layers.ZeroPadding2D(((1, 1), (1, 1)), name=name + "/pad")(sp_)
padding = "valid"
sp_ = tf.keras.layers.AvgPool2D(3, strides, padding, data_format, name=name + "/avgpool")(sp_)
x = tf.keras.layers.Concatenate(channel_axis, name=name + "/cat%d" % num_convs)([x, sp_])
x = ConvNormActBlock(filters=filters * expansion,
kernel_size=1,
trainable=trainable,
data_format=data_format,
normalization=normalization,
activation=None,
dropblock=dropblock,
name=name + "/conv3")(x)
shortcut = inputs
if downsample:
shortcut = ConvNormActBlock(filters=filters * expansion,
kernel_size=1,
strides=strides,
trainable=trainable,
data_format=data_format,
normalization=normalization,
activation=None,
dropblock=dropblock,
name=name + "/downsample")(shortcut)
x = tf.keras.layers.Add(name=name + "/add")([x, shortcut])
x = build_activation(**activation, name=name + "/" + activation["activation"])(x)
return x
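# Res2Net recap (summarizing the code above): the 1x1 output is split into
# `scale` channel groups; in "normal" blocks each group after the first is
# added to the previous group's 3x3 output before its own 3x3 conv, so the
# effective receptive field grows with each group.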
# def call(self, inputs, training=None):
# shortcut = inputs
# x = self.conv1(inputs, training=training)
# spx = tf.split(x, self.scale, self.channel_axis)
# for i in range(self.num_convs):
# if i == 0 or self.stype == "stage":
# sp = spx[i]
# else:
# sp += spx[i]
# sp = self.convs[i](sp, training=training)
# if i == 0:
# x = sp
# else:
# x = tf.concat([x, sp], axis=self.channel_axis)
# if self.scale != 1 and self.stype == "normal":
# x = tf.concat([x, spx[self.num_convs]], self.channel_axis)
# elif self.scale != 1 and self.stype == "stage":
# if hasattr(self, "pad"):
# x = tf.concat([x, self.avgpool(self.pad(spx[self.num_convs]))], self.channel_axis)
# else:
# x = tf.concat([x, self.avgpool(spx[self.num_convs])], self.channel_axis)
# x = self.conv3(x, training=training)
# if hasattr(self, "downsample"):
# shortcut = self.downsample(shortcut, training)
# x += shortcut
# x = self.act(x)
# return x
class Res2NeXt(Model):
def __init__(self,
name,
num_blocks,
dropblock=dict(block_size=7, drop_rate=0.1),
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-5, axis=-1, trainable=True),
activation=dict(activation="relu"),
output_indices=(3, 4),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1, ),
input_shape=None,
input_tensor=None,
cardinality=8,
base_width=26,
scale=4,
num_classes=1000,
drop_rate=0.5):
super(Res2NeXt, self).__init__(name,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
input_shape=input_shape,
input_tensor=input_tensor,
dropblock=dropblock,
num_classes=num_classes,
drop_rate=drop_rate)
self.num_blocks = num_blocks
self.base_width = base_width
self.scale = scale
self.cardinality = cardinality
def build_model(self):
def _norm(inp):
mean = tf.constant([0.485, 0.456, 0.406], inp.dtype, [1, 1, 1, 3]) * 255.
std = 1. / (tf.constant([0.229, 0.224, 0.225], inp.dtype, [1, 1, 1, 3]) * 255.)
return (inp - mean) * std
x = tf.keras.layers.Lambda(_norm, name="norm_input")(self.img_input)
x = ConvNormActBlock(filters=64,
kernel_size=(7, 7),
strides=self.strides[0],
dilation_rate=self.dilation_rates[0],
trainable=1 not in self.frozen_stages,
kernel_initializer="he_normal",
normalization=self.normalization,
name="conv1")(x)
self.infilters = 64
outputs = [x]
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = tf.keras.layers.MaxPool2D((3, 3), self.strides[1], "valid", self.data_format, name="maxpool")(x)
x = self._make_layer(x, 64, self.num_blocks[0], 1, self.dilation_rates[1], 2 not in self.frozen_stages, name="layer1")
outputs.append(x)
x = self._make_layer(x, 128, self.num_blocks[1], self.strides[2], self.dilation_rates[2], 3 not in self.frozen_stages, name="layer2")
outputs.append(x)
x = self._make_layer(x, 256, self.num_blocks[2], self.strides[3], self.dilation_rates[3], 4 not in self.frozen_stages, name="layer3")
outputs.append(x)
x = self._make_layer(x, 512, self.num_blocks[3], self.strides[4], self.dilation_rates[4], 5 not in self.frozen_stages, name="layer4")
outputs.append(x)
if -1 not in self.output_indices:
outputs = (outputs[i-1] for i in self.output_indices)
else:
x = tf.keras.layers.GlobalAvgPool2D(data_format=self.data_format, name="gloabl_avgpool")(x)
if self.drop_rate and self.drop_rate > 0.:
x = tf.keras.layers.Dropout(rate=self.drop_rate, name="drop")(x)
outputs = tf.keras.layers.Dense(units=self.num_classes, name="logits")(x)
return tf.keras.Model(inputs=self.img_input, outputs=outputs, name=self.name)
def _make_layer(self, inputs, filters, num_block, strides=1, dilation_rate=1, trainable=True, name="layer"):
x = bottle2neckx(inputs,
filters,
cardinality=self.cardinality,
strides=strides,
base_width=self.base_width,
scale=self.scale,
dilation_rate=dilation_rate,
data_format=self.data_format,
trainable=trainable,
dropblock=self.dropblock,
normalization=self.normalization,
activation=self.activation,
downsample=strides != 1 or self.infilters != filters * 4,
stype="stage",
name=name + "/0")
for i in range(1, num_block):
x = bottle2neckx(x,
filters,
cardinality=self.cardinality,
strides=1,
base_width=self.base_width,
scale=self.scale,
dilation_rate=dilation_rate,
data_format=self.data_format,
trainable=trainable,
dropblock=self.dropblock,
normalization=self.normalization,
activation=self.activation,
downsample=False,
name=name + "/%d" % i)
self.infilters = filters * 4
return x
@MODELS.register("Res2NeXt50")
def Res2NeXt50(dropblock=dict(block_size=7, drop_rate=0.1),
normalization=dict(normalization='batch_norm', momentum=0.9, epsilon=1e-05, axis = -1, trainable =True),
activation=dict(activation='relu'),
output_indices=(-1, ),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1, ),
input_shape=None,
input_tensor=None,
num_classes=1000,
drop_rate=0.5,
**kwargs):
return Res2NeXt("res2next50",
num_blocks=[3, 4, 6, 3],
dropblock=dropblock,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
input_shape=input_shape,
input_tensor=input_tensor,
base_width=4,
cardinality=8,
scale=4,
num_classes=num_classes,
drop_rate=drop_rate,
**kwargs).build_model()
@MODELS.register("Res2NeXt101")
def Res2NeXt101(dropblock=dict(block_size=7, drop_rate=0.1),
normalization=dict(normalization='batch_norm', momentum=0.9, epsilon=1e-05, axis = -1, trainable =True),
activation=dict(activation='relu'),
output_indices=(-1, ),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1, ),
input_shape=None,
input_tensor=None,
num_classes=1000,
drop_rate=0.5,
**kwargs):
return Res2NeXt("res2next101",
num_blocks=[3, 4, 23, 3],
dropblock=dropblock,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
input_shape=input_shape,
input_tensor=input_tensor,
base_width=4,
cardinality=8,
scale=4,
num_classes=num_classes,
drop_rate=drop_rate,
**kwargs).build_model()
@MODELS.register("Res2NeXt152")
def Res2NeXt152(dropblock=dict(block_size=7, drop_rate=0.1),
normalization=dict(normalization='batch_norm', momentum=0.9, epsilon=1e-05, axis = -1, trainable =True),
activation=dict(activation='relu'),
output_indices=(-1, ),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1, ),
input_shape=None,
input_tensor=None,
num_classes=1000,
drop_rate=0.5,
**kwargs):
return Res2NeXt("res2next152",
num_blocks=[3, 8, 36, 3],
dropblock=dropblock,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
input_shape=input_shape,
input_tensor=input_tensor,
base_width=4,
cardinality=8,
scale=4,
num_classes=num_classes,
drop_rate=drop_rate,
**kwargs).build_model()
def _get_weight_name_map(blocks, scale):
name_map = {
"conv1/conv2d/kernel:0": "conv1.weight",
"conv1/batch_norm/gamma:0": "bn1.weight",
"conv1/batch_norm/beta:0": "bn1.bias",
"conv1/batch_norm/moving_mean:0": "bn1.running_mean",
"conv1/batch_norm/moving_variance:0": "bn1.running_var"
}
for i in range(1, 5):
for j in range(blocks[i - 1]):
for k in range(1, 4):
n1 = "layer%d/%d/conv%d" % (i, j, k)
n2 = "layer%d.%d" % (i, j)
if k != 2:
m = {
"%s/conv2d/kernel:0" % n1: "%s.conv%d.weight" % (n2, k),
"%s/batch_norm/gamma:0" % n1: "%s.bn%d.weight" % (n2, k),
"%s/batch_norm/beta:0" % n1: "%s.bn%d.bias" % (n2, k),
"%s/batch_norm/moving_mean:0" % n1: "%s.bn%d.running_mean" % (n2, k),
"%s/batch_norm/moving_variance:0" % n1: "%s.bn%d.running_var" % (n2, k),
"layer%d/0/downsample/conv2d/kernel:0" % i: "layer%d.0.downsample.0.weight" % i,
"layer%d/0/downsample/batch_norm/gamma:0" % i: "layer%d.0.downsample.1.weight" % i,
"layer%d/0/downsample/batch_norm/beta:0" % i: "layer%d.0.downsample.1.bias" % i,
"layer%d/0/downsample/batch_norm/moving_mean:0" % i: "layer%d.0.downsample.1.running_mean" % i,
"layer%d/0/downsample/batch_norm/moving_variance:0" % i: "layer%d.0.downsample.1.running_var" % i
}
name_map.update(m)
else:
for s in range(scale - 1):
m = {
"layer%d/%d/convs/%d/conv2d/kernel:0" % (i, j, s): "%s.convs.%d.weight" % (n2, s),
"layer%d/%d/convs/%d/batch_norm/gamma:0" % (i, j, s): "%s.bns.%d.weight" % (n2, s),
"layer%d/%d/convs/%d/batch_norm/beta:0" % (i, j, s): "%s.bns.%d.bias" % (n2, s),
"layer%d/%d/convs/%d/batch_norm/moving_mean:0" % (i, j, s): "%s.bns.%d.running_mean" % (n2, s),
"layer%d/%d/convs/%d/batch_norm/moving_variance:0" % (i, j, s): "%s.bns.%d.running_var" % (n2, s),
}
name_map.update(m)
name_map["logits/kernel:0"] = "fc.weight"
name_map["logits/bias:0"] = "fc.bias"
return name_map
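# Example mapping produced above (for the k == 2 branch):
#   "layer1/0/convs/0/conv2d/kernel:0" -> "layer1.0.convs.0.weight"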
def _torch2h5(model, torch_weight_path, blocks, scale):
import torch
import numpy as np
net = torch.load(torch_weight_path, map_location=torch.device('cpu'))
# for k, _ in net.items():
# if "tracked" in k:
# continue
# print(k)
name_map = _get_weight_name_map(blocks, scale)
for weight in model.weights:
name = weight.name
tw = net[name_map[name]].numpy()
if len(tw.shape) == 4:
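            # PyTorch conv weights are (out_ch, in_ch, kH, kW); TF kernels are (kH, kW, in_ch, out_ch).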
tw = np.transpose(tw, (2, 3, 1, 0))
if len(tw.shape) == 2:
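            # PyTorch Linear weights are (out, in); TF Dense kernels are (in, out).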
tw = np.transpose(tw, (1, 0))
weight.assign(tw)
del net
if __name__ == "__main__":
name = "res2next50_4s"
blocks = [3, 4, 6, 3]
scale = 4
model = Res2NeXt50(input_shape=(224, 224, 3), output_indices=(-1, ))
# model(tf.random.uniform([1, 224, 224, 3], 0, 255))
# model.summary()
_torch2h5(model, "/Users/bailang/Downloads/pretrained_weights/%s.pth" % name, blocks, scale)
with tf.io.gfile.GFile("/Users/bailang/Documents/pandas.jpg", "rb") as gf:
images = tf.image.decode_jpeg(gf.read())
images = tf.image.resize(images, (224, 224))[None]
lbl = model(images, training=False)
top5prob, top5class = tf.nn.top_k(tf.squeeze(tf.nn.softmax(lbl, -1), axis=0), k=5)
print("prob:", top5prob.numpy())
print("class:", top5class.numpy())
model.save_weights("/Users/bailang/Downloads/pretrained_weights/%s.h5" % name)
model.save_weights("/Users/bailang/Downloads/pretrained_weights/%s/model.ckpt" % name) | [
"core.layers.build_activation",
"math.floor",
"tensorflow.split",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.layers.Dense",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.AvgPool2D",
"tensorflow.io.gfile.GFile",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dropou... | [((807, 848), 'math.floor', 'math.floor', (['(filters * (base_width / 64.0))'], {}), '(filters * (base_width / 64.0))\n', (817, 848), False, 'import math\n'), ((4093, 4132), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {'name': "(name + '/add')"}), "(name=name + '/add')\n", (4112, 4132), True, 'import tensorflow as tf\n'), ((4156, 4230), 'core.layers.build_activation', 'build_activation', ([], {'name': "(name + '/' + activation['activation'])"}), "(**activation, name=name + '/' + activation['activation'])\n", (4172, 4230), False, 'from core.layers import build_activation\n'), ((9280, 9350), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'self.img_input', 'outputs': 'outputs', 'name': 'self.name'}), '(inputs=self.img_input, outputs=outputs, name=self.name)\n', (9294, 9350), True, 'import tensorflow as tf\n'), ((18546, 18608), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['"""/Users/bailang/Documents/pandas.jpg"""', '"""rb"""'], {}), "('/Users/bailang/Documents/pandas.jpg', 'rb')\n", (18563, 18608), True, 'import tensorflow as tf\n'), ((18679, 18714), 'tensorflow.image.resize', 'tf.image.resize', (['images', '(224, 224)'], {}), '(images, (224, 224))\n', (18694, 18714), True, 'import tensorflow as tf\n'), ((2564, 2639), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', (['channel_axis'], {'name': "(name + '/cat%d' % num_convs)"}), "(channel_axis, name=name + '/cat%d' % num_convs)\n", (2591, 2639), True, 'import tensorflow as tf\n'), ((7398, 7446), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['_norm'], {'name': '"""norm_input"""'}), "(_norm, name='norm_input')\n", (7420, 7446), True, 'import tensorflow as tf\n'), ((7975, 8030), 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '((1, 1), (1, 1))'}), '(padding=((1, 1), (1, 1)))\n', (8004, 8030), True, 'import tensorflow as tf\n'), ((8046, 8144), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', (['(3, 3)', 'self.strides[1]', '"""valid"""', 'self.data_format'], {'name': '"""maxpool"""'}), "((3, 3), self.strides[1], 'valid', self.\n data_format, name='maxpool')\n", (8071, 8144), True, 'import tensorflow as tf\n'), ((17695, 17714), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (17707, 17714), False, 'import torch\n'), ((18039, 18069), 'numpy.transpose', 'np.transpose', (['tw', '(2, 3, 1, 0)'], {}), '(tw, (2, 3, 1, 0))\n', (18051, 18069), True, 'import numpy as np\n'), ((18118, 18142), 'numpy.transpose', 'np.transpose', (['tw', '(1, 0)'], {}), '(tw, (1, 0))\n', (18130, 18142), True, 'import numpy as np\n'), ((18815, 18837), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['lbl', '(-1)'], {}), '(lbl, -1)\n', (18828, 18837), True, 'import tensorflow as tf\n'), ((1491, 1525), 'tensorflow.split', 'tf.split', (['inp', 'scale', 'channel_axis'], {}), '(inp, scale, channel_axis)\n', (1499, 1525), True, 'import tensorflow as tf\n'), ((1677, 1722), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {'name': "(name + '/add%d' % i)"}), "(name=name + '/add%d' % i)\n", (1696, 1722), True, 'import tensorflow as tf\n'), ((2425, 2492), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', (['channel_axis'], {'name': "(name + '/cat%d' % i)"}), "(channel_axis, name=name + '/cat%d' % i)\n", (2452, 2492), True, 'import tensorflow as tf\n'), ((2930, 3017), 'tensorflow.keras.layers.AvgPool2D', 'tf.keras.layers.AvgPool2D', (['(3)', 'strides', 'padding', 'data_format'], 
{'name': "(name + '/avgpool')"}), "(3, strides, padding, data_format, name=name +\n '/avgpool')\n", (2955, 3017), True, 'import tensorflow as tf\n'), ((3031, 3106), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', (['channel_axis'], {'name': "(name + '/cat%d' % num_convs)"}), "(channel_axis, name=name + '/cat%d' % num_convs)\n", (3058, 3106), True, 'import tensorflow as tf\n'), ((7187, 7246), 'tensorflow.constant', 'tf.constant', (['[0.485, 0.456, 0.406]', 'inp.dtype', '[1, 1, 1, 3]'], {}), '([0.485, 0.456, 0.406], inp.dtype, [1, 1, 1, 3])\n', (7198, 7246), True, 'import tensorflow as tf\n'), ((8954, 9043), 'tensorflow.keras.layers.GlobalAvgPool2D', 'tf.keras.layers.GlobalAvgPool2D', ([], {'data_format': 'self.data_format', 'name': '"""gloabl_avgpool"""'}), "(data_format=self.data_format, name=\n 'gloabl_avgpool')\n", (8985, 9043), True, 'import tensorflow as tf\n'), ((9200, 9260), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.num_classes', 'name': '"""logits"""'}), "(units=self.num_classes, name='logits')\n", (9221, 9260), True, 'import tensorflow as tf\n'), ((2800, 2867), 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', (['((1, 1), (1, 1))'], {'name': "(name + '/pad')"}), "(((1, 1), (1, 1)), name=name + '/pad')\n", (2829, 2867), True, 'import tensorflow as tf\n'), ((7278, 7337), 'tensorflow.constant', 'tf.constant', (['[0.229, 0.224, 0.225]', 'inp.dtype', '[1, 1, 1, 3]'], {}), '([0.229, 0.224, 0.225], inp.dtype, [1, 1, 1, 3])\n', (7289, 7337), True, 'import tensorflow as tf\n'), ((9117, 9174), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self.drop_rate', 'name': '"""drop"""'}), "(rate=self.drop_rate, name='drop')\n", (9140, 9174), True, 'import tensorflow as tf\n')] |
"""OctreeLevelInfo and OctreeLevel classes.
"""
import logging
import math
from typing import Dict, List, Optional
import numpy as np
from ....types import ArrayLike
from .octree_chunk import OctreeChunk, OctreeChunkGeom, OctreeLocation
from .octree_util import OctreeMetadata
LOGGER = logging.getLogger("napari.octree")
class OctreeLevelInfo:
"""Information about one level of the octree.
This should be a NamedTuple.
Parameters
----------
meta : OctreeMetadata
Information about the entire octree.
level_index : int
The index of this level within the whole tree.
"""
def __init__(self, meta: OctreeMetadata, level_index: int):
self.meta = meta
self.level_index = level_index
self.scale = 2 ** self.level_index
base = meta.base_shape
self.image_shape = (
int(base[0] / self.scale),
int(base[1] / self.scale),
)
tile_size = meta.tile_size
scaled_size = tile_size * self.scale
self.rows = math.ceil(base[0] / scaled_size)
self.cols = math.ceil(base[1] / scaled_size)
self.shape_in_tiles = [self.rows, self.cols]
self.num_tiles = self.rows * self.cols
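# Worked example of the geometry above (numbers are illustrative, not from
# napari): with base_shape (10000, 15000), tile_size 256 and level_index 2,
# scale == 4, image_shape == (2500, 3750), each tile spans 1024 base pixels
# per axis, and shape_in_tiles == [ceil(10000 / 1024), ceil(15000 / 1024)]
# == [10, 15], i.e. num_tiles == 150 at this level.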
class OctreeLevel:
"""One level of the octree.
An OctreeLevel is "sparse" in that it only contains a dict of
OctreeChunks for the portion of the octree that is currently being
rendered. So even if the full level contains hundreds of millions of
    chunks, this class only contains a few dozen OctreeChunks.
This was necessary because even having a null reference for every
OctreeChunk in a level would use too much space and be too slow to
construct.
Parameters
----------
slice_id : int
The id of the OctreeSlice we are in.
data : ArrayLike
The data for this level.
meta : OctreeMetadata
The base image shape and other details.
level_index : int
Index of this specific level (0 is full resolution).
Attributes
----------
info : OctreeLevelInfo
Metadata about this level.
_tiles : Dict[tuple, OctreeChunk]
Maps (row, col) tuple to the OctreeChunk at that location.
"""
def __init__(
self,
slice_id: int,
data: ArrayLike,
meta: OctreeMetadata,
level_index: int,
):
self.slice_id = slice_id
self.data = data
self.info = OctreeLevelInfo(meta, level_index)
self._tiles: Dict[tuple, OctreeChunk] = {}
def get_chunk(
self, row: int, col: int, create=False
) -> Optional[OctreeChunk]:
"""Return the OctreeChunk at this location if it exists.
If create is True, an OctreeChunk will be created if one
does not exist at this location.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
create : bool
If True, create the OctreeChunk if it does not exist.
Return
------
Optional[OctreeChunk]
The OctreeChunk if one existed or we just created it.
"""
try:
return self._tiles[(row, col)]
except KeyError:
if not create:
return None # It didn't exist so we're done.
rows, cols = self.info.shape_in_tiles
if row < 0 or row >= rows or col < 0 or col >= cols:
# The coordinates are not in the level. Not an exception because
# callers might be trying to get children just over the edge
# for non-power-of-two base images.
return None
# Create a chunk at this location and return it.
octree_chunk = self._create_chunk(row, col)
self._tiles[(row, col)] = octree_chunk
return octree_chunk
def _create_chunk(self, row: int, col: int) -> OctreeChunk:
"""Create a new OctreeChunk for this location in the level.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
Return
------
OctreeChunk
The newly created chunk.
"""
level_index = self.info.level_index
meta = self.info.meta
layer_ref = meta.layer_ref
location = OctreeLocation(
layer_ref, self.slice_id, level_index, row, col
)
scale = self.info.scale
tile_size = self.info.meta.tile_size
scaled_size = tile_size * scale
pos = np.array(
[col * scaled_size, row * scaled_size], dtype=np.float32
)
data = self._get_data(row, col)
# Create OctreeChunkGeom used by the visual for rendering this
# chunk. Size it based on the base image pixels, not based on the
# data in this level, so it's exact.
base = np.array(meta.base_shape[::-1], dtype=np.float)
remain = base - pos
size = np.minimum(remain, [scaled_size, scaled_size])
geom = OctreeChunkGeom(pos, size)
# Return the newly created chunk.
return OctreeChunk(data, location, geom)
def _get_data(self, row: int, col: int) -> ArrayLike:
"""Get the chunk's data at this location.
Parameters
----------
row : int
The row coordinate.
col : int
The column coordinate.
Return
------
ArrayLike
The data at this location.
"""
tile_size = self.info.meta.tile_size
array_slice = (
slice(row * tile_size, (row + 1) * tile_size),
slice(col * tile_size, (col + 1) * tile_size),
)
if self.data.ndim == 3:
array_slice += (slice(None),) # Add the colors.
return self.data[array_slice]
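# Small runnable sketch (names hypothetical, not napari API) of the slicing
# math in OctreeLevel._get_data above: tile (row, col) selects a
# tile_size-sized window of the level array, and border tiles are clipped
# implicitly by ordinary numpy slicing when the level shape is not an exact
# tile multiple.
def _tile_slice_demo():
    tile_size = 4
    level = np.arange(10 * 6).reshape(10, 6)  # a 10x6 "level", 3x2 tiles
    row, col = 2, 1
    tile = level[row * tile_size:(row + 1) * tile_size,
                 col * tile_size:(col + 1) * tile_size]
    return tile.shape  # (2, 2): the bottom-right border tile is partial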
def log_levels(levels: List[OctreeLevel], start_level: int = 0) -> None:
"""Log the dimensions of each level nicely.
We take start_level so we can log the "extra" levels we created but
with their correct level numbers.
Parameters
----------
levels : List[OctreeLevel]
Print information about these levels.
start_level : int
Start the indexing at this number, shift the indexes up.
"""
from ...._vendor.experimental.humanize.src.humanize import intword
def _dim_str(dim: tuple) -> None:
return f"({dim[0]}, {dim[1]}) = {intword(dim[0] * dim[1])}"
for index, level in enumerate(levels):
level_index = start_level + index
image_str = _dim_str(level.info.image_shape)
tiles_str = _dim_str(level.info.shape_in_tiles)
LOGGER.info(
"Level %d: %s pixels -> %s tiles",
level_index,
image_str,
tiles_str,
)
| [
"logging.getLogger",
"numpy.array",
"math.ceil",
"numpy.minimum"
] | [((289, 323), 'logging.getLogger', 'logging.getLogger', (['"""napari.octree"""'], {}), "('napari.octree')\n", (306, 323), False, 'import logging\n'), ((1044, 1076), 'math.ceil', 'math.ceil', (['(base[0] / scaled_size)'], {}), '(base[0] / scaled_size)\n', (1053, 1076), False, 'import math\n'), ((1097, 1129), 'math.ceil', 'math.ceil', (['(base[1] / scaled_size)'], {}), '(base[1] / scaled_size)\n', (1106, 1129), False, 'import math\n'), ((4595, 4661), 'numpy.array', 'np.array', (['[col * scaled_size, row * scaled_size]'], {'dtype': 'np.float32'}), '([col * scaled_size, row * scaled_size], dtype=np.float32)\n', (4603, 4661), True, 'import numpy as np\n'), ((4931, 4978), 'numpy.array', 'np.array', (['meta.base_shape[::-1]'], {'dtype': 'np.float'}), '(meta.base_shape[::-1], dtype=np.float)\n', (4939, 4978), True, 'import numpy as np\n'), ((5022, 5068), 'numpy.minimum', 'np.minimum', (['remain', '[scaled_size, scaled_size]'], {}), '(remain, [scaled_size, scaled_size])\n', (5032, 5068), True, 'import numpy as np\n')] |
# ---------------------------------------------------------
# Tensorflow MPC-GAN Implementation
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# ---------------------------------------------------------
import os
import time
import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
from PIL import Image
from dataset import Dataset
# noinspection PyPep8Naming
import TensorFlow_utils as tf_utils
import utils as utils
from model import MPCGAN
class Solver(object):
    def __init__(self, flags):  # X_data, gt, mask
tf.reset_default_graph()
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=run_config)
self.flags = flags
# self.dataset = Dataset(X_data, gt, mask, self.flags)
# self.channels = X_data.shape[3]
#
# self.model = MPCGAN(self.sess, self.flags, self.dataset.image_size,self.channels)
#
# self.best_auc_sum = 0.
# self._make_folders()
#
# self.saver = tf.train.Saver()
# self.sess.run(tf.global_variables_initializer())
#
# tf_utils.show_all_variables()
def _make_folders(self):
self.model_out_dir = "{}/roc_pr_model_{}_{}".format(self.flags.dataset, self.flags.train_interval, self.flags.batch_size)
if not os.path.isdir(self.model_out_dir):
os.makedirs(self.model_out_dir)
if self.flags.is_test:
self.img_out_dir = "{}/seg_result_{}_{}".format(self.flags.dataset,
self.flags.train_interval,
self.flags.batch_size)
self.auc_out_dir = "{}/auc_{}_{}".format(self.flags.dataset, self.flags.train_interval, self.flags.batch_size)
if not os.path.isdir(self.img_out_dir):
os.makedirs(self.img_out_dir)
if not os.path.isdir(self.auc_out_dir):
os.makedirs(self.auc_out_dir)
elif not self.flags.is_test:
self.sample_out_dir = "{}/sample_{}_{}".format(self.flags.dataset, self.flags.train_interval, self.flags.batch_size)
if not os.path.isdir(self.sample_out_dir):
os.makedirs(self.sample_out_dir)
    def train(self, X_data, gt, mask):
# run_config = tf.ConfigProto()
# run_config.gpu_options.allow_growth = True
# self.sess = tf.Session(config=run_config)
self.dataset = Dataset(X_data, gt, mask, self.flags)
self.channels = X_data.shape[3]
self.model = MPCGAN(self.sess, self.flags, self.dataset.image_size,self.channels)
self.best_auc_sum = 0.
self._make_folders()
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
tf_utils.show_all_variables()
for iter_time in range(0, self.flags.iters+1, self.flags.train_interval):
self.sample(iter_time) # sampling images and save them
            # train discriminator
for iter_ in range(1, self.flags.train_interval+1):
x_imgs, y_imgs = self.dataset.train_next_batch(batch_size=self.flags.batch_size)
d_loss = self.model.train_dis(x_imgs, y_imgs)
self.print_info(iter_time + iter_, 'd_loss', d_loss)
self.dloss_save_placeholder = d_loss
# train generator
for iter_ in range(1, self.flags.train_interval+1):
x_imgs, y_imgs = self.dataset.train_next_batch(batch_size=self.flags.batch_size)
g_loss = self.model.train_gen(x_imgs, y_imgs)
self.print_info(iter_time + iter_, 'g_loss', g_loss)
self.gloss_save_placeholder = g_loss
            # write loss to tensorboard
self.model.measure_loss(self.gloss_save_placeholder, self.dloss_save_placeholder, iter_time)
            valprob, auc_sum = self.eval(iter_time, phase='train')
if self.best_auc_sum < auc_sum:
self.best_auc_sum = auc_sum
self.save_model(iter_time)
    def test(self, X_data, gt, mask, k=0):
# run_config = tf.ConfigProto()
# run_config.gpu_options.allow_growth = True
# self.sess = tf.Session(config=run_config)
self.dataset = Dataset(X_data, gt, mask, self.flags)
self.channels = X_data.shape[3]
self.model = MPCGAN(self.sess, self.flags, self.dataset.image_size, self.channels)
self.best_auc_sum = 0.
self._make_folders()
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
tf_utils.show_all_variables()
if self.load_model():
print(' [*] Load Success!\n')
global predprob
predprob, auc_sum = self.eval(phase='test')
        else:
            print(' [!] Load Failed!\n')
            predprob = None  # nothing to return when no checkpoint was found
return predprob
def sample(self, iter_time):
if np.mod(iter_time, self.flags.sample_freq) == 0:
idx = np.random.choice(self.dataset.num_val, 2, replace=False)
x_imgs, y_imgs = self.dataset.val_imgs[idx], self.dataset.val_vessels[idx]
samples = self.model.sample_imgs(x_imgs)
# masking
seg_samples = utils.remain_in_mask(samples, self.dataset.val_masks[idx])
# crop to original image shape
x_imgs_ = utils.crop_to_original(x_imgs, self.dataset.ori_shape)
seg_samples_ = utils.crop_to_original(seg_samples, self.dataset.ori_shape)
y_imgs_ = utils.crop_to_original(y_imgs, self.dataset.ori_shape)
# sampling
# self.plot(x_imgs_, seg_samples_, y_imgs_, iter_time, idx=idx, save_file=self.sample_out_dir,
# phase='train')
def plot(self, x_imgs, samples, y_imgs, iter_time, idx=None, save_file=None, phase='train'):
# initialize grid size
cell_size_h, cell_size_w = self.dataset.ori_shape[0] / 100, self.dataset.ori_shape[1] / 100
num_columns, margin = 3, 0.05
width = cell_size_w * num_columns
height = cell_size_h * x_imgs.shape[0]
fig = plt.figure(figsize=(width, height)) # (column, row)
gs = gridspec.GridSpec(x_imgs.shape[0], num_columns) # (row, column)
gs.update(wspace=margin, hspace=margin)
# convert from normalized to original image
x_imgs_norm = np.zeros_like(x_imgs)
std, mean = 0., 0.
for _ in range(x_imgs.shape[0]):
if phase == 'train':
std = self.dataset.val_mean_std[idx[_]]['std']
mean = self.dataset.val_mean_std[idx[_]]['mean']
elif phase == 'test':
std = self.dataset.test_mean_std[idx[_]]['std']
mean = self.dataset.test_mean_std[idx[_]]['mean']
x_imgs_norm[_] = np.expand_dims(x_imgs[_], axis=0) * std + mean
# x_imgs_norm[_] = np.expand_dims(x_imgs[_], axis=0)
x_imgs_norm = x_imgs_norm.astype(np.uint8)
# 1 channel to 3 channels
samples_3 = np.stack((samples, samples, samples), axis=3)
y_imgs_3 = np.stack((y_imgs, y_imgs, y_imgs), axis=3)
imgs = [x_imgs_norm, samples_3, y_imgs_3]
for col_index in range(len(imgs)):
for row_index in range(x_imgs.shape[0]):
ax = plt.subplot(gs[row_index * num_columns + col_index])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(imgs[col_index][row_index].reshape(
self.dataset.ori_shape[0], self.dataset.ori_shape[1], 4), cmap='Greys_r')
if phase == 'train':
plt.savefig(save_file + '/{}_{}.png'.format(str(iter_time), idx[0]), bbox_inches='tight')
plt.close(fig)
else:
# save compared image
# plt.savefig(os.path.join(save_file, 'compared_{}.png'.format(os.path.basename(
# self.dataset.test_img_files[idx[0]])[:-4])), bbox_inches='tight')
# plt.close(fig)
# save vessel alone, vessel should be uint8 type
Image.fromarray(np.squeeze(samples*255).astype(np.uint8)).save(os.path.join(
save_file, '{}.png'.format(os.path.basename(self.dataset.test_img_files[idx[0]][:-4]))))
def print_info(self, iter_time, name, loss):
if np.mod(iter_time, self.flags.print_freq) == 0:
ord_output = collections.OrderedDict([(name, loss), ('dataset', self.flags.dataset),
('discriminator', self.flags.discriminator),
('train_interval', np.float32(self.flags.train_interval)),
('gpu_index', self.flags.gpu_index)])
utils.print_metrics(iter_time, ord_output)
def eval(self, iter_time=0, phase='train'):
        global cropped_vessel
        total_time, auc_sum = 0., 0.
        cropped_vessel = None  # stays None when this call skips evaluation
if np.mod(iter_time, self.flags.eval_freq) == 0:
num_data, imgs, vessels, masks = None, None, None, None
if phase == 'train':
num_data = self.dataset.num_val
imgs = self.dataset.val_imgs
vessels = self.dataset.val_vessels
masks = self.dataset.val_masks
elif phase == 'test':
num_data = self.dataset.num_test
imgs = self.dataset.test_imgs
vessels = self.dataset.test_vessels
masks = self.dataset.test_masks
generated = []
for iter_ in range(num_data):
x_img = imgs[iter_]
x_img = np.expand_dims(x_img, axis=0) # (H, W, C) to (1, H, W, C)
# measure inference time
start_time = time.time()
generated_vessel = self.model.sample_imgs(x_img)
total_time += (time.time() - start_time)
generated.append(np.squeeze(generated_vessel, axis=(0, 3))) # (1, H, W, 1) to (H, W)
generated = np.asarray(generated)
# calculate measurements
auc_sum = self.measure(generated, vessels, masks, num_data, iter_time, phase, total_time)
if phase == 'train':
segmented_vessel = utils.remain_in_mask(generated, masks)
# crop to original image shape
# imgs_ = utils.crop_to_original(imgs, self.dataset.ori_shape)
cropped_vessel = utils.crop_to_original(segmented_vessel, self.dataset.ori_shape)
if phase == 'test':
# save test images
segmented_vessel = utils.remain_in_mask(generated, masks)
# crop to original image shape
# imgs_ = utils.crop_to_original(imgs, self.dataset.ori_shape)
cropped_vessel = utils.crop_to_original(segmented_vessel, self.dataset.ori_shape)
# vessels_ = utils.crop_to_original(vessels, self.dataset.ori_shape)
# for idx in range(num_data):
# self.plot(np.expand_dims(imgs_[idx], axis=0),
# np.expand_dims(cropped_vessel[idx], axis=0),
# np.expand_dims(vessels_[idx], axis=0),
# 'test', idx=[idx], save_file=self.img_out_dir, phase='test')
        return cropped_vessel, auc_sum
def measure(self, generated, vessels, masks, num_data, iter_time, phase, total_time):
# masking
vessels_in_mask, generated_in_mask = utils.pixel_values_in_mask(
vessels, generated, masks)
# averaging processing time
        avg_pt = (total_time / num_data) * 1000  # average processing time
# evaluate Area Under the Curve of ROC and Precision-Recall
auc_roc = utils.AUC_ROC(vessels_in_mask, generated_in_mask)
auc_pr = utils.AUC_PR(vessels_in_mask, generated_in_mask)
# binarize to calculate Dice Coeffient
binarys_in_mask = utils.threshold_by_otsu(generated, masks)
dice_coeff = utils.dice_coefficient_in_train(vessels_in_mask, binarys_in_mask)
acc, sensitivity, specificity = utils.misc_measures(vessels_in_mask, binarys_in_mask)
score = auc_pr + auc_roc + dice_coeff + acc + sensitivity + specificity
# # auc_sum for saving best model in training
# auc_sum = auc_roc + auc_pr
# if self.flags.stage == 2:
# #auc_sum = auc_roc + auc_pr
# auc_sum = auc_roc + auc_pr
# else:
# auc_sum = auc_roc + auc_pr
auc_sum = dice_coeff + acc + auc_pr
# print information
ord_output = collections.OrderedDict([('auc_pr', auc_pr), ('auc_roc', auc_roc),
('dice_coeff', dice_coeff), ('acc', acc),
('sensitivity', sensitivity), ('specificity', specificity),
('score', score), ('auc_sum', auc_sum),
('best_auc_sum', self.best_auc_sum), ('avg_pt', avg_pt)])
utils.print_metrics(iter_time, ord_output)
# write in tensorboard when in train mode only
if phase == 'train':
self.model.measure_assign(
auc_pr, auc_roc, dice_coeff, acc, sensitivity, specificity, score, iter_time)
elif phase == 'test':
# write in npy format for evaluation
utils.save_obj(vessels_in_mask, generated_in_mask,
os.path.join(self.auc_out_dir, "auc_roc.npy"),
os.path.join(self.auc_out_dir, "auc_pr.npy"))
return auc_sum
def save_model(self, iter_time):
self.model.best_auc_sum_assign(self.best_auc_sum)
model_name = "iter_{}_auc_sum_{:.3}".format(iter_time, self.best_auc_sum)
self.saver.save(self.sess, os.path.join(self.model_out_dir, model_name))
print('===================================================')
print(' Model saved! ')
print(' Best auc_sum: {:.3}'.format(self.best_auc_sum))
print('===================================================\n')
def load_model(self):
print(' [*] Reading checkpoint...')
ckpt = tf.train.get_checkpoint_state(self.model_out_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.model_out_dir, ckpt_name))
self.best_auc_sum = self.sess.run(self.model.best_auc_sum)
print('====================================================')
print(' Model loaded! ')
print(' Best auc_sum: {:.3}'.format(self.best_auc_sum))
print('====================================================')
return True
else:
return False
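# Illustrative sketch (not part of the original script) of the per-image
# inference pattern used in Solver.eval above: add a batch axis, time the
# forward pass, then drop the batch and channel axes again.
def _inference_timing_demo():
    def fake_model(x):  # hypothetical stand-in for self.model.sample_imgs
        return np.zeros_like(x)
    img = np.random.rand(640, 640, 1).astype(np.float32)
    x = np.expand_dims(img, axis=0)  # (H, W, C) to (1, H, W, C)
    start_time = time.time()
    generated_vessel = fake_model(x)  # (1, H, W, 1)
    elapsed_ms = (time.time() - start_time) * 1000
    return np.squeeze(generated_vessel, axis=(0, 3)), elapsed_ms  # (H, W)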
| [
"model.MPCGAN",
"utils.threshold_by_otsu",
"utils.dice_coefficient_in_train",
"utils.remain_in_mask",
"numpy.mod",
"utils.AUC_ROC",
"utils.crop_to_original",
"tensorflow.Session",
"numpy.asarray",
"matplotlib.pyplot.close",
"numpy.stack",
"matplotlib.gridspec.GridSpec",
"os.path.isdir",
"t... | [((630, 654), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (652, 654), True, 'import tensorflow as tf\n'), ((676, 692), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (690, 692), True, 'import tensorflow as tf\n'), ((764, 793), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (774, 793), True, 'import tensorflow as tf\n'), ((2603, 2640), 'dataset.Dataset', 'Dataset', (['X_data', 'gt', 'mask', 'self.flags'], {}), '(X_data, gt, mask, self.flags)\n', (2610, 2640), False, 'from dataset import Dataset\n'), ((2703, 2772), 'model.MPCGAN', 'MPCGAN', (['self.sess', 'self.flags', 'self.dataset.image_size', 'self.channels'], {}), '(self.sess, self.flags, self.dataset.image_size, self.channels)\n', (2709, 2772), False, 'from model import MPCGAN\n'), ((2855, 2871), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2869, 2871), True, 'import tensorflow as tf\n'), ((2938, 2967), 'TensorFlow_utils.show_all_variables', 'tf_utils.show_all_variables', ([], {}), '()\n', (2965, 2967), True, 'import TensorFlow_utils as tf_utils\n'), ((4445, 4482), 'dataset.Dataset', 'Dataset', (['X_data', 'gt', 'mask', 'self.flags'], {}), '(X_data, gt, mask, self.flags)\n', (4452, 4482), False, 'from dataset import Dataset\n'), ((4545, 4614), 'model.MPCGAN', 'MPCGAN', (['self.sess', 'self.flags', 'self.dataset.image_size', 'self.channels'], {}), '(self.sess, self.flags, self.dataset.image_size, self.channels)\n', (4551, 4614), False, 'from model import MPCGAN\n'), ((4698, 4714), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4712, 4714), True, 'import tensorflow as tf\n'), ((4781, 4810), 'TensorFlow_utils.show_all_variables', 'tf_utils.show_all_variables', ([], {}), '()\n', (4808, 4810), True, 'import TensorFlow_utils as tf_utils\n'), ((6300, 6335), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (6310, 6335), True, 'import matplotlib.pyplot as plt\n'), ((6366, 6413), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['x_imgs.shape[0]', 'num_columns'], {}), '(x_imgs.shape[0], num_columns)\n', (6383, 6413), True, 'import matplotlib.gridspec as gridspec\n'), ((6554, 6575), 'numpy.zeros_like', 'np.zeros_like', (['x_imgs'], {}), '(x_imgs)\n', (6567, 6575), True, 'import numpy as np\n'), ((7217, 7262), 'numpy.stack', 'np.stack', (['(samples, samples, samples)'], {'axis': '(3)'}), '((samples, samples, samples), axis=3)\n', (7225, 7262), True, 'import numpy as np\n'), ((7282, 7324), 'numpy.stack', 'np.stack', (['(y_imgs, y_imgs, y_imgs)'], {'axis': '(3)'}), '((y_imgs, y_imgs, y_imgs), axis=3)\n', (7290, 7324), True, 'import numpy as np\n'), ((11789, 11842), 'utils.pixel_values_in_mask', 'utils.pixel_values_in_mask', (['vessels', 'generated', 'masks'], {}), '(vessels, generated, masks)\n', (11815, 11842), True, 'import utils as utils\n'), ((12055, 12104), 'utils.AUC_ROC', 'utils.AUC_ROC', (['vessels_in_mask', 'generated_in_mask'], {}), '(vessels_in_mask, generated_in_mask)\n', (12068, 12104), True, 'import utils as utils\n'), ((12122, 12170), 'utils.AUC_PR', 'utils.AUC_PR', (['vessels_in_mask', 'generated_in_mask'], {}), '(vessels_in_mask, generated_in_mask)\n', (12134, 12170), True, 'import utils as utils\n'), ((12245, 12286), 'utils.threshold_by_otsu', 'utils.threshold_by_otsu', (['generated', 'masks'], {}), '(generated, masks)\n', (12268, 12286), True, 'import utils as utils\n'), ((12308, 12373), 'utils.dice_coefficient_in_train', 
'utils.dice_coefficient_in_train', (['vessels_in_mask', 'binarys_in_mask'], {}), '(vessels_in_mask, binarys_in_mask)\n', (12339, 12373), True, 'import utils as utils\n'), ((12414, 12467), 'utils.misc_measures', 'utils.misc_measures', (['vessels_in_mask', 'binarys_in_mask'], {}), '(vessels_in_mask, binarys_in_mask)\n', (12433, 12467), True, 'import utils as utils\n'), ((12911, 13191), 'collections.OrderedDict', 'collections.OrderedDict', (["[('auc_pr', auc_pr), ('auc_roc', auc_roc), ('dice_coeff', dice_coeff), (\n 'acc', acc), ('sensitivity', sensitivity), ('specificity', specificity),\n ('score', score), ('auc_sum', auc_sum), ('best_auc_sum', self.\n best_auc_sum), ('avg_pt', avg_pt)]"], {}), "([('auc_pr', auc_pr), ('auc_roc', auc_roc), (\n 'dice_coeff', dice_coeff), ('acc', acc), ('sensitivity', sensitivity),\n ('specificity', specificity), ('score', score), ('auc_sum', auc_sum), (\n 'best_auc_sum', self.best_auc_sum), ('avg_pt', avg_pt)])\n", (12934, 13191), False, 'import collections\n'), ((13370, 13412), 'utils.print_metrics', 'utils.print_metrics', (['iter_time', 'ord_output'], {}), '(iter_time, ord_output)\n', (13389, 13412), True, 'import utils as utils\n'), ((14565, 14614), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['self.model_out_dir'], {}), '(self.model_out_dir)\n', (14594, 14614), True, 'import tensorflow as tf\n'), ((1438, 1471), 'os.path.isdir', 'os.path.isdir', (['self.model_out_dir'], {}), '(self.model_out_dir)\n', (1451, 1471), False, 'import os\n'), ((1485, 1516), 'os.makedirs', 'os.makedirs', (['self.model_out_dir'], {}), '(self.model_out_dir)\n', (1496, 1516), False, 'import os\n'), ((2894, 2927), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2925, 2927), True, 'import tensorflow as tf\n'), ((4737, 4770), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4768, 4770), True, 'import tensorflow as tf\n'), ((5104, 5145), 'numpy.mod', 'np.mod', (['iter_time', 'self.flags.sample_freq'], {}), '(iter_time, self.flags.sample_freq)\n', (5110, 5145), True, 'import numpy as np\n'), ((5170, 5226), 'numpy.random.choice', 'np.random.choice', (['self.dataset.num_val', '(2)'], {'replace': '(False)'}), '(self.dataset.num_val, 2, replace=False)\n', (5186, 5226), True, 'import numpy as np\n'), ((5416, 5474), 'utils.remain_in_mask', 'utils.remain_in_mask', (['samples', 'self.dataset.val_masks[idx]'], {}), '(samples, self.dataset.val_masks[idx])\n', (5436, 5474), True, 'import utils as utils\n'), ((5541, 5595), 'utils.crop_to_original', 'utils.crop_to_original', (['x_imgs', 'self.dataset.ori_shape'], {}), '(x_imgs, self.dataset.ori_shape)\n', (5563, 5595), True, 'import utils as utils\n'), ((5623, 5682), 'utils.crop_to_original', 'utils.crop_to_original', (['seg_samples', 'self.dataset.ori_shape'], {}), '(seg_samples, self.dataset.ori_shape)\n', (5645, 5682), True, 'import utils as utils\n'), ((5705, 5759), 'utils.crop_to_original', 'utils.crop_to_original', (['y_imgs', 'self.dataset.ori_shape'], {}), '(y_imgs, self.dataset.ori_shape)\n', (5727, 5759), True, 'import utils as utils\n'), ((7996, 8010), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8005, 8010), True, 'import matplotlib.pyplot as plt\n'), ((8582, 8622), 'numpy.mod', 'np.mod', (['iter_time', 'self.flags.print_freq'], {}), '(iter_time, self.flags.print_freq)\n', (8588, 8622), True, 'import numpy as np\n'), ((9030, 9072), 'utils.print_metrics', 'utils.print_metrics', (['iter_time', 
'ord_output'], {}), '(iter_time, ord_output)\n', (9049, 9072), True, 'import utils as utils\n'), ((9170, 9209), 'numpy.mod', 'np.mod', (['iter_time', 'self.flags.eval_freq'], {}), '(iter_time, self.flags.eval_freq)\n', (9176, 9209), True, 'import numpy as np\n'), ((10259, 10280), 'numpy.asarray', 'np.asarray', (['generated'], {}), '(generated)\n', (10269, 10280), True, 'import numpy as np\n'), ((14158, 14202), 'os.path.join', 'os.path.join', (['self.model_out_dir', 'model_name'], {}), '(self.model_out_dir, model_name)\n', (14170, 14202), False, 'import os\n'), ((14687, 14731), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (14703, 14731), False, 'import os\n'), ((1948, 1979), 'os.path.isdir', 'os.path.isdir', (['self.img_out_dir'], {}), '(self.img_out_dir)\n', (1961, 1979), False, 'import os\n'), ((1997, 2026), 'os.makedirs', 'os.makedirs', (['self.img_out_dir'], {}), '(self.img_out_dir)\n', (2008, 2026), False, 'import os\n'), ((2046, 2077), 'os.path.isdir', 'os.path.isdir', (['self.auc_out_dir'], {}), '(self.auc_out_dir)\n', (2059, 2077), False, 'import os\n'), ((2095, 2124), 'os.makedirs', 'os.makedirs', (['self.auc_out_dir'], {}), '(self.auc_out_dir)\n', (2106, 2124), False, 'import os\n'), ((7493, 7545), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[row_index * num_columns + col_index]'], {}), '(gs[row_index * num_columns + col_index])\n', (7504, 7545), True, 'import matplotlib.pyplot as plt\n'), ((7562, 7577), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7570, 7577), True, 'import matplotlib.pyplot as plt\n'), ((9867, 9896), 'numpy.expand_dims', 'np.expand_dims', (['x_img'], {'axis': '(0)'}), '(x_img, axis=0)\n', (9881, 9896), True, 'import numpy as np\n'), ((9997, 10008), 'time.time', 'time.time', ([], {}), '()\n', (10006, 10008), False, 'import time\n'), ((10524, 10562), 'utils.remain_in_mask', 'utils.remain_in_mask', (['generated', 'masks'], {}), '(generated, masks)\n', (10544, 10562), True, 'import utils as utils\n'), ((10723, 10787), 'utils.crop_to_original', 'utils.crop_to_original', (['segmented_vessel', 'self.dataset.ori_shape'], {}), '(segmented_vessel, self.dataset.ori_shape)\n', (10745, 10787), True, 'import utils as utils\n'), ((10891, 10929), 'utils.remain_in_mask', 'utils.remain_in_mask', (['generated', 'masks'], {}), '(generated, masks)\n', (10911, 10929), True, 'import utils as utils\n'), ((11090, 11154), 'utils.crop_to_original', 'utils.crop_to_original', (['segmented_vessel', 'self.dataset.ori_shape'], {}), '(segmented_vessel, self.dataset.ori_shape)\n', (11112, 11154), True, 'import utils as utils\n'), ((14774, 14817), 'os.path.join', 'os.path.join', (['self.model_out_dir', 'ckpt_name'], {}), '(self.model_out_dir, ckpt_name)\n', (14786, 14817), False, 'import os\n'), ((2311, 2345), 'os.path.isdir', 'os.path.isdir', (['self.sample_out_dir'], {}), '(self.sample_out_dir)\n', (2324, 2345), False, 'import os\n'), ((2363, 2395), 'os.makedirs', 'os.makedirs', (['self.sample_out_dir'], {}), '(self.sample_out_dir)\n', (2374, 2395), False, 'import os\n'), ((6998, 7031), 'numpy.expand_dims', 'np.expand_dims', (['x_imgs[_]'], {'axis': '(0)'}), '(x_imgs[_], axis=0)\n', (7012, 7031), True, 'import numpy as np\n'), ((10105, 10116), 'time.time', 'time.time', ([], {}), '()\n', (10114, 10116), False, 'import time\n'), ((10165, 10206), 'numpy.squeeze', 'np.squeeze', (['generated_vessel'], {'axis': '(0, 3)'}), '(generated_vessel, axis=(0, 3))\n', (10175, 10206), True, 'import numpy as 
np\n'), ((13800, 13845), 'os.path.join', 'os.path.join', (['self.auc_out_dir', '"""auc_roc.npy"""'], {}), "(self.auc_out_dir, 'auc_roc.npy')\n", (13812, 13845), False, 'import os\n'), ((13874, 13918), 'os.path.join', 'os.path.join', (['self.auc_out_dir', '"""auc_pr.npy"""'], {}), "(self.auc_out_dir, 'auc_pr.npy')\n", (13886, 13918), False, 'import os\n'), ((8459, 8517), 'os.path.basename', 'os.path.basename', (['self.dataset.test_img_files[idx[0]][:-4]'], {}), '(self.dataset.test_img_files[idx[0]][:-4])\n', (8475, 8517), False, 'import os\n'), ((8890, 8927), 'numpy.float32', 'np.float32', (['self.flags.train_interval'], {}), '(self.flags.train_interval)\n', (8900, 8927), True, 'import numpy as np\n'), ((8355, 8380), 'numpy.squeeze', 'np.squeeze', (['(samples * 255)'], {}), '(samples * 255)\n', (8365, 8380), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import json
import regex
import torch
import traceback
import unicodedata
import numpy as np
import pkg_resources
import bidi.algorithm as bd
import shapely.geometry as geom
import torch.nn.functional as F
import torchvision.transforms.functional as tf
from os import path
from functools import partial
from shapely.ops import split, snap
from PIL import Image, ImageDraw
from itertools import groupby
from collections import Counter, defaultdict
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from typing import Dict, List, Tuple, Iterable, Sequence, Callable, Optional, Any, Union, cast
from skimage.draw import polygon
from kraken.lib.xml import parse_alto, parse_page, parse_xml
from kraken.lib.util import is_bitonal
from kraken.lib.codec import PytorchCodec
from kraken.lib.models import TorchSeqRecognizer
from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.lineest import CenterNormalizer, dewarp
from kraken.lib import functional_im_transforms as F_t
__all__ = ['BaselineSet', 'PolygonGTDataset', 'GroundTruthDataset', 'compute_error', 'generate_input_transforms', 'preparse_xml_data']
import logging
logger = logging.getLogger(__name__)
def generate_input_transforms(batch: int, height: int, width: int, channels: int, pad: int, valid_norm: bool = True, force_binarization=False) -> transforms.Compose:
"""
Generates a torchvision transformation converting a PIL.Image into a
tensor usable in a network forward pass.
Args:
batch (int): mini-batch size
height (int): height of input image in pixels
width (int): width of input image in pixels
channels (int): color channels of input
pad (int): Amount of padding on horizontal ends of image
valid_norm (bool): Enables/disables baseline normalization as a valid
preprocessing step. If disabled we will fall back to
standard scaling.
force_binarization (bool): Forces binarization of input images using
the nlbin algorithm.
Returns:
A torchvision transformation composition converting the input image to
the appropriate tensor.
"""
scale = (height, width) # type: Tuple[int, int]
center_norm = False
mode = 'RGB' if channels == 3 else 'L'
if height == 1 and width == 0 and channels > 3:
perm = (1, 0, 2)
scale = (channels, 0)
if valid_norm:
center_norm = True
mode = 'L'
elif height > 1 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
if valid_norm and channels == 1:
center_norm = True
elif height == 0 and width > 1 and channels in (1, 3):
perm = (0, 1, 2)
# fixed height and width image => bicubic scaling of the input image, disable padding
elif height > 0 and width > 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
elif height == 0 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
else:
raise KrakenInputException('Invalid input spec {}, {}, {}, {}, {}'.format(batch,
height,
width,
channels,
pad))
if mode != 'L' and force_binarization:
raise KrakenInputException('Invalid input spec {}, {}, {}, {} in'
' combination with forced binarization.'.format(batch,
height,
width,
channels,
pad))
out_transforms = []
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
if force_binarization:
out_transforms.append(transforms.Lambda(F_t.pil_to_bin))
# dummy transforms to ensure we can determine color mode of input material
# from first two transforms. It's stupid but it works.
out_transforms.append(transforms.Lambda(F_t.dummy))
if scale != (0, 0):
if center_norm:
lnorm = CenterNormalizer(scale[0])
out_transforms.append(transforms.Lambda(partial(F_t.pil_dewarp, lnorm=lnorm)))
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
else:
out_transforms.append(transforms.Lambda(partial(F_t.pil_fixed_resize, scale=scale)))
if pad:
out_transforms.append(transforms.Pad((pad, 0), fill=255))
out_transforms.append(transforms.ToTensor())
# invert
out_transforms.append(transforms.Lambda(F_t.tensor_invert))
out_transforms.append(transforms.Lambda(partial(F_t.tensor_permute, perm=perm)))
return transforms.Compose(out_transforms)
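# Usage sketch for generate_input_transforms: build the pipeline for a
# 48px-high grayscale recognition input and run a synthetic blank line
# image through it. valid_norm=False selects the plain resize path so the
# demo does not depend on dewarpable line content.
def _input_transform_demo():
    ts = generate_input_transforms(batch=1, height=48, width=0, channels=1,
                                  pad=16, valid_norm=False)
    im = Image.new('L', (200, 30), color=255)  # synthetic white line image
    return ts(im).shape  # (1, 48, W'): scaled, padded and inverted tensor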
def _fast_levenshtein(seq1: Sequence[Any], seq2: Sequence[Any]) -> int:
oneago = None
thisrow = list(range(1, len(seq2) + 1)) + [0]
rows = [thisrow]
for x in range(len(seq1)):
oneago, thisrow = thisrow, [0] * len(seq2) + [x + 1]
for y in range(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
rows.append(thisrow)
return thisrow[len(seq2) - 1]
def global_align(seq1: Sequence[Any], seq2: Sequence[Any]) -> Tuple[int, List[str], List[str]]:
"""
Computes a global alignment of two strings.
Args:
seq1 (Sequence[Any]):
seq2 (Sequence[Any]):
Returns a tuple (distance, list(algn1), list(algn2))
"""
# calculate cost and direction matrix
cost = [[0] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
for i in range(1, len(cost)):
cost[i][0] = i
for i in range(1, len(cost[0])):
cost[0][i] = i
direction = [[(0, 0)] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
direction[0] = [(0, x) for x in range(-1, len(seq2))]
for i in range(-1, len(direction) - 1):
direction[i + 1][0] = (i, 0)
for i in range(1, len(cost)):
for j in range(1, len(cost[0])):
delcost = ((i - 1, j), cost[i - 1][j] + 1)
addcost = ((i, j - 1), cost[i][j - 1] + 1)
subcost = ((i - 1, j - 1), cost[i - 1][j - 1] + (seq1[i - 1] != seq2[j - 1]))
best = min(delcost, addcost, subcost, key=lambda x: x[1])
cost[i][j] = best[1]
direction[i][j] = best[0]
d = cost[-1][-1]
# backtrace
algn1: List[Any] = []
algn2: List[Any] = []
i = len(direction) - 1
j = len(direction[0]) - 1
while direction[i][j] != (-1, 0):
k, l = direction[i][j]
if k == i - 1 and l == j - 1:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, seq2[j - 1])
elif k < i:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, '')
elif l < j:
algn1.insert(0, '')
algn2.insert(0, seq2[j - 1])
i, j = k, l
return d, algn1, algn2
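# Tiny worked example for the two edit-distance helpers above: turning
# 'kitten' into 'sitting' takes two substitutions and one insertion, so the
# distance is 3, and the alignment pads the shorter side with ''.
def _align_demo():
    assert _fast_levenshtein('kitten', 'sitting') == 3
    d, algn1, algn2 = global_align('kitten', 'sitting')
    # e.g. algn1 == ['k', 'i', 't', 't', 'e', 'n', ''] and
    #      algn2 == ['s', 'i', 't', 't', 'i', 'n', 'g']; the exact padding
    #      position depends on the backtrace tie-breaking.
    return d, algn1, algn2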
def compute_confusions(algn1: Sequence[str], algn2: Sequence[str]):
"""
Compute confusion matrices from two globally aligned strings.
Args:
align1 (Sequence[str]): sequence 1
align2 (Sequence[str]): sequence 2
Returns:
A tuple (counts, scripts, ins, dels, subs) with `counts` being per-character
confusions, `scripts` per-script counts, `ins` a dict with per script
insertions, `del` an integer of the number of deletions, `subs` per
script substitutions.
"""
counts: Dict[Tuple[str, str], int] = Counter()
with pkg_resources.resource_stream(__name__, 'scripts.json') as fp:
script_map = json.load(fp)
def _get_script(c):
for s, e, n in script_map:
if ord(c) == s or (e and s <= ord(c) <= e):
return n
return 'Unknown'
scripts: Dict[Tuple[str, str], int] = Counter()
ins: Dict[Tuple[str, str], int] = Counter()
dels: int = 0
subs: Dict[Tuple[str, str], int] = Counter()
    for u, v in zip(algn1, algn2):
counts[(u, v)] += 1
for k, v in counts.items():
if k[0] == '':
dels += v
else:
script = _get_script(k[0])
scripts[script] += v
if k[1] == '':
ins[script] += v
elif k[0] != k[1]:
subs[script] += v
return counts, scripts, ins, dels, subs
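# Worked example (illustrative): a single 'o' -> '0' substitution shows up
# at every level of the confusion report; script names are resolved through
# the bundled scripts.json.
def _confusion_demo():
    _, algn1, algn2 = global_align('fox', 'f0x')
    counts, scripts, ins, dels, subs = compute_confusions(algn1, algn2)
    # counts[('o', '0')] == 1, dels == 0, and subs books the substitution
    # under the script of 'o' (Latin).
    return counts, scripts, ins, dels, subs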
def compute_error(model: TorchSeqRecognizer, validation_set: Iterable[Dict[str, torch.Tensor]]) -> Tuple[int, int]:
"""
Computes error report from a model and a list of line image-text pairs.
Args:
model (kraken.lib.models.TorchSeqRecognizer): Model used for recognition
validation_set (list): List of tuples (image, text) for validation
Returns:
A tuple with total number of characters and edit distance across the
whole validation set.
"""
total_chars = 0
error = 0
for batch in validation_set:
preds = model.predict_string(batch['image'], batch['seq_lens'])
total_chars += batch['target_lens'].sum()
for pred, text in zip(preds, batch['target']):
error += _fast_levenshtein(pred, text)
return total_chars, error
def preparse_xml_data(filenames, format_type='xml', repolygonize=False):
"""
Loads training data from a set of xml files.
Extracts line information from Page/ALTO xml files for training of
recognition models.
Args:
filenames (list): List of XML files.
format_type (str): Either `page`, `alto` or `xml` for
autodetermination.
repolygonize (bool): (Re-)calculates polygon information using the
kraken algorithm.
Returns:
A list of dicts {'text': text, 'baseline': [[x0, y0], ...], 'boundary':
        [[x0, y0], ...], 'image': path to the source image}.
"""
training_pairs = []
if format_type == 'xml':
parse_fn = parse_xml
elif format_type == 'alto':
parse_fn = parse_alto
elif format_type == 'page':
parse_fn = parse_page
else:
raise Exception(f'invalid format {format_type} for preparse_xml_data')
for fn in filenames:
try:
data = parse_fn(fn)
except KrakenInputException as e:
logger.warning(e)
continue
try:
with open(data['image'], 'rb') as fp:
Image.open(fp)
except FileNotFoundError as e:
logger.warning(f'Could not open file {e.filename} in {fn}')
continue
if repolygonize:
logger.info('repolygonizing {} lines in {}'.format(len(data['lines']), data['image']))
data['lines'] = _repolygonize(data['image'], data['lines'])
for line in data['lines']:
training_pairs.append({'image': data['image'], **line})
return training_pairs
def _repolygonize(im: Image.Image, lines):
"""
Helper function taking an output of the lib.xml parse_* functions and
recalculating the contained polygonization.
Args:
im (Image.Image): Input image
lines (list): List of dicts [{'boundary': [[x0, y0], ...], 'baseline': [[x0, y0], ...], 'text': 'abcvsd'}, {...]
Returns:
A data structure `lines` with a changed polygonization.
"""
im = Image.open(im).convert('L')
polygons = calculate_polygonal_environment(im, [x['baseline'] for x in lines])
return [{'boundary': polygon, 'baseline': orig['baseline'], 'text': orig['text'], 'script': orig['script']} for orig, polygon in zip(lines, polygons)]
def collate_sequences(batch):
"""
Sorts and pads sequences.
"""
sorted_batch = sorted(batch, key=lambda x: x['image'].shape[2], reverse=True)
seqs = [x['image'] for x in sorted_batch]
seq_lens = torch.LongTensor([seq.shape[2] for seq in seqs])
max_len = seqs[0].shape[2]
seqs = torch.stack([F.pad(seq, pad=(0, max_len-seq.shape[2])) for seq in seqs])
if isinstance(sorted_batch[0]['target'], str):
labels = [x['target'] for x in sorted_batch]
else:
labels = torch.cat([x['target'] for x in sorted_batch]).long()
label_lens = torch.LongTensor([len(x['target']) for x in sorted_batch])
return {'image': seqs, 'target': labels, 'seq_lens': seq_lens, 'target_lens': label_lens}
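# Illustrative batch for collate_sequences: two line tensors of different
# widths are sorted widest-first, right-padded and stacked; the shapes in
# the comments follow directly from the implementation above.
def _collate_demo():
    batch = [{'image': torch.zeros(1, 48, 120), 'target': torch.tensor([3, 7, 7])},
             {'image': torch.zeros(1, 48, 200), 'target': torch.tensor([5])}]
    out = collate_sequences(batch)
    # out['image'].shape == (2, 1, 48, 200), out['seq_lens'] == [200, 120],
    # out['target'] == [5, 3, 7, 7] and out['target_lens'] == [1, 3]
    return out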
class InfiniteDataLoader(DataLoader):
"""
Version of DataLoader that auto-reinitializes the iterator once it is
exhausted.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iter = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
sample = next(self.dataset_iter)
except StopIteration:
self.dataset_iter = super().__iter__()
sample = next(self.dataset_iter)
return sample
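# Usage note (names hypothetical): wrapping a dataset in InfiniteDataLoader
# lets a fixed-iteration training loop draw batches forever without epoch
# bookkeeping:
#     dl = InfiniteDataLoader(ds, batch_size=8, collate_fn=collate_sequences)
#     for _ in range(n_iterations):
#         batch = next(dl)  # silently restarts after each pass over `ds`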
class PolygonGTDataset(Dataset):
"""
Dataset for training a line recognition model from polygonal/baseline data.
"""
def __init__(self,
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.transforms = im_transforms
self.preload = preload
self.aug = None
self.seg_type = 'baselines'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=3, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs):
"""
Adds a line to the dataset.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
else:
self._images.append((kwargs['image'], kwargs['baseline'], kwargs['boundary']))
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], text: str, baseline: List[Tuple[int, int]], boundary: List[Tuple[int, int]], *args, **kwargs):
"""
Parses a sample for the dataset and returns it.
        This function is mainly used for parallelized loading of training data.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
for func in self.text_transforms:
text = func(text)
if not text:
raise KrakenInputException('Text line is empty after transformations')
if not baseline:
raise KrakenInputException('No baseline given for line')
if not boundary:
raise KrakenInputException('No boundary given for line')
if self.preload:
            im = image
            if not isinstance(im, Image.Image):
                im = Image.open(im)
try:
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': baseline, 'boundary': boundary}]}))
except IndexError:
raise KrakenInputException('Patch extraction failed for baseline')
try:
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
return {'text': text, 'image': im, 'baseline': baseline, 'boundary': boundary, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'text': text, 'image': image, 'baseline': baseline, 'boundary': boundary, 'preload': False, 'preparse': True}
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
x = x.permute((1, 2, 0)).numpy()
o = self.aug(image=x)
x = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0][0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': item[0][1], 'boundary': item[0][2]}]}))
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
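# Typical population pattern for PolygonGTDataset (paths hypothetical,
# sketch only): lines preparsed from XML are added one by one, then the
# observed alphabet is frozen into a codec before sampling:
#     ds = PolygonGTDataset(normalization='NFD', preload=False)
#     for line in preparse_xml_data(['page_0001.xml']):
#         ds.add(**line)
#     ds.encode()     # builds a PytorchCodec over the observed alphabet
#     sample = ds[0]  # {'image': ..., 'target': encoded label tensor}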
class GroundTruthDataset(Dataset):
"""
Dataset for training a line recognition model.
All data is cached in memory.
"""
def __init__(self, split: Callable[[str], str] = F_t.default_split,
suffix: str = '.gt.txt',
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
"""
Reads a list of image-text pairs and creates a ground truth set.
Args:
split (func): Function for generating the base name without
extensions from paths
suffix (str): Suffix to attach to image base name for text
retrieval
normalization (str): Unicode normalization for gt
whitespace_normalization (str): Normalizes unicode whitespace and
strips whitespace.
reorder (bool): Whether to rearrange code points in "display"/LTR
order
im_transforms (func): Function taking an PIL.Image and returning a
tensor suitable for forward passes.
preload (bool): Enables preloading and preprocessing of image files.
"""
self.suffix = suffix
self.split = partial(F_t.suffix_split, split=split, suffix=suffix)
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.aug = None
self.preload = preload
self.seg_type = 'bbox'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs) -> None:
"""
Adds a line-image-text pair to the dataset.
Args:
image (str): Input image path
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], *args, **kwargs) -> Dict:
"""
Parses a sample for this dataset.
This is mostly used to parallelize populating the dataset.
Args:
image (str): Input image path
"""
with open(self.split(image), 'r', encoding='utf-8') as fp:
gt = fp.read().strip('\n\r')
for func in self.text_transforms:
gt = func(gt)
if not gt:
raise KrakenInputException(f'Text line is empty ({fp.name})')
if self.preload:
try:
im = Image.open(image)
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
return {'image': im, 'text': gt, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'image': image, 'text': gt, 'preload': False, 'preparse': True}
def add_loaded(self, image: Image.Image, gt: str) -> None:
"""
Adds an already loaded line-image-text pair to the dataset.
Args:
image (PIL.Image.Image): Line image
gt (str): Text contained in the line image
"""
if self.preload:
try:
                im = self.head_transforms(image)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
self._images.append(im)
else:
self._images.append(image)
for func in self.text_transforms:
gt = func(gt)
self._gt.append(gt)
self.alphabet.update(gt)
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
im = x.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': y}
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
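# GroundTruthDataset pairs each line image with its transcription through
# the suffix split, e.g. 'lines/0001.png' <-> 'lines/0001.gt.txt'. A
# minimal setup (paths hypothetical):
#     ds = GroundTruthDataset(suffix='.gt.txt')
#     ds.add('lines/0001.png')  # text is read from lines/0001.gt.txt
#     ds.encode()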
class BaselineSet(Dataset):
"""
Dataset for training a baseline/region segmentation model.
"""
def __init__(self, imgs: Sequence[str] = None,
suffix: str = '.path',
line_width: int = 4,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
mode: str = 'path',
augmentation: bool = False,
valid_baselines: Sequence[str] = None,
merge_baselines: Dict[str, Sequence[str]] = None,
valid_regions: Sequence[str] = None,
merge_regions: Dict[str, Sequence[str]] = None):
"""
Reads a list of image-json pairs and creates a data set.
Args:
imgs (list):
suffix (int): Suffix to attach to image base name to load JSON
files from.
line_width (int): Height of the baseline in the scaled input.
target_size (tuple): Target size of the image as a (height, width) tuple.
mode (str): Either path, alto, page, xml, or None. In alto, page,
and xml mode the baseline paths and image data is
retrieved from an ALTO/PageXML file. In `None` mode
data is iteratively added through the `add` method.
augmentation (bool): Enable/disable augmentation.
valid_baselines (list): Sequence of valid baseline identifiers. If
`None` all are valid.
merge_baselines (dict): Sequence of baseline identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
valid_regions (list): Sequence of valid region identifiers. If
`None` all are valid.
merge_regions (dict): Sequence of region identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
"""
super().__init__()
self.mode = mode
self.im_mode = '1'
self.aug = None
self.targets = []
# n-th entry contains semantic of n-th class
self.class_mapping = {'aux': {'_start_separator': 0, '_end_separator': 1}, 'baselines': {}, 'regions': {}}
self.class_stats = {'baselines': defaultdict(int), 'regions': defaultdict(int)}
self.num_classes = 2
self.mbl_dict = merge_baselines if merge_baselines is not None else {}
self.mreg_dict = merge_regions if merge_regions is not None else {}
self.valid_baselines = valid_baselines
self.valid_regions = valid_regions
if mode in ['alto', 'page', 'xml']:
if mode == 'alto':
fn = parse_alto
elif mode == 'page':
fn = parse_page
elif mode == 'xml':
fn = parse_xml
im_paths = []
self.targets = []
for img in imgs:
try:
data = fn(img)
im_paths.append(data['image'])
lines = defaultdict(list)
for line in data['lines']:
if valid_baselines is None or line['script'] in valid_baselines:
lines[self.mbl_dict.get(line['script'], line['script'])].append(line['baseline'])
self.class_stats['baselines'][self.mbl_dict.get(line['script'], line['script'])] += 1
regions = defaultdict(list)
for k, v in data['regions'].items():
if valid_regions is None or k in valid_regions:
regions[self.mreg_dict.get(k, k)].extend(v)
self.class_stats['regions'][self.mreg_dict.get(k, k)] += len(v)
data['regions'] = regions
self.targets.append({'baselines': lines, 'regions': data['regions']})
except KrakenInputException as e:
logger.warning(e)
continue
            imgs = im_paths
            # collect line and region types and derive the class mapping
line_types = set()
region_types = set()
for page in self.targets:
for line_type in page['baselines'].keys():
line_types.add(line_type)
for reg_type in page['regions'].keys():
region_types.add(reg_type)
idx = -1
for idx, line_type in enumerate(line_types):
self.class_mapping['baselines'][line_type] = idx + self.num_classes
self.num_classes += idx + 1
idx = -1
for idx, reg_type in enumerate(region_types):
self.class_mapping['regions'][reg_type] = idx + self.num_classes
self.num_classes += idx + 1
elif mode == 'path':
pass
elif mode is None:
imgs = []
else:
            raise Exception(f'invalid dataset mode {mode}')
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
HueSaturationValue,
)
self.aug = Compose([
ToFloat(),
RandomRotate90(),
Flip(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3),
], p=0.5)
self.imgs = imgs
self.line_width = line_width
        # Split the image transforms in two: the head yields the final PIL
        # image before tensor conversion, the tail performs the conversion.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.seg_type = None
def add(self,
image: Union[str, Image.Image],
            baselines: Sequence[Dict[str, Any]] = None,
regions: Dict[str, List[List[Tuple[int, int]]]] = None,
*args,
**kwargs):
"""
Adds a page to the dataset.
Args:
im (path): Path to the whole page image
baseline (dict): A list containing dicts with a list of coordinates
and script types [{'baseline': [[x0, y0], ...,
[xn, yn]], 'script': 'script_type'}, ...]
regions (dict): A dict containing list of lists of coordinates {'region_type_0': [[x0, y0], ..., [xn, yn]]], 'region_type_1': ...}.
"""
if self.mode:
raise Exception(f'The `add` method is incompatible with dataset mode {self.mode}')
baselines_ = defaultdict(list)
for line in baselines:
line_type = self.mbl_dict.get(line['script'], line['script'])
if self.valid_baselines is None or line['script'] in self.valid_baselines:
baselines_[line_type].append(line['baseline'])
self.class_stats['baselines'][line_type] += 1
if line_type not in self.class_mapping['baselines']:
self.num_classes += 1
self.class_mapping['baselines'][line_type] = self.num_classes - 1
regions_ = defaultdict(list)
for k, v in regions.items():
reg_type = self.mreg_dict.get(k, k)
if self.valid_regions is None or reg_type in self.valid_regions:
regions_[reg_type].extend(v)
                self.class_stats['regions'][reg_type] += len(v)
if reg_type not in self.class_mapping['regions']:
self.num_classes += 1
self.class_mapping['regions'][reg_type] = self.num_classes - 1
self.targets.append({'baselines': baselines_, 'regions': regions_})
self.imgs.append(image)
def __getitem__(self, idx):
im = self.imgs[idx]
if self.mode != 'path':
target = self.targets[idx]
else:
            with open('{}{}'.format(path.splitext(im)[0], self.suffix), 'r') as fp:
target = json.load(fp)
if not isinstance(im, Image.Image):
try:
logger.debug(f'Attempting to load {im}')
im = Image.open(im)
im, target = self.transform(im, target)
return {'image': im, 'target': target}
            except Exception:
                idx = np.random.randint(0, len(self.imgs))
                logger.debug(traceback.format_exc())
                logger.info(f'Failed. Replacing with sample {idx}')
                return self[idx]
im, target = self.transform(im, target)
return {'image': im, 'target': target}
@staticmethod
def _get_ortho_line(lineseg, point, line_width, offset):
lineseg = np.array(lineseg)
        # direction vector of the line segment and its unit form
        norm_vec = lineseg[1,...] - lineseg[0,...]
        norm_vec_len = np.sqrt(np.sum(norm_vec**2))
        unit_vec = norm_vec / norm_vec_len
        # rotate the unit vector by -90/+90 degrees to get both orthogonal directions
        ortho_vec = unit_vec[::-1] * ((1,-1), (-1,1))
        # shift the anchor point along the line by `line_width` before extruding
        if offset == 'l':
            point -= unit_vec * line_width
        else:
            point += unit_vec * line_width
return (ortho_vec * 10 + point).astype('int').tolist()
def transform(self, image, target):
orig_size = image.size
image = self.head_transforms(image)
if not is_bitonal(image):
self.im_mode = image.mode
image = self.tail_transforms(image)
        # scale factor between network input width and original image width
        scale = image.shape[2]/orig_size[0]
t = torch.zeros((self.num_classes,) + image.shape[1:])
start_sep_cls = self.class_mapping['aux']['_start_separator']
end_sep_cls = self.class_mapping['aux']['_end_separator']
for key, lines in target['baselines'].items():
try:
cls_idx = self.class_mapping['baselines'][key]
except KeyError:
# skip lines of classes not present in the training set
continue
for line in lines:
# buffer out line to desired width
line = [k for k, g in groupby(line)]
line = np.array(line)*scale
shp_line = geom.LineString(line)
split_offset = min(5, shp_line.length/2)
                line_pol = np.array(shp_line.buffer(-self.line_width, cap_style=2, single_sided=True).boundary, dtype=int)
rr, cc = polygon(line_pol[:,1], line_pol[:,0], shape=image.shape[1:])
t[cls_idx, rr, cc] = 1
split_pt = shp_line.interpolate(split_offset).buffer(0.001)
                # start separator
                start_sep = np.array((split(shp_line, split_pt)[0].parallel_offset(0.5*self.line_width, side='right').buffer(1.5*self.line_width, cap_style=3).boundary), dtype=int)
rr_s, cc_s = polygon(start_sep[:,1], start_sep[:,0], shape=image.shape[1:])
t[start_sep_cls, rr_s, cc_s] = 1
t[start_sep_cls, rr, cc] = 0
split_pt = shp_line.interpolate(-split_offset).buffer(0.001)
                # end separator
                end_sep = np.array((split(shp_line, split_pt)[-1].parallel_offset(0.5*self.line_width, side='right').buffer(1.5*self.line_width, cap_style=3).boundary), dtype=int)
rr_s, cc_s = polygon(end_sep[:,1], end_sep[:,0], shape=image.shape[1:])
t[end_sep_cls, rr_s, cc_s] = 1
t[end_sep_cls, rr, cc] = 0
for key, regions in target['regions'].items():
try:
cls_idx = self.class_mapping['regions'][key]
except KeyError:
# skip regions of classes not present in the training set
continue
for region in regions:
region = np.array(region)*scale
rr, cc = polygon(region[:,1], region[:,0], shape=image.shape[1:])
t[cls_idx, rr, cc] = 1
target = t
if self.aug:
image = image.permute(1, 2, 0).numpy()
target = target.permute(1, 2, 0).numpy()
o = self.aug(image=image, mask=target)
image = torch.tensor(o['image']).permute(2, 0, 1)
target = torch.tensor(o['mask']).permute(2, 0, 1)
return image, target
def __len__(self):
return len(self.imgs)
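# Hedged usage sketch (added for illustration, not part of kraken): the input
# structures `BaselineSet.add` expects in `None` mode, mirroring its
# docstring. All coordinates and names below are made up.
def _example_baselineset_add(ds):
    baselines = [{'baseline': [[10, 110], [400, 115]], 'script': 'default'}]
    regions = {'text': [[[5, 5], [450, 5], [450, 150], [5, 150]]]}
    ds.add('page_0001.png', baselines=baselines, regions=regions)
    return ds.targets[-1]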
| [
"logging.getLogger",
"kraken.lib.segmentation.extract_polygons",
"shapely.ops.split",
"albumentations.MedianBlur",
"torch.LongTensor",
"albumentations.Blur",
"torchvision.transforms.Lambda",
"albumentations.HueSaturationValue",
"numpy.array",
"torch.nn.functional.pad",
"torchvision.transforms.Pa... | [((1979, 2006), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1996, 2006), False, 'import logging\n'), ((5975, 6009), 'torchvision.transforms.Compose', 'transforms.Compose', (['out_transforms'], {}), '(out_transforms)\n', (5993, 6009), False, 'from torchvision import transforms\n'), ((8842, 8851), 'collections.Counter', 'Counter', ([], {}), '()\n', (8849, 8851), False, 'from collections import Counter, defaultdict\n'), ((9168, 9177), 'collections.Counter', 'Counter', ([], {}), '()\n', (9175, 9177), False, 'from collections import Counter, defaultdict\n'), ((9216, 9225), 'collections.Counter', 'Counter', ([], {}), '()\n', (9223, 9225), False, 'from collections import Counter, defaultdict\n'), ((9283, 9292), 'collections.Counter', 'Counter', ([], {}), '()\n', (9290, 9292), False, 'from collections import Counter, defaultdict\n'), ((12663, 12730), 'kraken.lib.segmentation.calculate_polygonal_environment', 'calculate_polygonal_environment', (['im', "[x['baseline'] for x in lines]"], {}), "(im, [x['baseline'] for x in lines])\n", (12694, 12730), False, 'from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment\n'), ((13107, 13155), 'torch.LongTensor', 'torch.LongTensor', (['[seq.shape[2] for seq in seqs]'], {}), '([seq.shape[2] for seq in seqs])\n', (13123, 13155), False, 'import torch\n'), ((5258, 5286), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['F_t.dummy'], {}), '(F_t.dummy)\n', (5275, 5286), False, 'from torchvision import transforms\n'), ((5779, 5800), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5798, 5800), False, 'from torchvision import transforms\n'), ((5841, 5877), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['F_t.tensor_invert'], {}), '(F_t.tensor_invert)\n', (5858, 5877), False, 'from torchvision import transforms\n'), ((8861, 8916), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', '"""scripts.json"""'], {}), "(__name__, 'scripts.json')\n", (8890, 8916), False, 'import pkg_resources\n'), ((8945, 8958), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (8954, 8958), False, 'import json\n'), ((14544, 14566), 'torchvision.transforms.Compose', 'transforms.Compose', (['[]'], {}), '([])\n', (14562, 14566), False, 'from torchvision import transforms\n'), ((14802, 14811), 'collections.Counter', 'Counter', ([], {}), '()\n', (14809, 14811), False, 'from collections import Counter, defaultdict\n'), ((15088, 15136), 'torchvision.transforms.Compose', 'transforms.Compose', (['im_transforms.transforms[:2]'], {}), '(im_transforms.transforms[:2])\n', (15106, 15136), False, 'from torchvision import transforms\n'), ((15168, 15216), 'torchvision.transforms.Compose', 'transforms.Compose', (['im_transforms.transforms[2:]'], {}), '(im_transforms.transforms[2:])\n', (15186, 15216), False, 'from torchvision import transforms\n'), ((22211, 22233), 'torchvision.transforms.Compose', 'transforms.Compose', (['[]'], {}), '([])\n', (22229, 22233), False, 'from torchvision import transforms\n'), ((23746, 23799), 'functools.partial', 'partial', (['F_t.suffix_split'], {'split': 'split', 'suffix': 'suffix'}), '(F_t.suffix_split, split=split, suffix=suffix)\n', (23753, 23799), False, 'from functools import partial\n'), ((23941, 23950), 'collections.Counter', 'Counter', ([], {}), '()\n', (23948, 23950), False, 'from collections import Counter, defaultdict\n'), ((24227, 24275), 'torchvision.transforms.Compose', 'transforms.Compose', 
(['im_transforms.transforms[:2]'], {}), '(im_transforms.transforms[:2])\n', (24245, 24275), False, 'from torchvision import transforms\n'), ((24307, 24355), 'torchvision.transforms.Compose', 'transforms.Compose', (['im_transforms.transforms[2:]'], {}), '(im_transforms.transforms[2:])\n', (24325, 24355), False, 'from torchvision import transforms\n'), ((30757, 30779), 'torchvision.transforms.Compose', 'transforms.Compose', (['[]'], {}), '([])\n', (30775, 30779), False, 'from torchvision import transforms\n'), ((37130, 37178), 'torchvision.transforms.Compose', 'transforms.Compose', (['im_transforms.transforms[:2]'], {}), '(im_transforms.transforms[:2])\n', (37148, 37178), False, 'from torchvision import transforms\n'), ((37210, 37258), 'torchvision.transforms.Compose', 'transforms.Compose', (['im_transforms.transforms[2:]'], {}), '(im_transforms.transforms[2:])\n', (37228, 37258), False, 'from torchvision import transforms\n'), ((38162, 38179), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (38173, 38179), False, 'from collections import Counter, defaultdict\n'), ((38715, 38732), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (38726, 38732), False, 'from collections import Counter, defaultdict\n'), ((40299, 40316), 'numpy.array', 'np.array', (['lineseg'], {}), '(lineseg)\n', (40307, 40316), True, 'import numpy as np\n'), ((40994, 41044), 'torch.zeros', 'torch.zeros', (['((self.num_classes,) + image.shape[1:])'], {}), '((self.num_classes,) + image.shape[1:])\n', (41005, 41044), False, 'import torch\n'), ((4963, 4998), 'functools.partial', 'partial', (['F_t.pil_to_mode'], {'mode': 'mode'}), '(F_t.pil_to_mode, mode=mode)\n', (4970, 4998), False, 'from functools import partial\n'), ((5059, 5092), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['F_t.pil_to_bin'], {}), '(F_t.pil_to_bin)\n', (5076, 5092), False, 'from torchvision import transforms\n'), ((5356, 5382), 'kraken.lib.lineest.CenterNormalizer', 'CenterNormalizer', (['scale[0]'], {}), '(scale[0])\n', (5372, 5382), False, 'from kraken.lib.lineest import CenterNormalizer, dewarp\n'), ((5717, 5751), 'torchvision.transforms.Pad', 'transforms.Pad', (['(pad, 0)'], {'fill': '(255)'}), '((pad, 0), fill=255)\n', (5731, 5751), False, 'from torchvision import transforms\n'), ((5923, 5961), 'functools.partial', 'partial', (['F_t.tensor_permute'], {'perm': 'perm'}), '(F_t.tensor_permute, perm=perm)\n', (5930, 5961), False, 'from functools import partial\n'), ((12620, 12634), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (12630, 12634), False, 'from PIL import Image, ImageDraw\n'), ((13211, 13254), 'torch.nn.functional.pad', 'F.pad', (['seq'], {'pad': '(0, max_len - seq.shape[2])'}), '(seq, pad=(0, max_len - seq.shape[2]))\n', (13216, 13254), True, 'import torch.nn.functional as F\n'), ((18181, 18245), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['"""Text line is empty after transformations"""'], {}), "('Text line is empty after transformations')\n", (18201, 18245), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((18289, 18339), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['"""No baseline given for line"""'], {}), "('No baseline given for line')\n", (18309, 18339), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((18383, 18433), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['"""No boundary given for line"""'], {}), "('No boundary given for line')\n", (18403, 18433), 
False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((32926, 32942), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (32937, 32942), False, 'from collections import Counter, defaultdict\n'), ((32955, 32971), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (32966, 32971), False, 'from collections import Counter, defaultdict\n'), ((40399, 40420), 'numpy.sum', 'np.sum', (['(norm_vec ** 2)'], {}), '(norm_vec ** 2)\n', (40405, 40420), True, 'import numpy as np\n'), ((40837, 40854), 'kraken.lib.util.is_bitonal', 'is_bitonal', (['image'], {}), '(image)\n', (40847, 40854), False, 'from kraken.lib.util import is_bitonal\n'), ((11708, 11722), 'PIL.Image.open', 'Image.open', (['fp'], {}), '(fp)\n', (11718, 11722), False, 'from PIL import Image, ImageDraw\n'), ((13402, 13448), 'torch.cat', 'torch.cat', (["[x['target'] for x in sorted_batch]"], {}), "([x['target'] for x in sorted_batch])\n", (13411, 13448), False, 'import torch\n'), ((15452, 15508), 'functools.partial', 'partial', (['F_t.text_normalize'], {'normalization': 'normalization'}), '(F_t.text_normalize, normalization=normalization)\n', (15459, 15508), False, 'from functools import partial\n'), ((18531, 18548), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (18541, 18548), False, 'from PIL import Image, ImageDraw\n'), ((24546, 24602), 'functools.partial', 'partial', (['F_t.text_normalize'], {'normalization': 'normalization'}), '(F_t.text_normalize, normalization=normalization)\n', (24553, 24602), False, 'from functools import partial\n'), ((26793, 26848), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['f"""Text line is empty ({fp.name})"""'], {}), "(f'Text line is empty ({fp.name})')\n", (26813, 26848), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((26912, 26929), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (26922, 26929), False, 'from PIL import Image, ImageDraw\n'), ((39551, 39564), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (39560, 39564), False, 'import json\n'), ((39704, 39718), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (39714, 39718), False, 'from PIL import Image, ImageDraw\n'), ((41649, 41670), 'shapely.geometry.LineString', 'geom.LineString', (['line'], {}), '(line)\n', (41664, 41670), True, 'import shapely.geometry as geom\n'), ((41879, 41941), 'skimage.draw.polygon', 'polygon', (['line_pol[:, 1]', 'line_pol[:, 0]'], {'shape': 'image.shape[1:]'}), '(line_pol[:, 1], line_pol[:, 0], shape=image.shape[1:])\n', (41886, 41941), False, 'from skimage.draw import polygon\n'), ((42290, 42354), 'skimage.draw.polygon', 'polygon', (['start_sep[:, 1]', 'start_sep[:, 0]'], {'shape': 'image.shape[1:]'}), '(start_sep[:, 1], start_sep[:, 0], shape=image.shape[1:])\n', (42297, 42354), False, 'from skimage.draw import polygon\n'), ((42758, 42818), 'skimage.draw.polygon', 'polygon', (['end_sep[:, 1]', 'end_sep[:, 0]'], {'shape': 'image.shape[1:]'}), '(end_sep[:, 1], end_sep[:, 0], shape=image.shape[1:])\n', (42765, 42818), False, 'from skimage.draw import polygon\n'), ((43276, 43334), 'skimage.draw.polygon', 'polygon', (['region[:, 1]', 'region[:, 0]'], {'shape': 'image.shape[1:]'}), '(region[:, 1], region[:, 0], shape=image.shape[1:])\n', (43283, 43334), False, 'from skimage.draw import polygon\n'), ((5435, 5471), 'functools.partial', 'partial', (['F_t.pil_dewarp'], {'lnorm': 'lnorm'}), '(F_t.pil_dewarp, lnorm=lnorm)\n', (5442, 5471), False, 'from functools import partial\n'), ((5526, 5561), 
'functools.partial', 'partial', (['F_t.pil_to_mode'], {'mode': 'mode'}), '(F_t.pil_to_mode, mode=mode)\n', (5533, 5561), False, 'from functools import partial\n'), ((5630, 5672), 'functools.partial', 'partial', (['F_t.pil_fixed_resize'], {'scale': 'scale'}), '(F_t.pil_fixed_resize, scale=scale)\n', (5637, 5672), False, 'from functools import partial\n'), ((16031, 16040), 'albumentations.ToFloat', 'ToFloat', ([], {}), '()\n', (16038, 16040), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((16344, 16420), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0.0625)', 'scale_limit': '(0.2)', 'rotate_limit': '(3)', 'p': '(0.2)'}), '(shift_limit=0.0625, scale_limit=0.2, rotate_limit=3, p=0.2)\n', (16360, 16420), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((18595, 18699), 'kraken.lib.segmentation.extract_polygons', 'extract_polygons', (['im', "{'type': 'baselines', 'lines': [{'baseline': baseline, 'boundary': boundary}]}"], {}), "(im, {'type': 'baselines', 'lines': [{'baseline': baseline,\n 'boundary': boundary}]})\n", (18611, 18699), False, 'from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment\n'), ((18750, 18810), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['"""Patch extraction failed for baseline"""'], {}), "('Patch extraction failed for baseline')\n", (18770, 18810), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((18973, 19032), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['f"""Image transforms failed on {image}"""'], {}), "(f'Image transforms failed on {image}')\n", (18993, 19032), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((20808, 20822), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (20818, 20822), False, 'from PIL import Image, ImageDraw\n'), ((20852, 20961), 'kraken.lib.segmentation.extract_polygons', 'extract_polygons', (['im', "{'type': 'baselines', 'lines': [{'baseline': item[0][1], 'boundary': item[0\n ][2]}]}"], {}), "(im, {'type': 'baselines', 'lines': [{'baseline': item[0][1\n ], 'boundary': item[0][2]}]})\n", (20868, 20961), False, 'from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment\n'), ((21027, 21041), 'kraken.lib.util.is_bitonal', 'is_bitonal', (['im'], {}), '(im)\n', (21037, 21041), False, 'from kraken.lib.util import is_bitonal\n'), ((25125, 25134), 'albumentations.ToFloat', 'ToFloat', ([], {}), '()\n', (25132, 25134), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((25438, 25515), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0.0625)', 'scale_limit': '(0.2)', 'rotate_limit': '(45)', 'p': '(0.2)'}), '(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2)\n', (25454, 25515), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), 
((27075, 27134), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['f"""Image transforms failed on {image}"""'], {}), "(f'Image transforms failed on {image}')\n", (27095, 27134), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((27718, 27732), 'kraken.lib.util.is_bitonal', 'is_bitonal', (['im'], {}), '(im)\n', (27728, 27732), False, 'from kraken.lib.util import is_bitonal\n'), ((27876, 27935), 'kraken.lib.exceptions.KrakenInputException', 'KrakenInputException', (['f"""Image transforms failed on {image}"""'], {}), "(f'Image transforms failed on {image}')\n", (27896, 27935), False, 'from kraken.lib.exceptions import KrakenInputException\n'), ((29653, 29667), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (29663, 29667), False, 'from PIL import Image, ImageDraw\n'), ((29737, 29751), 'kraken.lib.util.is_bitonal', 'is_bitonal', (['im'], {}), '(im)\n', (29747, 29751), False, 'from kraken.lib.util import is_bitonal\n'), ((33702, 33719), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (33713, 33719), False, 'from collections import Counter, defaultdict\n'), ((34110, 34127), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (34121, 34127), False, 'from collections import Counter, defaultdict\n'), ((36029, 36038), 'albumentations.ToFloat', 'ToFloat', ([], {}), '()\n', (36036, 36038), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36072, 36088), 'albumentations.RandomRotate90', 'RandomRotate90', ([], {}), '()\n', (36086, 36088), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36122, 36128), 'albumentations.Flip', 'Flip', ([], {}), '()\n', (36126, 36128), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36432, 36509), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0.0625)', 'scale_limit': '(0.2)', 'rotate_limit': '(45)', 'p': '(0.2)'}), '(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2)\n', (36448, 36509), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36749, 36841), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'hue_shift_limit': '(20)', 'sat_shift_limit': '(0.1)', 'val_shift_limit': '(0.1)', 'p': '(0.3)'}), '(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit\n =0.1, p=0.3)\n', (36767, 36841), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((41601, 41615), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (41609, 41615), True, 'import numpy as np\n'), ((43228, 43244), 'numpy.array', 'np.array', (['region'], {}), '(region)\n', (43236, 43244), True, 'import numpy as np\n'), ((43587, 43611), 'torch.tensor', 'torch.tensor', (["o['image']"], {}), 
"(o['image'])\n", (43599, 43611), False, 'import torch\n'), ((43650, 43673), 'torch.tensor', 'torch.tensor', (["o['mask']"], {}), "(o['mask'])\n", (43662, 43673), False, 'import torch\n'), ((21510, 21532), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (21530, 21532), False, 'import traceback\n'), ((30220, 30242), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (30240, 30242), False, 'import traceback\n'), ((39948, 39970), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (39968, 39970), False, 'import traceback\n'), ((41563, 41576), 'itertools.groupby', 'groupby', (['line'], {}), '(line)\n', (41570, 41576), False, 'from itertools import groupby\n'), ((16118, 16135), 'albumentations.MotionBlur', 'MotionBlur', ([], {'p': '(0.2)'}), '(p=0.2)\n', (16128, 16135), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((16173, 16204), 'albumentations.MedianBlur', 'MedianBlur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (16183, 16204), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((16242, 16267), 'albumentations.Blur', 'Blur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (16246, 16267), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((16498, 16522), 'albumentations.OpticalDistortion', 'OpticalDistortion', ([], {'p': '(0.3)'}), '(p=0.3)\n', (16515, 16522), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((16560, 16583), 'albumentations.ElasticTransform', 'ElasticTransform', ([], {'p': '(0.1)'}), '(p=0.1)\n', (16576, 16583), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((25212, 25229), 'albumentations.MotionBlur', 'MotionBlur', ([], {'p': '(0.2)'}), '(p=0.2)\n', (25222, 25229), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((25267, 25298), 'albumentations.MedianBlur', 'MedianBlur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (25277, 25298), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((25336, 25361), 'albumentations.Blur', 'Blur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (25340, 25361), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, 
HueSaturationValue\n'), ((25593, 25617), 'albumentations.OpticalDistortion', 'OpticalDistortion', ([], {'p': '(0.3)'}), '(p=0.3)\n', (25610, 25617), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((25655, 25678), 'albumentations.ElasticTransform', 'ElasticTransform', ([], {'p': '(0.1)'}), '(p=0.1)\n', (25671, 25678), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36206, 36223), 'albumentations.MotionBlur', 'MotionBlur', ([], {'p': '(0.2)'}), '(p=0.2)\n', (36216, 36223), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36261, 36292), 'albumentations.MedianBlur', 'MedianBlur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (36271, 36292), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36330, 36355), 'albumentations.Blur', 'Blur', ([], {'blur_limit': '(3)', 'p': '(0.1)'}), '(blur_limit=3, p=0.1)\n', (36334, 36355), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36587, 36611), 'albumentations.OpticalDistortion', 'OpticalDistortion', ([], {'p': '(0.3)'}), '(p=0.3)\n', (36604, 36611), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((36649, 36672), 'albumentations.ElasticTransform', 'ElasticTransform', ([], {'p': '(0.1)'}), '(p=0.1)\n', (36665, 36672), False, 'from albumentations import Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur, ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast, HueSaturationValue\n'), ((39491, 39508), 'os.path.splitext', 'path.splitext', (['im'], {}), '(im)\n', (39504, 39508), False, 'from os import path\n'), ((42115, 42140), 'shapely.ops.split', 'split', (['shp_line', 'split_pt'], {}), '(shp_line, split_pt)\n', (42120, 42140), False, 'from shapely.ops import split, snap\n'), ((42582, 42607), 'shapely.ops.split', 'split', (['shp_line', 'split_pt'], {}), '(shp_line, split_pt)\n', (42587, 42607), False, 'from shapely.ops import split, snap\n')] |
import numpy as np
import scmodes
def test_simulate_pois_rank1():
x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=1)
assert x.shape == (30, 60)
assert eta.shape == (30, 60)
assert (x >= 0).all()
  # rank-1 eta must have exactly one non-negligible singular value
  assert (~np.isclose(np.linalg.svd(eta, compute_uv=False, full_matrices=False), 0)).sum() == 1
def test_simulate_pois_rank2():
x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=2)
assert x.shape == (30, 60)
assert eta.shape == (30, 60)
assert (x >= 0).all()
  # rank-2 eta must have exactly two non-negligible singular values
  assert (~np.isclose(np.linalg.svd(eta, compute_uv=False, full_matrices=False), 0)).sum() == 2
def test_simulate_pois_masked():
x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=2, holdout=.25)
  # holdout=.25 masks a quarter of the entries for validation
  assert np.ma.is_masked(x)
def test_simulate_pois_size():
x, mu = scmodes.dataset.simulate_pois_size(n=30, p=60, s=1000, rank=1, seed=0)
assert x.shape == (30, 60)
assert mu.shape == (30, 60)
assert (x >= 0).all()
assert np.isclose(mu.sum(axis=0), 1).all()
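# Hedged aside (added, not part of scmodes): the rank assertions above just
# count the non-negligible singular values; the same idea as a helper.
def _numerical_rank(m):
  return int((~np.isclose(np.linalg.svd(m, compute_uv=False), 0)).sum())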
| [
"numpy.linalg.svd",
"numpy.ma.is_masked",
"scmodes.dataset.simulate_pois",
"scmodes.dataset.simulate_pois_size"
] | [((78, 127), 'scmodes.dataset.simulate_pois', 'scmodes.dataset.simulate_pois', ([], {'n': '(30)', 'p': '(60)', 'rank': '(1)'}), '(n=30, p=60, rank=1)\n', (107, 127), False, 'import scmodes\n'), ((352, 401), 'scmodes.dataset.simulate_pois', 'scmodes.dataset.simulate_pois', ([], {'n': '(30)', 'p': '(60)', 'rank': '(2)'}), '(n=30, p=60, rank=2)\n', (381, 401), False, 'import scmodes\n'), ((627, 690), 'scmodes.dataset.simulate_pois', 'scmodes.dataset.simulate_pois', ([], {'n': '(30)', 'p': '(60)', 'rank': '(2)', 'holdout': '(0.25)'}), '(n=30, p=60, rank=2, holdout=0.25)\n', (656, 690), False, 'import scmodes\n'), ((699, 717), 'numpy.ma.is_masked', 'np.ma.is_masked', (['x'], {}), '(x)\n', (714, 717), True, 'import numpy as np\n'), ((760, 830), 'scmodes.dataset.simulate_pois_size', 'scmodes.dataset.simulate_pois_size', ([], {'n': '(30)', 'p': '(60)', 's': '(1000)', 'rank': '(1)', 'seed': '(0)'}), '(n=30, p=60, s=1000, rank=1, seed=0)\n', (794, 830), False, 'import scmodes\n'), ((234, 291), 'numpy.linalg.svd', 'np.linalg.svd', (['eta'], {'compute_uv': '(False)', 'full_matrices': '(False)'}), '(eta, compute_uv=False, full_matrices=False)\n', (247, 291), True, 'import numpy as np\n'), ((508, 565), 'numpy.linalg.svd', 'np.linalg.svd', (['eta'], {'compute_uv': '(False)', 'full_matrices': '(False)'}), '(eta, compute_uv=False, full_matrices=False)\n', (521, 565), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.population.population."""
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.population import population as pop_mod
from learned_optimization.population.mutators import winner_take_all_genetic
import numpy as onp
class PopulationTest(parameterized.TestCase):
@parameterized.parameters((1,), (3,))
def test_no_interrupt_population(self, steps):
onp.random.seed(0)
def mutate_fn(p):
p += onp.random.normal() * 0.01
return p
mutate = winner_take_all_genetic.WinnerTakeAllGenetic(mutate_fn, steps)
num_worker = 3
population = pop_mod.PopulationController([1. for _ in range(num_worker)],
mutate)
for outer_step in range(600):
for i in range(num_worker):
new_data = population.maybe_get_worker_data(i, None, outer_step, None,
None)
step = new_data.step
self.assertEqual(step, outer_step)
meta_params = new_data.meta_params
gen_id = new_data.generation_id
population.set_eval(i, gen_id, step + 1, None, meta_params**2)
self.assertLess(onp.abs(meta_params), 0.1)
def test_interruption(self):
onp.random.seed(0)
with tempfile.TemporaryDirectory() as logdir:
def mutate_fn(p):
p += onp.random.normal() * 0.03
return p
mutate = winner_take_all_genetic.WinnerTakeAllGenetic(mutate_fn, 1)
num_worker = 3
for _ in range(10):
population = pop_mod.PopulationController(
[1. for _ in range(num_worker)], mutate, log_dir=logdir)
for _ in range(5):
for i in range(num_worker):
new_data = population.maybe_get_worker_data(i, None, 0, None, None)
gen_id = new_data.generation_id
population.set_eval(i, gen_id, new_data.step + 1, None,
new_data.meta_params**2)
gen_id = "bad_gen_id"
population.set_eval(i, gen_id, new_data.step + 1, None,
new_data.meta_params**2)
self.assertLess(onp.abs(new_data.meta_params), 0.5)
def test_do_have_to_reload_params(self):
onp.random.seed(0)
def mutate_fn(p):
p += onp.random.normal() * 0.01
return p
mutate = winner_take_all_genetic.WinnerTakeAllGenetic(mutate_fn, 2)
num_worker = 3
population = pop_mod.PopulationController([1. for _ in range(num_worker)],
mutate)
new_data = population.maybe_get_worker_data(0, None, 0, None, None)
step = new_data.step
meta_params = new_data.meta_params
gen_id = new_data.generation_id
params = new_data.params
new_data = population.maybe_get_worker_data(0, gen_id, step, params,
meta_params)
self.assertIsNone(new_data)
new_data = population.maybe_get_worker_data(0, gen_id, step, params,
meta_params)
self.assertIsNone(new_data)
new_data = population.maybe_get_worker_data(0, "some other id", step,
params, meta_params)
self.assertIsNotNone(new_data)
new_data = population.maybe_get_worker_data(0, gen_id, step, params,
meta_params)
self.assertIsNone(new_data)
    # if worker and gen id get mixed up, return updated data too.
new_data = population.maybe_get_worker_data(1, gen_id, step, params,
meta_params)
self.assertIsNotNone(new_data)
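# Hedged aside (added, not from learned_optimization): the mutate_fn contract
# exercised above is simply "meta-params in, perturbed meta-params out".
# A multiplicative variant, purely for illustration:
def _scale_mutate_fn(p):
  return p * (1.0 + 0.05 * onp.random.normal())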
if __name__ == "__main__":
absltest.main()
| [
"numpy.random.normal",
"numpy.abs",
"tempfile.TemporaryDirectory",
"learned_optimization.population.mutators.winner_take_all_genetic.WinnerTakeAllGenetic",
"absl.testing.parameterized.parameters",
"absl.testing.absltest.main",
"numpy.random.seed"
] | [((955, 991), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1,)', '(3,)'], {}), '((1,), (3,))\n', (979, 991), False, 'from absl.testing import parameterized\n'), ((4329, 4344), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4342, 4344), False, 'from absl.testing import absltest\n'), ((1045, 1063), 'numpy.random.seed', 'onp.random.seed', (['(0)'], {}), '(0)\n', (1060, 1063), True, 'import numpy as onp\n'), ((1154, 1216), 'learned_optimization.population.mutators.winner_take_all_genetic.WinnerTakeAllGenetic', 'winner_take_all_genetic.WinnerTakeAllGenetic', (['mutate_fn', 'steps'], {}), '(mutate_fn, steps)\n', (1198, 1216), False, 'from learned_optimization.population.mutators import winner_take_all_genetic\n'), ((1885, 1903), 'numpy.random.seed', 'onp.random.seed', (['(0)'], {}), '(0)\n', (1900, 1903), True, 'import numpy as onp\n'), ((2856, 2874), 'numpy.random.seed', 'onp.random.seed', (['(0)'], {}), '(0)\n', (2871, 2874), True, 'import numpy as onp\n'), ((2965, 3023), 'learned_optimization.population.mutators.winner_take_all_genetic.WinnerTakeAllGenetic', 'winner_take_all_genetic.WinnerTakeAllGenetic', (['mutate_fn', '(2)'], {}), '(mutate_fn, 2)\n', (3009, 3023), False, 'from learned_optimization.population.mutators import winner_take_all_genetic\n'), ((1822, 1842), 'numpy.abs', 'onp.abs', (['meta_params'], {}), '(meta_params)\n', (1829, 1842), True, 'import numpy as onp\n'), ((1913, 1942), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1940, 1942), False, 'import tempfile\n'), ((2052, 2110), 'learned_optimization.population.mutators.winner_take_all_genetic.WinnerTakeAllGenetic', 'winner_take_all_genetic.WinnerTakeAllGenetic', (['mutate_fn', '(1)'], {}), '(mutate_fn, 1)\n', (2096, 2110), False, 'from learned_optimization.population.mutators import winner_take_all_genetic\n'), ((1098, 1117), 'numpy.random.normal', 'onp.random.normal', ([], {}), '()\n', (1115, 1117), True, 'import numpy as onp\n'), ((2772, 2801), 'numpy.abs', 'onp.abs', (['new_data.meta_params'], {}), '(new_data.meta_params)\n', (2779, 2801), True, 'import numpy as onp\n'), ((2909, 2928), 'numpy.random.normal', 'onp.random.normal', ([], {}), '()\n', (2926, 2928), True, 'import numpy as onp\n'), ((1992, 2011), 'numpy.random.normal', 'onp.random.normal', ([], {}), '()\n', (2009, 2011), True, 'import numpy as onp\n')] |
import sys
import tempfile
import numpy as np
from dffml.record import Record
from dffml.high_level.ml import score
from dffml.source.source import Sources
from dffml.source.memory import MemorySource, MemorySourceConfig
from dffml.feature import Feature, Features
from dffml.util.asynctestcase import AsyncTestCase
import dffml_model_scikit.scikit_models
from sklearn.datasets import make_blobs
from model.scikit.dffml_model_scikit import (
SklearnModelAccuracy,
ScorerWillNotWork,
)
class TestScikitModel:
@classmethod
def setUpClass(cls):
cls.is_multi = "MULTI_" in cls.MODEL_TYPE
cls.model_dir = tempfile.TemporaryDirectory()
cls.features = Features()
if cls.MODEL_TYPE in classifier_types:
A, B, C, D, E, F, G, H, X, Y = list(
zip(*FEATURE_DATA_CLASSIFICATION)
)
cls.features.append(Feature("A", float, 1))
cls.features.append(Feature("B", float, 1))
cls.features.append(Feature("C", float, 1))
cls.features.append(Feature("D", float, 1))
cls.features.append(Feature("E", float, 1))
cls.features.append(Feature("F", float, 1))
cls.features.append(Feature("G", float, 1))
cls.features.append(Feature("H", float, 1))
if cls.MODEL_TYPE == "CLASSIFICATION":
cls.features.append(Feature("X", float, 1))
cls.records = [
Record(
str(i),
data={
"features": {
"A": A[i],
"B": B[i],
"C": C[i],
"D": D[i],
"E": E[i],
"F": F[i],
"G": G[i],
"H": H[i],
"X": X[i],
"Y": Y[i],
}
},
)
for i in range(0, len(A))
]
elif cls.MODEL_TYPE in regressor_types:
cls.features.append(Feature("A", float, 1))
cls.features.append(Feature("B", float, 1))
cls.features.append(Feature("C", float, 1))
cls.features.append(Feature("D", float, 1))
if cls.MODEL_TYPE == "REGRESSION":
cls.features.append(Feature("X", float, 1))
A, B, C, D, X, Y = list(zip(*FEATURE_DATA_REGRESSION))
cls.records = [
Record(
str(i),
data={
"features": {
"A": A[i],
"B": B[i],
"C": C[i],
"D": D[i],
"X": X[i],
"Y": Y[i],
}
},
)
for i in range(0, len(A))
]
elif cls.MODEL_TYPE == "CLUSTERING":
cls.features.append(Feature("A", float, 1))
cls.features.append(Feature("B", float, 1))
cls.features.append(Feature("C", float, 1))
cls.features.append(Feature("D", float, 1))
A, B, C, D, X = list(zip(*FEATURE_DATA_CLUSTERING))
cls.records = [
Record(
str(i),
data={
"features": {
"A": A[i],
"B": B[i],
"C": C[i],
"D": D[i],
"X": X[i],
}
},
)
for i in range(0, len(A))
]
cls.sources = Sources(
MemorySource(MemorySourceConfig(records=cls.records))
)
properties = {
"location": cls.model_dir.name,
"features": cls.features,
}
config_fields = dict()
estimator_type = cls.MODEL.SCIKIT_MODEL._estimator_type
if estimator_type in supervised_estimators:
if cls.is_multi:
config_fields["predict"] = Features(
Feature("X", float, 1), Feature("Y", float, 1)
)
else:
config_fields["predict"] = Feature("X", float, 1)
elif estimator_type in unsupervised_estimators:
# TODO If cls.TRUE_CLSTR_PRESENT then we want to use the
# mutual_info_score scikit accuracy scorer. In this case we might
# want to change tcluster to a boolean config property.
# For more info see commit e4f523976bf37d3457cda140ceab7899420ae2c7
config_fields["predict"] = Feature("X", float, 1)
cls.model = cls.MODEL(
cls.MODEL_CONFIG(**{**properties, **config_fields})
)
cls.scorer = cls.SCORER()
@classmethod
def tearDownClass(cls):
cls.model_dir.cleanup()
async def test_00_train(self):
async with self.sources as sources, self.model as model:
async with sources() as sctx, model() as mctx:
await mctx.train(sctx)
async def test_01_accuracy(self):
if self.MODEL_TYPE == "CLUSTERING":
with self.assertRaises(ScorerWillNotWork):
await score(
self.model,
self.scorer,
self.model.config.predict,
self.sources,
)
elif self.MODEL_TYPE in regressor_types:
res = await score(
self.model,
self.scorer,
self.model.config.predict,
self.sources,
)
self.assertTrue(0 <= res <= float("inf"))
else:
res = await score(
self.model,
self.scorer,
self.model.config.predict,
self.sources,
)
self.assertTrue(0 <= res <= 1)
async def test_02_predict(self):
async with self.sources as sources, self.model as model:
async with sources() as sctx, model() as mctx:
async for record in mctx.predict(sctx):
target = (
model.config.predict.names()
if self.is_multi
else model.config.predict.name
)
if self.is_multi:
prediction = [
v["value"]
for v in record.predictions(target).values()
]
else:
prediction = record.prediction(target).value
if self.MODEL_TYPE == "CLASSIFICATION":
self.assertIn(prediction, [2, 4])
elif self.MODEL_TYPE == "REGRESSION":
correct = FEATURE_DATA_REGRESSION[int(record.key)][3]
self.assertGreater(
prediction, correct - (correct * 0.40)
)
self.assertLess(prediction, correct + (correct * 0.40))
elif self.MODEL_TYPE == "CLUSTERING":
self.assertIn(prediction, [-1, 0, 1, 2, 3, 4, 5, 6, 7])
FEATURE_DATA_CLASSIFICATION = [
[5, 1, 1, 1, 2, 1, 3, 1, 1, 2],
[5, 4, 4, 5, 7, 10, 3, 2, 1, 2],
[3, 1, 1, 1, 2, 2, 3, 1, 1, 2],
[6, 8, 8, 1, 3, 4, 3, 7, 1, 2],
[4, 1, 1, 3, 2, 1, 3, 1, 1, 2],
[8, 10, 10, 8, 7, 10, 9, 7, 1, 4],
[1, 1, 1, 1, 2, 10, 3, 1, 1, 2],
[2, 1, 2, 1, 2, 1, 3, 1, 1, 2],
[2, 1, 1, 1, 2, 1, 1, 1, 2, 2],
[4, 2, 1, 1, 2, 1, 2, 1, 1, 2],
[1, 1, 1, 1, 1, 1, 3, 1, 1, 2],
[2, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[5, 3, 3, 3, 2, 3, 4, 4, 1, 4],
[1, 1, 1, 1, 2, 3, 3, 1, 1, 2],
[8, 7, 5, 10, 7, 9, 5, 5, 2, 4],
[7, 4, 6, 4, 6, 1, 4, 3, 1, 4],
[4, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[4, 1, 1, 1, 2, 1, 3, 1, 1, 2],
[10, 7, 7, 6, 4, 10, 4, 1, 2, 4],
[6, 1, 1, 1, 2, 1, 3, 1, 1, 2],
[7, 3, 2, 10, 5, 10, 5, 4, 2, 4],
[10, 5, 5, 3, 6, 7, 7, 10, 1, 4],
[2, 3, 1, 1, 2, 1, 2, 1, 1, 2],
[2, 1, 1, 1, 1, 1, 2, 1, 1, 2],
[4, 1, 3, 1, 2, 1, 2, 1, 1, 2],
[3, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[4, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[5, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[3, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[6, 3, 3, 3, 3, 2, 6, 1, 1, 2],
[7, 1, 2, 3, 2, 1, 2, 1, 1, 2],
[1, 1, 1, 1, 2, 1, 1, 1, 1, 2],
[5, 1, 1, 2, 1, 1, 2, 1, 1, 2],
[3, 1, 3, 1, 3, 4, 1, 1, 1, 2],
[4, 6, 6, 5, 7, 6, 7, 7, 2, 4],
[2, 1, 1, 1, 2, 5, 1, 1, 1, 2],
[2, 1, 1, 1, 2, 1, 1, 1, 1, 2],
[4, 1, 1, 1, 2, 1, 1, 1, 2, 2],
[6, 2, 3, 1, 2, 1, 1, 1, 1, 2],
[5, 1, 1, 1, 2, 1, 2, 1, 1, 2],
[1, 1, 1, 1, 2, 1, 1, 1, 2, 2],
]
FEATURE_DATA_REGRESSION = [
[5.0, 162.0, 60.0, 191.0, 36.0, 50.0],
[2.0, 110.0, 60.0, 189.0, 37.0, 52.0],
[12.0, 101.0, 101.0, 193.0, 38.0, 58.0],
[12.0, 105.0, 37.0, 162.0, 35.0, 62.0],
[13.0, 155.0, 58.0, 189.0, 35.0, 46.0],
[4.0, 101.0, 42.0, 182.0, 36.0, 56.0],
[8.0, 101.0, 38.0, 211.0, 38.0, 56.0],
[6.0, 125.0, 40.0, 167.0, 34.0, 60.0],
[15.0, 200.0, 40.0, 176.0, 31.0, 74.0],
[17.0, 251.0, 250.0, 154.0, 33.0, 56.0],
[17.0, 120.0, 38.0, 169.0, 34.0, 50.0],
[13.0, 210.0, 115.0, 166.0, 33.0, 52.0],
[14.0, 215.0, 105.0, 154.0, 34.0, 64.0],
[1.0, 50.0, 50.0, 247.0, 46.0, 50.0],
[6.0, 70.0, 31.0, 193.0, 36.0, 46.0],
[12.0, 210.0, 120.0, 202.0, 37.0, 62.0],
[4.0, 60.0, 25.0, 176.0, 37.0, 54.0],
[11.0, 230.0, 80.0, 157.0, 32.0, 52.0],
[15.0, 225.0, 73.0, 156.0, 33.0, 54.0],
[2.0, 110.0, 43.0, 138.0, 33.0, 68.0],
]
"""
FEATURE_DATA_CLUSTERING = [
[-9.01904123, 6.44409816, 5.95914173, 6.30718146],
[ 7.10630876, -2.07342124, -0.72564101, 3.81251745],
...
]
"""
data, labels = make_blobs(
n_samples=80, centers=8, n_features=4, random_state=2020
)
# append the true cluster labels as a fifth column -> shape (80, 5)
FEATURE_DATA_CLUSTERING = np.concatenate((data, labels[:, None]), axis=1)
CLASSIFIERS = [
"KNeighborsClassifier",
"SVC",
"GaussianProcessClassifier",
"DecisionTreeClassifier",
"RandomForestClassifier",
"MLPClassifier",
"AdaBoostClassifier",
"GaussianNB",
"QuadraticDiscriminantAnalysis",
"LogisticRegression",
"GradientBoostingClassifier",
"BernoulliNB",
"ExtraTreesClassifier",
"BaggingClassifier",
"LinearDiscriminantAnalysis",
"MultinomialNB",
]
REGRESSORS = [
"LinearRegression",
"ElasticNet",
"BayesianRidge",
"Lasso",
"ARDRegression",
"RANSACRegressor",
"DecisionTreeRegressor",
"GaussianProcessRegressor",
"OrthogonalMatchingPursuit",
"Lars",
"Ridge",
]
CLUSTERERS = [
"KMeans",
"Birch",
"MiniBatchKMeans",
"AffinityPropagation",
"MeanShift",
"SpectralClustering",
"AgglomerativeClustering",
"OPTICS",
]
supervised_estimators = ["classifier", "regressor"]
unsupervised_estimators = ["clusterer"]
classifier_types = ["CLASSIFICATION", "MULTI_CLASSIFICATION"]
regressor_types = ["REGRESSION", "MULTI_REGRESSION"]
valid_estimators = supervised_estimators + unsupervised_estimators
for clf in CLASSIFIERS:
for model_type in classifier_types:
test_cls = type(
f"Test{clf}Model",
(TestScikitModel, AsyncTestCase),
{
"MODEL_TYPE": model_type,
"MODEL": getattr(
dffml_model_scikit.scikit_models, clf + "Model"
),
"MODEL_CONFIG": getattr(
dffml_model_scikit.scikit_models, clf + "ModelConfig"
),
"SCORER": SklearnModelAccuracy,
},
)
setattr(sys.modules[__name__], test_cls.__qualname__, test_cls)
for reg in REGRESSORS:
for model_type in regressor_types:
test_cls = type(
f"Test{reg}Model",
(TestScikitModel, AsyncTestCase),
{
"MODEL_TYPE": model_type,
"MODEL": getattr(
dffml_model_scikit.scikit_models, reg + "Model"
),
"MODEL_CONFIG": getattr(
dffml_model_scikit.scikit_models, reg + "ModelConfig"
),
"SCORER": SklearnModelAccuracy,
},
)
setattr(sys.modules[__name__], test_cls.__qualname__, test_cls)
for clstr in CLUSTERERS:
for true_clstr_present in [True, False]:
labelInfo = f"withLabel" if true_clstr_present else f"withoutLabel"
test_cls = type(
f"Test{clstr}Model" + labelInfo,
(TestScikitModel, AsyncTestCase),
{
"MODEL_TYPE": "CLUSTERING",
"MODEL": getattr(
dffml_model_scikit.scikit_models, clstr + "Model"
),
"MODEL_CONFIG": getattr(
dffml_model_scikit.scikit_models, clstr + "ModelConfig"
),
"TRUE_CLSTR_PRESENT": true_clstr_present,
"SCORER": SklearnModelAccuracy,
},
)
setattr(sys.modules[__name__], test_cls.__qualname__, test_cls)
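# Hedged illustration (added, not part of dffml): the loops above stamp out
# one TestCase subclass per scikit model via ``type``; a minimal analogue.
def _make_model_test(name, model_type):
    return type(
        f"Test{name}Model",
        (TestScikitModel, AsyncTestCase),
        {
            "MODEL_TYPE": model_type,
            "MODEL": getattr(dffml_model_scikit.scikit_models, name + "Model"),
            "MODEL_CONFIG": getattr(
                dffml_model_scikit.scikit_models, name + "ModelConfig"
            ),
            "SCORER": SklearnModelAccuracy,
        },
    )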
| [
"tempfile.TemporaryDirectory",
"sklearn.datasets.make_blobs",
"dffml.high_level.ml.score",
"dffml.feature.Feature",
"dffml.feature.Features",
"numpy.concatenate",
"dffml.source.memory.MemorySourceConfig"
] | [((10059, 10127), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(80)', 'centers': '(8)', 'n_features': '(4)', 'random_state': '(2020)'}), '(n_samples=80, centers=8, n_features=4, random_state=2020)\n', (10069, 10127), False, 'from sklearn.datasets import make_blobs\n'), ((10160, 10207), 'numpy.concatenate', 'np.concatenate', (['(data, labels[:, None])'], {'axis': '(1)'}), '((data, labels[:, None]), axis=1)\n', (10174, 10207), True, 'import numpy as np\n'), ((636, 665), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (663, 665), False, 'import tempfile\n'), ((689, 699), 'dffml.feature.Features', 'Features', ([], {}), '()\n', (697, 699), False, 'from dffml.feature import Feature, Features\n'), ((892, 914), 'dffml.feature.Feature', 'Feature', (['"""A"""', 'float', '(1)'], {}), "('A', float, 1)\n", (899, 914), False, 'from dffml.feature import Feature, Features\n'), ((948, 970), 'dffml.feature.Feature', 'Feature', (['"""B"""', 'float', '(1)'], {}), "('B', float, 1)\n", (955, 970), False, 'from dffml.feature import Feature, Features\n'), ((1004, 1026), 'dffml.feature.Feature', 'Feature', (['"""C"""', 'float', '(1)'], {}), "('C', float, 1)\n", (1011, 1026), False, 'from dffml.feature import Feature, Features\n'), ((1060, 1082), 'dffml.feature.Feature', 'Feature', (['"""D"""', 'float', '(1)'], {}), "('D', float, 1)\n", (1067, 1082), False, 'from dffml.feature import Feature, Features\n'), ((1116, 1138), 'dffml.feature.Feature', 'Feature', (['"""E"""', 'float', '(1)'], {}), "('E', float, 1)\n", (1123, 1138), False, 'from dffml.feature import Feature, Features\n'), ((1172, 1194), 'dffml.feature.Feature', 'Feature', (['"""F"""', 'float', '(1)'], {}), "('F', float, 1)\n", (1179, 1194), False, 'from dffml.feature import Feature, Features\n'), ((1228, 1250), 'dffml.feature.Feature', 'Feature', (['"""G"""', 'float', '(1)'], {}), "('G', float, 1)\n", (1235, 1250), False, 'from dffml.feature import Feature, Features\n'), ((1284, 1306), 'dffml.feature.Feature', 'Feature', (['"""H"""', 'float', '(1)'], {}), "('H', float, 1)\n", (1291, 1306), False, 'from dffml.feature import Feature, Features\n'), ((3879, 3918), 'dffml.source.memory.MemorySourceConfig', 'MemorySourceConfig', ([], {'records': 'cls.records'}), '(records=cls.records)\n', (3897, 3918), False, 'from dffml.source.memory import MemorySource, MemorySourceConfig\n'), ((4420, 4442), 'dffml.feature.Feature', 'Feature', (['"""X"""', 'float', '(1)'], {}), "('X', float, 1)\n", (4427, 4442), False, 'from dffml.feature import Feature, Features\n'), ((4833, 4855), 'dffml.feature.Feature', 'Feature', (['"""X"""', 'float', '(1)'], {}), "('X', float, 1)\n", (4840, 4855), False, 'from dffml.feature import Feature, Features\n'), ((1395, 1417), 'dffml.feature.Feature', 'Feature', (['"""X"""', 'float', '(1)'], {}), "('X', float, 1)\n", (1402, 1417), False, 'from dffml.feature import Feature, Features\n'), ((2158, 2180), 'dffml.feature.Feature', 'Feature', (['"""A"""', 'float', '(1)'], {}), "('A', float, 1)\n", (2165, 2180), False, 'from dffml.feature import Feature, Features\n'), ((2214, 2236), 'dffml.feature.Feature', 'Feature', (['"""B"""', 'float', '(1)'], {}), "('B', float, 1)\n", (2221, 2236), False, 'from dffml.feature import Feature, Features\n'), ((2270, 2292), 'dffml.feature.Feature', 'Feature', (['"""C"""', 'float', '(1)'], {}), "('C', float, 1)\n", (2277, 2292), False, 'from dffml.feature import Feature, Features\n'), ((2326, 2348), 'dffml.feature.Feature', 'Feature', (['"""D"""', 'float', '(1)'], 
{}), "('D', float, 1)\n", (2333, 2348), False, 'from dffml.feature import Feature, Features\n'), ((4294, 4316), 'dffml.feature.Feature', 'Feature', (['"""X"""', 'float', '(1)'], {}), "('X', float, 1)\n", (4301, 4316), False, 'from dffml.feature import Feature, Features\n'), ((4318, 4340), 'dffml.feature.Feature', 'Feature', (['"""Y"""', 'float', '(1)'], {}), "('Y', float, 1)\n", (4325, 4340), False, 'from dffml.feature import Feature, Features\n'), ((5432, 5503), 'dffml.high_level.ml.score', 'score', (['self.model', 'self.scorer', 'self.model.config.predict', 'self.sources'], {}), '(self.model, self.scorer, self.model.config.predict, self.sources)\n', (5437, 5503), False, 'from dffml.high_level.ml import score\n'), ((5676, 5747), 'dffml.high_level.ml.score', 'score', (['self.model', 'self.scorer', 'self.model.config.predict', 'self.sources'], {}), '(self.model, self.scorer, self.model.config.predict, self.sources)\n', (5681, 5747), False, 'from dffml.high_level.ml import score\n'), ((5919, 5990), 'dffml.high_level.ml.score', 'score', (['self.model', 'self.scorer', 'self.model.config.predict', 'self.sources'], {}), '(self.model, self.scorer, self.model.config.predict, self.sources)\n', (5924, 5990), False, 'from dffml.high_level.ml import score\n'), ((2433, 2455), 'dffml.feature.Feature', 'Feature', (['"""X"""', 'float', '(1)'], {}), "('X', float, 1)\n", (2440, 2455), False, 'from dffml.feature import Feature, Features\n'), ((3103, 3125), 'dffml.feature.Feature', 'Feature', (['"""A"""', 'float', '(1)'], {}), "('A', float, 1)\n", (3110, 3125), False, 'from dffml.feature import Feature, Features\n'), ((3159, 3181), 'dffml.feature.Feature', 'Feature', (['"""B"""', 'float', '(1)'], {}), "('B', float, 1)\n", (3166, 3181), False, 'from dffml.feature import Feature, Features\n'), ((3215, 3237), 'dffml.feature.Feature', 'Feature', (['"""C"""', 'float', '(1)'], {}), "('C', float, 1)\n", (3222, 3237), False, 'from dffml.feature import Feature, Features\n'), ((3271, 3293), 'dffml.feature.Feature', 'Feature', (['"""D"""', 'float', '(1)'], {}), "('D', float, 1)\n", (3278, 3293), False, 'from dffml.feature import Feature, Features\n')] |
import numpy as np
import scipy as sp
from sklearn.gaussian_process import GaussianProcessRegressor
import matplotlib.pyplot as plt
class PostProcessing:
"""
This class contains the methods for visualizing the results of the DIC analysis.
**Input:**
* **analysis_obj** (`object`)
Object of the Analysis class.
**Attributes:**
    * **analysis_obj** (`object`)
Object of the Analysis class.
* **mesh_obj** (`object`)
Object of the RectangularMesh class.
* **strain11** (`ndarray`)
Strain xx at the center of each cell.
* **strain22** (`ndarray`)
Strain yy at the center of each cell.
* **strain12** (`ndarray`)
Strain xy at the center of each cell.
* **strain21** (`ndarray`)
Strain yx (equal to Strain xy) at the center of each cell.
**Methods:**
"""
def __init__(self, analysis_obj=None):
self.mesh_obj = analysis_obj.mesh_obj
self.analysis_obj = analysis_obj
self.strain11 = None
self.strain22 = None
self.strain12 = None
self.strain21 = None
def get_fields(self):
"""
Method to estimate the strain fields.
**Input:**
**Output/Returns:**
"""
# Derivative
# d_ker = np.matrix([-1., 0, 1.])
u = self.analysis_obj.u
v = self.analysis_obj.v
pixel_dim = self.analysis_obj.pixel_dim
centers = self.mesh_obj.centers
zero_mat = np.zeros((np.shape(u)[1], np.shape(u)[2]))
strain_matrix_11 = []
strain_matrix_12 = []
strain_matrix_21 = []
strain_matrix_22 = []
strain_matrix_11.append(zero_mat)
strain_matrix_12.append(zero_mat)
strain_matrix_21.append(zero_mat)
strain_matrix_22.append(zero_mat)
for k in range(np.shape(u)[0]):
points = []
dx = []
dy = []
c = 0
for i in range(np.shape(u)[1]):
for j in range(np.shape(u)[2]):
points.append([centers[c][0], centers[c][1]])
dx.append(u[k, i, j])
dy.append(v[k, i, j])
c = c + 1
gpu = GaussianProcessRegressor(n_restarts_optimizer=10, normalize_y=True)
gpu.fit(points, dx)
gpv = GaussianProcessRegressor(n_restarts_optimizer=10, normalize_y=False)
gpv.fit(points, dy)
strain_11 = np.zeros((np.shape(u)[1], np.shape(u)[2]))
strain_22 = np.zeros((np.shape(u)[1], np.shape(u)[2]))
strain_12 = np.zeros((np.shape(u)[1], np.shape(u)[2]))
strain_21 = np.zeros((np.shape(u)[1], np.shape(u)[2]))
c = 0
h = pixel_dim / 100
for i in range(np.shape(u)[1]):
for j in range(np.shape(u)[2]):
p0 = [[centers[c][0], centers[c][1]]]
p1 = [[centers[c][0] + h, centers[c][1]]]
pred0ux = gpu.predict(p0)[0]
pred1ux = gpu.predict(p1)[0]
pred0vx = gpv.predict(p0)[0]
pred1vx = gpv.predict(p1)[0]
p0 = [[centers[c][0], centers[c][1]]]
p1 = [[centers[c][0], centers[c][1] + h]]
pred0uy = gpu.predict(p0)[0]
pred1uy = gpu.predict(p1)[0]
pred0vy = gpv.predict(p0)[0]
pred1vy = gpv.predict(p1)[0]
d11 = (pred1ux - pred0ux) / h
d12 = (pred1uy - pred0uy) / h
d21 = (pred1vx - pred0vx) / h
d22 = (pred1vy - pred0vy) / h
                    # Compute the Green-Lagrange strain components from the
                    # displacement gradients (the shear terms below already
                    # follow this definition).
                    strain_11[i, j] = d11 + 0.5 * (d11 ** 2 + d21 ** 2)
                    strain_22[i, j] = d22 + 0.5 * (d12 ** 2 + d22 ** 2)
                    strain_12[i, j] = 0.5 * (d12 + d21 + d11 * d12 + d21 * d22)
                    strain_21[i, j] = 0.5 * (d12 + d21 + d11 * d12 + d21 * d22)
c = c + 1
strain_matrix_11.append(strain_11)
strain_matrix_22.append(strain_22)
strain_matrix_12.append(strain_12)
strain_matrix_21.append(strain_21)
self.strain11 = np.array(strain_matrix_11)
self.strain22 = np.array(strain_matrix_22)
self.strain12 = np.array(strain_matrix_12)
self.strain21 = np.array(strain_matrix_21)
def visualization(self, results="u", step=0, smooth=False):
"""
Method to plot the results in terms of displacements and strains.
**Input:**
* **results** (`str`)
        Visualize the results:
            -'u': displacement x.
            -'v': displacement y.
            -'abs': displacement magnitude.
            -'e11': strain xx.
            -'e22': strain yy.
            -'e12': strain xy.
            -'e21': strain yx.
* **step** (`int`)
Load step of interest.
* **smooth** (`bool`)
Gaussian filtering.
**Output/Returns:**
"""
if step < 0:
raise ValueError("DICpy: `step` must be larger than or equal to 0.")
if step > len(self.analysis_obj.u):
raise ValueError("DICpy: `step` cannot be larger than the number of steps in the analysis.")
if not isinstance(step, int):
raise TypeError("DICpy: step must be an integer.")
point_a = self.mesh_obj.point_a
point_b = self.mesh_obj.point_b
images = self.mesh_obj.images_obj.images
stepx = self.mesh_obj.stepx
stepy = self.mesh_obj.stepy
self.get_fields()
u_ = self.analysis_obj.u
v_ = self.analysis_obj.v
if step == 0:
u = np.zeros(np.shape(u_[0, :, :]))
v = np.zeros(np.shape(u_[0, :, :]))
else:
u = u_[step - 1, :, :]
v = v_[step - 1, :, :]
e11 = self.strain11[step, :, :]
e12 = self.strain12[step, :, :]
e21 = self.strain21[step, :, :]
e22 = self.strain22[step, :, :]
if results == 'u':
mask = u
elif results == 'v':
mask = v
elif results == 'abs':
mask = np.sqrt(v ** 2 + u ** 2)
elif results == 'e11':
mask = e11
elif results == 'e12':
mask = e12
elif results == 'e21':
mask = e21
elif results == 'e22':
mask = e22
else:
raise ValueError('DICpy: not valid option for results.')
img = images[step]
x = np.arange(0, np.shape(img)[1])
y = np.arange(0, np.shape(img)[0])
X, Y = np.meshgrid(x, y)
xm = np.arange(min(point_a[0], point_b[0]), max(point_a[0], point_b[0]))
ym = np.arange(min(point_a[1], point_b[1]), max(point_a[1], point_b[1]))
Xm, Ym = np.meshgrid(xm, ym)
extent = np.min(x), np.max(x), np.min(y), np.max(y)
extentm = np.min(xm), np.max(xm), np.shape(img)[0] - np.max(ym), np.shape(img)[0] - np.min(ym)
if smooth:
lx = stepx[1] - stepx[0]
ly = stepy[1] - stepy[0]
sigma = 0.005 * max(lx, ly)
mask = sp.ndimage.gaussian_filter(mask, sigma=sigma)
plt.close()
fig = plt.figure(frameon=False)
im1 = plt.imshow(img, cmap=plt.cm.gray, interpolation='bilinear', extent=extent)
im2 = plt.imshow(mask, cmap=plt.cm.hsv, alpha=.7, interpolation='bilinear', extent=extentm)
im3 = plt.plot([0, np.shape(img)[1]], [0, np.shape(img)[0]], '.')
plt.xlim(0, np.shape(img)[1])
plt.ylim(0, np.shape(img)[0])
plt.colorbar(im2)
plt.show()
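# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the core idea of
# `get_fields` in isolation -- fit a Gaussian process to scattered
# displacement samples, then estimate a displacement gradient with a forward
# difference of two GP predictions. All names and values are hypothetical.
# ---------------------------------------------------------------------------
def _gp_derivative_demo():
    rng = np.random.RandomState(0)
    points = rng.uniform(0, 1, size=(50, 2))      # scattered cell centers (x, y)
    dx = 0.3 * points[:, 0] + 0.1 * points[:, 1]  # displacement u = 0.3*x + 0.1*y
    gp = GaussianProcessRegressor(normalize_y=True).fit(points, dx)
    h = 1e-3                                      # small step, mirroring pixel_dim / 100
    p0 = [[0.5, 0.5]]
    p1 = [[0.5 + h, 0.5]]
    d11 = (gp.predict(p1)[0] - gp.predict(p0)[0]) / h
    print(d11)  # should be close to the analytic du/dx = 0.3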
| [
"matplotlib.pyplot.imshow",
"sklearn.gaussian_process.GaussianProcessRegressor",
"scipy.ndimage.gaussian_filter",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.meshgrid",
"numpy.shape",
"matp... | [((4302, 4328), 'numpy.array', 'np.array', (['strain_matrix_11'], {}), '(strain_matrix_11)\n', (4310, 4328), True, 'import numpy as np\n'), ((4353, 4379), 'numpy.array', 'np.array', (['strain_matrix_22'], {}), '(strain_matrix_22)\n', (4361, 4379), True, 'import numpy as np\n'), ((4404, 4430), 'numpy.array', 'np.array', (['strain_matrix_12'], {}), '(strain_matrix_12)\n', (4412, 4430), True, 'import numpy as np\n'), ((4455, 4481), 'numpy.array', 'np.array', (['strain_matrix_21'], {}), '(strain_matrix_21)\n', (4463, 4481), True, 'import numpy as np\n'), ((6695, 6712), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (6706, 6712), True, 'import numpy as np\n'), ((6893, 6912), 'numpy.meshgrid', 'np.meshgrid', (['xm', 'ym'], {}), '(xm, ym)\n', (6904, 6912), True, 'import numpy as np\n'), ((7285, 7296), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7294, 7296), True, 'import matplotlib.pyplot as plt\n'), ((7311, 7336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (7321, 7336), True, 'import matplotlib.pyplot as plt\n'), ((7351, 7425), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': 'plt.cm.gray', 'interpolation': '"""bilinear"""', 'extent': 'extent'}), "(img, cmap=plt.cm.gray, interpolation='bilinear', extent=extent)\n", (7361, 7425), True, 'import matplotlib.pyplot as plt\n'), ((7440, 7530), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {'cmap': 'plt.cm.hsv', 'alpha': '(0.7)', 'interpolation': '"""bilinear"""', 'extent': 'extentm'}), "(mask, cmap=plt.cm.hsv, alpha=0.7, interpolation='bilinear',\n extent=extentm)\n", (7450, 7530), True, 'import matplotlib.pyplot as plt\n'), ((7684, 7701), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im2'], {}), '(im2)\n', (7696, 7701), True, 'import matplotlib.pyplot as plt\n'), ((7710, 7720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7718, 7720), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2308), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'n_restarts_optimizer': '(10)', 'normalize_y': '(True)'}), '(n_restarts_optimizer=10, normalize_y=True)\n', (2265, 2308), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((2360, 2428), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'n_restarts_optimizer': '(10)', 'normalize_y': '(False)'}), '(n_restarts_optimizer=10, normalize_y=False)\n', (2384, 2428), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((6931, 6940), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6937, 6940), True, 'import numpy as np\n'), ((6942, 6951), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6948, 6951), True, 'import numpy as np\n'), ((6953, 6962), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (6959, 6962), True, 'import numpy as np\n'), ((6964, 6973), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (6970, 6973), True, 'import numpy as np\n'), ((6992, 7002), 'numpy.min', 'np.min', (['xm'], {}), '(xm)\n', (6998, 7002), True, 'import numpy as np\n'), ((7004, 7014), 'numpy.max', 'np.max', (['xm'], {}), '(xm)\n', (7010, 7014), True, 'import numpy as np\n'), ((7230, 7275), 'scipy.ndimage.gaussian_filter', 'sp.ndimage.gaussian_filter', (['mask'], {'sigma': 'sigma'}), '(mask, sigma=sigma)\n', (7256, 7275), True, 'import scipy as sp\n'), ((1849, 1860), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (1857, 1860), True, 'import numpy as np\n'), ((5776, 5797), 'numpy.shape', 'np.shape', (['u_[0, :, 
:]'], {}), '(u_[0, :, :])\n', (5784, 5797), True, 'import numpy as np\n'), ((5824, 5845), 'numpy.shape', 'np.shape', (['u_[0, :, :]'], {}), '(u_[0, :, :])\n', (5832, 5845), True, 'import numpy as np\n'), ((6619, 6632), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6627, 6632), True, 'import numpy as np\n'), ((6662, 6675), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6670, 6675), True, 'import numpy as np\n'), ((7035, 7045), 'numpy.max', 'np.max', (['ym'], {}), '(ym)\n', (7041, 7045), True, 'import numpy as np\n'), ((7066, 7076), 'numpy.min', 'np.min', (['ym'], {}), '(ym)\n', (7072, 7076), True, 'import numpy as np\n'), ((7620, 7633), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7628, 7633), True, 'import numpy as np\n'), ((7658, 7671), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7666, 7671), True, 'import numpy as np\n'), ((1504, 1515), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (1512, 1515), True, 'import numpy as np\n'), ((1520, 1531), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (1528, 1531), True, 'import numpy as np\n'), ((1977, 1988), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (1985, 1988), True, 'import numpy as np\n'), ((2808, 2819), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2816, 2819), True, 'import numpy as np\n'), ((6241, 6265), 'numpy.sqrt', 'np.sqrt', (['(v ** 2 + u ** 2)'], {}), '(v ** 2 + u ** 2)\n', (6248, 6265), True, 'import numpy as np\n'), ((7016, 7029), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7024, 7029), True, 'import numpy as np\n'), ((7047, 7060), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7055, 7060), True, 'import numpy as np\n'), ((7553, 7566), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7561, 7566), True, 'import numpy as np\n'), ((7576, 7589), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (7584, 7589), True, 'import numpy as np\n'), ((2025, 2036), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2033, 2036), True, 'import numpy as np\n'), ((2496, 2507), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2504, 2507), True, 'import numpy as np\n'), ((2512, 2523), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2520, 2523), True, 'import numpy as np\n'), ((2563, 2574), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2571, 2574), True, 'import numpy as np\n'), ((2579, 2590), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2587, 2590), True, 'import numpy as np\n'), ((2630, 2641), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2638, 2641), True, 'import numpy as np\n'), ((2646, 2657), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2654, 2657), True, 'import numpy as np\n'), ((2697, 2708), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2705, 2708), True, 'import numpy as np\n'), ((2713, 2724), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2721, 2724), True, 'import numpy as np\n'), ((2856, 2867), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (2864, 2867), True, 'import numpy as np\n')] |
"""
Here the structure of the network is made in pytorch
"""
from typing import List, Union, Optional
import torch
import os
from logger import logger
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.stats import norm
class Encoder(nn.Module):
"""
Encodes the data using a CNN
Input => 64x64 image
    Output => mean vector z_dim
              std vector z_dim (softplus-positive)
              predicted value
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.z_dim = z_dim
self.layers = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 512, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(512),
nn.Flatten(),
nn.Linear(512, 1000),
nn.LeakyReLU(),
nn.Linear(1000, z_dim*2+1)
)
def forward(self, input: torch.Tensor):
"""
Perform forward pass of encoder.
"""
out = self.layers(input)
# return classification, mean and log_std
return out[:, 0], out[:, 1:self.z_dim+1], F.softplus(out[:,self.z_dim+1:])
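# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick shape check
# of the encoder head defined above -- one classification logit, z_dim means
# and z_dim softplus-positive stds split from a single linear output.
# ---------------------------------------------------------------------------
def _encoder_split_demo():
    enc = Encoder(z_dim=20)
    x = torch.randn(2, 3, 64, 64)        # batch of two 64x64 RGB images
    pred, mean, std = enc(x)
    print(pred.shape, mean.shape, std.shape)  # (2,), (2, 20), (2, 20)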
class UnFlatten(nn.Module):
def __init__(self, channel_size, image_size):
super(UnFlatten, self).__init__()
self.channel_size = channel_size
self.image_size = image_size
def forward(self, input):
return input.view(-1, self.channel_size, self.image_size, self.image_size)
class Decoder(nn.Module):
"""
    Decodes a latent sample back into an image using a CNN
    Input => sample vector z_dim
    Output => 64x64 image
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(z_dim, 1000),
nn.LeakyReLU(),
nn.Linear(1000, 512*1*1),
UnFlatten(512, 1),
nn.ConvTranspose2d(512, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.ConvTranspose2d(256, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128, 64, kernel_size=5, stride=2, output_padding=1),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(64, 3, kernel_size=5, stride=2, output_padding=1),
nn.Sigmoid()
)
def forward(self, input: torch.Tensor):
"""
Perform forward pass of encoder.
"""
out = self.layers(input)
return out
class Db_vae(nn.Module):
def __init__(
self,
z_dim: int = 20,
hist_size: int = 1000,
alpha: float = 0.01,
num_bins: int = 10,
device: str = "cpu",
custom_encoding_layers: Optional[nn.Sequential] = None,
custom_decoding_layers: Optional[nn.Sequential] = None
):
super().__init__()
self.device = device
self.z_dim = z_dim
self.encoder = Encoder(z_dim, custom_encoding_layers)
self.decoder = Decoder(z_dim, custom_decoding_layers)
self.target_dist = torch.distributions.normal.Normal(0, 1)
self.c1 = 1
self.c2 = 1
self.c3 = 0.1
self.num_bins = num_bins
self.min_val = -15
self.max_val = 15
self.xlin = np.linspace(self.min_val, self.max_val, self.num_bins).reshape(1,1,self.num_bins)
self.hist = np.zeros((z_dim, self.num_bins))
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
self.alpha = alpha
@staticmethod
def init(path_to_model: str, device: str, z_dim: int):
full_path_to_model = f"results/{path_to_model}/model.pt"
if not os.path.exists(full_path_to_model):
logger.error(
f"Can't find model at {full_path_to_model}",
next_step="Evaluation will stop",
tip="Double check your path to model"
)
raise Exception
model: Db_vae = Db_vae(z_dim=z_dim, device=device)
try:
model.load_state_dict(torch.load(full_path_to_model, map_location=device))
        except Exception:
            logger.error(
                f"Unable to load model from {full_path_to_model}.",
                next_step="Model will not initialize",
                tip="Did you use the right config parameters, or custom layers from the stored model?"
            )
            raise
logger.info(f"Loaded model from {path_to_model}!")
return model
def forward(self, images: torch.Tensor, labels: torch.Tensor):
"""
Given images, perform an encoding and decoding step and return the
negative average elbo for the given batch.
"""
pred, mean, std = self.encoder(images)
loss_class = F.binary_cross_entropy_with_logits(pred, labels.float(), reduction='none')
# We only want to calculate the loss towards actual faces
faceslicer = labels == 1
facemean = mean[faceslicer]
facestd = std[faceslicer]
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(facemean, facestd)
z = dist.rsample().to(self.device)
res = self.decoder(z)
# calculate VAE losses
loss_recon = (images[faceslicer] - res)**2
loss_recon = loss_recon.view(loss_recon.shape[0],-1).mean(1)
loss_kl = torch.distributions.kl.kl_divergence(dist, self.target_dist)
loss_kl = loss_kl.view(loss_kl.shape[0],-1).mean(1)
loss_vae = self.c2 * loss_recon + self.c3 * loss_kl
loss_total = self.c1 * loss_class
# Only add loss to positions of faces, rest is zero
zeros = torch.zeros(faceslicer.shape[0]).to(self.device)
zeros[faceslicer] = loss_vae
loss_total = loss_total + zeros
return pred, loss_total
def forward_eval(self, images: torch.Tensor):
"""
Given images, perform an encoding and decoding step and return the
negative average elbo for the given batch.
"""
with torch.no_grad():
pred, _,_ = self.encoder(images)
return pred
def interpolate(self, images: torch.Tensor, amount: int):
with torch.no_grad():
_, mean, std = self.encoder(images)
mean_1, std_1 = mean[0,:], std[0,:]
mean_2, std_2 = mean[1,:], std[1,:]
all_mean = torch.tensor([]).to(self.device)
all_std = torch.tensor([]).to(self.device)
diff_mean = mean_1 - mean_2
            diff_std = std_1 - std_2
steps_mean = diff_mean / (amount-1)
steps_std = diff_std / (amount-1)
for i in range(amount):
all_mean = torch.cat((all_mean, mean_1 - steps_mean*i))
all_std = torch.cat((all_std, std_1 - steps_std*i))
all_mean = all_mean.view(amount, -1)
all_std = all_std.view(amount, -1)
dist = torch.distributions.normal.Normal(all_mean, all_std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
return recon_images
def build_means(self, input: torch.Tensor):
_, mean, log_std = self.encoder(input)
self.means = torch.cat((self.means, mean))
return
def build_histo(self, input: torch.Tensor):
"""
Creates histos or samples Qs from it
NOTE:
Make sure you only put faces into this
functions
"""
_, mean, std = self.encoder(input)
self.means = torch.cat((self.means, mean))
self.std = torch.cat((self.std, std))
values = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu()).sum(0)
self.hist += values
return
def get_histo_max(self):
probs = torch.zeros_like(self.means[:,0]).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs = torch.max(probs, torch.Tensor(p).to(self.device))
probs /= probs.sum()
return probs
def get_histo_max5(self):
probs = torch.zeros_like(self.means, dtype=float).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs[:,i] = torch.Tensor(p).to(self.device)
probs = probs.sort(1, descending=True)[0][:,:5]
probs = probs.prod(1)
print(probs)
return probs
def get_histo_gaussian(self):
"""
Returns the probabilities given the means given the histo values
"""
results = np.empty(self.means.shape[0])
hist_batch_size = 4000
# Iterate in large batches over dataset to prevent memory lockup
for i in range(0, self.means.shape[0], hist_batch_size):
i_end = i + hist_batch_size
if i_end > self.means.shape[0]:
i_end = self.means.shape[0]
mean = self.means[i:i_end, :]
std = self.std[i:i_end, :]
lins = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu())
Q = lins * self.hist
Q = Q.sum(-1)
W = 1 / (Q + self.alpha)
# Performing the max value technique, TODO: analyse top 5
results[i:i_end] = W.max(-1)
        # Reset values
self.hist.fill(0)
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
return torch.tensor(results).to(self.device)
def recon_images(self, images: torch.Tensor):
with torch.no_grad():
pred, mean, std = self.encoder(images)
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(mean, std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
# return predictions and the loss
return recon_images
def sample(self, n_samples, z_samples=[]):
"""
Sample n_samples from the model. Return both the sampled images
(from bernoulli) and the means for these bernoullis (as these are
used to plot the data manifold).
"""
with torch.no_grad():
z_samples = torch.randn(n_samples, self.z_dim).to(self.device)
sampled_images = self.decoder(z_samples)
return sampled_images
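# ---------------------------------------------------------------------------
# Illustrative sketches (not part of the original module); both are minimal,
# hypothetical demos of techniques used in Db_vae above.
# ---------------------------------------------------------------------------
def _rsample_demo():
    # Reparameterisation trick as used in `forward`: rsample() draws
    # z = mean + std * eps with eps ~ N(0, 1), so gradients flow back to
    # mean and std.
    mean = torch.zeros(3, requires_grad=True)
    std = torch.ones(3, requires_grad=True)
    z = torch.distributions.normal.Normal(mean, std).rsample()
    z.sum().backward()
    print(mean.grad, std.grad)  # both gradients are populated

def _inverse_histogram_demo():
    # Inverse-histogram reweighting as used in `get_histo_max`: latents in
    # dense regions get a small sampling weight, rare latents a large one.
    latent = np.random.RandomState(0).normal(size=1000)  # one latent dimension
    hist, bins = np.histogram(latent, density=True, bins=10)
    bins[0], bins[-1] = -float('inf'), float('inf')
    bin_idx = np.digitize(latent, bins)
    hist = hist + 0.01          # alpha smoothing avoids division by zero
    hist /= np.sum(hist)
    p = 1.0 / hist[bin_idx - 1]
    p /= np.sum(p)
    print(p.min(), p.max())     # rare latents receive the larger weights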
| [
"torch.distributions.normal.Normal",
"logger.logger.error",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"os.path.exists",
"numpy.histogram",
"torch.nn.Flatten",
"numpy.linspace",
"numpy.empty",
"torch.zeros_like",
"torch.randn",
"logger.logger.info",
"torch.nn.LeakyReLU",
"numpy.digitize",... | [((3540, 3579), 'torch.distributions.normal.Normal', 'torch.distributions.normal.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (3573, 3579), False, 'import torch\n'), ((3852, 3884), 'numpy.zeros', 'np.zeros', (['(z_dim, self.num_bins)'], {}), '((z_dim, self.num_bins))\n', (3860, 3884), True, 'import numpy as np\n'), ((4892, 4942), 'logger.logger.info', 'logger.info', (['f"""Loaded model from {path_to_model}!"""'], {}), "(f'Loaded model from {path_to_model}!')\n", (4903, 4942), False, 'from logger import logger\n'), ((5594, 5646), 'torch.distributions.normal.Normal', 'torch.distributions.normal.Normal', (['facemean', 'facestd'], {}), '(facemean, facestd)\n', (5627, 5646), False, 'import torch\n'), ((5893, 5953), 'torch.distributions.kl.kl_divergence', 'torch.distributions.kl.kl_divergence', (['dist', 'self.target_dist'], {}), '(dist, self.target_dist)\n', (5929, 5953), False, 'import torch\n'), ((7760, 7789), 'torch.cat', 'torch.cat', (['(self.means, mean)'], {}), '((self.means, mean))\n', (7769, 7789), False, 'import torch\n'), ((8085, 8114), 'torch.cat', 'torch.cat', (['(self.means, mean)'], {}), '((self.means, mean))\n', (8094, 8114), False, 'import torch\n'), ((8134, 8160), 'torch.cat', 'torch.cat', (['(self.std, std)'], {}), '((self.std, std))\n', (8143, 8160), False, 'import torch\n'), ((9809, 9838), 'numpy.empty', 'np.empty', (['self.means.shape[0]'], {}), '(self.means.shape[0])\n', (9817, 9838), True, 'import numpy as np\n'), ((647, 688), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(3, 64, kernel_size=5, stride=2)\n', (656, 688), True, 'import torch.nn as nn\n'), ((702, 716), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (714, 716), True, 'import torch.nn as nn\n'), ((730, 748), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (744, 748), True, 'import torch.nn as nn\n'), ((763, 806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(64, 128, kernel_size=5, stride=2)\n', (772, 806), True, 'import torch.nn as nn\n'), ((820, 834), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (832, 834), True, 'import torch.nn as nn\n'), ((848, 867), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (862, 867), True, 'import torch.nn as nn\n'), ((882, 926), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(128, 256, kernel_size=5, stride=2)\n', (891, 926), True, 'import torch.nn as nn\n'), ((940, 954), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (952, 954), True, 'import torch.nn as nn\n'), ((968, 987), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (982, 987), True, 'import torch.nn as nn\n'), ((1002, 1046), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(256, 512, kernel_size=5, stride=2)\n', (1011, 1046), True, 'import torch.nn as nn\n'), ((1060, 1074), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1072, 1074), True, 'import torch.nn as nn\n'), ((1088, 1107), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (1102, 1107), True, 'import torch.nn as nn\n'), ((1121, 1133), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1131, 1133), True, 'import torch.nn as nn\n'), ((1148, 1168), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1000)'], {}), '(512, 1000)\n', (1157, 1168), True, 'import torch.nn as nn\n'), ((1182, 1196), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), 
'()\n', (1194, 1196), True, 'import torch.nn as nn\n'), ((1211, 1241), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(z_dim * 2 + 1)'], {}), '(1000, z_dim * 2 + 1)\n', (1220, 1241), True, 'import torch.nn as nn\n'), ((1494, 1529), 'torch.nn.functional.softplus', 'F.softplus', (['out[:, self.z_dim + 1:]'], {}), '(out[:, self.z_dim + 1:])\n', (1504, 1529), True, 'import torch.nn.functional as F\n'), ((2161, 2183), 'torch.nn.Linear', 'nn.Linear', (['z_dim', '(1000)'], {}), '(z_dim, 1000)\n', (2170, 2183), True, 'import torch.nn as nn\n'), ((2197, 2211), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2209, 2211), True, 'import torch.nn as nn\n'), ((2225, 2253), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(512 * 1 * 1)'], {}), '(1000, 512 * 1 * 1)\n', (2234, 2253), True, 'import torch.nn as nn\n'), ((2295, 2348), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512)', '(256)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(512, 256, kernel_size=5, stride=2)\n', (2313, 2348), True, 'import torch.nn as nn\n'), ((2362, 2376), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2374, 2376), True, 'import torch.nn as nn\n'), ((2390, 2409), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2404, 2409), True, 'import torch.nn as nn\n'), ((2424, 2477), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(256, 128, kernel_size=5, stride=2)\n', (2442, 2477), True, 'import torch.nn as nn\n'), ((2491, 2505), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2503, 2505), True, 'import torch.nn as nn\n'), ((2519, 2538), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2533, 2538), True, 'import torch.nn as nn\n'), ((2553, 2623), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'output_padding': '(1)'}), '(128, 64, kernel_size=5, stride=2, output_padding=1)\n', (2571, 2623), True, 'import torch.nn as nn\n'), ((2637, 2651), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2649, 2651), True, 'import torch.nn as nn\n'), ((2665, 2683), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2679, 2683), True, 'import torch.nn as nn\n'), ((2698, 2766), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(3)'], {'kernel_size': '(5)', 'stride': '(2)', 'output_padding': '(1)'}), '(64, 3, kernel_size=5, stride=2, output_padding=1)\n', (2716, 2766), True, 'import torch.nn as nn\n'), ((2780, 2792), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2790, 2792), True, 'import torch.nn as nn\n'), ((4173, 4207), 'os.path.exists', 'os.path.exists', (['full_path_to_model'], {}), '(full_path_to_model)\n', (4187, 4207), False, 'import os\n'), ((4221, 4356), 'logger.logger.error', 'logger.error', (['f"""Can\'t find model at {full_path_to_model}"""'], {'next_step': '"""Evaluation will stop"""', 'tip': '"""Double check your path to model"""'}), '(f"Can\'t find model at {full_path_to_model}", next_step=\n \'Evaluation will stop\', tip=\'Double check your path to model\')\n', (4233, 4356), False, 'from logger import logger\n'), ((6568, 6583), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6581, 6583), False, 'import torch\n'), ((6728, 6743), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6741, 6743), False, 'import torch\n'), ((7469, 7521), 'torch.distributions.normal.Normal', 'torch.distributions.normal.Normal', (['all_mean', 'all_std'], {}), '(all_mean, all_std)\n', (7502, 7521), False, 'import 
torch\n'), ((8509, 8561), 'numpy.histogram', 'np.histogram', (['dist'], {'density': '(True)', 'bins': 'self.num_bins'}), '(dist, density=True, bins=self.num_bins)\n', (8521, 8561), True, 'import numpy as np\n'), ((8657, 8680), 'numpy.digitize', 'np.digitize', (['dist', 'bins'], {}), '(dist, bins)\n', (8668, 8680), True, 'import numpy as np\n'), ((8739, 8751), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (8745, 8751), True, 'import numpy as np\n'), ((8808, 8817), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (8814, 8817), True, 'import numpy as np\n'), ((9158, 9210), 'numpy.histogram', 'np.histogram', (['dist'], {'density': '(True)', 'bins': 'self.num_bins'}), '(dist, density=True, bins=self.num_bins)\n', (9170, 9210), True, 'import numpy as np\n'), ((9306, 9329), 'numpy.digitize', 'np.digitize', (['dist', 'bins'], {}), '(dist, bins)\n', (9317, 9329), True, 'import numpy as np\n'), ((9388, 9400), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (9394, 9400), True, 'import numpy as np\n'), ((9457, 9466), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (9463, 9466), True, 'import numpy as np\n'), ((10787, 10802), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10800, 10802), False, 'import torch\n'), ((10960, 11004), 'torch.distributions.normal.Normal', 'torch.distributions.normal.Normal', (['mean', 'std'], {}), '(mean, std)\n', (10993, 11004), False, 'import torch\n'), ((11440, 11455), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11453, 11455), False, 'import torch\n'), ((3750, 3804), 'numpy.linspace', 'np.linspace', (['self.min_val', 'self.max_val', 'self.num_bins'], {}), '(self.min_val, self.max_val, self.num_bins)\n', (3761, 3804), True, 'import numpy as np\n'), ((3906, 3920), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (3918, 3920), False, 'import torch\n'), ((3956, 3970), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (3968, 3970), False, 'import torch\n'), ((4550, 4601), 'torch.load', 'torch.load', (['full_path_to_model'], {'map_location': 'device'}), '(full_path_to_model, map_location=device)\n', (4560, 4601), False, 'import torch\n'), ((4631, 4836), 'logger.logger.error', 'logger.error', (['"""Unable to load model from {full_path_to_model}."""'], {'next_step': '"""Model will not initialize"""', 'tip': '"""Did you use the right config parameters, or custom layers from the stored model?"""'}), "('Unable to load model from {full_path_to_model}.', next_step=\n 'Model will not initialize', tip=\n 'Did you use the right config parameters, or custom layers from the stored model?'\n )\n", (4643, 4836), False, 'from logger import logger\n'), ((6194, 6226), 'torch.zeros', 'torch.zeros', (['faceslicer.shape[0]'], {}), '(faceslicer.shape[0])\n', (6205, 6226), False, 'import torch\n'), ((7240, 7286), 'torch.cat', 'torch.cat', (['(all_mean, mean_1 - steps_mean * i)'], {}), '((all_mean, mean_1 - steps_mean * i))\n', (7249, 7286), False, 'import torch\n'), ((7311, 7354), 'torch.cat', 'torch.cat', (['(all_std, std_1 - steps_std * i)'], {}), '((all_std, std_1 - steps_std * i))\n', (7320, 7354), False, 'import torch\n'), ((8347, 8381), 'torch.zeros_like', 'torch.zeros_like', (['self.means[:, 0]'], {}), '(self.means[:, 0])\n', (8363, 8381), False, 'import torch\n'), ((8988, 9029), 'torch.zeros_like', 'torch.zeros_like', (['self.means'], {'dtype': 'float'}), '(self.means, dtype=float)\n', (9004, 9029), False, 'import torch\n'), ((10589, 10603), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (10601, 10603), False, 'import torch\n'), ((10639, 10653), 'torch.Tensor', 
'torch.Tensor', ([], {}), '()\n', (10651, 10653), False, 'import torch\n'), ((10685, 10706), 'torch.tensor', 'torch.tensor', (['results'], {}), '(results)\n', (10697, 10706), False, 'import torch\n'), ((6915, 6931), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6927, 6931), False, 'import torch\n'), ((6970, 6986), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6982, 6986), False, 'import torch\n'), ((9493, 9508), 'torch.Tensor', 'torch.Tensor', (['p'], {}), '(p)\n', (9505, 9508), False, 'import torch\n'), ((11481, 11515), 'torch.randn', 'torch.randn', (['n_samples', 'self.z_dim'], {}), '(n_samples, self.z_dim)\n', (11492, 11515), False, 'import torch\n'), ((8856, 8871), 'torch.Tensor', 'torch.Tensor', (['p'], {}), '(p)\n', (8868, 8871), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 08:55:28 2021
Purpose of the script:
    The purpose of this script is to load information from the files created both by Nergica's Quality Control and by the lidar installed on Nergica's site.
    The information is extracted for the heights and the year selected at the top of this script. The information is then saved in a dataframe (.pkl) that can
    be easily loaded to plot graphs and compare data from the met mast (quality-controlled by Nergica) and the lidar.
This code contains 4 sections
    1. The first section contains the code for loading lidar and met mast data from the Nergica site
        The met mast data has been run through a Quality Control.
        The code cleans the data with the Quality Control and standardizes data such as the timestamps to
        easily plot comparison graphs between the lidar and the met mast data.
        The code saves all dataFrames in a pickle file that can be extracted easily from python in order
        to plot graphs.
2. The second section is a function to plot different data from the lidar in comparison with
the double anemometry that was performed with the Quality Control (the name of the result
of the double anemometry is INFO01)
    3. The third section presents how we got the temperature, pressure and humidity data,
        since we had problems with some data from the lidar (Windcube V2).
        It also presents an easy correlation comparison
    4. The fourth section uses the 3 previous sections to demonstrate how to use these functions
You can manually change the informations to test other combinations
Example to run the code :
on spyder or other related IDEs -> Run the script
on anaconda or other related prompts -> Make sure the file is in the right directory and run this command : python ScriptTask32_CorrelationLidarMetMast_Nergica.py
@author: oplambert
"""
#%%
"""
This section load data from windcube v2 files on Nergica site and put all data in a dictionnary
There is an entry for each height that we want
This section load data from the control quality of the met mast at Nergica site and put all data in a dictionary
There is an entry of each captor on the met mast that we want
"""
#Importation of python libraries used in this script
import datetime
import glob
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
###################################################
###### PARAMETERS TO CHANGE MANUALLY - Lidar ######
###################################################
# Chosen year for which the lidar data will be evaluated
str_year = "2015" #2020
#List of heights that we want to evaluate
list_heights = ["80"]#,"60","80","100","120","140","160","180","200"]
# Path directory where lidar data files are saved
str_pathDirectory_lidar = "./lidarData/"
###################################################
#### PARAMETERS TO CHANGE MANUALLY - Met Mast #####
###################################################
# Data that we want to discard after the Quality Control
# Each code refers to a specific condition (ex.:R101 -> No Data, R104 -> instruments under maintenance)
list_dropColumn_CQ = ["R101","R103","R104","R105","R201","R202","R203","R204","R205","R206"]#,"R301","R303","R401","R403"]
#Path Directory where CQ files are saved
str_pathDirectory_CQ = "./metMastData/"
# Height Filter in the CQ file name / In that case, height of 80m is chosen
str_filesFilter="*mmv1*Ht80m*"
###################################################
########### INITIALIZATION of VARIABLES ###########
###################################################
# Dictionary initialisation for raw CQ data and cleaned CQ data
dict_data_CQ={}
dict_data_CQ_cleaned={}
# Dictionary initialisation for raw lidar data and cleaned lidar data
dict_data_lidar={}
dict_data_lidar_cleaned={}
####################################################
########### Importation of lidar data ##############
####################################################
# Loop importing Lidar data for desired heights
# Save data in dict_data_lidar
# Name each dataFrame inside a dict : data_[annee]_[hauteur]m
for iter_height in list_heights:
with open(str_pathDirectory_lidar+iter_height+"m_"+str_year+"_dataWindCube.pkl", 'rb') as file:
vars()["df_"+str_year+"_"+iter_height+"m"]=pickle.load(file)
dict_data_lidar["df_"+str_year+"_"+iter_height+"m"]=(vars()["df_"+str_year+"_"+iter_height+"m"])
####################################################
########## Cleaning of Control lidar data ##########
####################################################
# Add column for months in lidar dataframe : dict_data_lidar_cleaned
for height_lidar in dict_data_lidar:
df_lidar = dict_data_lidar[height_lidar]
list_column_month_lidar = df_lidar["TimeObjectData"].astype(str).str[5:7]
df_column_month=pd.DataFrame({"Month":list_column_month_lidar})
df_lidar = df_lidar.join(df_column_month)
dict_data_lidar_cleaned[height_lidar]=df_lidar
####################################################
####### Importation of Control Quality data ########
####################################################
# loop importing CQ data for chosen filter height
# Save data inside a dictionary :
for str_file_n in glob.glob(str_pathDirectory_CQ + str_filesFilter):
with open(str_file_n,mode='r') as file:
int_posi_name = str(file).find("mmv1")
str_name_key = str(file)[int_posi_name+5:int_posi_name+19]
dict_data_CQ[str_name_key]= pd.read_csv(file, delimiter=';')
# Since there is no barometer at 80m, we add this entry to compare
# pressure on site with the lidar
for str_file_m in glob.glob(str_pathDirectory_CQ+"*mmv1*Baroh*"):
with open(str_file_m,mode='r') as file:
int_posi_name = str(file).find("mmv1")
str_name_key = str(file)[int_posi_name+5:int_posi_name+19]
dict_data_CQ[str_name_key]= pd.read_csv(file, delimiter=';')
####################################################
######### Cleaning of Control Quality data #########
####################################################
# Loop to clean data from the CQ
# Iteration over all captors in dict_data_CQ
for captor in dict_data_CQ:
# Assign data from captor in a dataframe
df_captor_CQ = dict_data_CQ[captor]
# Initialisation of variables needed to clean dataframe
array_columnTotalValue = np.zeros(len(df_captor_CQ))
array_columnIndexValue=np.arange(0,len(df_captor_CQ))
list_DateTime=[]
    # Inner loop to determine which rows to delete from the CQ according to RXXX codes
for column in list_dropColumn_CQ:
# Add 1 in the row where RXXX is true
array_columnTotalValue += df_captor_CQ[column].to_numpy()
    # Loop over all timestamps to align the CQ timestamps with the lidar timestamps
for iter_timestamp in df_captor_CQ["Timestamp"]:
# Correct the timestamp in a new list : list_DateTime
list_DateTime.append(datetime.datetime.strptime(iter_timestamp[0:17].replace("-"," "),'%d %b %Y %H:%M'))
# Convert list_DateTime to a dataframe : df_timeObject
df_timeObject = pd.DataFrame(list_DateTime,columns=["TimeObjectData"])
# Add a new column in df_captor_CQ with the new timestamps
df_captor_CQ=df_captor_CQ.join(df_timeObject)
    # Find the indices where a selected RXXX code is true
    # (boolean masking so that a flagged row 0 is dropped as well)
    array_columnIndex = array_columnIndexValue[array_columnTotalValue > 0]
# Remove all rows of df_captor_CQ where a RXXX code is true and save new dataframe
df_captor_CQ_cleaned = df_captor_CQ.drop(array_columnIndex)
# Add a column Month in dataframe from timestamp object
list_column_month = df_captor_CQ_cleaned["TimeObjectData"].astype(str).str[5:7]
df_column_month=pd.DataFrame({"Month":list_column_month})
df_captor_CQ_cleaned = df_captor_CQ_cleaned.join(df_column_month)
# Assign cleaned dataframe in the dictionary : dict_data_CQ_cleaned
dict_data_CQ_cleaned[captor] = df_captor_CQ_cleaned
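# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the RXXX-flag row
# dropping performed in the loop above, reduced to a toy dataframe with
# hypothetical values.
# ---------------------------------------------------------------------------
def _flag_drop_demo():
    df = pd.DataFrame({"Moyenne": [1.0, 2.0, 3.0],
                      "R101": [1, 0, 0],
                      "R104": [0, 0, 1]})
    total = df[["R101", "R104"]].to_numpy().sum(axis=1)
    idx = np.arange(len(df))[total > 0]  # indices of flagged rows (0 and 2)
    print(df.drop(idx))                 # only the unflagged middle row remains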
####################################################
############ save data in pickle file ##############
####################################################
# Save lidar entry in pickle file for each height
for lidar_entry in dict_data_lidar_cleaned:
df_save_output = dict_data_lidar_cleaned[lidar_entry]
df_save_output.to_pickle("savedFiles/"+lidar_entry+".pkl")
# Save CQ entry in pickle file for each captor
for captor_entry in dict_data_CQ_cleaned:
df_save_output = dict_data_CQ_cleaned[captor_entry]
df_save_output.to_pickle("savedFiles/"+captor_entry+".pkl")
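# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): reading one of the
# pickle files written above back into a dataframe. The file name follows the
# naming scheme of the loops above and is hypothetical.
# ---------------------------------------------------------------------------
def _reload_demo(path="savedFiles/df_2015_80m.pkl"):
    return pd.read_pickle(path)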
#%%
"""
This section initializes the function to plot graphs from the cleaned files of the previous section
You can directly go to the next section to use it
"""
####################################################
############## Function to plot graphs #############
####################################################
# Function that plots a comparison between lidar and met mast values for
# - a specified variable
# -> Variable must be a string : Temp_int, Temp_ext, Pressure, Rel_hum, WiperCounts, Vbatt, Wdspd, Data_availability, Data_availability_2, wdspd_dis, CNR and Z-wind
# - specified months (in a list, ex.; ["08", "09"])
# - dictionary of captors from CQ
# - dictionary of lidar dataframe
# - list of heights of lidar in lidar dictionary (ex.: ["80","40"])
# - with ice detection from double anemometry (use INFO01)
def FunctionPlotGraphsIceLidar(Variable, Months, dictionary_CQ, dictionary_lidar, list_height_lidar, INFO="INFO01"):
# loop over all months to create a graph for each month
for iter_months in Months:
plt.figure(num=int(iter_months))
# loop over captors in dictionary of CQ Data
for captor in dictionary_CQ:
# Assign captor to a dataframe and only keep specified months
df_captor_inter = dictionary_CQ[captor]
df_captor = df_captor_inter[df_captor_inter["Month"] == iter_months]
int_iteration = 0
            # loop over heights in dictionary of lidar Data
for iter_heights in dictionary_lidar:
df_lidar = dictionary_lidar[iter_heights]
# Don't take into account empty dataframes
if df_lidar.size > 1:
df_lidar_month=df_lidar[df_lidar["Month"] == iter_months]
else:
print("Empty dataframe")
break
if Variable == "Temp_int":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Int Temp (°C)"].astype("float"),'.k',markersize=1, label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*10, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Internal temperature (°C) ", fontsize=13)
plt.yticks(np.arange(-5,30,5))
plt.ylim(-5,30)
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
# plt.show()
elif Variable == "Temp_ext":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Ext Temp (°C)"].astype("float"),'.k',markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("External temperature (°C)", fontsize=13)
plt.yticks(np.arange(-20,20,5))
plt.ylim(-20,20)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Pressure":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Pressure (hPa)"].astype("float"),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Pressure (hPa)", fontsize=13)
plt.yticks(np.arange(900,1010,10))
plt.ylim(900,1010)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Rel_hum":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Rel Humidity (%)"].astype("float"),".k", markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Relative humidity (%)", fontsize=13)
plt.yticks(np.arange(0,100,5))
plt.ylim(0,100)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "WiperCounts":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Wiper count"].astype("float"),".k",markersize=1,label="_nolegend_")
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*25, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Wiper counts", fontsize=13)
plt.yticks(np.arange(0,50,5))
plt.ylim(0,50)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Vbatt":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month["Vbatt (V)"].astype("float"),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Voltage batterie (V)", fontsize=13)
plt.yticks(np.arange(0,30,5))
plt.ylim(0,30)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Wdspd":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month[list_height_lidar[int_iteration]+"m Wind Speed (m/s)"].astype("float"),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Windspeed (m/s)", fontsize=13)
plt.yticks(np.arange(0,30,5))
plt.ylim(0,30)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Data_availability":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month[list_height_lidar[int_iteration]+"m Data Availability (%)"].astype("float"),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*70, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Data availability (%)", fontsize=13)
plt.yticks(np.arange(0,125,5))
plt.ylim(0,125)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Data_availability_2":
vector_availability = (df_lidar_month[list_height_lidar[int_iteration]+"m Data Availability (%)"].astype("float") < 20)*1
plt.plot(df_lidar_month['TimeObjectData'], vector_availability.replace({0:np.nan}),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Data availability below 20%", fontsize=13)
plt.yticks(np.arange(-45,20,5))
plt.ylim(-45,20)
plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "wdspd_dis":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month[list_height_lidar[int_iteration]+"m Wind Speed Dispersion (m/s)"].astype("float"),".k",markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Windspeed dispersion (m/s)", fontsize=13)
plt.yticks(np.arange(0,15,5))
plt.ylim(0,15)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "CNR":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month[list_height_lidar[int_iteration]+"m CNR (dB)"].astype("float"),'.k',markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("CNR (dB)", fontsize=13)
plt.yticks(np.arange(-45,25,5))
plt.ylim(-45,25)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
elif Variable == "Z-wind":
plt.plot(df_lidar_month['TimeObjectData'], df_lidar_month[list_height_lidar[int_iteration]+"m Z-wind (m/s)"].astype("float"),'.k',markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor[INFO].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Vertical wind (m/s)", fontsize=13)
plt.yticks(np.arange(-8,16,2))
plt.ylim(-8,16)
# plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
else:
print("Variable must be a string in: Temp_int, Temp_ext, Pressure, Rel_hum, WiperCounts, Vbatt, Wdspd, Data_availability, Data_availability_2, wdspd_dis, CNR and Z-wind")
break
return df_lidar_month
#%%
"""
We had problems with the met station on the lidar. Consequently, we are not able to extract:
    - Outside temperature
    - Outside pressure
    - Outside relative humidity
To obtain a correlation for these 3 criteria, we used the data from the met mast at the Nergica site.
This section presents an example of a simple correlation between these 3 criteria and ice accumulation
as determined by the double anemometry
"""
########################################################
########## Test for correlation with CQ Data ###########
########################################################
def FunctionPlotCorrelationCQandIceDetection(list_Months):
# Initialisation of dataframe with the right timestamps
df_test_correlation = df_timeObject
# Initialization of the column names
str_name=""
int_interation = 2
# loop over captors in dictionary of CQ to
for captor in dict_data_CQ_cleaned:
# if captor is a pressure captor
if captor.find("Baroh") >= 0:
str_name = "Pressure"
# Save information in dataframe : df_captor_CQ
df_captor_CQ = dict_data_CQ_cleaned[captor]
# Add column with df_captor_CQ in df_test_correlation
df_test_correlation = df_test_correlation.join(df_captor_CQ["Moyenne"],lsuffix=1)
# if captor is a temperature captor
elif captor.find("Temp") >= 0:
str_name="Temperature"
# Save information in dataframe : df_captor_CQ
df_captor_CQ=dict_data_CQ_cleaned[captor]
# Add column with df_captor_CQ in df_test_correlation
df_test_correlation=df_test_correlation.join(df_captor_CQ["Moyenne"],lsuffix=1)
# if captor is a humidity captor
elif captor.find("RHH") >= 0:
str_name="Humidity"
# Save information in dataframe : df_captor_CQ
df_captor_CQ=dict_data_CQ_cleaned[captor]
# Add column with df_captor_CQ in df_test_correlation
df_test_correlation=df_test_correlation.join(df_captor_CQ["Moyenne"],lsuffix=1)
df_test_correlation.rename(columns = {"Moyenne":str_name},inplace=True)
int_interation += 1
# drop empty columns
df_test_correlation = df_test_correlation.dropna(axis=1, how='all')
# Add a column month to de dataFrame df_test_correlation
list_column_month = df_test_correlation["TimeObjectData"].astype(str).str[5:7]
df_column_month=pd.DataFrame({"Month":list_column_month})
df_test_correlation=df_test_correlation.join(df_column_month)
# Test when humidity is over 90%
list_bool_Test_Humidity = df_test_correlation["Humidity"] > 90
# Test when temperature is over -5°C
list_bool_Test_Temperature_1 = df_test_correlation["Temperature"].astype("float") > -5.0
    # Test when temperature is under 5°C
    list_bool_Test_Temperature_2 = df_test_correlation["Temperature"].astype("float") < 5.0
    # Test when pressure is under 980 hPa
list_bool_Test_Pressure = df_test_correlation["Pressure"].astype("float") < 980
# When all conditions are true
list_bool_Test_all = list_bool_Test_Humidity & list_bool_Test_Temperature_1 & list_bool_Test_Temperature_2 & list_bool_Test_Pressure
    # Add a column for when all conditions are respected
df_test_correlation=df_test_correlation.join(pd.DataFrame(list_bool_Test_all,columns=["Test_all"]))
# loop over specified months
for iter_months in list_Months:
plt.figure(num=int(iter_months)+100)
# Loop over all captors
for captor in dict_data_CQ_cleaned:
# Assign captor to a dataframe and only keep specified months
df_captor_inter = dict_data_CQ_cleaned[captor]
df_captor = df_captor_inter[df_captor_inter["Month"] == iter_months]
# Extract the month of the iteration from the correlation dataframe : df_test_correlation
df_test_correlation_month = df_test_correlation[df_test_correlation["Month"] ==iter_months]
# plot for each month in comparison to double anemometry
plt.plot(df_test_correlation_month['TimeObjectData'], df_test_correlation_month["Test_all"].replace({0:np.nan}).astype("float"),'.k',markersize=1,label='_nolegend_')
plt.plot(df_captor["TimeObjectData"], df_captor["INFO01"].replace({0:np.nan}).astype("float")*5, 'sb', label='ice detected')
plt.xlabel("Time", fontsize=13)
plt.ylabel("Correlation Test", fontsize=13)
plt.yticks(np.arange(-45,25,5))
plt.ylim(-45,25)
plt.yticks([])
plt.xticks(rotation=45)
plt.legend(["ice detected"])
plt.tight_layout()
plt.grid(True)
plt.show()
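# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the threshold
# combination used in the function above, in isolation -- icing-favourable
# conditions are flagged where humidity > 90 %, -5°C < T < 5°C and
# P < 980 hPa. All values are hypothetical.
# ---------------------------------------------------------------------------
def _threshold_demo():
    df = pd.DataFrame({"Humidity": [95.0, 50.0],
                      "Temperature": [0.0, 10.0],
                      "Pressure": [970.0, 1000.0]})
    flag = ((df["Humidity"] > 90)
            & (df["Temperature"] > -5.0) & (df["Temperature"] < 5.0)
            & (df["Pressure"] < 980))
    print(flag)  # True only for the first row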
#%%
"""
This section shows a demonstration of how to use the previous sections
"""
##########################################################
########## Use of FunctionPlotGraphsIceLidar #############
##########################################################
Variable = "Temp_int"
#######################
# Choices of variable #
#######################
# Internal Temperature ("Temp_int")
# Outside Temperature ("Temp_ext")
# Pressure ("Pressure")
# Relative Humidity ("Rel_hum")
# Wiper counts ("WiperCounts")
# Volt battery ("Vbatt")
# WindSpeed ("Wdspd")
# Data availability ("Data_availability")
# Data availability under 20 % ("Data_availability_2")
# WindSpeed dispersion ("wdspd_dis")
# CNR ("CNR")
# vertical wind ("Z-wind")
list_Months= ["10", "11", "12"]
dictionary_CQ = dict_data_CQ_cleaned
dictionary_lidar = dict_data_lidar_cleaned
list_height_lidar = list_heights
FunctionPlotGraphsIceLidar(Variable, list_Months, dictionary_CQ, dictionary_lidar, list_height_lidar, INFO="INFO01")
FunctionPlotCorrelationCQandIceDetection(list_Months) | [
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.arange",
"pickle.load",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.ylim"... | [((5515, 5564), 'glob.glob', 'glob.glob', (['(str_pathDirectory_CQ + str_filesFilter)'], {}), '(str_pathDirectory_CQ + str_filesFilter)\n', (5524, 5564), False, 'import glob\n'), ((5931, 5979), 'glob.glob', 'glob.glob', (["(str_pathDirectory_CQ + '*mmv1*Baroh*')"], {}), "(str_pathDirectory_CQ + '*mmv1*Baroh*')\n", (5940, 5979), False, 'import glob\n'), ((5090, 5138), 'pandas.DataFrame', 'pd.DataFrame', (["{'Month': list_column_month_lidar}"], {}), "({'Month': list_column_month_lidar})\n", (5102, 5138), True, 'import pandas as pd\n'), ((7440, 7495), 'pandas.DataFrame', 'pd.DataFrame', (['list_DateTime'], {'columns': "['TimeObjectData']"}), "(list_DateTime, columns=['TimeObjectData'])\n", (7452, 7495), True, 'import pandas as pd\n'), ((8225, 8267), 'pandas.DataFrame', 'pd.DataFrame', (["{'Month': list_column_month}"], {}), "({'Month': list_column_month})\n", (8237, 8267), True, 'import pandas as pd\n'), ((23913, 23955), 'pandas.DataFrame', 'pd.DataFrame', (["{'Month': list_column_month}"], {}), "({'Month': list_column_month})\n", (23925, 23955), True, 'import pandas as pd\n'), ((26247, 26257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26255, 26257), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4563), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4557, 4563), False, 'import pickle\n'), ((5764, 5796), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '""";"""'}), "(file, delimiter=';')\n", (5775, 5796), True, 'import pandas as pd\n'), ((6177, 6209), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '""";"""'}), "(file, delimiter=';')\n", (6188, 6209), True, 'import pandas as pd\n'), ((24821, 24875), 'pandas.DataFrame', 'pd.DataFrame', (['list_bool_Test_all'], {'columns': "['Test_all']"}), "(list_bool_Test_all, columns=['Test_all'])\n", (24833, 24875), True, 'import pandas as pd\n'), ((25911, 25942), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (25921, 25942), True, 'import matplotlib.pyplot as plt\n'), ((25956, 25999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Correlation Test"""'], {'fontsize': '(13)'}), "('Correlation Test', fontsize=13)\n", (25966, 25999), True, 'import matplotlib.pyplot as plt\n'), ((26058, 26075), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-45)', '(25)'], {}), '(-45, 25)\n', (26066, 26075), True, 'import matplotlib.pyplot as plt\n'), ((26088, 26102), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (26098, 26102), True, 'import matplotlib.pyplot as plt\n'), ((26116, 26139), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (26126, 26139), True, 'import matplotlib.pyplot as plt\n'), ((26153, 26181), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (26163, 26181), True, 'import matplotlib.pyplot as plt\n'), ((26195, 26213), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26211, 26213), True, 'import matplotlib.pyplot as plt\n'), ((26227, 26241), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (26235, 26241), True, 'import matplotlib.pyplot as plt\n'), ((26024, 26045), 'numpy.arange', 'np.arange', (['(-45)', '(25)', '(5)'], {}), '(-45, 25, 5)\n', (26033, 26045), True, 'import numpy as np\n'), ((11423, 11454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (11433, 11454), True, 'import matplotlib.pyplot as plt\n'), 
((11476, 11529), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Internal temperature (°C) """'], {'fontsize': '(13)'}), "('Internal temperature (°C) ', fontsize=13)\n", (11486, 11529), True, 'import matplotlib.pyplot as plt\n'), ((11603, 11619), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5)', '(30)'], {}), '(-5, 30)\n', (11611, 11619), True, 'import matplotlib.pyplot as plt\n'), ((11640, 11663), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (11650, 11663), True, 'import matplotlib.pyplot as plt\n'), ((11685, 11713), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (11695, 11713), True, 'import matplotlib.pyplot as plt\n'), ((11735, 11753), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11751, 11753), True, 'import matplotlib.pyplot as plt\n'), ((11775, 11789), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11783, 11789), True, 'import matplotlib.pyplot as plt\n'), ((11562, 11582), 'numpy.arange', 'np.arange', (['(-5)', '(30)', '(5)'], {}), '(-5, 30, 5)\n', (11571, 11582), True, 'import numpy as np\n'), ((12183, 12214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (12193, 12214), True, 'import matplotlib.pyplot as plt\n'), ((12236, 12288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""External temperature (°C)"""'], {'fontsize': '(13)'}), "('External temperature (°C)', fontsize=13)\n", (12246, 12288), True, 'import matplotlib.pyplot as plt\n'), ((12363, 12380), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-20)', '(20)'], {}), '(-20, 20)\n', (12371, 12380), True, 'import matplotlib.pyplot as plt\n'), ((12439, 12462), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (12449, 12462), True, 'import matplotlib.pyplot as plt\n'), ((12484, 12512), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (12494, 12512), True, 'import matplotlib.pyplot as plt\n'), ((12534, 12552), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12550, 12552), True, 'import matplotlib.pyplot as plt\n'), ((12574, 12588), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12582, 12588), True, 'import matplotlib.pyplot as plt\n'), ((12610, 12620), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12618, 12620), True, 'import matplotlib.pyplot as plt\n'), ((12321, 12342), 'numpy.arange', 'np.arange', (['(-20)', '(20)', '(5)'], {}), '(-20, 20, 5)\n', (12330, 12342), True, 'import numpy as np\n'), ((12995, 13026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (13005, 13026), True, 'import matplotlib.pyplot as plt\n'), ((13048, 13089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {'fontsize': '(13)'}), "('Pressure (hPa)', fontsize=13)\n", (13058, 13089), True, 'import matplotlib.pyplot as plt\n'), ((13167, 13186), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(900)', '(1010)'], {}), '(900, 1010)\n', (13175, 13186), True, 'import matplotlib.pyplot as plt\n'), ((13245, 13268), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (13255, 13268), True, 'import matplotlib.pyplot as plt\n'), ((13290, 13318), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (13300, 13318), True, 'import matplotlib.pyplot as plt\n'), ((13340, 13358), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13356, 13358), True, 'import matplotlib.pyplot as plt\n'), ((13380, 13394), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (13388, 13394), True, 'import matplotlib.pyplot as plt\n'), ((13416, 13426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13424, 13426), True, 'import matplotlib.pyplot as plt\n'), ((13122, 13146), 'numpy.arange', 'np.arange', (['(900)', '(1010)', '(10)'], {}), '(900, 1010, 10)\n', (13131, 13146), True, 'import numpy as np\n'), ((13821, 13852), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (13831, 13852), True, 'import matplotlib.pyplot as plt\n'), ((13874, 13922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative humidity (%)"""'], {'fontsize': '(13)'}), "('Relative humidity (%)', fontsize=13)\n", (13884, 13922), True, 'import matplotlib.pyplot as plt\n'), ((13996, 14012), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (14004, 14012), True, 'import matplotlib.pyplot as plt\n'), ((14071, 14094), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (14081, 14094), True, 'import matplotlib.pyplot as plt\n'), ((14116, 14144), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (14126, 14144), True, 'import matplotlib.pyplot as plt\n'), ((14166, 14184), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14182, 14184), True, 'import matplotlib.pyplot as plt\n'), ((14206, 14220), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (14214, 14220), True, 'import matplotlib.pyplot as plt\n'), ((14242, 14252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14250, 14252), True, 'import matplotlib.pyplot as plt\n'), ((13955, 13975), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (13964, 13975), True, 'import numpy as np\n'), ((14631, 14662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (14641, 14662), True, 'import matplotlib.pyplot as plt\n'), ((14684, 14723), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wiper counts"""'], {'fontsize': '(13)'}), "('Wiper counts', fontsize=13)\n", (14694, 14723), True, 'import matplotlib.pyplot as plt\n'), ((14796, 14811), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(50)'], {}), '(0, 50)\n', (14804, 14811), True, 'import matplotlib.pyplot as plt\n'), ((14870, 14893), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (14880, 14893), True, 'import matplotlib.pyplot as plt\n'), ((14915, 14943), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (14925, 14943), True, 'import matplotlib.pyplot as plt\n'), ((14965, 14983), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14981, 14983), True, 'import matplotlib.pyplot as plt\n'), ((15005, 15019), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (15013, 15019), True, 'import matplotlib.pyplot as plt\n'), ((15041, 15051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15049, 15051), True, 'import matplotlib.pyplot as plt\n'), ((14756, 14775), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(5)'], {}), '(0, 50, 5)\n', (14765, 14775), True, 'import numpy as np\n'), ((15405, 15436), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': 
'(13)'}), "('Time', fontsize=13)\n", (15415, 15436), True, 'import matplotlib.pyplot as plt\n'), ((15458, 15505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage batterie (V)"""'], {'fontsize': '(13)'}), "('Voltage batterie (V)', fontsize=13)\n", (15468, 15505), True, 'import matplotlib.pyplot as plt\n'), ((15578, 15593), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(30)'], {}), '(0, 30)\n', (15586, 15593), True, 'import matplotlib.pyplot as plt\n'), ((15652, 15675), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (15662, 15675), True, 'import matplotlib.pyplot as plt\n'), ((15697, 15725), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (15707, 15725), True, 'import matplotlib.pyplot as plt\n'), ((15747, 15765), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15763, 15765), True, 'import matplotlib.pyplot as plt\n'), ((15787, 15801), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (15795, 15801), True, 'import matplotlib.pyplot as plt\n'), ((15823, 15833), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15831, 15833), True, 'import matplotlib.pyplot as plt\n'), ((15538, 15557), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(5)'], {}), '(0, 30, 5)\n', (15547, 15557), True, 'import numpy as np\n'), ((16276, 16307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (16286, 16307), True, 'import matplotlib.pyplot as plt\n'), ((16329, 16371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Windspeed (m/s)"""'], {'fontsize': '(13)'}), "('Windspeed (m/s)', fontsize=13)\n", (16339, 16371), True, 'import matplotlib.pyplot as plt\n'), ((16444, 16459), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(30)'], {}), '(0, 30)\n', (16452, 16459), True, 'import matplotlib.pyplot as plt\n'), ((16518, 16541), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (16528, 16541), True, 'import matplotlib.pyplot as plt\n'), ((16563, 16591), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (16573, 16591), True, 'import matplotlib.pyplot as plt\n'), ((16613, 16631), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16629, 16631), True, 'import matplotlib.pyplot as plt\n'), ((16653, 16667), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (16661, 16667), True, 'import matplotlib.pyplot as plt\n'), ((16689, 16699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16697, 16699), True, 'import matplotlib.pyplot as plt\n'), ((16404, 16423), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(5)'], {}), '(0, 30, 5)\n', (16413, 16423), True, 'import numpy as np\n'), ((17145, 17176), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (17155, 17176), True, 'import matplotlib.pyplot as plt\n'), ((17198, 17246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Data availability (%)"""'], {'fontsize': '(13)'}), "('Data availability (%)', fontsize=13)\n", (17208, 17246), True, 'import matplotlib.pyplot as plt\n'), ((17320, 17336), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(125)'], {}), '(0, 125)\n', (17328, 17336), True, 'import matplotlib.pyplot as plt\n'), ((17395, 17418), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (17405, 17418), True, 'import matplotlib.pyplot as plt\n'), ((17440, 
17468), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (17450, 17468), True, 'import matplotlib.pyplot as plt\n'), ((17490, 17508), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17506, 17508), True, 'import matplotlib.pyplot as plt\n'), ((17530, 17544), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (17538, 17544), True, 'import matplotlib.pyplot as plt\n'), ((17566, 17576), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17574, 17576), True, 'import matplotlib.pyplot as plt\n'), ((17279, 17299), 'numpy.arange', 'np.arange', (['(0)', '(125)', '(5)'], {}), '(0, 125, 5)\n', (17288, 17299), True, 'import numpy as np\n'), ((18127, 18158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (18137, 18158), True, 'import matplotlib.pyplot as plt\n'), ((18180, 18234), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Data availability below 20%"""'], {'fontsize': '(13)'}), "('Data availability below 20%', fontsize=13)\n", (18190, 18234), True, 'import matplotlib.pyplot as plt\n'), ((18309, 18326), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-45)', '(20)'], {}), '(-45, 20)\n', (18317, 18326), True, 'import matplotlib.pyplot as plt\n'), ((18347, 18361), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (18357, 18361), True, 'import matplotlib.pyplot as plt\n'), ((18383, 18406), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (18393, 18406), True, 'import matplotlib.pyplot as plt\n'), ((18428, 18456), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (18438, 18456), True, 'import matplotlib.pyplot as plt\n'), ((18478, 18496), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18494, 18496), True, 'import matplotlib.pyplot as plt\n'), ((18518, 18532), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (18526, 18532), True, 'import matplotlib.pyplot as plt\n'), ((18554, 18564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18562, 18564), True, 'import matplotlib.pyplot as plt\n'), ((18267, 18288), 'numpy.arange', 'np.arange', (['(-45)', '(20)', '(5)'], {}), '(-45, 20, 5)\n', (18276, 18288), True, 'import numpy as np\n'), ((19007, 19038), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (19017, 19038), True, 'import matplotlib.pyplot as plt\n'), ((19060, 19113), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Windspeed dispersion (m/s)"""'], {'fontsize': '(13)'}), "('Windspeed dispersion (m/s)', fontsize=13)\n", (19070, 19113), True, 'import matplotlib.pyplot as plt\n'), ((19186, 19201), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(15)'], {}), '(0, 15)\n', (19194, 19201), True, 'import matplotlib.pyplot as plt\n'), ((19260, 19283), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (19270, 19283), True, 'import matplotlib.pyplot as plt\n'), ((19305, 19333), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (19315, 19333), True, 'import matplotlib.pyplot as plt\n'), ((19355, 19373), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19371, 19373), True, 'import matplotlib.pyplot as plt\n'), ((19395, 19409), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (19403, 19409), True, 'import matplotlib.pyplot as plt\n'), 
((19431, 19441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19439, 19441), True, 'import matplotlib.pyplot as plt\n'), ((19146, 19165), 'numpy.arange', 'np.arange', (['(0)', '(15)', '(5)'], {}), '(0, 15, 5)\n', (19155, 19165), True, 'import numpy as np\n'), ((19827, 19858), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (19837, 19858), True, 'import matplotlib.pyplot as plt\n'), ((19880, 19915), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CNR (dB)"""'], {'fontsize': '(13)'}), "('CNR (dB)', fontsize=13)\n", (19890, 19915), True, 'import matplotlib.pyplot as plt\n'), ((19990, 20007), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-45)', '(25)'], {}), '(-45, 25)\n', (19998, 20007), True, 'import matplotlib.pyplot as plt\n'), ((20066, 20089), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (20076, 20089), True, 'import matplotlib.pyplot as plt\n'), ((20111, 20139), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (20121, 20139), True, 'import matplotlib.pyplot as plt\n'), ((20161, 20179), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20177, 20179), True, 'import matplotlib.pyplot as plt\n'), ((20201, 20215), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (20209, 20215), True, 'import matplotlib.pyplot as plt\n'), ((20237, 20247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20245, 20247), True, 'import matplotlib.pyplot as plt\n'), ((19948, 19969), 'numpy.arange', 'np.arange', (['(-45)', '(25)', '(5)'], {}), '(-45, 25, 5)\n', (19957, 19969), True, 'import numpy as np\n'), ((20640, 20671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(13)'}), "('Time', fontsize=13)\n", (20650, 20671), True, 'import matplotlib.pyplot as plt\n'), ((20693, 20739), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical wind (m/s)"""'], {'fontsize': '(13)'}), "('Vertical wind (m/s)', fontsize=13)\n", (20703, 20739), True, 'import matplotlib.pyplot as plt\n'), ((20813, 20829), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-8)', '(16)'], {}), '(-8, 16)\n', (20821, 20829), True, 'import matplotlib.pyplot as plt\n'), ((20888, 20911), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (20898, 20911), True, 'import matplotlib.pyplot as plt\n'), ((20933, 20961), 'matplotlib.pyplot.legend', 'plt.legend', (["['ice detected']"], {}), "(['ice detected'])\n", (20943, 20961), True, 'import matplotlib.pyplot as plt\n'), ((20983, 21001), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20999, 21001), True, 'import matplotlib.pyplot as plt\n'), ((21023, 21037), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (21031, 21037), True, 'import matplotlib.pyplot as plt\n'), ((21059, 21069), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21067, 21069), True, 'import matplotlib.pyplot as plt\n'), ((20772, 20792), 'numpy.arange', 'np.arange', (['(-8)', '(16)', '(2)'], {}), '(-8, 16, 2)\n', (20781, 20792), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import os
import pandas as pd
import geopandas as gpd
import numpy as np
import networkx as nx
def map_setup(n_communities, area, small_network=False):
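    # Build an empty graph with one node per community, plus the geometry
    # needed to draw the communities on a map.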
    # directory of data
    areas = ['London', 'UK']
    if area not in areas:
        raise ValueError("Invalid area name. Expected one of: %s" % areas)
    data_dir = '../connectivity_data_' + area + '/'
    # poly: spatial boundaries of communities (just useful for visualisation)
    poly = gpd.read_file(os.path.join(data_dir, 'poly_' + area + '.shp'))
    # points: the spatial centroids of the communities (just useful for visualisation)
    points = poly.copy()
    points.geometry = points['geometry'].centroid
    points.crs = poly.crs
    # option: if small network, focus on Westminster
    if small_network:
        points = points[points['msoa11nm'].str.contains('Westminster')][:n_communities].reset_index()
        poly = poly[poly['msoa11nm'].str.contains('Westminster')][:n_communities].reset_index()
    node_positions = points.apply(lambda x: np.array(x['geometry']), axis=1)
    # make graph
    G = nx.Graph()
    G.add_nodes_from(points.index)
    return G, node_positions, poly
def init_figure():
    # We will plot the map and the connectivity graph.
    fig, ax = plt.subplots(1, figsize=(15, 15))
    # Turn the axes off.
    plt.axis('off')
    return fig, ax
def update_plot_parameters(time_index, n_communities, community_active_infections):
    # set up parameters for plotting nodes
    # node style
    node_size = [community_active_infections[time_index][i]**3 for i in range(n_communities)]
    linewidths = [.5 * community_active_infections[time_index][i] for i in range(n_communities)]
    return node_size, linewidths
def convert_lists_to_timeseries(lists):
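    # Transpose per-community lists into a (time, community) array and mask
    # zeros as NaN (matplotlib skips NaN values when plotting).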
    timeseries = np.array(lists).T
    timeseries = timeseries.astype('float')
    timeseries[timeseries == 0] = np.nan
    return timeseries
def plot_timeseries(n_communities, time, infection_timeseries, symptomatic_timeseries, poly):
    fig, axs = plt.subplots(n_communities, 2, figsize=(10, 10))
    for c in range(n_communities):
        axs[c, 0].plot(time, infection_timeseries[c],
                       color='red')
        axs[c, 0].fill_between(time, 0, symptomatic_timeseries[c],
                               color='blue', alpha=.2)
        axs[c, 0].set_ylim([0, np.nanmax(infection_timeseries)])
        axs[c, 0].set_xlim([0, time[-1]])
        poly.loc[c:c].plot(
            alpha=0.2,
            edgecolor='k',
            linewidth=0.8,
            ax=axs[c, 1])
        axs[c, 1].set_axis_off()
    fig.tight_layout()
    return fig
# plt.plot(time, df_to_active(time, dataframe))
# for n in range(n_communities):
# plt.plot(time, df_to_active(time, dataframe[dataframe['community'] == n]))
def df_to_active(time, df):
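    # Cumulative count of currently active (infected, not yet isolated) cases:
    # step through whole days, adding one per infection event and subtracting
    # one per isolation event.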
    t_isos = sorted(df['isolation time'])
    t_infs = sorted(df['infection time'])
    active = [0]
    t_iso = t_isos.pop(0)
    t_inf = t_infs.pop(0)
    for t in range(1, int(time[-1]) + 1):
        change = 0
        while t > t_iso:
            change -= 1
            t_iso = t_isos.pop(0) if t_isos else np.inf
        while t > t_inf:
            change += 1
            t_inf = t_infs.pop(0) if t_infs else np.inf
        active.append(active[-1] + change)
    return active
# plt.plot(time, df_to_symptomatic(time, dataframe))
# for n in range(n_communities):
#     plt.plot(time, df_to_symptomatic(time, dataframe[dataframe['community'] == n]))
def df_to_symptomatic(time, df):
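    # Same cumulative bookkeeping as df_to_active, but an individual is
    # counted from symptom onset rather than from infection.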
    t_isos = sorted(df['isolation time'])
    t_syms = sorted(df['symptomn onset time'])
    active = [0]
    t_iso = t_isos.pop(0)
    t_sym = t_syms.pop(0)
    for t in range(1, int(time[-1]) + 1):
        change = 0
        while t > t_iso:
            change -= 1
            t_iso = t_isos.pop(0) if t_isos else np.inf
        while t > t_sym:
            change += 1
            t_sym = t_syms.pop(0) if t_syms else np.inf
        active.append(active[-1] + change)
    return active
# plt.plot(time, df_to_Reff(time, dataframe))
def df_to_Reff(time, df):
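    # Rough effective reproduction number over time: the cumulative number of
    # infections (children) divided by the cumulative number of distinct
    # infectors (parents) observed so far.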
    df_sorted = df.sort_values(by='infection time')
    t_infs = list(df_sorted['infection time'])
    parent_ids = list(df_sorted['parent id'])
    children = [0]  # zero infected at t=0
    counted_parent_ids = {None}  # ids of parents who have infected anyone at t=0
    parents = [1]  # number of parents who have infected anyone at t=0
    t_inf, parent_id = t_infs.pop(0), parent_ids.pop(0)  # get next row
    for t in range(1, int(time[-1]) + 1):
        c_change = 0
        while t > t_inf:
            c_change += 1  # increment children
            counted_parent_ids.add(parent_id)  # increment parents if unique
            if t_infs:
                t_inf, parent_id = t_infs.pop(0), parent_ids.pop(0)  # get next row
            else:
                t_inf = np.inf
        children.append(children[-1] + c_change)
        parents.append(len(counted_parent_ids))
    Reff = [cp[0] / cp[1] for cp in zip(children, parents)]
    return Reff
| [
"os.path.join",
"networkx.Graph",
"numpy.array",
"numpy.nanmax",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots"
] | [((1124, 1134), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1132, 1134), True, 'import networkx as nx\n'), ((1299, 1332), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 15)'}), '(1, figsize=(15, 15))\n', (1311, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1378), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1371, 1378), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2119), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_communities', '(2)'], {'figsize': '(10, 10)'}), '(n_communities, 2, figsize=(10, 10))\n', (2083, 2119), True, 'import matplotlib.pyplot as plt\n'), ((502, 544), 'os.path.join', 'os.path.join', (['dir', "('poly_' + area + '.shp')"], {}), "(dir, 'poly_' + area + '.shp')\n", (514, 544), False, 'import os\n'), ((1837, 1851), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (1845, 1851), True, 'import numpy as np\n'), ((1061, 1084), 'numpy.array', 'np.array', (["x['geometry']"], {}), "(x['geometry'])\n", (1069, 1084), True, 'import numpy as np\n'), ((2384, 2415), 'numpy.nanmax', 'np.nanmax', (['infection_timeseries'], {}), '(infection_timeseries)\n', (2393, 2415), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Example for computing the RIR between several sources and receivers in GPU.
@author: <EMAIL>
pyrirgen: https://github.com/phecda-xu/RIR-Generator
"""
import numpy as np
import soundfile as sf
import math
import pyrirgen
import argparse
import os
from multiprocessing import Process
def run(output_dir):
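    # Sample num_room random rooms; for each room, draw utt_per_room random
    # source/microphone layouts, simulate their RIRs with pyrirgen, and write
    # the accepted layouts to wav files whose names encode the geometry.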
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    c = 340
    fs = 16000  # Sampling frequency [Hz]
    num_room = 80
    utt_per_room = 8
    room_x = 10
    room_y = 10
    room_z = 3
    nb_src = 3  # Number of sources
    nb_rcv = 8  # Number of receivers (unused below; pos_rcv keeps a single receiver)
    for i in range(num_room):
        x = np.random.uniform(3, room_x)
        y = np.random.uniform(3, room_y)
        z = room_z
        for j in range(utt_per_room):
            mic_distance = 0.05
            room_sz = [x, y, z]  # Size of the room [m]
            pos_src1 = [np.random.uniform(0, x), np.random.uniform(0, y), np.random.uniform(1.2, 1.9)]
            pos_src2 = [np.random.uniform(0, x), np.random.uniform(0, y), np.random.uniform(1.2, 1.9)]
            pos_src3 = [np.random.uniform(0, x), np.random.uniform(0, y), np.random.uniform(0, z)]  # Positions of the sources [m]
            mic_middle_point = [np.random.uniform(x/2 - 1.0, x/2 + 1.0),
                                np.random.uniform(y/2 - 1.0, y/2 + 1.0),
                                np.random.uniform(0.6, 1.2)]
            baseangle = 0.125 * np.pi
            #pos_rcv = [[mic_middle_point[0]+mic_distance*np.cos(16*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*16), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(14*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*14), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(12*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*12), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(10*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*10), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(8*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*8), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(6*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*6), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(4*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*4), mic_middle_point[2]],
            #           [mic_middle_point[0]+mic_distance*np.cos(2*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*2), mic_middle_point[2]],]
            pos_rcv = [[mic_middle_point[0]+mic_distance*np.cos(16*baseangle), mic_middle_point[1]+mic_distance*np.sin(baseangle*16), mic_middle_point[2]],]
            mic_pattern = "omnidirectional"  # Receiver polar pattern
            T60 = np.random.uniform(0.2, 0.8)  # Time for the RIR to reach 60dB of attenuation [s]
            RIRs1 = pyrirgen.generateRir(room_sz, pos_src1, pos_rcv, soundVelocity=c, fs=fs, reverbTime=T60, nSamples=8000,
                                         micType=mic_pattern, nOrder=-1, nDim=3, isHighPassFilter=True)  # source * mic * time
            RIRs2 = pyrirgen.generateRir(room_sz, pos_src2, pos_rcv, soundVelocity=c, fs=fs, reverbTime=T60, nSamples=8000,
                                         micType=mic_pattern, nOrder=-1, nDim=3, isHighPassFilter=True)  # source * mic * time
            RIRs3 = pyrirgen.generateRir(room_sz, pos_src3, pos_rcv, soundVelocity=c, fs=fs, reverbTime=T60, nSamples=8000,
                                         micType=mic_pattern, nOrder=-1, nDim=3, isHighPassFilter=True)  # source * mic * time
            RIRs1 = np.array(RIRs1)
            RIRs2 = np.array(RIRs2)
            RIRs3 = np.array(RIRs3)
            #out = np.zeros([24, 8000])
            #out[0:8] = RIRs1
            #out[8:16] = RIRs2
            #out[16:24] = RIRs3
            out = np.zeros([3, 8000])
            out[0:1] = RIRs1
            out[1:2] = RIRs2
            out[2:3] = RIRs3
            out = out.transpose(1, 0)
            pos_src1 = np.array(pos_src1)
            pos_src2 = np.array(pos_src2)
            pos_src3 = np.array(pos_src3)
            pos_src = np.stack((pos_src1, pos_src2, pos_src3), axis=0)
            mic_middle_point = np.array(mic_middle_point)
            distance = np.zeros(nb_src)
            angle = np.zeros(nb_src)
            for k in range(nb_src):
                # distance[k] = np.linalg.norm(pos_src[k]-mic_middle_point)
                distance[k] = np.sqrt((pos_src[k][0] - mic_middle_point[0])**2 + (pos_src[k][1] - mic_middle_point[1])**2)
                angle[k] = np.arctan2((pos_src[k][1] - mic_middle_point[1]), (pos_src[k][0] - mic_middle_point[0]))
                angle[k] = math.degrees(angle[k])
                if angle[k] < 0:
                    angle[k] = 360 - np.abs(angle[k])
            matrix_1m = np.array([0.5, 0.5, 0.5])
            matrix_5m = np.array([5.0, 5.0, 5.0])
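            # Keep this layout only if every source lies between 0.5 m and
            # 5 m (horizontal distance) from the array centre and all sources
            # are separated by more than 20 degrees in azimuth.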
            if (distance > matrix_1m).all() and (distance < matrix_5m).all() and abs(angle[1] - angle[0]) > 20.0 and abs(angle[2] - angle[0]) > 20.0 and abs(angle[2] - angle[1]) > 20.0:
                wav_name = output_dir + '/' + \
                    ('%.2f' % x) + '_' + ('%.2f' % y) + '_' + ('%.2f' % z) + '_' + \
                    ('%.2f' % distance[0]) + '_' + ('%.2f' % distance[1]) + '_' + \
                    ('%.4f' % angle[0]) + '_' + ('%.4f' % angle[1]) + '_' + ('%.4f' % T60) + '.wav'
                sf.write(wav_name, out, 16000)
if __name__ == '__main__':
    num_process = 50
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir",
                        type=str,
                        help="output_dir",
                        default="rir")
    args = parser.parse_args()
    p_lst = []
    for i in range(num_process):
        p = Process(target=run, args=(args.output_dir,))
        p.start()
        p_lst.append(p)
    for p in p_lst:
        p.join()
| [
"os.path.exists",
"numpy.abs",
"numpy.sqrt",
"os.makedirs",
"argparse.ArgumentParser",
"multiprocessing.Process",
"math.degrees",
"soundfile.write",
"pyrirgen.generateRir",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.arctan2",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin"
] | [((5909, 5934), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5932, 5934), False, 'import argparse\n'), ((383, 409), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (397, 409), False, 'import os\n'), ((420, 443), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (431, 443), False, 'import os\n'), ((709, 737), 'numpy.random.uniform', 'np.random.uniform', (['(3)', 'room_x'], {}), '(3, room_x)\n', (726, 737), True, 'import numpy as np\n'), ((751, 779), 'numpy.random.uniform', 'np.random.uniform', (['(3)', 'room_y'], {}), '(3, room_y)\n', (768, 779), True, 'import numpy as np\n'), ((6190, 6234), 'multiprocessing.Process', 'Process', ([], {'target': 'run', 'args': '(args.output_dir,)'}), '(target=run, args=(args.output_dir,))\n', (6197, 6234), False, 'from multiprocessing import Process\n'), ((3063, 3090), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.8)'], {}), '(0.2, 0.8)\n', (3080, 3090), True, 'import numpy as np\n'), ((3169, 3343), 'pyrirgen.generateRir', 'pyrirgen.generateRir', (['room_sz', 'pos_src1', 'pos_rcv'], {'soundVelocity': 'c', 'fs': 'fs', 'reverbTime': 'T60', 'nSamples': '(8000)', 'micType': 'mic_pattern', 'nOrder': '(-1)', 'nDim': '(3)', 'isHighPassFilter': '(True)'}), '(room_sz, pos_src1, pos_rcv, soundVelocity=c, fs=fs,\n reverbTime=T60, nSamples=8000, micType=mic_pattern, nOrder=-1, nDim=3,\n isHighPassFilter=True)\n', (3189, 3343), False, 'import pyrirgen\n'), ((3426, 3600), 'pyrirgen.generateRir', 'pyrirgen.generateRir', (['room_sz', 'pos_src2', 'pos_rcv'], {'soundVelocity': 'c', 'fs': 'fs', 'reverbTime': 'T60', 'nSamples': '(8000)', 'micType': 'mic_pattern', 'nOrder': '(-1)', 'nDim': '(3)', 'isHighPassFilter': '(True)'}), '(room_sz, pos_src2, pos_rcv, soundVelocity=c, fs=fs,\n reverbTime=T60, nSamples=8000, micType=mic_pattern, nOrder=-1, nDim=3,\n isHighPassFilter=True)\n', (3446, 3600), False, 'import pyrirgen\n'), ((3683, 3857), 'pyrirgen.generateRir', 'pyrirgen.generateRir', (['room_sz', 'pos_src3', 'pos_rcv'], {'soundVelocity': 'c', 'fs': 'fs', 'reverbTime': 'T60', 'nSamples': '(8000)', 'micType': 'mic_pattern', 'nOrder': '(-1)', 'nDim': '(3)', 'isHighPassFilter': '(True)'}), '(room_sz, pos_src3, pos_rcv, soundVelocity=c, fs=fs,\n reverbTime=T60, nSamples=8000, micType=mic_pattern, nOrder=-1, nDim=3,\n isHighPassFilter=True)\n', (3703, 3857), False, 'import pyrirgen\n'), ((3938, 3953), 'numpy.array', 'np.array', (['RIRs1'], {}), '(RIRs1)\n', (3946, 3953), True, 'import numpy as np\n'), ((3973, 3988), 'numpy.array', 'np.array', (['RIRs2'], {}), '(RIRs2)\n', (3981, 3988), True, 'import numpy as np\n'), ((4008, 4023), 'numpy.array', 'np.array', (['RIRs3'], {}), '(RIRs3)\n', (4016, 4023), True, 'import numpy as np\n'), ((4195, 4214), 'numpy.zeros', 'np.zeros', (['[3, 8000]'], {}), '([3, 8000])\n', (4203, 4214), True, 'import numpy as np\n'), ((4363, 4381), 'numpy.array', 'np.array', (['pos_src1'], {}), '(pos_src1)\n', (4371, 4381), True, 'import numpy as np\n'), ((4406, 4424), 'numpy.array', 'np.array', (['pos_src2'], {}), '(pos_src2)\n', (4414, 4424), True, 'import numpy as np\n'), ((4449, 4467), 'numpy.array', 'np.array', (['pos_src3'], {}), '(pos_src3)\n', (4457, 4467), True, 'import numpy as np\n'), ((4491, 4539), 'numpy.stack', 'np.stack', (['(pos_src1, pos_src2, pos_src3)'], {'axis': '(0)'}), '((pos_src1, pos_src2, pos_src3), axis=0)\n', (4499, 4539), True, 'import numpy as np\n'), ((4574, 4600), 'numpy.array', 'np.array', (['mic_middle_point'], {}), '(mic_middle_point)\n', 
(4582, 4600), True, 'import numpy as np\n'), ((4625, 4641), 'numpy.zeros', 'np.zeros', (['nb_src'], {}), '(nb_src)\n', (4633, 4641), True, 'import numpy as np\n'), ((4663, 4679), 'numpy.zeros', 'np.zeros', (['nb_src'], {}), '(nb_src)\n', (4671, 4679), True, 'import numpy as np\n'), ((5199, 5224), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (5207, 5224), True, 'import numpy as np\n'), ((5250, 5275), 'numpy.array', 'np.array', (['[5.0, 5.0, 5.0]'], {}), '([5.0, 5.0, 5.0])\n', (5258, 5275), True, 'import numpy as np\n'), ((954, 977), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'x'], {}), '(0, x)\n', (971, 977), True, 'import numpy as np\n'), ((978, 1001), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'y'], {}), '(0, y)\n', (995, 1001), True, 'import numpy as np\n'), ((1002, 1029), 'numpy.random.uniform', 'np.random.uniform', (['(1.2)', '(1.9)'], {}), '(1.2, 1.9)\n', (1019, 1029), True, 'import numpy as np\n'), ((1056, 1079), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'x'], {}), '(0, x)\n', (1073, 1079), True, 'import numpy as np\n'), ((1080, 1103), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'y'], {}), '(0, y)\n', (1097, 1103), True, 'import numpy as np\n'), ((1104, 1131), 'numpy.random.uniform', 'np.random.uniform', (['(1.2)', '(1.9)'], {}), '(1.2, 1.9)\n', (1121, 1131), True, 'import numpy as np\n'), ((1158, 1181), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'x'], {}), '(0, x)\n', (1175, 1181), True, 'import numpy as np\n'), ((1182, 1205), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'y'], {}), '(0, y)\n', (1199, 1205), True, 'import numpy as np\n'), ((1206, 1229), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'z'], {}), '(0, z)\n', (1223, 1229), True, 'import numpy as np\n'), ((1310, 1353), 'numpy.random.uniform', 'np.random.uniform', (['(x / 2 - 1.0)', '(x / 2 + 1.0)'], {}), '(x / 2 - 1.0, x / 2 + 1.0)\n', (1327, 1353), True, 'import numpy as np\n'), ((1380, 1423), 'numpy.random.uniform', 'np.random.uniform', (['(y / 2 - 1.0)', '(y / 2 + 1.0)'], {}), '(y / 2 - 1.0, y / 2 + 1.0)\n', (1397, 1423), True, 'import numpy as np\n'), ((1450, 1477), 'numpy.random.uniform', 'np.random.uniform', (['(0.6)', '(1.2)'], {}), '(0.6, 1.2)\n', (1467, 1477), True, 'import numpy as np\n'), ((4839, 4939), 'numpy.sqrt', 'np.sqrt', (['((pos_src[k][0] - mic_middle_point[0]) ** 2 + (pos_src[k][1] -\n mic_middle_point[1]) ** 2)'], {}), '((pos_src[k][0] - mic_middle_point[0]) ** 2 + (pos_src[k][1] -\n mic_middle_point[1]) ** 2)\n', (4846, 4939), True, 'import numpy as np\n'), ((4956, 5044), 'numpy.arctan2', 'np.arctan2', (['(pos_src[k][1] - mic_middle_point[1])', '(pos_src[k][0] - mic_middle_point[0])'], {}), '(pos_src[k][1] - mic_middle_point[1], pos_src[k][0] -\n mic_middle_point[0])\n', (4966, 5044), True, 'import numpy as np\n'), ((5069, 5091), 'math.degrees', 'math.degrees', (['angle[k]'], {}), '(angle[k])\n', (5081, 5091), False, 'import math\n'), ((5814, 5844), 'soundfile.write', 'sf.write', (['wav_name', 'out', '(16000)'], {}), '(wav_name, out, 16000)\n', (5822, 5844), True, 'import soundfile as sf\n'), ((5157, 5173), 'numpy.abs', 'np.abs', (['angle[k]'], {}), '(angle[k])\n', (5163, 5173), True, 'import numpy as np\n'), ((2847, 2869), 'numpy.cos', 'np.cos', (['(16 * baseangle)'], {}), '(16 * baseangle)\n', (2853, 2869), True, 'import numpy as np\n'), ((2902, 2924), 'numpy.sin', 'np.sin', (['(baseangle * 16)'], {}), '(baseangle * 16)\n', (2908, 2924), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder GmbH, 2016
import os
import tempfile
import numpy as np
import pandas as pd
from mock import Mock
from tests.fixtures import DataTestCase
from tsfresh.feature_extraction.extraction import extract_features
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters, PickeableSettings
from tsfresh.utilities.distribution import IterableDistributorBaseClass, MapDistributor
class ExtractionTestCase(DataTestCase):
"""The unit tests in this module make sure if the time series features are created properly"""
def setUp(self):
self.n_jobs = 1
self.directory = tempfile.gettempdir()
    def test_extract_features(self):
        # todo: implement more methods and test more aspects
        df = self.create_test_data_sample()
        extracted_features = extract_features(df, column_id="id", column_sort="sort",
                                             column_kind="kind", column_value="val",
                                             n_jobs=self.n_jobs)
        self.assertIsInstance(extracted_features, pd.DataFrame)
        self.assertTrue(np.all(extracted_features.a__maximum == np.array([71, 77])))
        self.assertTrue(np.all(extracted_features.a__sum_values == np.array([691, 1017])))
        self.assertTrue(np.all(extracted_features.a__abs_energy == np.array([32211, 63167])))
        self.assertTrue(np.all(extracted_features.b__sum_values == np.array([757, 695])))
        self.assertTrue(np.all(extracted_features.b__minimum == np.array([3, 1])))
        self.assertTrue(np.all(extracted_features.b__abs_energy == np.array([36619, 35483])))
        self.assertTrue(np.all(extracted_features.b__mean == np.array([37.85, 34.75])))
        self.assertTrue(np.all(extracted_features.b__median == np.array([39.5, 28.0])))

        df_sts = self.create_one_valued_time_series()
        extracted_features_sts = extract_features(df_sts, column_id="id", column_sort="sort",
                                                 column_kind="kind", column_value="val",
                                                 n_jobs=self.n_jobs)
        self.assertIsInstance(extracted_features_sts, pd.DataFrame)
        self.assertTrue(np.all(extracted_features_sts.a__maximum == np.array([1.0, 6.0])))
        self.assertTrue(np.all(extracted_features_sts.a__sum_values == np.array([1.0, 11.0])))
        self.assertTrue(np.all(extracted_features_sts.a__count_above_mean == np.array([0, 1])))

    def test_extract_features_uses_only_kind_to_fc_settings(self):
        df = self.create_test_data_sample()
        extracted_features = extract_features(df, column_id="id", column_sort="sort", column_kind="kind",
                                             column_value="val", n_jobs=self.n_jobs,
                                             kind_to_fc_parameters={"a": {"maximum": None, "minimum": None}})
        assert len(extracted_features) == 2

    def test_extract_features_for_one_time_series(self):
        # todo: implement more methods and test more aspects
        df = self.create_test_data_sample()
        settings = ComprehensiveFCParameters()

        extracted_features = extract_features(df, default_fc_parameters=settings,
                                             column_value="val", column_id="id",
                                             column_kind="kind", column_sort="sort")
        self.assertIsInstance(extracted_features, pd.DataFrame)
        self.assertTrue(np.all(extracted_features.b__sum_values == np.array([757, 695])))
        self.assertTrue(np.all(extracted_features.b__minimum == np.array([3, 1])))
        self.assertTrue(np.all(extracted_features.b__abs_energy == np.array([36619, 35483])))
        self.assertTrue(np.all(extracted_features.b__mean == np.array([37.85, 34.75])))
        self.assertTrue(np.all(extracted_features.b__median == np.array([39.5, 28.0])))

        df_sts = self.create_one_valued_time_series()
        extracted_features_sts = extract_features(df_sts, default_fc_parameters=settings,
                                                 column_value="val", column_id="id",
                                                 column_kind="kind", column_sort="sort")
        self.assertIsInstance(extracted_features_sts, pd.DataFrame)
        self.assertTrue(np.all(extracted_features_sts.a__maximum == np.array([1.0, 6.0])))
        self.assertTrue(np.all(extracted_features_sts.a__sum_values == np.array([1.0, 11.0])))
        self.assertTrue(np.all(extracted_features_sts.a__count_above_mean == np.array([0, 1])))

    def test_extract_features_for_index_based_functions(self):
        df = self.create_test_data_sample_with_time_index()

        settings = {
            'linear_trend_timewise': [{"attr": "slope"}],
            'linear_trend': [{"attr": "slope"}]
        }

        extracted_features = extract_features(df, default_fc_parameters=settings,
                                             column_value="val", column_id="id",
                                             column_kind="kind",
                                             column_sort="sort")
        self.assertIsInstance(extracted_features, pd.DataFrame)

        slope_a = extracted_features['a__linear_trend_timewise__attr_"slope"'].values
        slope_b = extracted_features['b__linear_trend_timewise__attr_"slope"'].values

        self.assertAlmostEqual(slope_a[0], -0.001347117)
        self.assertAlmostEqual(slope_a[1], 0.052036340)
        self.assertAlmostEqual(slope_b[0], 0.021898496)
        self.assertAlmostEqual(slope_b[1], -0.012312)

        # Test that the index of the returned df is the ID and not the timestamp
        self.assertTrue(extracted_features.index.dtype != df.index.dtype)
        self.assertTrue(extracted_features.index.dtype == df['id'].dtype)
        self.assertEqual(
            sorted(extracted_features.index.unique().tolist()), sorted(df['id'].unique().tolist())
        )
    def test_extract_features_custom_function(self):
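        # Callables can be used directly as feature calculators; the output
        # column names encode the function name and its parameters
        # (e.g. a__custom_function__p_1), as asserted below.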
        df = self.create_test_data_sample()

        def custom_function(x, p):
            return len(x) + p

        settings = PickeableSettings({
            'mean': None,
            custom_function: [{"p": 1}, {"p": -1}],
        })
        extracted_features = extract_features(df, default_fc_parameters=settings,
                                             column_value="val", column_id="id",
                                             column_kind="kind",
                                             column_sort="sort")
        self.assertIsInstance(extracted_features, pd.DataFrame)

        mean_a = extracted_features['a__mean'].values
        custom_function_a_1 = extracted_features['a__custom_function__p_1'].values
        custom_function_a_m1 = extracted_features['a__custom_function__p_-1'].values

        self.assertAlmostEqual(mean_a[0], 34.55)
        self.assertAlmostEqual(mean_a[1], 50.85)
        self.assertAlmostEqual(custom_function_a_1[0], 21)
        self.assertAlmostEqual(custom_function_a_1[1], 21)
        self.assertAlmostEqual(custom_function_a_m1[0], 19)
        self.assertAlmostEqual(custom_function_a_m1[1], 19)
    def test_extract_features_after_randomisation(self):
        df = self.create_test_data_sample()
        df_random = df.copy().sample(frac=1)

        extracted_features = extract_features(df, column_id="id", column_sort="sort",
                                             column_kind="kind",
                                             column_value="val",
                                             n_jobs=self.n_jobs).sort_index()
        extracted_features_from_random = extract_features(df_random, column_id="id",
                                                         column_sort="sort",
                                                         column_kind="kind",
                                                         column_value="val",
                                                         n_jobs=self.n_jobs).sort_index()

        self.assertCountEqual(extracted_features.columns,
                              extracted_features_from_random.columns)
        for col in extracted_features:
            self.assertIsNone(np.testing.assert_array_almost_equal(extracted_features[col],
                                                                  extracted_features_from_random[col]))

    def test_profiling_file_written_out(self):
        df = pd.DataFrame(data={"id": np.repeat([1, 2], 10), "val": np.random.normal(0, 1, 20)})
        profiling_filename = os.path.join(self.directory, "test_profiling.txt")
        X = extract_features(df, column_id="id", column_value="val", n_jobs=self.n_jobs,
                             profile=True, profiling_filename=profiling_filename)
        self.assertTrue(os.path.isfile(profiling_filename))
        os.remove(profiling_filename)

    def test_profiling_cumulative_file_written_out(self):
        PROFILING_FILENAME = os.path.join(self.directory, "test_profiling_cumulative.txt")
        PROFILING_SORTING = "cumulative"

        df = pd.DataFrame(data={"id": np.repeat([1, 2], 10), "val": np.random.normal(0, 1, 20)})
        extract_features(df, column_id="id", column_value="val", n_jobs=self.n_jobs,
                         profile=True, profiling_filename=PROFILING_FILENAME,
                         profiling_sorting=PROFILING_SORTING)
        self.assertTrue(os.path.isfile(PROFILING_FILENAME))
        os.remove(PROFILING_FILENAME)

    def test_extract_features_without_settings(self):
        df = pd.DataFrame(data={"id": np.repeat([1, 2], 10),
                                "value1": np.random.normal(0, 1, 20),
                                "value2": np.random.normal(0, 1, 20)})
        X = extract_features(df, column_id="id",
                             n_jobs=self.n_jobs)
        self.assertIn("value1__maximum", list(X.columns))
        self.assertIn("value2__maximum", list(X.columns))

    def test_extract_features_with_and_without_parallelization(self):
        df = self.create_test_data_sample()

        features_parallel = extract_features(df, column_id="id", column_sort="sort",
                                            column_kind="kind", column_value="val",
                                            n_jobs=2)
        features_serial = extract_features(df, column_id="id", column_sort="sort",
                                          column_kind="kind", column_value="val",
                                          n_jobs=0)

        self.assertCountEqual(features_parallel.columns, features_serial.columns)

        for col in features_parallel.columns:
            np.testing.assert_array_almost_equal(features_parallel[col], features_serial[col])

    def test_extract_index_preservation(self):
        df = self.create_test_data_nearly_numerical_indices()
        extracted_features = extract_features(df, column_id="id", column_sort="sort",
                                             column_kind="kind", column_value="val",
                                             n_jobs=self.n_jobs)
        self.assertIsInstance(extracted_features, pd.DataFrame)
        self.assertEqual(set(df["id"]), set(extracted_features.index))

    def test_extract_features_alphabetically_sorted(self):
        df = self.create_test_data_sample()

        features = extract_features(df, column_id="id", column_sort="sort",
                                    column_kind="kind", column_value="val")

        for col_name in features.columns:
            # split out the configuration of the feature calculator
            col_name_chunks = col_name.split("__")
            # the name is always at the beginning, so remove it. Also remove the kind of the column
            col_name_chunks = col_name_chunks[2:]
            self.assertEqual(col_name_chunks, list(sorted(col_name_chunks)))
class ParallelExtractionTestCase(DataTestCase):
    def setUp(self):
        self.n_jobs = 2

        # only calculate some features to reduce load on travis ci
        self.name_to_param = {"maximum": None,
                              "sum_values": None,
                              "abs_energy": None,
                              "minimum": None,
                              "mean": None,
                              "median": None}

    def test_extract_features(self):
        # todo: implement more methods and test more aspects
        df = self.create_test_data_sample()
        extracted_features = extract_features(df, column_id="id", column_sort="sort",
                                             column_kind="kind",
                                             column_value="val",
                                             n_jobs=self.n_jobs)
        self.assertIsInstance(extracted_features, pd.DataFrame)
        self.assertTrue(np.all(extracted_features.a__maximum == np.array([71, 77])))
        self.assertTrue(np.all(extracted_features.a__sum_values == np.array([691, 1017])))
        self.assertTrue(np.all(extracted_features.a__abs_energy == np.array([32211, 63167])))
        self.assertTrue(np.all(extracted_features.b__sum_values == np.array([757, 695])))
        self.assertTrue(np.all(extracted_features.b__minimum == np.array([3, 1])))
        self.assertTrue(np.all(extracted_features.b__abs_energy == np.array([36619, 35483])))
        self.assertTrue(np.all(extracted_features.b__mean == np.array([37.85, 34.75])))
        self.assertTrue(np.all(extracted_features.b__median == np.array([39.5, 28.0])))
class DistributorUsageTestCase(DataTestCase):
    def setUp(self):
        # only calculate some features to reduce load on travis ci
        self.name_to_param = {"maximum": None}
    def test_distributor_map_reduce_is_called(self):
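        # extract_features delegates the per-chunk work to a distributor
        # object; a Mock with the IterableDistributorBaseClass spec lets us
        # check that map_reduce is invoked without any real computation.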
        df = self.create_test_data_sample()

        mock = Mock(spec=IterableDistributorBaseClass)
        mock.close.return_value = None
        mock.map_reduce.return_value = []

        X = extract_features(timeseries_container=df, column_id="id", column_sort="sort",
                             column_kind="kind", column_value="val",
                             default_fc_parameters=self.name_to_param, distributor=mock)

        self.assertTrue(mock.map_reduce.called)

    def test_distributor_close_is_called(self):
        df = self.create_test_data_sample()

        mock = MapDistributor()
        mock.close = Mock()
        mock.close.return_value = None

        X = extract_features(timeseries_container=df, column_id="id", column_sort="sort",
                             column_kind="kind", column_value="val",
                             default_fc_parameters=self.name_to_param, distributor=mock)

        self.assertTrue(mock.close.called)
| [
"numpy.random.normal",
"numpy.testing.assert_array_almost_equal",
"numpy.repeat",
"tsfresh.feature_extraction.settings.ComprehensiveFCParameters",
"mock.Mock",
"os.path.join",
"tsfresh.feature_extraction.settings.PickeableSettings",
"os.path.isfile",
"numpy.array",
"tempfile.gettempdir",
"tsfres... | [((763, 784), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (782, 784), False, 'import tempfile\n'), ((957, 1077), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=self.n_jobs)\n", (973, 1077), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((2031, 2156), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df_sts'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs'}), "(df_sts, column_id='id', column_sort='sort', column_kind=\n 'kind', column_value='val', n_jobs=self.n_jobs)\n", (2047, 2156), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((2744, 2934), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs', 'kind_to_fc_parameters': "{'a': {'maximum': None, 'minimum': None}}"}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=self.n_jobs, kind_to_fc_parameters={'a': {\n 'maximum': None, 'minimum': None}})\n", (2760, 2934), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((3244, 3271), 'tsfresh.feature_extraction.settings.ComprehensiveFCParameters', 'ComprehensiveFCParameters', ([], {}), '()\n', (3269, 3271), False, 'from tsfresh.feature_extraction.settings import ComprehensiveFCParameters, PickeableSettings\n'), ((3302, 3434), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'default_fc_parameters': 'settings', 'column_value': '"""val"""', 'column_id': '"""id"""', 'column_kind': '"""kind"""', 'column_sort': '"""sort"""'}), "(df, default_fc_parameters=settings, column_value='val',\n column_id='id', column_kind='kind', column_sort='sort')\n", (3318, 3434), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((4119, 4255), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df_sts'], {'default_fc_parameters': 'settings', 'column_value': '"""val"""', 'column_id': '"""id"""', 'column_kind': '"""kind"""', 'column_sort': '"""sort"""'}), "(df_sts, default_fc_parameters=settings, column_value='val',\n column_id='id', column_kind='kind', column_sort='sort')\n", (4135, 4255), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((4995, 5127), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'default_fc_parameters': 'settings', 'column_value': '"""val"""', 'column_id': '"""id"""', 'column_kind': '"""kind"""', 'column_sort': '"""sort"""'}), "(df, default_fc_parameters=settings, column_value='val',\n column_id='id', column_kind='kind', column_sort='sort')\n", (5011, 5127), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((6273, 6346), 'tsfresh.feature_extraction.settings.PickeableSettings', 'PickeableSettings', (["{'mean': None, custom_function: [{'p': 1}, {'p': -1}]}"], {}), "({'mean': None, custom_function: [{'p': 1}, {'p': -1}]})\n", (6290, 6346), False, 'from tsfresh.feature_extraction.settings 
import ComprehensiveFCParameters, PickeableSettings\n'), ((6412, 6544), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'default_fc_parameters': 'settings', 'column_value': '"""val"""', 'column_id': '"""id"""', 'column_kind': '"""kind"""', 'column_sort': '"""sort"""'}), "(df, default_fc_parameters=settings, column_value='val',\n column_id='id', column_kind='kind', column_sort='sort')\n", (6428, 6544), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((8772, 8822), 'os.path.join', 'os.path.join', (['self.directory', '"""test_profiling.txt"""'], {}), "(self.directory, 'test_profiling.txt')\n", (8784, 8822), False, 'import os\n'), ((8835, 8968), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs', 'profile': '(True)', 'profiling_filename': 'profiling_filename'}), "(df, column_id='id', column_value='val', n_jobs=self.n_jobs,\n profile=True, profiling_filename=profiling_filename)\n", (8851, 8968), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((9063, 9092), 'os.remove', 'os.remove', (['profiling_filename'], {}), '(profiling_filename)\n', (9072, 9092), False, 'import os\n'), ((9182, 9243), 'os.path.join', 'os.path.join', (['self.directory', '"""test_profiling_cumulative.txt"""'], {}), "(self.directory, 'test_profiling_cumulative.txt')\n", (9194, 9243), False, 'import os\n'), ((9391, 9566), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs', 'profile': '(True)', 'profiling_filename': 'PROFILING_FILENAME', 'profiling_sorting': 'PROFILING_SORTING'}), "(df, column_id='id', column_value='val', n_jobs=self.n_jobs,\n profile=True, profiling_filename=PROFILING_FILENAME, profiling_sorting=\n PROFILING_SORTING)\n", (9407, 9566), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((9677, 9706), 'os.remove', 'os.remove', (['PROFILING_FILENAME'], {}), '(PROFILING_FILENAME)\n', (9686, 9706), False, 'import os\n'), ((9976, 10032), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'n_jobs': 'self.n_jobs'}), "(df, column_id='id', n_jobs=self.n_jobs)\n", (9992, 10032), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((10322, 10432), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': '(2)'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=2)\n", (10338, 10432), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((10546, 10656), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': '(0)'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=0)\n", (10562, 10656), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((11103, 11223), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': 
'"""val"""', 'n_jobs': 'self.n_jobs'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=self.n_jobs)\n", (11119, 11223), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((11572, 11672), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val')\n", (11588, 11672), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((12715, 12835), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=self.n_jobs)\n", (12731, 12835), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((14045, 14084), 'mock.Mock', 'Mock', ([], {'spec': 'IterableDistributorBaseClass'}), '(spec=IterableDistributorBaseClass)\n', (14049, 14084), False, 'from mock import Mock\n'), ((14179, 14366), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', ([], {'timeseries_container': 'df', 'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'default_fc_parameters': 'self.name_to_param', 'distributor': 'mock'}), "(timeseries_container=df, column_id='id', column_sort=\n 'sort', column_kind='kind', column_value='val', default_fc_parameters=\n self.name_to_param, distributor=mock)\n", (14195, 14366), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((14573, 14589), 'tsfresh.utilities.distribution.MapDistributor', 'MapDistributor', ([], {}), '()\n', (14587, 14589), False, 'from tsfresh.utilities.distribution import IterableDistributorBaseClass, MapDistributor\n'), ((14611, 14617), 'mock.Mock', 'Mock', ([], {}), '()\n', (14615, 14617), False, 'from mock import Mock\n'), ((14670, 14857), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', ([], {'timeseries_container': 'df', 'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'default_fc_parameters': 'self.name_to_param', 'distributor': 'mock'}), "(timeseries_container=df, column_id='id', column_sort=\n 'sort', column_kind='kind', column_value='val', default_fc_parameters=\n self.name_to_param, distributor=mock)\n", (14686, 14857), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((9019, 9053), 'os.path.isfile', 'os.path.isfile', (['profiling_filename'], {}), '(profiling_filename)\n', (9033, 9053), False, 'import os\n'), ((9633, 9667), 'os.path.isfile', 'os.path.isfile', (['PROFILING_FILENAME'], {}), '(PROFILING_FILENAME)\n', (9647, 9667), False, 'import os\n'), ((10881, 10967), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['features_parallel[col]', 'features_serial[col]'], {}), '(features_parallel[col],\n features_serial[col])\n', (10917, 10967), True, 'import numpy as np\n'), ((7481, 7601), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 
'self.n_jobs'}), "(df, column_id='id', column_sort='sort', column_kind='kind',\n column_value='val', n_jobs=self.n_jobs)\n", (7497, 7601), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((7790, 7918), 'tsfresh.feature_extraction.extraction.extract_features', 'extract_features', (['df_random'], {'column_id': '"""id"""', 'column_sort': '"""sort"""', 'column_kind': '"""kind"""', 'column_value': '"""val"""', 'n_jobs': 'self.n_jobs'}), "(df_random, column_id='id', column_sort='sort', column_kind\n ='kind', column_value='val', n_jobs=self.n_jobs)\n", (7806, 7918), False, 'from tsfresh.feature_extraction.extraction import extract_features\n'), ((8358, 8460), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['extracted_features[col]', 'extracted_features_from_random[col]'], {}), '(extracted_features[col],\n extracted_features_from_random[col])\n', (8394, 8460), True, 'import numpy as np\n'), ((1294, 1312), 'numpy.array', 'np.array', (['[71, 77]'], {}), '([71, 77])\n', (1302, 1312), True, 'import numpy as np\n'), ((1382, 1403), 'numpy.array', 'np.array', (['[691, 1017]'], {}), '([691, 1017])\n', (1390, 1403), True, 'import numpy as np\n'), ((1473, 1497), 'numpy.array', 'np.array', (['[32211, 63167]'], {}), '([32211, 63167])\n', (1481, 1497), True, 'import numpy as np\n'), ((1567, 1587), 'numpy.array', 'np.array', (['[757, 695]'], {}), '([757, 695])\n', (1575, 1587), True, 'import numpy as np\n'), ((1654, 1670), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (1662, 1670), True, 'import numpy as np\n'), ((1740, 1764), 'numpy.array', 'np.array', (['[36619, 35483]'], {}), '([36619, 35483])\n', (1748, 1764), True, 'import numpy as np\n'), ((1828, 1852), 'numpy.array', 'np.array', (['[37.85, 34.75]'], {}), '([37.85, 34.75])\n', (1836, 1852), True, 'import numpy as np\n'), ((1918, 1940), 'numpy.array', 'np.array', (['[39.5, 28.0]'], {}), '([39.5, 28.0])\n', (1926, 1940), True, 'import numpy as np\n'), ((2389, 2409), 'numpy.array', 'np.array', (['[1.0, 6.0]'], {}), '([1.0, 6.0])\n', (2397, 2409), True, 'import numpy as np\n'), ((2483, 2504), 'numpy.array', 'np.array', (['[1.0, 11.0]'], {}), '([1.0, 11.0])\n', (2491, 2504), True, 'import numpy as np\n'), ((2584, 2600), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2592, 2600), True, 'import numpy as np\n'), ((3655, 3675), 'numpy.array', 'np.array', (['[757, 695]'], {}), '([757, 695])\n', (3663, 3675), True, 'import numpy as np\n'), ((3742, 3758), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (3750, 3758), True, 'import numpy as np\n'), ((3828, 3852), 'numpy.array', 'np.array', (['[36619, 35483]'], {}), '([36619, 35483])\n', (3836, 3852), True, 'import numpy as np\n'), ((3916, 3940), 'numpy.array', 'np.array', (['[37.85, 34.75]'], {}), '([37.85, 34.75])\n', (3924, 3940), True, 'import numpy as np\n'), ((4006, 4028), 'numpy.array', 'np.array', (['[39.5, 28.0]'], {}), '([39.5, 28.0])\n', (4014, 4028), True, 'import numpy as np\n'), ((4489, 4509), 'numpy.array', 'np.array', (['[1.0, 6.0]'], {}), '([1.0, 6.0])\n', (4497, 4509), True, 'import numpy as np\n'), ((4583, 4604), 'numpy.array', 'np.array', (['[1.0, 11.0]'], {}), '([1.0, 11.0])\n', (4591, 4604), True, 'import numpy as np\n'), ((4684, 4700), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (4692, 4700), True, 'import numpy as np\n'), ((8684, 8705), 'numpy.repeat', 'np.repeat', (['[1, 2]', '(10)'], {}), '([1, 2], 10)\n', (8693, 8705), True, 'import numpy as np\n'), ((8714, 8740), 
'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (8730, 8740), True, 'import numpy as np\n'), ((9324, 9345), 'numpy.repeat', 'np.repeat', (['[1, 2]', '(10)'], {}), '([1, 2], 10)\n', (9333, 9345), True, 'import numpy as np\n'), ((9354, 9380), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (9370, 9380), True, 'import numpy as np\n'), ((9800, 9821), 'numpy.repeat', 'np.repeat', (['[1, 2]', '(10)'], {}), '([1, 2], 10)\n', (9809, 9821), True, 'import numpy as np\n'), ((9865, 9891), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (9881, 9891), True, 'import numpy as np\n'), ((9935, 9961), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (9951, 9961), True, 'import numpy as np\n'), ((13099, 13117), 'numpy.array', 'np.array', (['[71, 77]'], {}), '([71, 77])\n', (13107, 13117), True, 'import numpy as np\n'), ((13187, 13208), 'numpy.array', 'np.array', (['[691, 1017]'], {}), '([691, 1017])\n', (13195, 13208), True, 'import numpy as np\n'), ((13278, 13302), 'numpy.array', 'np.array', (['[32211, 63167]'], {}), '([32211, 63167])\n', (13286, 13302), True, 'import numpy as np\n'), ((13372, 13392), 'numpy.array', 'np.array', (['[757, 695]'], {}), '([757, 695])\n', (13380, 13392), True, 'import numpy as np\n'), ((13459, 13475), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (13467, 13475), True, 'import numpy as np\n'), ((13545, 13569), 'numpy.array', 'np.array', (['[36619, 35483]'], {}), '([36619, 35483])\n', (13553, 13569), True, 'import numpy as np\n'), ((13633, 13657), 'numpy.array', 'np.array', (['[37.85, 34.75]'], {}), '([37.85, 34.75])\n', (13641, 13657), True, 'import numpy as np\n'), ((13723, 13745), 'numpy.array', 'np.array', (['[39.5, 28.0]'], {}), '([39.5, 28.0])\n', (13731, 13745), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self):
# GH#35488
df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.tshift(1)
expected = datetime_frame.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, inferred_ts)
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_frame.tshift()
def test_period_index_frame_shift_with_freq(self):
ps = tm.makePeriodFrame()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame):
shifted = datetime_frame.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.shift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_frame.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self):
ps = tm.makePeriodFrame()
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(self, datetime_frame):
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
df = ser.to_frame()
with tm.assert_produces_warning(FutureWarning):
result = df.shift(1, fill_value=0)
expected = Series([pd.Timestamp(0), ser[0]]).to_frame()
tm.assert_frame_equal(result, expected)
# axis = 1
df2 = pd.DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
with tm.assert_produces_warning(FutureWarning):
result = df2.shift(1, axis=1, fill_value=0)
expected = pd.DataFrame(
{"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]}
)
tm.assert_frame_equal(result, expected)
| [
"pandas.Series",
"pandas.offsets.BDay",
"pandas._testing.assert_series_equal",
"pytest.mark.filterwarnings",
"numpy.random.rand",
"numpy.asarray",
"pandas._testing.assert_index_equal",
"numpy.array",
"pandas._testing.makePeriodFrame",
"numpy.random.randint",
"pandas.concat",
"pytest.raises",
... | [((5919, 5990), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:tshift is deprecated:FutureWarning"""'], {}), "('ignore:tshift is deprecated:FutureWarning')\n", (5945, 5990), False, 'import pytest\n'), ((306, 369), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['shiftedFrame.index', 'datetime_frame.index'], {}), '(shiftedFrame.index, datetime_frame.index)\n', (327, 369), True, 'import pandas._testing as tm\n'), ((432, 488), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (["shiftedFrame['A']", 'shiftedSeries'], {}), "(shiftedFrame['A'], shiftedSeries)\n", (454, 488), True, 'import pandas._testing as tm\n'), ((546, 609), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['shiftedFrame.index', 'datetime_frame.index'], {}), '(shiftedFrame.index, datetime_frame.index)\n', (567, 609), True, 'import pandas._testing as tm\n'), ((673, 729), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (["shiftedFrame['A']", 'shiftedSeries'], {}), "(shiftedFrame['A'], shiftedSeries)\n", (695, 729), True, 'import pandas._testing as tm\n'), ((804, 852), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['unshifted', 'datetime_frame'], {}), '(unshifted, datetime_frame)\n', (825, 852), True, 'import pandas._testing as tm\n'), ((1075, 1125), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shiftedFrame', 'shiftedFrame2'], {}), '(shiftedFrame, shiftedFrame2)\n', (1096, 1125), True, 'import pandas._testing as tm\n'), ((1451, 1471), 'pandas._testing.makePeriodFrame', 'tm.makePeriodFrame', ([], {}), '()\n', (1469, 1471), True, 'import pandas._testing as tm\n'), ((1548, 1594), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['shifted.index', 'ps.index'], {}), '(shifted.index, ps.index)\n', (1569, 1594), True, 'import pandas._testing as tm\n'), ((1603, 1651), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['unshifted.index', 'ps.index'], {}), '(unshifted.index, ps.index)\n', (1624, 1651), True, 'import pandas._testing as tm\n'), ((1864, 1905), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted2', 'shifted3'], {}), '(shifted2, shifted3)\n', (1885, 1905), True, 'import pandas._testing as tm\n'), ((2404, 2443), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2425, 2443), True, 'import pandas._testing as tm\n'), ((2742, 2781), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2763, 2781), True, 'import pandas._testing as tm\n'), ((2827, 2884), 'pandas.DataFrame', 'DataFrame', (["{'high': [True, False], 'low': [False, False]}"], {}), "({'high': [True, False], 'low': [False, False]})\n", (2836, 2884), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3060, 3089), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3081, 3089), True, 'import pandas._testing as tm\n'), ((3160, 3201), 'pandas.Series', 'Series', (["['a', 'b', 'c']"], {'dtype': '"""category"""'}), "(['a', 'b', 'c'], dtype='category')\n", (3166, 3201), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3215, 3256), 'pandas.Series', 'Series', (["['A', 'B', 'C']"], {'dtype': '"""category"""'}), "(['A', 'B', 'C'], dtype='category')\n", (3221, 3256), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3270, 3303), 
'pandas.DataFrame', 'DataFrame', (["{'one': s1, 'two': s2}"], {}), "({'one': s1, 'two': s2})\n", (3279, 3303), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3402, 3431), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3423, 3431), True, 'import pandas._testing as tm\n'), ((3767, 3801), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'exp'], {}), '(result, exp)\n', (3788, 3801), True, 'import pandas._testing as tm\n'), ((3968, 4002), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'exp'], {}), '(result, exp)\n', (3989, 4002), True, 'import pandas._testing as tm\n'), ((4087, 4109), 'pandas.DataFrame', 'DataFrame', (["{'foo': []}"], {}), "({'foo': []})\n", (4096, 4109), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((4145, 4174), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['df', 'rs'], {}), '(df, rs)\n', (4166, 4174), True, 'import pandas._testing as tm\n'), ((4409, 4431), 'numpy.random.randn', 'np.random.randn', (['(20)', '(5)'], {}), '(20, 5)\n', (4424, 4431), True, 'import numpy as np\n'), ((4908, 4953), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted[0]', 'shifted[1]'], {}), '(shifted[0], shifted[1])\n', (4929, 4953), True, 'import pandas._testing as tm\n'), ((4962, 5007), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted[0]', 'shifted[2]'], {}), '(shifted[0], shifted[2])\n', (4983, 5007), True, 'import pandas._testing as tm\n'), ((5220, 5249), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (5229, 5249), True, 'import pandas as pd\n'), ((5472, 5511), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (5493, 5511), True, 'import pandas._testing as tm\n'), ((5620, 5649), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (5629, 5649), True, 'import pandas as pd\n'), ((5873, 5912), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (5894, 5912), True, 'import pandas._testing as tm\n'), ((6139, 6159), 'pandas._testing.makePeriodFrame', 'tm.makePeriodFrame', ([], {}), '()\n', (6157, 6159), True, 'import pandas._testing as tm\n'), ((6239, 6275), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['unshifted', 'ps'], {}), '(unshifted, ps)\n', (6260, 6275), True, 'import pandas._testing as tm\n'), ((6324, 6364), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted2'], {}), '(shifted, shifted2)\n', (6345, 6364), True, 'import pandas._testing as tm\n'), ((6424, 6464), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted3'], {}), '(shifted, shifted3)\n', (6445, 6464), True, 'import pandas._testing as tm\n'), ((6728, 6776), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['datetime_frame', 'unshifted'], {}), '(datetime_frame, unshifted)\n', (6749, 6776), True, 'import pandas._testing as tm\n'), ((6859, 6899), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted2'], {}), '(shifted, shifted2)\n', (6880, 6899), True, 'import pandas._testing as tm\n'), ((7226, 7266), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'expected'], {}), '(shifted, expected)\n', (7247, 7266), True, 'import pandas._testing as 
tm\n'), ((7315, 7360), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['unshifted', 'inferred_ts'], {}), '(unshifted, inferred_ts)\n', (7336, 7360), True, 'import pandas._testing as tm\n'), ((7800, 7820), 'pandas._testing.makePeriodFrame', 'tm.makePeriodFrame', ([], {}), '()\n', (7818, 7820), True, 'import pandas._testing as tm\n'), ((7926, 7962), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['unshifted', 'ps'], {}), '(unshifted, ps)\n', (7947, 7962), True, 'import pandas._testing as tm\n'), ((8010, 8050), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted2'], {}), '(shifted, shifted2)\n', (8031, 8050), True, 'import pandas._testing as tm\n'), ((8109, 8149), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted3'], {}), '(shifted, shifted3)\n', (8130, 8149), True, 'import pandas._testing as tm\n'), ((8334, 8382), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['datetime_frame', 'unshifted'], {}), '(datetime_frame, unshifted)\n', (8355, 8382), True, 'import pandas._testing as tm\n'), ((8464, 8504), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'shifted2'], {}), '(shifted, shifted2)\n', (8485, 8504), True, 'import pandas._testing as tm\n'), ((8856, 8896), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['shifted', 'expected'], {}), '(shifted, expected)\n', (8877, 8896), True, 'import pandas._testing as tm\n'), ((8958, 9003), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['unshifted', 'inferred_ts'], {}), '(unshifted, inferred_ts)\n', (8979, 9003), True, 'import pandas._testing as tm\n'), ((9079, 9099), 'pandas._testing.makePeriodFrame', 'tm.makePeriodFrame', ([], {}), '()\n', (9097, 9099), True, 'import pandas._testing as tm\n'), ((9894, 9933), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (9915, 9933), True, 'import pandas._testing as tm\n'), ((9968, 10002), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ser, 'B': ser}"], {}), "({'A': ser, 'B': ser})\n", (9980, 10002), True, 'import pandas as pd\n'), ((10272, 10311), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (10293, 10311), True, 'import pandas._testing as tm\n'), ((1187, 1202), 'pandas.offsets.BDay', 'offsets.BDay', (['(5)'], {}), '(5)\n', (1199, 1202), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((1840, 1854), 'pandas.offsets.BDay', 'offsets.BDay', ([], {}), '()\n', (1852, 1854), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((2027, 2063), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2040, 2063), False, 'import pytest\n'), ((2165, 2186), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (2179, 2186), True, 'import numpy as np\n'), ((2495, 2516), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (2509, 2516), True, 'import numpy as np\n'), ((2946, 3003), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [True, False]]'], {'dtype': 'object'}), '([[np.nan, np.nan], [True, False]], dtype=object)\n', (2954, 3003), True, 'import numpy as np\n'), ((5103, 5139), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '(5, 3)'}), '(1000, size=(5, 3))\n', (5120, 5139), True, 'import numpy as np\n'), ((5168, 5204), 'numpy.random.randint', 
'np.random.randint', (['(1000)'], {'size': '(5, 2)'}), '(1000, size=(5, 2))\n', (5185, 5204), True, 'import numpy as np\n'), ((6542, 6578), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (6555, 6578), False, 'import pytest\n'), ((7498, 7534), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (7511, 7534), False, 'import pytest\n'), ((7652, 7693), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (7678, 7693), True, 'import pandas._testing as tm\n'), ((9176, 9212), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (9189, 9212), False, 'import pytest\n'), ((9455, 9491), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (9468, 9491), False, 'import pytest\n'), ((9731, 9772), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (9757, 9772), True, 'import pandas._testing as tm\n'), ((10052, 10093), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (10078, 10093), True, 'import pandas._testing as tm\n'), ((936, 950), 'pandas.offsets.BDay', 'offsets.BDay', ([], {}), '()\n', (948, 950), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((2231, 2277), 'pandas.DataFrame', 'DataFrame', (['np.nan'], {'index': 'df.index', 'columns': '[0]'}), '(np.nan, index=df.index, columns=[0])\n', (2240, 2277), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((2561, 2607), 'pandas.DataFrame', 'DataFrame', (['np.nan'], {'index': 'df.index', 'columns': '[0]'}), '(np.nan, index=df.index, columns=[0])\n', (2570, 2607), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3548, 3591), 'pandas.date_range', 'date_range', (['"""1/1/2000"""'], {'periods': '(5)', 'freq': '"""H"""'}), "('1/1/2000', periods=5, freq='H')\n", (3558, 3591), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3662, 3705), 'pandas.date_range', 'date_range', (['"""1/1/2000"""'], {'periods': '(5)', 'freq': '"""H"""'}), "('1/1/2000', periods=5, freq='H')\n", (3672, 3705), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((3863, 3906), 'pandas.date_range', 'date_range', (['"""1/1/2000"""'], {'periods': '(5)', 'freq': '"""H"""'}), "('1/1/2000', periods=5, freq='H')\n", (3873, 3906), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((6400, 6414), 'pandas.offsets.BDay', 'offsets.BDay', ([], {}), '()\n', (6412, 6414), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((6987, 7019), 'numpy.asarray', 'np.asarray', (['datetime_frame.index'], {}), '(datetime_frame.index)\n', (6997, 7019), True, 'import numpy as np\n'), ((8085, 8099), 'pandas.offsets.BDay', 'offsets.BDay', ([], {}), '()\n', (8097, 8099), False, 'from pandas import DataFrame, Index, Series, date_range, offsets\n'), ((8592, 8624), 'numpy.asarray', 'np.asarray', (['datetime_frame.index'], {}), '(datetime_frame.index)\n', (8602, 8624), True, 'import numpy as np\n'), ((9632, 9658), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (9644, 9658), True, 'import pandas as pd\n'), ((9660, 9686), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-02"""'], {}), "('2020-01-02')\n", 
(9672, 9686), True, 'import pandas as pd\n'), ((10204, 10219), 'pandas.Timestamp', 'pd.Timestamp', (['(0)'], {}), '(0)\n', (10216, 10219), True, 'import pandas as pd\n'), ((10221, 10236), 'pandas.Timestamp', 'pd.Timestamp', (['(0)'], {}), '(0)\n', (10233, 10236), True, 'import pandas as pd\n'), ((9849, 9864), 'pandas.Timestamp', 'pd.Timestamp', (['(0)'], {}), '(0)\n', (9861, 9864), True, 'import pandas as pd\n')] |
import numpy as np
from gym.spaces.dict import Dict as GymDict
from rlpyt.utils.collections import NamedTupleSchema, NamedTuple
from rlpyt.spaces.composite import Composite
class GymSpaceWrapper:
"""Wraps a gym space to match the rlpyt interface; most of
the functionality is for automatically converting a GymDict (dictionary)
space into an rlpyt Composite space (and converting between the two). Use
inside the initialization of the environment wrapper for a gym environment.
"""
def __init__(self, space, null_value=0, name="obs", force_float32=True,
schemas=None):
"""Input ``space`` is a gym space instance.
Input ``name`` governs naming of internal NamedTupleSchemas used to
store Gym info.
"""
self._gym_space = space
self._base_name = name
self._null_value = null_value
if schemas is None:
schemas = {}
self._schemas = schemas
if isinstance(space, GymDict):
nt = self._schemas.get(name)
if nt is None:
nt = NamedTupleSchema(name, [k for k in space.spaces.keys()])
schemas[name] = nt # Put at module level for pickle.
elif not (isinstance(nt, NamedTupleSchema) and
sorted(nt._fields) ==
sorted([k for k in space.spaces.keys()])):
raise ValueError(f"Name clash in schemas: {name}.")
spaces = [GymSpaceWrapper(
space=v,
null_value=null_value,
name="_".join([name, k]),
force_float32=force_float32,
schemas=schemas)
for k, v in space.spaces.items()]
self.space = Composite(spaces, nt)
self._dtype = None
else:
self.space = space
self._dtype = np.float32 if (space.dtype == np.float64 and
force_float32) else None
def sample(self):
"""Returns a single sample in a namedtuple (for composite) or numpy
        array using the ``sample()`` method of the underlying gym
space(s)."""
sample = self.space.sample()
if self.space is self._gym_space: # Not Composite.
# Force numpy array, might force float64->float32.
sample = np.asarray(sample, dtype=self._dtype)
return sample
def null_value(self):
"""Similar to ``sample()`` but returning a null value."""
if self.space is self._gym_space:
null = np.asarray(self.space.sample(), dtype=self._dtype)
if self._null_value is not None:
try:
null[:] = self._null_value
except IndexError: # e.g. scalar.
null.fill(self._null_value)
else:
null.fill(0)
else: # Is composite.
null = self.space.null_value()
return null
def convert(self, value):
"""For dictionary space, use to convert wrapped env's dict to rlpyt
namedtuple, i.e. inside the environment wrapper's ``step()``, for
observation output to the rlpyt sampler (see helper function in
file)"""
return dict_to_nt(value, name=self._base_name, schemas=self._schemas)
def revert(self, value):
"""For dictionary space, use to revert namedtuple action into wrapped
        env's dict, i.e. inside the environment wrapper's ``step()``, for input
to the underlying gym environment (see helper function in file)."""
return nt_to_dict(value)
@property
def dtype(self):
return self._dtype or self.space.dtype
@property
def shape(self):
return self.space.shape
def contains(self, x):
return self.space.contains(x)
def __repr__(self):
return self.space.__repr__()
def __eq__(self, other):
return self.space.__eq__(other)
@property
def low(self):
return self.space.low
@property
def high(self):
return self.space.high
@property
def n(self):
return self.space.n
def seed(self, seed=None):
if type(self.space) is Composite:
return [space.seed(seed=seed) for space in self.space.spaces]
else:
return self.space.seed(seed=seed)
def dict_to_nt(value, name, schemas):
if isinstance(value, dict):
        values = {k: dict_to_nt(v, "_".join([name, k]), schemas)
                  for k, v in value.items()}
return schemas[name](**values)
if isinstance(value, np.ndarray) and value.dtype == np.float64:
return np.asarray(value, dtype=np.float32)
return value
def nt_to_dict(value):
if isinstance(value, NamedTuple):
return {k: nt_to_dict(v) for k, v in zip(value._fields, value)}
return value
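# --- Illustrative usage sketch (editor's addition, not part of rlpyt) ---
# A minimal round-trip of a gym Dict space through the wrapper, assuming
# `gym` is installed; the field names "pos" and "vel" are hypothetical.
def _example_dict_space_roundtrip():
    from gym.spaces import Box, Dict
    base = Dict({"pos": Box(low=-1.0, high=1.0, shape=(3,)),
                 "vel": Box(low=-1.0, high=1.0, shape=(3,))})
    wrapped = GymSpaceWrapper(base, name="obs")
    sample = wrapped.sample()          # NamedTuple with fields pos, vel
    as_dict = wrapped.revert(sample)   # plain dict, for the wrapped gym env
    return wrapped.convert(as_dict)    # and back to the rlpyt NamedTuple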
| [
"rlpyt.spaces.composite.Composite",
"numpy.asarray"
] | [((4641, 4676), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'np.float32'}), '(value, dtype=np.float32)\n', (4651, 4676), True, 'import numpy as np\n'), ((1762, 1783), 'rlpyt.spaces.composite.Composite', 'Composite', (['spaces', 'nt'], {}), '(spaces, nt)\n', (1771, 1783), False, 'from rlpyt.spaces.composite import Composite\n'), ((2343, 2380), 'numpy.asarray', 'np.asarray', (['sample'], {'dtype': 'self._dtype'}), '(sample, dtype=self._dtype)\n', (2353, 2380), True, 'import numpy as np\n')] |
"""PROGRAM FOR LEC COMPUTATION.
The module contains the following functions:
- lorenz: it is the main program, calling functions that compute the
reservoirs and conversion terms, storing them separately in
               NetCDF files and providing a flux diagram and table outputs,
the latter separately for the two hemispheres;
    - averages: a script computing zonal and global averages;
- averages_comp: a script computing global mean of the output fields;
- bsslzr: it contains the coefficients for the conversion from regular
lonlat grid to Gaussian grid;
- diagram: it is the interface between the main program and a
class "Fluxogram", producing the flux diagram;
- gauaw: it uses the coefficients provided in bsslzr for the lonlat to
Gaussian grid conversion;
- globall_cg: it computes the global and hemispheric means at each
timestep;
- init: initializes the table and ingests input fields;
- makek: computes the KE reservoirs;
- makea: computes the APE reservoirs;
- mka2k: computes the APE->KE conversion terms;
- mkaeaz: computes the zonal APE - eddy APE conversion terms;
- mkkekz: computes the zonal KE - eddy KE conversion terms;
    - mkatas: computes the stationary eddy - transient eddy APE conversions;
    - mkktks: computes the stationary eddy - transient eddy KE conversions;
- output: compute vertical integrals and print NC output;
- preprocess_lec: a script handling the input files, separating the real
from imaginary part of the Fourier coefficients,
reordering the latitudinal dimension (from N to S),
                        interpolating on a reference sigma coordinate;
- pr_output: prints a single component of the LEC computations to a
single Nc file;
- removeif: removes a file if it exists;
- stabil: calculates the stability parameter;
- table: prints the global and hemispheric mean values of
the reservoirs;
- table_conv: prints the global and hemispheric mean values of the
conversion terms;
- varatts: prints the attributes of a variable in a Nc file;
- weights: computes the weights for vertical integrations and meridional
averages;
- write_to_tab: a script for writing global and hemispheric means to table;
References.
Ulbrich P. and <NAME> (1991) The global energy cycle of stationary
and transient atmospheric waves: Results from ECMWF analyses, Met.
@author: <EMAIL>, <NAME>, Hamburg University, 2018.
"""
import math
import os
import sys
import numpy as np
from cdo import Cdo
from netCDF4 import Dataset
import warnings
# Suppress numpy's ComplexWarning raised when complex Fourier fields are
# cast to real arrays (the message-based filter matched nothing).
warnings.filterwarnings("ignore", category=np.ComplexWarning)
G = 9.81       # gravitational acceleration [m s-2]
R = 287.00     # gas constant of dry air [J kg-1 K-1]
CP = 1003.5    # specific heat of dry air at constant pressure [J kg-1 K-1]
AA = 6.371E6   # mean Earth radius [m]
PS = 101100.0  # reference surface pressure [Pa]
NW_1 = 3       # wavenumber band limits used in the log output:
NW_2 = 9       # (1): 1-NW_1, (2): NW_1-NW_2, (3): NW_2-NW_3
NW_3 = 21
def lorenz(outpath, model, year, filenc, plotfile, logfile):
"""Manage input and output fields and calling functions.
Receive fields t,u,v,w as input fields in Fourier
coefficients (time,level,wave,lon) and compute the LEC.
Arguments:
    - outpath: path where output fields are stored (as NetCDF fields);
- model: name of the model that is analysed;
- year: year that is considered;
- filenc: name of the file containing the input fields;
- plotfile: name of the file that will contain the flux diagram;
- logfile: name of the file containing the table as a .txt file.
"""
ta_c, ua_c, va_c, wap_c, dims, lev, lat = init(logfile, filenc)
nlev = int(dims[0])
ntime = int(dims[1])
nlat = int(dims[2])
ntp = int(dims[3])
d_s, y_l, g_w = weights(lev, nlev, lat)
# Compute time mean
ta_tmn = np.nanmean(ta_c, axis=1)
ta_ztmn, ta_gmn = averages(ta_tmn, g_w)
ua_tmn = np.nanmean(ua_c, axis=1)
va_tmn = np.nanmean(va_c, axis=1)
wap_tmn = np.nanmean(wap_c, axis=1)
_, wap_gmn = averages(wap_tmn, g_w)
# Compute stability parameter
gam_ztmn = np.zeros([nlev, nlat])
for l_l in range(nlat):
gam_ztmn[:, l_l] = stabil(ta_ztmn[:, l_l], lev, nlev)
gam_tmn = stabil(ta_gmn, lev, nlev)
e_k = np.zeros([nlev, ntime, nlat, ntp - 1])
ape = np.zeros([nlev, ntime, nlat, ntp - 1])
a2k = np.zeros([nlev, ntime, nlat, ntp - 1])
ae2az = np.zeros([nlev, ntime, nlat, ntp - 1])
ke2kz = np.zeros([nlev, ntime, nlat, ntp - 1])
at2as = np.zeros([nlev, ntime, nlat, ntp - 1])
kt2ks = np.zeros([nlev, ntime, nlat, ntp - 1])
for t_t in range(ntime):
ta_tan = ta_c[:, t_t, :, :] - ta_tmn
ua_tan = ua_c[:, t_t, :, :] - ua_tmn
va_tan = va_c[:, t_t, :, :] - va_tmn
wap_tan = wap_c[:, t_t, :, :] - wap_tmn
# Compute zonal means
_, ta_tgan = averages(ta_tan, g_w)
_, wap_tgan = averages(wap_tan, g_w)
# Compute kinetic energy
e_k[:, t_t, :, :] = makek(ua_tan, va_tan)
# Compute available potential energy
ape[:, t_t, :, :] = makea(ta_tan, ta_tgan, gam_tmn)
# Compute conversion between kin.en. and pot.en.
a2k[:, t_t, :, :] = mka2k(wap_tan, ta_tan, wap_tgan, ta_tgan, lev)
# Compute conversion between zonal and eddy APE
ae2az[:, t_t, :, :] = mkaeaz(va_tan, wap_tan, ta_tan, ta_tmn, ta_gmn,
lev, y_l, gam_tmn, nlat, nlev)
# Compute conversion between zonal and eddy KE
ke2kz[:, t_t, :, :] = mkkekz(ua_tan, va_tan, wap_tan, ua_tmn, va_tmn,
lev, y_l, nlat, ntp, nlev)
# Compute conversion between stationary and transient eddy APE
at2as[:, t_t, :, :] = mkatas(ua_tan, va_tan, wap_tan, ta_tan, ta_ztmn,
gam_ztmn, lev, y_l, nlat, ntp, nlev)
# Compute conversion between stationary and transient eddy KE
kt2ks[:, t_t, :, :] = mkktks(ua_tan, va_tan, ua_tmn, va_tmn, y_l, nlat,
ntp, nlev)
ek_tgmn = averages_comp(e_k, g_w, d_s, dims)
table(ek_tgmn, ntp, 'TOT. KIN. EN. ', logfile, flag=0)
ape_tgmn = averages_comp(ape, g_w, d_s, dims)
table(ape_tgmn, ntp, 'TOT. POT. EN. ', logfile, flag=0)
a2k_tgmn = averages_comp(a2k, g_w, d_s, dims)
table(a2k_tgmn, ntp, 'KE -> APE (trans) ', logfile, flag=1)
ae2az_tgmn = averages_comp(ae2az, g_w, d_s, dims)
table(ae2az_tgmn, ntp, 'AZ <-> AE (trans) ', logfile, flag=1)
ke2kz_tgmn = averages_comp(ke2kz, g_w, d_s, dims)
table(ke2kz_tgmn, ntp, 'KZ <-> KE (trans) ', logfile, flag=1)
at2as_tgmn = averages_comp(at2as, g_w, d_s, dims)
table(at2as_tgmn, ntp, 'ASE <-> ATE ', logfile, flag=1)
kt2ks_tgmn = averages_comp(kt2ks, g_w, d_s, dims)
table(kt2ks_tgmn, ntp, 'KSE <-> KTE ', logfile, flag=1)
ek_st = makek(ua_tmn, va_tmn)
ek_stgmn = globall_cg(ek_st, g_w, d_s, dims)
table(ek_stgmn, ntp, 'STAT. KIN. EN. ', logfile, flag=0)
ape_st = makea(ta_tmn, ta_gmn, gam_tmn)
ape_stgmn = globall_cg(ape_st, g_w, d_s, dims)
table(ape_stgmn, ntp, 'STAT. POT. EN. ', logfile, flag=0)
a2k_st = mka2k(wap_tmn, ta_tmn, wap_gmn, ta_gmn, lev)
a2k_stgmn = globall_cg(a2k_st, g_w, d_s, dims)
table(a2k_stgmn, ntp, 'KE -> APE (stat)', logfile, flag=1)
ae2az_st = mkaeaz(va_tmn, wap_tmn, ta_tmn, ta_tmn, ta_gmn, lev, y_l,
gam_tmn, nlat, nlev)
ae2az_stgmn = globall_cg(ae2az_st, g_w, d_s, dims)
table(ae2az_stgmn, ntp, 'AZ <-> AE (stat)', logfile, flag=1)
ke2kz_st = mkkekz(ua_tmn, va_tmn, wap_tmn, ua_tmn, va_tmn, lev, y_l, nlat,
ntp, nlev)
ke2kz_stgmn = globall_cg(ke2kz_st, g_w, d_s, dims)
table(ke2kz_stgmn, ntp, 'KZ <-> KE (stat)', logfile, flag=1)
list_diag = [
ape_tgmn, ape_stgmn, ek_tgmn, ek_stgmn, ae2az_tgmn, ae2az_stgmn,
a2k_tgmn, a2k_stgmn, at2as_tgmn, kt2ks_tgmn, ke2kz_tgmn, ke2kz_stgmn
]
lec_strength = diagram(plotfile, list_diag, dims)
nc_f = outpath + '/ek_tmap_{}_{}.nc'.format(model, year)
output(e_k, d_s, filenc, 'ek', nc_f)
nc_f = outpath + '/ape_tmap_{}_{}.nc'.format(model, year)
output(ape, d_s, filenc, 'ape', nc_f)
nc_f = outpath + '/a2k_tmap_{}_{}.nc'.format(model, year)
output(a2k, d_s, filenc, 'a2k', nc_f)
nc_f = outpath + '/ae2az_tmap_{}_{}.nc'.format(model, year)
output(ae2az, d_s, filenc, 'ae2az', nc_f)
nc_f = outpath + '/ke2kz_tmap_{}_{}.nc'.format(model, year)
output(ke2kz, d_s, filenc, 'ke2kz', nc_f)
return lec_strength
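# Hypothetical invocation sketch (editor's addition); the paths and names
# below are placeholders, and `filenc` must already contain ta/ua/va/wap
# as Fourier coefficients, e.g. as produced by preprocess_lec:
#
#     lec = lorenz(outpath='/work/out', model='MODEL', year='2000',
#                  filenc='/work/in/fourier_coeff_2000.nc',
#                  plotfile='/work/out/lec_diagram_2000.png',
#                  logfile='/work/out/lec_table_2000.txt')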
def averages(x_c, g_w):
"""Compute time, zonal and global mean averages of initial fields.
Arguments:
- x_c: the input field as (lev, lat, wave);
- g_w: the Gaussian weights for meridional averaging;
"""
xc_ztmn = np.squeeze(np.real(x_c[:, :, 0]))
xc_gmn = np.nansum(xc_ztmn * g_w[np.newaxis, :], axis=1) / np.nansum(g_w)
return xc_ztmn, xc_gmn
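# Note (editor's addition): the wavenumber-0 Fourier coefficient of a real
# field equals its zonal average, so Re(x_c[:, :, 0]) recovers the
# zonal-mean cross-section before the Gaussian-weighted meridional mean.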
def averages_comp(fld, g_w, d_s, dims):
"""Compute the global mean averages of reservoirs and conversion terms.
Arguments:
- fld: the component of the LEC (time, lev, lat, wave);
- g_w: the Gaussian weights for meridional averaging;
- d_s: the Delta sigma of the sigma levels;
- dims: a list containing the dimensions length0;
"""
fld_tmn = np.nanmean(fld, axis=1)
fld_tgmn = globall_cg(fld_tmn, g_w, d_s, dims)
return fld_tgmn
def bsslzr(kdim):
"""Obtain parameters for the Gaussian coefficients.
@author: <NAME>
"""
ndim = 50
p_i = math.pi
zbes = [
2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391, 14.9309177086,
18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320,
30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646,
43.1997917132, 46.3411883717, 49.4826098974, 52.6240518411,
55.7655107550, 58.9069839261, 62.0484691902, 65.1899648002,
68.3314693299, 71.4729816036, 74.6145006437, 77.7560256304,
80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372,
93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542,
106.0299309165, 109.1714896498, 112.3130502805, 115.4546126537,
118.5961766309, 121.7377420880, 124.8793089132, 128.0208770059,
131.1624462752, 134.3040166383, 137.4455880203, 140.5871603528,
143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192,
156.2950342685
]
pbes = np.zeros(kdim)
idim = min([kdim, ndim])
pbes[0:idim] = zbes[0:idim]
for j in range(idim, kdim - 1, 1):
pbes[j] = pbes[j - 1] + p_i
return pbes
def diagram(filen, listf, dims):
"""Diagram interface script.
    Call the Fluxogram class, serving as an interface between the main
    script and the class that draws the flux diagram.
    Arguments:
    - filen: the filename of the flux diagram;
- listf: a list containing the fluxes and storages;
- dims: the dimensions of the variables;
"""
import fluxogram
ntp = int(dims[3])
apet = listf[0]
apes = listf[1]
ekt = listf[2]
eks = listf[3]
ae2azt = listf[4]
ae2azs = listf[5]
a2kt = listf[6]
a2ks = listf[7]
at2as = listf[8]
kt2ks = listf[9]
ke2kzt = listf[10]
ke2kzs = listf[11]
apz = '{:.2f}'.format(apet[0, 0] + apes[0, 0])
az2kz = '{:.2f}'.format(-1e5 * (a2kt[0, 0]))
az2at = '{:.2f}'.format(-1e5 * np.nansum(ae2azt[0, 1:ntp - 1]))
aps = '{:.2f}'.format(np.nansum(apes[0, 1:ntp - 1]))
as2ks = '{:.2f}'.format(1e5 * np.nansum(a2ks[0, 1:ntp - 1]))
apt = '{:.2f}'.format(np.nansum(apet[0, 1:ntp - 1]))
at2kt = '{:.2f}'.format(1e5 * np.nansum(a2kt[0, 1:ntp - 1]))
az2as = '{:.2f}'.format(-1e5 * np.nansum(ae2azs[0, 1:ntp - 1]))
as2at = '{:.2f}'.format(1e5 * np.nansum(at2as[0, 1:ntp - 1]))
azin = '{:.2f}'.format((float(az2at) + float(az2as) - float(az2kz)))
asein = '{:.2f}'.format((float(as2ks) + float(as2at) - float(az2as)))
atein = '{:.2f}'.format(float(at2kt) - float(az2at) - float(as2at))
k_z = '{:.2f}'.format(ekt[0, 0] + eks[0, 0])
kte = '{:.2f}'.format(np.nansum(ekt[0, 1:ntp - 1]))
kse = '{:.2f}'.format(np.nansum(eks[0, 1:ntp - 1]))
kt2kz = '{:.2f}'.format(1e5 * np.nansum(ke2kzt[0, 1:ntp - 1]))
ks2kt = '{:.2f}'.format(-1e5 * np.nansum(kt2ks[0, 1:ntp - 1]))
ks2kz = '{:.2f}'.format(1e5 * np.nansum(ke2kzs[0, 1:ntp - 1]))
kteout = '{:.2f}'.format(float(at2kt) - float(ks2kt) - float(kt2kz))
kseout = '{:.2f}'.format(float(ks2kt) + float(as2ks) - float(ks2kz))
kzout = '{:.2f}'.format(float(kt2kz) + float(ks2kz) - float(az2kz))
list_lorenz = [
azin, apz, asein, aps, atein, apt, as2ks, at2kt, kteout, kte, kseout,
kse, kzout, k_z, az2kz, az2at, az2as, as2at, kt2kz, ks2kt, ks2kz
]
flux = fluxogram.Fluxogram(1000, 1000)
flux.add_storage("AZ", 600, 0, 0)
flux.add_storage("ASE", 600, 0.75, 0.25)
flux.add_storage("ATE", 600, 1.5, 0)
flux.add_storage("KTE", 600, 1.5, 1.5)
flux.add_storage("KSE", 600, 0.75, 1.25)
flux.add_storage("KZ", 600, 0, 1.5)
flux.add_storage("AZ+", 0, 0, -1)
flux.add_storage("ASE+", 0, 0.75, -1)
flux.add_storage("ATE+", 0, 1.5, -1)
flux.add_storage("KTE-", 0, 1.5, 2.5)
flux.add_storage("KSE-", 0, 0.75, 2.5)
flux.add_storage("KZ-", 0, 0, 2.5)
flux.add_flux("A2KZ", flux.storages[5], flux.storages[0], 100)
flux.add_flux("AE2AZ", flux.storages[0], flux.storages[2], 150)
flux.add_flux("AE2AS", flux.storages[0], flux.storages[1], 60)
flux.add_flux("AE2AT", flux.storages[1], flux.storages[2], 60)
flux.add_flux("A2KS", flux.storages[1], flux.storages[4], 60)
flux.add_flux("A2KT", flux.storages[2], flux.storages[3], 100)
flux.add_flux("KE2KS", flux.storages[3], flux.storages[4], 60)
flux.add_flux("KS2KZ", flux.storages[4], flux.storages[5], 60)
flux.add_flux("KE2KZ", flux.storages[3], flux.storages[5], 150)
flux.add_flux("AZ+", flux.storages[6], flux.storages[0], 60)
flux.add_flux("ASE+", flux.storages[7], flux.storages[1], 60)
flux.add_flux("ATE+", flux.storages[8], flux.storages[2], 60)
flux.add_flux("KTE-", flux.storages[3], flux.storages[9], 60)
flux.add_flux("KSE-", flux.storages[4], flux.storages[10], 60)
flux.add_flux("KZ-", flux.storages[5], flux.storages[11], 60)
flux.draw(filen, list_lorenz)
lec = float(kteout) + float(kseout) + float(kzout)
return lec
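# Budget-closure note (editor's addition): the boundary terms above (azin,
# asein, atein, kteout, kseout, kzout) are not computed directly; each is
# inferred by closing the steady-state budget of its reservoir from the
# conversion terms, following the sign conventions of this routine. The
# returned LEC strength is the sum of the three kinetic energy outflow
# terms, kteout + kseout + kzout.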
def gauaw(n_y):
"""Compute the Gaussian coefficients for the Gaussian grid conversion.
Arguments:
- n_y: the latitude dimension;
"""
c_c = (1 - (2 / math.pi)**2) / 4
eps = 0.00000000000001
    k_k = int(n_y / 2)  # number of Gaussian nodes per hemisphere
p_a = np.zeros(n_y)
p_a[0:k_k] = bsslzr(k_k)
p_w = np.zeros(n_y)
for i_l in range(k_k):
x_z = np.cos(p_a[i_l] / math.sqrt((n_y + 0.5)**2 + c_c))
iterr = 0.
zsp = 1.0
while (abs(zsp) > eps and iterr <= 10):
pkm1 = x_z
pkm2 = 1.0
for n_n in range(2, n_y, 1):
p_k = ((n_n * 2 - 1.0) * x_z * pkm1 - (n_n - 1.0) * pkm2) / n_n
pkm2 = pkm1
pkm1 = p_k
pkm1 = pkm2
pkmrk = (n_y * (pkm1 - x_z * p_k)) / (1.0 - x_z**2)
zsp = p_k / pkmrk
x_z = x_z - zsp
iterr = iterr + 1
        if abs(zsp) > eps:  # the Newton iteration above is capped at 10 steps
            sys.exit("*** no convergence in gauaw ***")
p_a[i_l] = x_z
p_w[i_l] = (2.0 * (1.0 - x_z**2)) / ((n_y**2) * (pkm1**2))
p_a[n_y - 1 - i_l] = -p_a[i_l]
p_w[n_y - 1 - i_l] = p_w[i_l]
psi = p_a
pgw = p_w
return psi, pgw
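# --- Illustrative sanity check (editor's addition) ---
# For exact Gauss-Legendre quadrature on [-1, 1] the weights sum to 2 and
# the nodes are antisymmetric; this helper prints both so the
# implementation above can be inspected at a given resolution.
def _inspect_gauaw(n_y=64):
    psi, pgw = gauaw(n_y)
    print('sum of weights (expected ~2.0):', np.nansum(pgw))
    print('antisymmetric nodes:', bool(np.allclose(psi, -psi[::-1])))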
def globall_cg(d3v, g_w, d_s, dims):
"""Compute the global and hemispheric averages.
Arguments:
- d3v: the 3D dataset to be averaged;
- g_w: the gaussian weights;
    - d_s: the Delta sigma of the sigma levels;
- dims: a list containing the sizes of the dimensions;
"""
nlev = int(dims[0])
nlat = int(dims[2])
ntp = int(dims[3])
gmn = np.zeros([3, ntp - 1])
aux1 = np.zeros([nlev, int(nlat / 2), ntp - 1])
aux2 = np.zeros([nlev, int(nlat / 2), ntp - 1])
aux1v = np.zeros([nlev, ntp - 1])
aux2v = np.zeros([nlev, ntp - 1])
nhem = int(nlat / 2)
fac = 1 / G * PS / 1e5
for l_l in range(nlev):
for i_h in range(nhem):
aux1[l_l, i_h, :] = fac * np.real(d3v[l_l, i_h, :]) * g_w[i_h]
aux2[l_l, i_h, :] = (fac * np.real(d3v[l_l, i_h + nhem - 1, :]) *
g_w[i_h + nhem - 1])
aux1v[l_l, :] = (np.nansum(aux1[l_l, :, :], axis=0) / np.nansum(
g_w[0:nhem]) * d_s[l_l])
aux2v[l_l, :] = (np.nansum(aux2[l_l, :, :], axis=0) / np.nansum(
g_w[0:nhem]) * d_s[l_l])
gmn[1, :] = (np.nansum(aux1v, axis=0) / np.nansum(d_s))
gmn[2, :] = (np.nansum(aux2v, axis=0) / np.nansum(d_s))
gmn[0, :] = 0.5 * (gmn[1, :] + gmn[2, :])
return gmn
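# Weighting note (editor's addition): aux1/aux2 hold the Gaussian-weighted
# hemispheric sums, d_s supplies the sigma-layer thicknesses for the
# vertical (mass) average via dp/g = PS * d_sigma / G, and the 1e5 in the
# prefactor is a unit scaling that diagram compensates for when the
# conversion terms are plotted (they are re-multiplied by 1e5 there).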
def init(logfile, filep):
"""Ingest input fields as complex fields and initialise tables.
Receive fields t,u,v,w as input fields in Fourier
    coefficients (time,level,wave,lon), with real parts stored at even and
    imaginary parts at odd indices. Convert them to complex fields for Python.
    Arguments:
    - logfile: name of the file containing the table as a .txt file;
    - filep: name of the file containing the input fields.
"""
with open(logfile, 'w') as log:
log.write('########################################################\n')
log.write('# #\n')
log.write('# LORENZ ENERGY CYCLE #\n')
log.write('# #\n')
log.write('########################################################\n')
with Dataset(filep) as dataset0:
t_a = dataset0.variables['ta'][:, :, :, :]
u_a = dataset0.variables['ua'][:, :, :, :]
v_a = dataset0.variables['va'][:, :, :, :]
wap = dataset0.variables['wap'][:, :, :, :]
lev = dataset0.variables['plev'][:]
time = dataset0.variables['time'][:]
lat = dataset0.variables['lat'][:]
nfc = np.shape(t_a)[3]
nlev = len(lev)
ntime = len(time)
nlat = len(lat)
    ntp = nfc // 2 + 1  # number of retained wavenumbers (incl. wave 0)
dims = [nlev, ntime, nlat, ntp]
if max(lev) < 1000:
lev = lev * 100
wap = wap * 100
t_a = np.transpose(t_a, (1, 0, 2, 3))
ta_r = t_a[:, :, :, 0::2]
ta_i = t_a[:, :, :, 1::2]
u_a = np.transpose(u_a, (1, 0, 2, 3))
ua_r = u_a[:, :, :, 0::2]
ua_i = u_a[:, :, :, 1::2]
v_a = np.transpose(v_a, (1, 0, 2, 3))
va_r = v_a[:, :, :, 0::2]
va_i = v_a[:, :, :, 1::2]
wap = np.transpose(wap, (1, 0, 2, 3))
wap_r = wap[:, :, :, 0::2]
wap_i = wap[:, :, :, 1::2]
ta_c = ta_r + 1j * ta_i
ua_c = ua_r + 1j * ua_i
va_c = va_r + 1j * va_i
wap_c = wap_r + 1j * wap_i
with open(logfile, 'a+') as log:
log.write(' \n')
log.write(' \n')
log.write('INPUT DATA:\n')
log.write('-----------\n')
log.write(' \n')
log.write('SPECTRAL RESOLUTION : {}\n'.format(nfc))
log.write('NUMBER OF LATITUDES : {}\n'.format(nlat))
        log.write('NUMBER OF LEVELS    : {}\n'.format(nlev))
log.write('LEVEL : {} Pa\n'.format(lev))
log.write(' \n')
log.write('WAVES:\n')
log.write(' \n')
log.write('(1) : 1 - {}\n'.format(NW_1))
log.write('(2) : {} - {}\n'.format(NW_1, NW_2))
log.write('(3) : {} - {}\n'.format(NW_2, NW_3))
log.write(' \n')
log.write('GLOBAL DIAGNOSTIC: \n')
log.write(' \n')
log.write(' I GLOBAL I NORTH I SOUTH I\n')
log.write('------------------------------------------------------\n')
return ta_c, ua_c, va_c, wap_c, dims, lev, lat
def makek(u_t, v_t):
"""Compute the kinetic energy reservoirs from u and v.
Arguments:
- u_t: a 3D zonal velocity field;
- v_t: a 3D meridional velocity field;
"""
ck1 = u_t * np.conj(u_t)
ck2 = v_t * np.conj(v_t)
e_k = np.real(ck1 + ck2)
e_k[:, :, 0] = 0.5 * np.real(u_t[:, :, 0] * u_t[:, :, 0] +
v_t[:, :, 0] * v_t[:, :, 0])
return e_k
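# Spectral-factor note (editor's addition): for a real field the n >= 1
# Fourier coefficients carry both the +n and -n contributions, so the
# per-wave kinetic energy is |u_n|^2 + |v_n|^2 without a 1/2 factor,
# while the zonal-mean (n = 0) component keeps the usual
# 0.5 * (u_0^2 + v_0^2), hence the separate treatment of e_k[:, :, 0].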
def makea(t_t, t_g, gam):
"""Compute the kinetic energy reservoirs from t.
Arguments:
- t_t_ a 3D temperature field;
- t_g: a temperature vertical profile;
- gam: a vertical profile of the stability parameter;
"""
ape = gam[:, np.newaxis, np.newaxis] * np.real(t_t * np.conj(t_t))
ape[:, :, 0] = (gam[:, np.newaxis] * 0.5 * np.real(
(t_t[:, :, 0] - t_g[:, np.newaxis]) *
(t_t[:, :, 0] - t_g[:, np.newaxis])))
return ape
def mka2k(wap, t_t, w_g, t_g, p_l):
"""Compute the KE to APE energy conversions from t and w.
Arguments:
- wap: a 3D vertical velocity field;
- t_t: a 3D temperature field;
- w_g: a vertical velocity vertical profile;
- t_g: a temperature vertical profile;
- p_l: the pressure levels;
"""
a2k = - np.real(R / p_l[:, np.newaxis, np.newaxis] *
(t_t * np.conj(wap) + np.conj(t_t) * wap))
a2k[:, :, 0] = - np.real(R / p_l[:, np.newaxis] *
(t_t[:, :, 0] - t_g[:, np.newaxis]) *
(wap[:, :, 0] - w_g[:, np.newaxis]))
return a2k
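# Worked-equation note (editor's addition): this implements the standard
# baroclinic conversion C(A, K) = -(R / p) * <omega' T'>; for each wave
# n >= 1 the covariance contributes 2 * Re(T_n * conj(omega_n)), which is
# the t_t * conj(wap) + conj(t_t) * wap sum above, while wave 0 uses the
# departures of the zonal means from the global mean profiles.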
def mkaeaz(v_t, wap, t_t, ttt, ttg, p_l, lat, gam, nlat, nlev):
"""Compute the zonal mean - eddy APE conversions from t and v.
Arguments:
- v_t: a 3D meridional velocity field;
- wap: a 3D vertical velocity field;
- t_t: a 3D temperature field;
    - ttt: a climatological mean 3D temperature field;
    - ttg: a global mean temperature vertical profile;
    - p_l: the pressure levels;
    - lat: the latitudinal dimension;
- gam: a vertical profile of the stability parameter;
- nlat: the number of latitudes;
- nlev: the number of levels;
"""
dtdp = np.zeros([nlev, nlat])
dtdy = np.zeros([nlev, nlat])
ttt = np.real(ttt)
for l_l in np.arange(nlev):
if l_l == 0:
t_1 = np.real(ttt[l_l, :, 0]) - ttg[l_l]
t_2 = np.real(ttt[l_l + 1, :, 0]) - ttg[l_l + 1]
dtdp[l_l, :] = (t_2 - t_1) / (p_l[l_l + 1] - p_l[l_l])
elif l_l == nlev - 1:
t_1 = np.real(ttt[l_l - 1, :, 0]) - ttg[l_l - 1]
t_2 = np.real(ttt[l_l, :, 0]) - ttg[l_l]
dtdp[l_l, :] = (t_2 - t_1) / (p_l[l_l] - p_l[l_l - 1])
else:
t_1 = np.real(ttt[l_l, :, 0]) - ttg[l_l]
t_2 = np.real(ttt[l_l + 1, :, 0]) - ttg[l_l + 1]
dtdp1 = (t_2 - t_1) / (p_l[l_l + 1] - p_l[l_l])
t_2 = t_1
t_1 = np.real(ttt[l_l - 1, :, 0]) - ttg[l_l - 1]
dtdp2 = (t_2 - t_1) / (p_l[l_l] - p_l[l_l - 1])
dtdp[l_l, :] = (
(dtdp1 * (p_l[l_l] - p_l[l_l - 1]) + dtdp2 *
(p_l[l_l + 1] - p_l[l_l])) / (p_l[l_l + 1] - p_l[l_l - 1]))
dtdp[l_l, :] = dtdp[l_l, :] - (R / (CP * p_l[l_l]) *
(ttt[l_l, :, 0] - ttg[l_l]))
for i_l in np.arange(nlat):
if i_l == 0:
t_1 = np.real(ttt[:, i_l, 0])
t_2 = np.real(ttt[:, i_l + 1, 0])
dtdy[:, i_l] = (t_2 - t_1) / (lat[i_l + 1] - lat[i_l])
elif i_l == nlat - 1:
t_1 = np.real(ttt[:, i_l - 1, 0])
t_2 = np.real(ttt[:, i_l, 0])
dtdy[:, i_l] = (t_2 - t_1) / (lat[i_l] - lat[i_l - 1])
else:
t_1 = np.real(ttt[:, i_l - 1, 0])
t_2 = np.real(ttt[:, i_l + 1, 0])
dtdy[:, i_l] = (t_2 - t_1) / (lat[i_l + 1] - lat[i_l - 1])
dtdy = dtdy / AA
c_1 = np.real(v_t * np.conj(t_t) + t_t * np.conj(v_t))
c_2 = np.real(wap * np.conj(t_t) + t_t * np.conj(wap))
ae2az = (gam[:, np.newaxis, np.newaxis] *
(dtdy[:, :, np.newaxis] * c_1 + dtdp[:, :, np.newaxis] * c_2))
ae2az[:, :, 0] = 0.
return ae2az
def mkkekz(u_t, v_t, wap, utt, vtt, p_l, lat, nlat, ntp, nlev):
"""Compute the zonal mean - eddy KE conversions from u and v.
Arguments:
- u_t: a 3D zonal velocity field;
- v_t: a 3D meridional velocity field;
- wap: a 3D vertical velocity field;
- utt: a climatological mean 3D zonal velocity field;
- vtt: a climatological mean 3D meridional velocity field;
- p_l: the pressure levels;
- lat: the latitude dimension;
- nlat: the number of latitudes;
- ntp: the number of wavenumbers;
- nlev: the number of vertical levels;
"""
dudp = np.zeros([nlev, nlat])
dvdp = np.zeros([nlev, nlat])
dudy = np.zeros([nlev, nlat])
dvdy = np.zeros([nlev, nlat])
for l_l in np.arange(nlev):
if l_l == 0:
dudp[l_l, :] = ((np.real(utt[l_l + 1, :, 0] - utt[l_l, :, 0])) /
(p_l[l_l + 1] - p_l[l_l]))
dvdp[l_l, :] = ((np.real(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])) /
(p_l[l_l + 1] - p_l[l_l]))
elif l_l == nlev - 1:
dudp[l_l, :] = ((np.real(utt[l_l, :, 0] - utt[l_l - 1, :, 0])) /
(p_l[l_l] - p_l[l_l - 1]))
dvdp[l_l, :] = ((np.real(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])) /
(p_l[l_l] - p_l[l_l - 1]))
else:
dudp1 = ((np.real(utt[l_l + 1, :, 0] - utt[l_l, :, 0])) /
(p_l[l_l + 1] - p_l[l_l]))
dvdp1 = ((np.real(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])) /
(p_l[l_l + 1] - p_l[l_l]))
dudp2 = ((np.real(utt[l_l, :, 0] - utt[l_l - 1, :, 0])) /
(p_l[l_l] - p_l[l_l - 1]))
dvdp2 = ((np.real(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])) /
(p_l[l_l] - p_l[l_l - 1]))
dudp[l_l, :] = (
(dudp1 * (p_l[l_l] - p_l[l_l - 1]) + dudp2 *
(p_l[l_l + 1] - p_l[l_l])) / (p_l[l_l + 1] - p_l[l_l - 1]))
dvdp[l_l, :] = (
(dvdp1 * (p_l[l_l] - p_l[l_l - 1]) + dvdp2 *
(p_l[l_l + 1] - p_l[l_l])) / (p_l[l_l + 1] - p_l[l_l - 1]))
for i_l in np.arange(nlat):
if i_l == 0:
dudy[:, i_l] = ((np.real(utt[:, i_l + 1, 0] - utt[:, i_l, 0])) /
(lat[i_l + 1] - lat[i_l]))
dvdy[:, i_l] = ((np.real(vtt[:, i_l + 1, 0] - vtt[:, i_l, 0])) /
(lat[i_l + 1] - lat[i_l]))
elif i_l == nlat - 1:
dudy[:, i_l] = ((np.real(utt[:, i_l, 0] - utt[:, i_l - 1, 0])) /
(lat[i_l] - lat[i_l - 1]))
dvdy[:, i_l] = ((np.real(vtt[:, i_l, 0] - vtt[:, i_l - 1, 0])) /
(lat[i_l] - lat[i_l - 1]))
else:
dudy[:, i_l] = ((np.real(utt[:, i_l + 1, 0] - utt[:, i_l - 1, 0]))
/ (lat[i_l + 1] - lat[i_l - 1]))
dvdy[:, i_l] = ((np.real(vtt[:, i_l + 1, 0] - vtt[:, i_l - 1, 0]))
/ (lat[i_l + 1] - lat[i_l - 1]))
dudy = dudy / AA
dvdy = dvdy / AA
c_1 = np.zeros([nlev, nlat, ntp - 1])
c_2 = np.zeros([nlev, nlat, ntp - 1])
c_3 = np.zeros([nlev, nlat, ntp - 1])
c_4 = np.zeros([nlev, nlat, ntp - 1])
c_5 = np.zeros([nlev, nlat, ntp - 1])
c_6 = np.zeros([nlev, nlat, ntp - 1])
u_u = np.real(u_t * np.conj(u_t) + u_t * np.conj(u_t))
u_v = np.real(u_t * np.conj(v_t) + v_t * np.conj(u_t))
v_v = np.real(v_t * np.conj(v_t) + v_t * np.conj(v_t))
u_w = np.real(u_t * np.conj(wap) + wap * np.conj(u_t))
v_w = np.real(v_t * np.conj(wap) + wap * np.conj(v_t))
for i_l in np.arange(nlat):
c_1[:, i_l, :] = dudy[:, i_l][:, np.newaxis] * u_v[:, i_l, :]
c_2[:, i_l, :] = dvdy[:, i_l][:, np.newaxis] * v_v[:, i_l, :]
c_5[:, i_l, :] = (np.tan(lat[i_l]) / AA * np.real(
utt[:, i_l, 0])[:, np.newaxis] * (u_v[:, i_l, :]))
c_6[:, i_l, :] = -(np.tan(lat[i_l]) / AA * np.real(
vtt[:, i_l, 0])[:, np.newaxis] * (u_u[:, i_l, :]))
for l_l in np.arange(nlev):
c_3[l_l, :, :] = dudp[l_l, :][:, np.newaxis] * u_w[l_l, :, :]
c_4[l_l, :, :] = dvdp[l_l, :][:, np.newaxis] * v_w[l_l, :, :]
ke2kz = (c_1 + c_2 + c_3 + c_4 + c_5 + c_6)
ke2kz[:, :, 0] = 0.
return ke2kz
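# Hedged shape check for mkkekz (illustrative sizes only; assumes the
# module-level Earth radius AA used inside the routine is defined, and
# that latitudes are supplied in radians, as elsewhere in this file):
def _demo_mkkekz(nlev=5, nlat=16, ntp=11):
    rng = np.random.default_rng(0)
    def spec():
        return (rng.standard_normal((nlev, nlat, ntp - 1)) +
                1j * rng.standard_normal((nlev, nlat, ntp - 1)))
    lat = np.deg2rad(np.linspace(-80., 80., nlat))
    p_l = np.linspace(10000., 90000., nlev)
    ke2kz = mkkekz(spec(), spec(), spec(), spec(), spec(),
                   p_l, lat, nlat, ntp, nlev)
    assert ke2kz.shape == (nlev, nlat, ntp - 1)
    return ke2kz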
def mkatas(u_t, v_t, wap, t_t, ttt, g_w, p_l, lat, nlat, ntp, nlev):
"""Compute the stat.-trans. eddy APE conversions from u, v, wap and t.
Arguments:
- u_t: a 3D zonal velocity field;
- v_t: a 3D meridional velocity field;
- wap: a 3D vertical velocity field;
- t_t: a 3D temperature field;
- ttt: a climatological mean 3D temperature field;
- g_w: the gaussian weights;
- p_l: the pressure levels;
- lat: the latitude dimension;
- nlat: the number of latitudes;
- ntp: the number of wavenumbers;
- nlev: the number of vertical levels;
"""
t_r = np.fft.ifft(t_t, axis=2)
u_r = np.fft.ifft(u_t, axis=2)
v_r = np.fft.ifft(v_t, axis=2)
w_r = np.fft.ifft(wap, axis=2)
tur = t_r * u_r
tvr = t_r * v_r
twr = t_r * w_r
t_u = np.fft.fft(tur, axis=2)
t_v = np.fft.fft(tvr, axis=2)
t_w = np.fft.fft(twr, axis=2)
c_1 = (t_u * np.conj(ttt[:, :, np.newaxis]) -
ttt[:, :, np.newaxis] * np.conj(t_u))
c_6 = (t_w * np.conj(ttt[:, :, np.newaxis]) -
ttt[:, :, np.newaxis] * np.conj(t_w))
c_2 = np.zeros([nlev, nlat, ntp - 1])
c_3 = np.zeros([nlev, nlat, ntp - 1])
c_5 = np.zeros([nlev, nlat, ntp - 1])
for i_l in range(nlat):
if i_l == 0:
c_2[:, i_l, :] = np.real(
t_v[:, i_l, :] / (AA * (lat[i_l + 1] - lat[i_l])) *
np.conj(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l, np.newaxis]))
c_3[:, i_l, :] = np.real(
np.conj(t_v[:, i_l, :]) / (AA * (lat[i_l + 1] - lat[i_l])) *
(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l, np.newaxis]))
elif i_l == nlat - 1:
c_2[:, i_l, :] = np.real(
t_v[:, i_l, :] / (AA * (lat[i_l] - lat[i_l - 1])) *
np.conj(ttt[:, i_l, np.newaxis] - ttt[:, i_l - 1, np.newaxis]))
c_3[:, i_l, :] = np.real(
np.conj(t_v[:, i_l, :]) / (AA * (lat[i_l] - lat[i_l - 1])) *
(ttt[:, i_l, np.newaxis] - ttt[:, i_l - 1, np.newaxis]))
else:
c_2[:, i_l, :] = np.real(
t_v[:, i_l, :] / (AA * (lat[i_l + 1] - lat[i_l - 1])) *
np.conj(ttt[:, i_l + 1, np.newaxis] -
ttt[:, i_l - 1, np.newaxis]))
c_3[:, i_l, :] = np.real(
np.conj(t_v[:, i_l, :]) / (AA * (lat[i_l + 1] - lat[i_l - 1]))
* (ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l - 1, np.newaxis]))
for l_l in range(nlev):
if l_l == 0:
c_5[l_l, :, :] = np.real(
(ttt[l_l + 1, :, np.newaxis] - ttt[l_l, :, np.newaxis]) /
(p_l[l_l + 1] - p_l[l_l]))
elif l_l == nlev - 1:
c_5[l_l, :, :] = np.real(
(ttt[l_l, :, np.newaxis] - ttt[l_l - 1, :, np.newaxis]) /
(p_l[l_l] - p_l[l_l - 1]))
else:
c51 = ((ttt[l_l + 1, :, np.newaxis] - ttt[l_l, :, np.newaxis]) /
(p_l[l_l + 1] - p_l[l_l]))
c52 = ((ttt[l_l, :, np.newaxis] - ttt[l_l - 1, :, np.newaxis]) /
(p_l[l_l] - p_l[l_l - 1]))
c_5[l_l, :, :] = (
(c51 * (p_l[l_l] - p_l[l_l - 1]) + c52 *
(p_l[l_l + 1] - p_l[l_l])) / (p_l[l_l + 1] - p_l[l_l - 1]))
k_k = np.arange(0, ntp - 1)
at2as = (
((k_k - 1)[np.newaxis, np.newaxis, :] * np.imag(c_1) /
(AA * np.cos(lat[np.newaxis, :, np.newaxis])) +
np.real(t_w * np.conj(c_5) + np.conj(t_w) * c_5) + np.real(c_2 + c_3)
+ R / (CP * p_l[:, np.newaxis, np.newaxis]) * np.real(c_6)) *
g_w[:, :, np.newaxis])
at2as[:, :, 0] = 0.
return at2as
def mkktks(u_t, v_t, utt, vtt, lat, nlat, ntp, nlev):
"""Compute the stat.-trans. eddy KE conversions from u, v and t.
Arguments:
- u_t: a 3D zonal velocity field;
- v_t: a 3D meridional velocity field;
- utt: a climatological mean 3D zonal velocity field;
- vtt: a climatological mean 3D meridional velocity field;
- lat: the latitude dimension;
- nlat: the number of latitudes;
- ntp: the number of wavenumbers;
- nlev: the number of vertical levels;
"""
dut = np.zeros([nlev, nlat, ntp - 1])
dvt = np.zeros([nlev, nlat, ntp - 1])
dlat = np.zeros([nlat])
u_r = np.fft.irfft(u_t, axis=2)
v_r = np.fft.irfft(v_t, axis=2)
uur = u_r * u_r
uvr = u_r * v_r
vvr = v_r * v_r
u_u = np.fft.rfft(uur, axis=2)
v_v = np.fft.rfft(vvr, axis=2)
u_v = np.fft.rfft(uvr, axis=2)
c_1 = u_u * np.conj(u_t) - u_t * np.conj(u_u)
# c_3 = u_v * np.conj(u_t) + u_t * np.conj(u_v)
c_5 = u_u * np.conj(v_t) + v_t * np.conj(u_u)
c_6 = u_v * np.conj(v_t) - v_t * np.conj(u_v)
for i_l in range(nlat):
if i_l == 0:
dut[:, i_l, :] = (utt[:, i_l + 1, :] - utt[:, i_l, :])
dvt[:, i_l, :] = (vtt[:, i_l + 1, :] - vtt[:, i_l, :])
dlat[i_l] = (lat[i_l + 1] - lat[i_l])
elif i_l == nlat - 1:
dut[:, i_l, :] = (utt[:, i_l, :] - utt[:, i_l - 1, :])
dvt[:, i_l, :] = (vtt[:, i_l, :] - vtt[:, i_l - 1, :])
dlat[i_l] = (lat[i_l] - lat[i_l - 1])
else:
dut[:, i_l, :] = (utt[:, i_l + 1, :] - utt[:, i_l - 1, :])
dvt[:, i_l, :] = (vtt[:, i_l + 1, :] - vtt[:, i_l - 1, :])
dlat[i_l] = (lat[i_l + 1] - lat[i_l - 1])
c21 = np.conj(u_u) * dut / dlat[np.newaxis, :, np.newaxis]
c22 = u_u * np.conj(dut) / dlat[np.newaxis, :, np.newaxis]
c41 = np.conj(v_v) * dvt / dlat[np.newaxis, :, np.newaxis]
c42 = v_v * np.conj(dvt) / dlat[np.newaxis, :, np.newaxis]
k_k = np.arange(0, ntp - 1)
kt2ks = (np.real(c21 + c22 + c41 + c42) / AA +
np.tan(lat)[np.newaxis, :, np.newaxis] * np.real(c_1 - c_5) / AA +
np.imag(c_1 + c_6) * (k_k - 1)[np.newaxis, np.newaxis, :] /
(AA * np.cos(lat)[np.newaxis, :, np.newaxis]))
kt2ks[:, :, 0] = 0
return kt2ks
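# The irfft -> pointwise product -> rfft pattern in mkktks is the spectral
# route to a longitude-space product (convolution theorem). A minimal,
# self-contained check of that identity on a single row of real data:
def _demo_spectral_product(nlon=32):
    rng = np.random.default_rng(1)
    x = rng.standard_normal(nlon)
    xx_hat = np.fft.rfft(np.fft.irfft(np.fft.rfft(x)) ** 2)
    assert np.allclose(np.fft.irfft(xx_hat), x * x)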
def output(fld, d_s, filenc, name, nc_f):
"""Compute vertical integrals and print (time,lat,ntp) to NC output.
Arguments:
- fld: the annual mean fields (lev, lat, wave);
- d_s: Delta sigma;
- filenc: the input file containing the Fourier coefficients of t,u,v,w;
- name: the variable name;
- nc_f: the name of the output file (with path)
"""
fld_tmn = np.nanmean(fld, axis=1)
fld_aux = fld_tmn * d_s[:, np.newaxis, np.newaxis]
fld_vmn = np.nansum(fld_aux, axis=0) / np.nansum(d_s)
removeif(nc_f)
pr_output(fld_vmn, name, filenc, nc_f)
def pr_output(varo, varname, filep, nc_f):
"""Print outputs to NetCDF.
Save fields to NetCDF, retrieving information from an existing
NetCDF file. Metadata are transferred from the existing file to the
new one.
Arguments:
- varo: the field to be stored;
- varname: the name of the variables to be saved;
- filep: the existing dataset, containing the metadata;
- nc_f: the name of the output file;
PROGRAMMER(S)
<NAME> (2014), modified by <NAME> (2018).
"""
import fourier_coefficients
fourc = fourier_coefficients
with Dataset(nc_f, 'w', format='NETCDF4') as w_nc_fid:
w_nc_fid.description = "Outputs of LEC program"
with Dataset(filep, 'r') as nc_fid:
# Extract data from NetCDF file
wave = nc_fid.variables['wave'][:]
ntp = int(len(wave) / 2)
# Writing NetCDF files
fourc.extr_lat(nc_fid, w_nc_fid, 'lat')
w_nc_fid.createDimension('wave', ntp)
w_nc_dim = w_nc_fid.createVariable(
'wave', nc_fid.variables['wave'].dtype, ('wave', ))
for ncattr in nc_fid.variables['wave'].ncattrs():
w_nc_dim.setncattr(
ncattr, nc_fid.variables['wave'].getncattr(ncattr))
w_nc_fid.variables['wave'][:] = wave[0:ntp]
w_nc_var = w_nc_fid.createVariable(varname, 'f8', ('lat', 'wave'))
varatts(w_nc_var, varname, 1, 0)
w_nc_fid.variables[varname][:] = varo
def removeif(filename):
"""Remove filename if it exists."""
try:
os.remove(filename)
except OSError:
pass
def preproc_lec(model, wdir, ldir, filedict):
"""Preprocess fields for LEC computations and send it to lorenz program.
This function computes the interpolation of ta, ua, va, wap daily fields to
fill gaps using near-surface data, then computes the Fourier coefficients
and performs the LEC computations. For every year, (lev,lat,wave) fields,
global and hemispheric time series of each conversion and reservoir term
    of the LEC are provided.
Arguments:
- model: the model name;
- wdir: the working directory where the outputs are stored;
- ldir: the directory where the LEC tables and the diagram of the LEC for
each year are stored;
- filedict: a dictionary of file names containing the input fields;
"""
import fourier_coefficients
cdo = Cdo()
fourc = fourier_coefficients
ta_file = filedict['/ta_']
tas_file = filedict['/tas_']
ua_file = filedict['/ua_']
uas_file = filedict['/uas_']
va_file = filedict['/va_']
vas_file = filedict['/vas_']
wap_file = filedict['/wap_']
maskorog = wdir + '/orog.nc'
ua_file_mask = wdir + '/ua_fill.nc'
va_file_mask = wdir + '/va_fill.nc'
energy3_file = wdir + '/energy_short.nc'
cdo.setmisstoc(
'0',
input='-setmisstoc,1 -sub {0} {0}'.format(ua_file),
options='-b F32',
output=maskorog)
cdo.add(
input=('-setmisstoc,0 -selvar,ua {} '
'-setmisstoc,0 -mul -selvar,uas {} -selvar,ua {}').format(
ua_file, uas_file, maskorog),
options='-b F32',
output=ua_file_mask)
cdo.add(
input=('-setmisstoc,0 -selvar,va {} '
'-setmisstoc,0 -mul -selvar,vas {} -selvar,ua {}').format(
va_file, vas_file, maskorog),
options='-b F32',
output=va_file_mask)
cdo.setmisstoc(
'0',
input=('-invertlat -sellevel,10000/90000 '
'-merge {} {} {} {}').format(ta_file, ua_file_mask,
va_file_mask, wap_file),
options='-b F32',
output=energy3_file)
yrs = cdo.showyear(input=energy3_file)
yrs = str(yrs)
yrs2 = yrs.split()
y_i = 0
lect = np.zeros(len(yrs2))
for y_r in yrs2:
        y_ro = ''.join(ch for ch in y_r if ch.isdigit())
# print(filter(str.isdigit, str(y_r)))
enfile_yr = wdir + '/inputen.nc'
tasfile_yr = wdir + '/tas_yr.nc'
tadiag_file = wdir + '/ta_filled.nc'
ncfile = wdir + '/fourier_coeff.nc'
cdo.selyear(
y_ro, input=energy3_file, options='-b F32', output=enfile_yr)
cdo.selyear(y_ro, input=tas_file, options='-b F32', output=tasfile_yr)
fourc.fourier_coeff(tadiag_file, ncfile, enfile_yr, tasfile_yr)
diagfile = (ldir + '/{}_{}_lec_diagram.png'.format(model, y_ro))
logfile = (ldir + '/{}_{}_lec_table.txt'.format(model, y_ro))
lect[y_i] = lorenz(wdir, model, y_ro, ncfile, diagfile, logfile)
y_i = y_i + 1
os.remove(enfile_yr)
os.remove(tasfile_yr)
os.remove(tadiag_file)
os.remove(ncfile)
os.remove(maskorog)
os.remove(ua_file_mask)
os.remove(va_file_mask)
os.remove(energy3_file)
return lect
def stabil(ta_gmn, p_l, nlev):
"""Compute the stability parameter from temp. and pressure levels.
    Arguments:
- ta_gmn: a temperature vertical profile;
- p_l: the vertical levels;
- nlev: the number of vertical levels;
"""
cpdr = CP / R
t_g = ta_gmn
g_s = np.zeros(nlev)
for i_l in range(nlev):
if i_l == 0:
dtdp = (t_g[i_l + 1] - t_g[i_l]) / (p_l[i_l + 1] - p_l[i_l])
elif i_l == nlev - 1:
dtdp = (t_g[i_l] - t_g[i_l - 1]) / (p_l[i_l] - p_l[i_l - 1])
else:
dtdp1 = (t_g[i_l + 1] - t_g[i_l]) / (p_l[i_l + 1] - p_l[i_l])
dtdp2 = (t_g[i_l] - t_g[i_l - 1]) / (p_l[i_l] - p_l[i_l - 1])
dtdp = (
(dtdp1 * (p_l[i_l] - p_l[i_l - 1]) + dtdp2 *
(p_l[i_l + 1] - p_l[i_l])) / (p_l[i_l + 1] - p_l[i_l - 1]))
g_s[i_l] = CP / (t_g[i_l] - p_l[i_l] * dtdp * cpdr)
return g_s
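# Quick sanity check for stabil (relies on the module constants CP and R
# already used above). For an isothermal profile the vertical derivative
# vanishes, so the stability parameter reduces to CP / T at every level:
def _demo_stabil(nlev=5, t_iso=250.):
    p_l = np.linspace(10000., 90000., nlev)
    g_s = stabil(np.full(nlev, t_iso), p_l, nlev)
    assert np.allclose(g_s, CP / t_iso)
    return g_s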
def table(varin, ntp, name, logfile, flag):
"""Write global and hem. storage terms to .txt table.
Arguments:
- varin: the variable to be printed out;
- ntp: the number of wavenumbers;
- name: the name of the variable to be printed out;
- logfile: the filename of the .txt where the variable is printed out;
- flag: a flag for NH, SH, global;
"""
if flag is True:
fac = 1e5
varin = fac * varin
varzon = varin[:, 0]
vared = np.nansum(varin[:, 1:ntp - 1], axis=1)
vared1 = np.nansum(varin[:, 1:NW_1 - 1], axis=1)
vared2 = np.nansum(varin[:, NW_1:NW_2 - 1], axis=1)
vared3 = np.nansum(varin[:, NW_2:NW_3 - 1], axis=1)
vared_tog = [vared, vared1, vared2, vared3]
write_to_tab(logfile, name, vared_tog, varzon)
def varatts(w_nc_var, varname, tres, vres):
"""Add attibutes to the variables, depending on name and time res.
Arguments:
- w_nc_var: a variable object;
- varname: the name of the variable, among ta, ua, va and wap;
- tres: the time resolution (daily or annual);
- vres: the vertical resolution (pressure levels or vert. integr.).
@author: <NAME> (2014), modified by <NAME> (2018).
"""
if tres == 0:
tatt = "Daily\nM"
elif tres == 1:
tatt = "Annual mean\nM"
if vres == 0:
vatt = "Pressure levels\n"
elif vres == 1:
vatt = "Vertically integrated\n"
if varname == 'a':
w_nc_var.setncatts({
'long_name': "Available Potential Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
elif varname == 'ek':
w_nc_var.setncatts({
'long_name': "Kinetic Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
elif varname == 'a2k':
w_nc_var.setncatts({
'long_name': "Conversion between APE and KE",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE <-> KE",
'statistic': tatt
})
elif varname == 'k':
w_nc_var.setncatts({
'long_name': "Kinetic Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
def weights(lev, nlev, lat):
"""Compute weigths for vertical integration and meridional averages.
Arguments:
- lev: the pressure levels;
- nlev: the number of pressure levels;
- lat: the latitudes in degrees;
"""
# Compute sigma level and dsigma
sig = lev / PS
d_s = np.zeros(nlev)
for j_l in range(1, nlev - 1, 1):
d_s[j_l] = 0.5 * abs(sig[j_l + 1] - sig[j_l - 1])
d_s[0] = sig[0] + 0.5 * abs(sig[1] - sig[0])
d_s[nlev -
1] = 1 - sig[nlev - 1] + 0.5 * abs(sig[nlev - 1] - sig[nlev - 2])
# Compute Gaussian weights
y_l = np.zeros(lat.shape)
np.deg2rad(lat, out=y_l)
g_w = np.cos(y_l)
return d_s, y_l, g_w
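# The sigma-layer thicknesses d_s partition the unit sigma interval by
# construction, so they must sum to one (assumes the module-level surface
# pressure PS used above; the latitude values here are illustrative):
def _demo_weights(nlev=5):
    lev = np.linspace(10000., 90000., nlev)
    d_s, y_l, g_w = weights(lev, nlev, np.array([-45., 0., 45.]))
    assert np.isclose(d_s.sum(), 1.0)
    return d_s, y_l, g_w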
def write_to_tab(logfile, name, vared, varzon):
"""Specify the formats for table entries.
Arguments:
    - logfile: the log file where the entries must be written;
- name: the name of the variable;
- vared: a list of arrays containing the overall eddy components, the LW,
the SW and the KW components;
- varzon: an array containing the zonal mean component;
"""
vartot = varzon + vared[0]
with open(logfile, 'a+') as log:
log.write(' {} TOTAL {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, vartot[0], vartot[1], vartot[2]))
log.write('--------------------------------------\n')
log.write(' {} ZONAL {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, varzon[0], varzon[1], varzon[2]))
log.write('--------------------------------------\n')
log.write(' {} EDDY {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, vared[0][0], vared[0][1], vared[0][2]))
log.write('--------------------------------------\n')
log.write(' {} EDDY(LW) {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, vared[1][0], vared[1][1], vared[1][2]))
log.write('--------------------------------------\n')
log.write(' {} EDDY(SW) {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, vared[2][0], vared[2][1], vared[2][2]))
log.write('--------------------------------------\n')
log.write(' {} EDDY(KW) {: 4.3f} {: 4.3f} {: 4.3f}\n'.format(
name, vared[3][0], vared[3][1], vared[3][2]))
log.write('--------------------------------------\n')
| [
"numpy.fft.irfft",
"math.sqrt",
"numpy.nanmean",
"sys.exit",
"numpy.imag",
"numpy.arange",
"os.remove",
"numpy.fft.fft",
"netCDF4.Dataset",
"numpy.fft.rfft",
"numpy.real",
"cdo.Cdo",
"numpy.conj",
"numpy.deg2rad",
"numpy.cos",
"numpy.shape",
"numpy.nansum",
"numpy.transpose",
"nu... | [((2768, 2827), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""ComplexWarning"""'}), "('ignore', message='ComplexWarning')\n", (2791, 2827), False, 'import warnings\n'), ((3801, 3825), 'numpy.nanmean', 'np.nanmean', (['ta_c'], {'axis': '(1)'}), '(ta_c, axis=1)\n', (3811, 3825), True, 'import numpy as np\n'), ((3883, 3907), 'numpy.nanmean', 'np.nanmean', (['ua_c'], {'axis': '(1)'}), '(ua_c, axis=1)\n', (3893, 3907), True, 'import numpy as np\n'), ((3921, 3945), 'numpy.nanmean', 'np.nanmean', (['va_c'], {'axis': '(1)'}), '(va_c, axis=1)\n', (3931, 3945), True, 'import numpy as np\n'), ((3960, 3985), 'numpy.nanmean', 'np.nanmean', (['wap_c'], {'axis': '(1)'}), '(wap_c, axis=1)\n', (3970, 3985), True, 'import numpy as np\n'), ((4075, 4097), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (4083, 4097), True, 'import numpy as np\n'), ((4238, 4276), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4246, 4276), True, 'import numpy as np\n'), ((4287, 4325), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4295, 4325), True, 'import numpy as np\n'), ((4336, 4374), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4344, 4374), True, 'import numpy as np\n'), ((4387, 4425), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4395, 4425), True, 'import numpy as np\n'), ((4438, 4476), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4446, 4476), True, 'import numpy as np\n'), ((4489, 4527), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4497, 4527), True, 'import numpy as np\n'), ((4540, 4578), 'numpy.zeros', 'np.zeros', (['[nlev, ntime, nlat, ntp - 1]'], {}), '([nlev, ntime, nlat, ntp - 1])\n', (4548, 4578), True, 'import numpy as np\n'), ((9348, 9371), 'numpy.nanmean', 'np.nanmean', (['fld'], {'axis': '(1)'}), '(fld, axis=1)\n', (9358, 9371), True, 'import numpy as np\n'), ((10478, 10492), 'numpy.zeros', 'np.zeros', (['kdim'], {}), '(kdim)\n', (10486, 10492), True, 'import numpy as np\n'), ((12836, 12867), 'fluxogram.Fluxogram', 'fluxogram.Fluxogram', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (12855, 12867), False, 'import fluxogram\n'), ((14713, 14726), 'numpy.zeros', 'np.zeros', (['n_y'], {}), '(n_y)\n', (14721, 14726), True, 'import numpy as np\n'), ((14766, 14779), 'numpy.zeros', 'np.zeros', (['n_y'], {}), '(n_y)\n', (14774, 14779), True, 'import numpy as np\n'), ((16011, 16033), 'numpy.zeros', 'np.zeros', (['[3, ntp - 1]'], {}), '([3, ntp - 1])\n', (16019, 16033), True, 'import numpy as np\n'), ((16150, 16175), 'numpy.zeros', 'np.zeros', (['[nlev, ntp - 1]'], {}), '([nlev, ntp - 1])\n', (16158, 16175), True, 'import numpy as np\n'), ((16188, 16213), 'numpy.zeros', 'np.zeros', (['[nlev, ntp - 1]'], {}), '([nlev, ntp - 1])\n', (16196, 16213), True, 'import numpy as np\n'), ((18419, 18450), 'numpy.transpose', 'np.transpose', (['t_a', '(1, 0, 2, 3)'], {}), '(t_a, (1, 0, 2, 3))\n', (18431, 18450), True, 'import numpy as np\n'), ((18521, 18552), 'numpy.transpose', 'np.transpose', (['u_a', '(1, 0, 2, 3)'], {}), '(u_a, (1, 0, 2, 3))\n', (18533, 18552), True, 'import numpy as np\n'), ((18623, 18654), 'numpy.transpose', 'np.transpose', (['v_a', '(1, 0, 2, 3)'], {}), '(v_a, (1, 0, 2, 3))\n', (18635, 18654), 
True, 'import numpy as np\n'), ((18725, 18756), 'numpy.transpose', 'np.transpose', (['wap', '(1, 0, 2, 3)'], {}), '(wap, (1, 0, 2, 3))\n', (18737, 18756), True, 'import numpy as np\n'), ((20139, 20157), 'numpy.real', 'np.real', (['(ck1 + ck2)'], {}), '(ck1 + ck2)\n', (20146, 20157), True, 'import numpy as np\n'), ((21958, 21980), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (21966, 21980), True, 'import numpy as np\n'), ((21992, 22014), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (22000, 22014), True, 'import numpy as np\n'), ((22025, 22037), 'numpy.real', 'np.real', (['ttt'], {}), '(ttt)\n', (22032, 22037), True, 'import numpy as np\n'), ((22053, 22068), 'numpy.arange', 'np.arange', (['nlev'], {}), '(nlev)\n', (22062, 22068), True, 'import numpy as np\n'), ((23125, 23140), 'numpy.arange', 'np.arange', (['nlat'], {}), '(nlat)\n', (23134, 23140), True, 'import numpy as np\n'), ((24577, 24599), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (24585, 24599), True, 'import numpy as np\n'), ((24611, 24633), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (24619, 24633), True, 'import numpy as np\n'), ((24645, 24667), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (24653, 24667), True, 'import numpy as np\n'), ((24679, 24701), 'numpy.zeros', 'np.zeros', (['[nlev, nlat]'], {}), '([nlev, nlat])\n', (24687, 24701), True, 'import numpy as np\n'), ((24717, 24732), 'numpy.arange', 'np.arange', (['nlev'], {}), '(nlev)\n', (24726, 24732), True, 'import numpy as np\n'), ((26148, 26163), 'numpy.arange', 'np.arange', (['nlat'], {}), '(nlat)\n', (26157, 26163), True, 'import numpy as np\n'), ((27090, 27121), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27098, 27121), True, 'import numpy as np\n'), ((27132, 27163), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27140, 27163), True, 'import numpy as np\n'), ((27174, 27205), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27182, 27205), True, 'import numpy as np\n'), ((27216, 27247), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27224, 27247), True, 'import numpy as np\n'), ((27258, 27289), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27266, 27289), True, 'import numpy as np\n'), ((27300, 27331), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (27308, 27331), True, 'import numpy as np\n'), ((27642, 27657), 'numpy.arange', 'np.arange', (['nlat'], {}), '(nlat)\n', (27651, 27657), True, 'import numpy as np\n'), ((28059, 28074), 'numpy.arange', 'np.arange', (['nlev'], {}), '(nlev)\n', (28068, 28074), True, 'import numpy as np\n'), ((28915, 28939), 'numpy.fft.ifft', 'np.fft.ifft', (['t_t'], {'axis': '(2)'}), '(t_t, axis=2)\n', (28926, 28939), True, 'import numpy as np\n'), ((28950, 28974), 'numpy.fft.ifft', 'np.fft.ifft', (['u_t'], {'axis': '(2)'}), '(u_t, axis=2)\n', (28961, 28974), True, 'import numpy as np\n'), ((28985, 29009), 'numpy.fft.ifft', 'np.fft.ifft', (['v_t'], {'axis': '(2)'}), '(v_t, axis=2)\n', (28996, 29009), True, 'import numpy as np\n'), ((29020, 29044), 'numpy.fft.ifft', 'np.fft.ifft', (['wap'], {'axis': '(2)'}), '(wap, axis=2)\n', (29031, 29044), True, 'import numpy as np\n'), ((29115, 29138), 'numpy.fft.fft', 'np.fft.fft', (['tur'], {'axis': '(2)'}), '(tur, axis=2)\n', 
(29125, 29138), True, 'import numpy as np\n'), ((29149, 29172), 'numpy.fft.fft', 'np.fft.fft', (['tvr'], {'axis': '(2)'}), '(tvr, axis=2)\n', (29159, 29172), True, 'import numpy as np\n'), ((29183, 29206), 'numpy.fft.fft', 'np.fft.fft', (['twr'], {'axis': '(2)'}), '(twr, axis=2)\n', (29193, 29206), True, 'import numpy as np\n'), ((29415, 29446), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (29423, 29446), True, 'import numpy as np\n'), ((29457, 29488), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (29465, 29488), True, 'import numpy as np\n'), ((29499, 29530), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (29507, 29530), True, 'import numpy as np\n'), ((31610, 31631), 'numpy.arange', 'np.arange', (['(0)', '(ntp - 1)'], {}), '(0, ntp - 1)\n', (31619, 31631), True, 'import numpy as np\n'), ((32502, 32533), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (32510, 32533), True, 'import numpy as np\n'), ((32544, 32575), 'numpy.zeros', 'np.zeros', (['[nlev, nlat, ntp - 1]'], {}), '([nlev, nlat, ntp - 1])\n', (32552, 32575), True, 'import numpy as np\n'), ((32587, 32603), 'numpy.zeros', 'np.zeros', (['[nlat]'], {}), '([nlat])\n', (32595, 32603), True, 'import numpy as np\n'), ((32614, 32639), 'numpy.fft.irfft', 'np.fft.irfft', (['u_t'], {'axis': '(2)'}), '(u_t, axis=2)\n', (32626, 32639), True, 'import numpy as np\n'), ((32650, 32675), 'numpy.fft.irfft', 'np.fft.irfft', (['v_t'], {'axis': '(2)'}), '(v_t, axis=2)\n', (32662, 32675), True, 'import numpy as np\n'), ((32746, 32770), 'numpy.fft.rfft', 'np.fft.rfft', (['uur'], {'axis': '(2)'}), '(uur, axis=2)\n', (32757, 32770), True, 'import numpy as np\n'), ((32781, 32805), 'numpy.fft.rfft', 'np.fft.rfft', (['vvr'], {'axis': '(2)'}), '(vvr, axis=2)\n', (32792, 32805), True, 'import numpy as np\n'), ((32816, 32840), 'numpy.fft.rfft', 'np.fft.rfft', (['uvr'], {'axis': '(2)'}), '(uvr, axis=2)\n', (32827, 32840), True, 'import numpy as np\n'), ((33962, 33983), 'numpy.arange', 'np.arange', (['(0)', '(ntp - 1)'], {}), '(0, ntp - 1)\n', (33971, 33983), True, 'import numpy as np\n'), ((34679, 34702), 'numpy.nanmean', 'np.nanmean', (['fld'], {'axis': '(1)'}), '(fld, axis=1)\n', (34689, 34702), True, 'import numpy as np\n'), ((37326, 37331), 'cdo.Cdo', 'Cdo', ([], {}), '()\n', (37329, 37331), False, 'from cdo import Cdo\n'), ((39784, 39803), 'os.remove', 'os.remove', (['maskorog'], {}), '(maskorog)\n', (39793, 39803), False, 'import os\n'), ((39808, 39831), 'os.remove', 'os.remove', (['ua_file_mask'], {}), '(ua_file_mask)\n', (39817, 39831), False, 'import os\n'), ((39836, 39859), 'os.remove', 'os.remove', (['va_file_mask'], {}), '(va_file_mask)\n', (39845, 39859), False, 'import os\n'), ((39864, 39887), 'os.remove', 'os.remove', (['energy3_file'], {}), '(energy3_file)\n', (39873, 39887), False, 'import os\n'), ((40197, 40211), 'numpy.zeros', 'np.zeros', (['nlev'], {}), '(nlev)\n', (40205, 40211), True, 'import numpy as np\n'), ((41318, 41356), 'numpy.nansum', 'np.nansum', (['varin[:, 1:ntp - 1]'], {'axis': '(1)'}), '(varin[:, 1:ntp - 1], axis=1)\n', (41327, 41356), True, 'import numpy as np\n'), ((41370, 41409), 'numpy.nansum', 'np.nansum', (['varin[:, 1:NW_1 - 1]'], {'axis': '(1)'}), '(varin[:, 1:NW_1 - 1], axis=1)\n', (41379, 41409), True, 'import numpy as np\n'), ((41423, 41465), 'numpy.nansum', 'np.nansum', (['varin[:, NW_1:NW_2 - 1]'], {'axis': '(1)'}), '(varin[:, NW_1:NW_2 - 1], 
axis=1)\n', (41432, 41465), True, 'import numpy as np\n'), ((41479, 41521), 'numpy.nansum', 'np.nansum', (['varin[:, NW_2:NW_3 - 1]'], {'axis': '(1)'}), '(varin[:, NW_2:NW_3 - 1], axis=1)\n', (41488, 41521), True, 'import numpy as np\n'), ((43585, 43599), 'numpy.zeros', 'np.zeros', (['nlev'], {}), '(nlev)\n', (43593, 43599), True, 'import numpy as np\n'), ((43875, 43894), 'numpy.zeros', 'np.zeros', (['lat.shape'], {}), '(lat.shape)\n', (43883, 43894), True, 'import numpy as np\n'), ((43899, 43923), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {'out': 'y_l'}), '(lat, out=y_l)\n', (43909, 43923), True, 'import numpy as np\n'), ((43934, 43945), 'numpy.cos', 'np.cos', (['y_l'], {}), '(y_l)\n', (43940, 43945), True, 'import numpy as np\n'), ((8844, 8865), 'numpy.real', 'np.real', (['x_c[:, :, 0]'], {}), '(x_c[:, :, 0])\n', (8851, 8865), True, 'import numpy as np\n'), ((8880, 8927), 'numpy.nansum', 'np.nansum', (['(xc_ztmn * g_w[np.newaxis, :])'], {'axis': '(1)'}), '(xc_ztmn * g_w[np.newaxis, :], axis=1)\n', (8889, 8927), True, 'import numpy as np\n'), ((8930, 8944), 'numpy.nansum', 'np.nansum', (['g_w'], {}), '(g_w)\n', (8939, 8944), True, 'import numpy as np\n'), ((11497, 11526), 'numpy.nansum', 'np.nansum', (['apes[0, 1:ntp - 1]'], {}), '(apes[0, 1:ntp - 1])\n', (11506, 11526), True, 'import numpy as np\n'), ((11619, 11648), 'numpy.nansum', 'np.nansum', (['apet[0, 1:ntp - 1]'], {}), '(apet[0, 1:ntp - 1])\n', (11628, 11648), True, 'import numpy as np\n'), ((12143, 12171), 'numpy.nansum', 'np.nansum', (['ekt[0, 1:ntp - 1]'], {}), '(ekt[0, 1:ntp - 1])\n', (12152, 12171), True, 'import numpy as np\n'), ((12199, 12227), 'numpy.nansum', 'np.nansum', (['eks[0, 1:ntp - 1]'], {}), '(eks[0, 1:ntp - 1])\n', (12208, 12227), True, 'import numpy as np\n'), ((16770, 16794), 'numpy.nansum', 'np.nansum', (['aux1v'], {'axis': '(0)'}), '(aux1v, axis=0)\n', (16779, 16794), True, 'import numpy as np\n'), ((16797, 16811), 'numpy.nansum', 'np.nansum', (['d_s'], {}), '(d_s)\n', (16806, 16811), True, 'import numpy as np\n'), ((16830, 16854), 'numpy.nansum', 'np.nansum', (['aux2v'], {'axis': '(0)'}), '(aux2v, axis=0)\n', (16839, 16854), True, 'import numpy as np\n'), ((16857, 16871), 'numpy.nansum', 'np.nansum', (['d_s'], {}), '(d_s)\n', (16866, 16871), True, 'import numpy as np\n'), ((17825, 17839), 'netCDF4.Dataset', 'Dataset', (['filep'], {}), '(filep)\n', (17832, 17839), False, 'from netCDF4 import Dataset\n'), ((18200, 18213), 'numpy.shape', 'np.shape', (['t_a'], {}), '(t_a)\n', (18208, 18213), True, 'import numpy as np\n'), ((20087, 20099), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (20094, 20099), True, 'import numpy as np\n'), ((20116, 20128), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (20123, 20128), True, 'import numpy as np\n'), ((20183, 20249), 'numpy.real', 'np.real', (['(u_t[:, :, 0] * u_t[:, :, 0] + v_t[:, :, 0] * v_t[:, :, 0])'], {}), '(u_t[:, :, 0] * u_t[:, :, 0] + v_t[:, :, 0] * v_t[:, :, 0])\n', (20190, 20249), True, 'import numpy as np\n'), ((20657, 20744), 'numpy.real', 'np.real', (['((t_t[:, :, 0] - t_g[:, np.newaxis]) * (t_t[:, :, 0] - t_g[:, np.newaxis]))'], {}), '((t_t[:, :, 0] - t_g[:, np.newaxis]) * (t_t[:, :, 0] - t_g[:, np.\n newaxis]))\n', (20664, 20744), True, 'import numpy as np\n'), ((21238, 21350), 'numpy.real', 'np.real', (['(R / p_l[:, np.newaxis] * (t_t[:, :, 0] - t_g[:, np.newaxis]) * (wap[:, :, \n 0] - w_g[:, np.newaxis]))'], {}), '(R / p_l[:, np.newaxis] * (t_t[:, :, 0] - t_g[:, np.newaxis]) * (wap\n [:, :, 0] - w_g[:, np.newaxis]))\n', (21245, 21350), True, 'import 
numpy as np\n'), ((34772, 34798), 'numpy.nansum', 'np.nansum', (['fld_aux'], {'axis': '(0)'}), '(fld_aux, axis=0)\n', (34781, 34798), True, 'import numpy as np\n'), ((34801, 34815), 'numpy.nansum', 'np.nansum', (['d_s'], {}), '(d_s)\n', (34810, 34815), True, 'import numpy as np\n'), ((35465, 35501), 'netCDF4.Dataset', 'Dataset', (['nc_f', '"""w"""'], {'format': '"""NETCDF4"""'}), "(nc_f, 'w', format='NETCDF4')\n", (35472, 35501), False, 'from netCDF4 import Dataset\n'), ((36463, 36482), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (36472, 36482), False, 'import os\n'), ((39672, 39692), 'os.remove', 'os.remove', (['enfile_yr'], {}), '(enfile_yr)\n', (39681, 39692), False, 'import os\n'), ((39701, 39722), 'os.remove', 'os.remove', (['tasfile_yr'], {}), '(tasfile_yr)\n', (39710, 39722), False, 'import os\n'), ((39731, 39753), 'os.remove', 'os.remove', (['tadiag_file'], {}), '(tadiag_file)\n', (39740, 39753), False, 'import os\n'), ((39762, 39779), 'os.remove', 'os.remove', (['ncfile'], {}), '(ncfile)\n', (39771, 39779), False, 'import os\n'), ((11438, 11469), 'numpy.nansum', 'np.nansum', (['ae2azt[0, 1:ntp - 1]'], {}), '(ae2azt[0, 1:ntp - 1])\n', (11447, 11469), True, 'import numpy as np\n'), ((11562, 11591), 'numpy.nansum', 'np.nansum', (['a2ks[0, 1:ntp - 1]'], {}), '(a2ks[0, 1:ntp - 1])\n', (11571, 11591), True, 'import numpy as np\n'), ((11684, 11713), 'numpy.nansum', 'np.nansum', (['a2kt[0, 1:ntp - 1]'], {}), '(a2kt[0, 1:ntp - 1])\n', (11693, 11713), True, 'import numpy as np\n'), ((11750, 11781), 'numpy.nansum', 'np.nansum', (['ae2azs[0, 1:ntp - 1]'], {}), '(ae2azs[0, 1:ntp - 1])\n', (11759, 11781), True, 'import numpy as np\n'), ((11817, 11847), 'numpy.nansum', 'np.nansum', (['at2as[0, 1:ntp - 1]'], {}), '(at2as[0, 1:ntp - 1])\n', (11826, 11847), True, 'import numpy as np\n'), ((12263, 12294), 'numpy.nansum', 'np.nansum', (['ke2kzt[0, 1:ntp - 1]'], {}), '(ke2kzt[0, 1:ntp - 1])\n', (12272, 12294), True, 'import numpy as np\n'), ((12331, 12361), 'numpy.nansum', 'np.nansum', (['kt2ks[0, 1:ntp - 1]'], {}), '(kt2ks[0, 1:ntp - 1])\n', (12340, 12361), True, 'import numpy as np\n'), ((12397, 12428), 'numpy.nansum', 'np.nansum', (['ke2kzs[0, 1:ntp - 1]'], {}), '(ke2kzs[0, 1:ntp - 1])\n', (12406, 12428), True, 'import numpy as np\n'), ((15390, 15433), 'sys.exit', 'sys.exit', (['"""*** no convergence in gauaw ***"""'], {}), "('*** no convergence in gauaw ***')\n", (15398, 15433), False, 'import sys\n'), ((23181, 23204), 'numpy.real', 'np.real', (['ttt[:, i_l, 0]'], {}), '(ttt[:, i_l, 0])\n', (23188, 23204), True, 'import numpy as np\n'), ((23223, 23250), 'numpy.real', 'np.real', (['ttt[:, i_l + 1, 0]'], {}), '(ttt[:, i_l + 1, 0])\n', (23230, 23250), True, 'import numpy as np\n'), ((29224, 29254), 'numpy.conj', 'np.conj', (['ttt[:, :, np.newaxis]'], {}), '(ttt[:, :, np.newaxis])\n', (29231, 29254), True, 'import numpy as np\n'), ((29292, 29304), 'numpy.conj', 'np.conj', (['t_u'], {}), '(t_u)\n', (29299, 29304), True, 'import numpy as np\n'), ((29323, 29353), 'numpy.conj', 'np.conj', (['ttt[:, :, np.newaxis]'], {}), '(ttt[:, :, np.newaxis])\n', (29330, 29353), True, 'import numpy as np\n'), ((29391, 29403), 'numpy.conj', 'np.conj', (['t_w'], {}), '(t_w)\n', (29398, 29403), True, 'import numpy as np\n'), ((30864, 30960), 'numpy.real', 'np.real', (['((ttt[l_l + 1, :, np.newaxis] - ttt[l_l, :, np.newaxis]) / (p_l[l_l + 1] -\n p_l[l_l]))'], {}), '((ttt[l_l + 1, :, np.newaxis] - ttt[l_l, :, np.newaxis]) / (p_l[l_l +\n 1] - p_l[l_l]))\n', (30871, 30960), True, 'import numpy as np\n'), 
((32857, 32869), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (32864, 32869), True, 'import numpy as np\n'), ((32878, 32890), 'numpy.conj', 'np.conj', (['u_u'], {}), '(u_u)\n', (32885, 32890), True, 'import numpy as np\n'), ((32959, 32971), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (32966, 32971), True, 'import numpy as np\n'), ((32980, 32992), 'numpy.conj', 'np.conj', (['u_u'], {}), '(u_u)\n', (32987, 32992), True, 'import numpy as np\n'), ((33009, 33021), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (33016, 33021), True, 'import numpy as np\n'), ((33030, 33042), 'numpy.conj', 'np.conj', (['u_v'], {}), '(u_v)\n', (33037, 33042), True, 'import numpy as np\n'), ((33710, 33722), 'numpy.conj', 'np.conj', (['u_u'], {}), '(u_u)\n', (33717, 33722), True, 'import numpy as np\n'), ((33779, 33791), 'numpy.conj', 'np.conj', (['dut'], {}), '(dut)\n', (33786, 33791), True, 'import numpy as np\n'), ((33836, 33848), 'numpy.conj', 'np.conj', (['v_v'], {}), '(v_v)\n', (33843, 33848), True, 'import numpy as np\n'), ((33905, 33917), 'numpy.conj', 'np.conj', (['dvt'], {}), '(dvt)\n', (33912, 33917), True, 'import numpy as np\n'), ((35584, 35603), 'netCDF4.Dataset', 'Dataset', (['filep', '"""r"""'], {}), "(filep, 'r')\n", (35591, 35603), False, 'from netCDF4 import Dataset\n'), ((14839, 14872), 'math.sqrt', 'math.sqrt', (['((n_y + 0.5) ** 2 + c_c)'], {}), '((n_y + 0.5) ** 2 + c_c)\n', (14848, 14872), False, 'import math\n'), ((16558, 16592), 'numpy.nansum', 'np.nansum', (['aux1[l_l, :, :]'], {'axis': '(0)'}), '(aux1[l_l, :, :], axis=0)\n', (16567, 16592), True, 'import numpy as np\n'), ((16595, 16617), 'numpy.nansum', 'np.nansum', (['g_w[0:nhem]'], {}), '(g_w[0:nhem])\n', (16604, 16617), True, 'import numpy as np\n'), ((16668, 16702), 'numpy.nansum', 'np.nansum', (['aux2[l_l, :, :]'], {'axis': '(0)'}), '(aux2[l_l, :, :], axis=0)\n', (16677, 16702), True, 'import numpy as np\n'), ((16705, 16727), 'numpy.nansum', 'np.nansum', (['g_w[0:nhem]'], {}), '(g_w[0:nhem])\n', (16714, 16727), True, 'import numpy as np\n'), ((20596, 20608), 'numpy.conj', 'np.conj', (['t_t'], {}), '(t_t)\n', (20603, 20608), True, 'import numpy as np\n'), ((22109, 22132), 'numpy.real', 'np.real', (['ttt[l_l, :, 0]'], {}), '(ttt[l_l, :, 0])\n', (22116, 22132), True, 'import numpy as np\n'), ((22162, 22189), 'numpy.real', 'np.real', (['ttt[l_l + 1, :, 0]'], {}), '(ttt[l_l + 1, :, 0])\n', (22169, 22189), True, 'import numpy as np\n'), ((23366, 23393), 'numpy.real', 'np.real', (['ttt[:, i_l - 1, 0]'], {}), '(ttt[:, i_l - 1, 0])\n', (23373, 23393), True, 'import numpy as np\n'), ((23412, 23435), 'numpy.real', 'np.real', (['ttt[:, i_l, 0]'], {}), '(ttt[:, i_l, 0])\n', (23419, 23435), True, 'import numpy as np\n'), ((23535, 23562), 'numpy.real', 'np.real', (['ttt[:, i_l - 1, 0]'], {}), '(ttt[:, i_l - 1, 0])\n', (23542, 23562), True, 'import numpy as np\n'), ((23581, 23608), 'numpy.real', 'np.real', (['ttt[:, i_l + 1, 0]'], {}), '(ttt[:, i_l + 1, 0])\n', (23588, 23608), True, 'import numpy as np\n'), ((23725, 23737), 'numpy.conj', 'np.conj', (['t_t'], {}), '(t_t)\n', (23732, 23737), True, 'import numpy as np\n'), ((23746, 23758), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (23753, 23758), True, 'import numpy as np\n'), ((23784, 23796), 'numpy.conj', 'np.conj', (['t_t'], {}), '(t_t)\n', (23791, 23796), True, 'import numpy as np\n'), ((23805, 23817), 'numpy.conj', 'np.conj', (['wap'], {}), '(wap)\n', (23812, 23817), True, 'import numpy as np\n'), ((24784, 24828), 'numpy.real', 'np.real', (['(utt[l_l + 1, :, 0] - utt[l_l, 
:, 0])'], {}), '(utt[l_l + 1, :, 0] - utt[l_l, :, 0])\n', (24791, 24828), True, 'import numpy as np\n'), ((24916, 24960), 'numpy.real', 'np.real', (['(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])'], {}), '(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])\n', (24923, 24960), True, 'import numpy as np\n'), ((26215, 26259), 'numpy.real', 'np.real', (['(utt[:, i_l + 1, 0] - utt[:, i_l, 0])'], {}), '(utt[:, i_l + 1, 0] - utt[:, i_l, 0])\n', (26222, 26259), True, 'import numpy as np\n'), ((26347, 26391), 'numpy.real', 'np.real', (['(vtt[:, i_l + 1, 0] - vtt[:, i_l, 0])'], {}), '(vtt[:, i_l + 1, 0] - vtt[:, i_l, 0])\n', (26354, 26391), True, 'import numpy as np\n'), ((27356, 27368), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (27363, 27368), True, 'import numpy as np\n'), ((27377, 27389), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (27384, 27389), True, 'import numpy as np\n'), ((27415, 27427), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (27422, 27427), True, 'import numpy as np\n'), ((27436, 27448), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (27443, 27448), True, 'import numpy as np\n'), ((27474, 27486), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (27481, 27486), True, 'import numpy as np\n'), ((27495, 27507), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (27502, 27507), True, 'import numpy as np\n'), ((27533, 27545), 'numpy.conj', 'np.conj', (['wap'], {}), '(wap)\n', (27540, 27545), True, 'import numpy as np\n'), ((27554, 27566), 'numpy.conj', 'np.conj', (['u_t'], {}), '(u_t)\n', (27561, 27566), True, 'import numpy as np\n'), ((27592, 27604), 'numpy.conj', 'np.conj', (['wap'], {}), '(wap)\n', (27599, 27604), True, 'import numpy as np\n'), ((27613, 27625), 'numpy.conj', 'np.conj', (['v_t'], {}), '(v_t)\n', (27620, 27625), True, 'import numpy as np\n'), ((31049, 31145), 'numpy.real', 'np.real', (['((ttt[l_l, :, np.newaxis] - ttt[l_l - 1, :, np.newaxis]) / (p_l[l_l] - p_l[\n l_l - 1]))'], {}), '((ttt[l_l, :, np.newaxis] - ttt[l_l - 1, :, np.newaxis]) / (p_l[l_l] -\n p_l[l_l - 1]))\n', (31056, 31145), True, 'import numpy as np\n'), ((31826, 31844), 'numpy.real', 'np.real', (['(c_2 + c_3)'], {}), '(c_2 + c_3)\n', (31833, 31844), True, 'import numpy as np\n'), ((31900, 31912), 'numpy.real', 'np.real', (['c_6'], {}), '(c_6)\n', (31907, 31912), True, 'import numpy as np\n'), ((33997, 34027), 'numpy.real', 'np.real', (['(c21 + c22 + c41 + c42)'], {}), '(c21 + c22 + c41 + c42)\n', (34004, 34027), True, 'import numpy as np\n'), ((34128, 34146), 'numpy.imag', 'np.imag', (['(c_1 + c_6)'], {}), '(c_1 + c_6)\n', (34135, 34146), True, 'import numpy as np\n'), ((16364, 16389), 'numpy.real', 'np.real', (['d3v[l_l, i_h, :]'], {}), '(d3v[l_l, i_h, :])\n', (16371, 16389), True, 'import numpy as np\n'), ((16440, 16476), 'numpy.real', 'np.real', (['d3v[l_l, i_h + nhem - 1, :]'], {}), '(d3v[l_l, i_h + nhem - 1, :])\n', (16447, 16476), True, 'import numpy as np\n'), ((22320, 22347), 'numpy.real', 'np.real', (['ttt[l_l - 1, :, 0]'], {}), '(ttt[l_l - 1, :, 0])\n', (22327, 22347), True, 'import numpy as np\n'), ((22381, 22404), 'numpy.real', 'np.real', (['ttt[l_l, :, 0]'], {}), '(ttt[l_l, :, 0])\n', (22388, 22404), True, 'import numpy as np\n'), ((22515, 22538), 'numpy.real', 'np.real', (['ttt[l_l, :, 0]'], {}), '(ttt[l_l, :, 0])\n', (22522, 22538), True, 'import numpy as np\n'), ((22568, 22595), 'numpy.real', 'np.real', (['ttt[l_l + 1, :, 0]'], {}), '(ttt[l_l + 1, :, 0])\n', (22575, 22595), True, 'import numpy as np\n'), ((22711, 22738), 'numpy.real', 'np.real', (['ttt[l_l - 1, :, 0]'], {}), 
'(ttt[l_l - 1, :, 0])\n', (22718, 22738), True, 'import numpy as np\n'), ((25078, 25122), 'numpy.real', 'np.real', (['(utt[l_l, :, 0] - utt[l_l - 1, :, 0])'], {}), '(utt[l_l, :, 0] - utt[l_l - 1, :, 0])\n', (25085, 25122), True, 'import numpy as np\n'), ((25210, 25254), 'numpy.real', 'np.real', (['(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])'], {}), '(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])\n', (25217, 25254), True, 'import numpy as np\n'), ((25349, 25393), 'numpy.real', 'np.real', (['(utt[l_l + 1, :, 0] - utt[l_l, :, 0])'], {}), '(utt[l_l + 1, :, 0] - utt[l_l, :, 0])\n', (25356, 25393), True, 'import numpy as np\n'), ((25467, 25511), 'numpy.real', 'np.real', (['(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])'], {}), '(vtt[l_l + 1, :, 0] - vtt[l_l, :, 0])\n', (25474, 25511), True, 'import numpy as np\n'), ((25585, 25629), 'numpy.real', 'np.real', (['(utt[l_l, :, 0] - utt[l_l - 1, :, 0])'], {}), '(utt[l_l, :, 0] - utt[l_l - 1, :, 0])\n', (25592, 25629), True, 'import numpy as np\n'), ((25703, 25747), 'numpy.real', 'np.real', (['(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])'], {}), '(vtt[l_l, :, 0] - vtt[l_l - 1, :, 0])\n', (25710, 25747), True, 'import numpy as np\n'), ((26509, 26553), 'numpy.real', 'np.real', (['(utt[:, i_l, 0] - utt[:, i_l - 1, 0])'], {}), '(utt[:, i_l, 0] - utt[:, i_l - 1, 0])\n', (26516, 26553), True, 'import numpy as np\n'), ((26641, 26685), 'numpy.real', 'np.real', (['(vtt[:, i_l, 0] - vtt[:, i_l - 1, 0])'], {}), '(vtt[:, i_l, 0] - vtt[:, i_l - 1, 0])\n', (26648, 26685), True, 'import numpy as np\n'), ((26787, 26835), 'numpy.real', 'np.real', (['(utt[:, i_l + 1, 0] - utt[:, i_l - 1, 0])'], {}), '(utt[:, i_l + 1, 0] - utt[:, i_l - 1, 0])\n', (26794, 26835), True, 'import numpy as np\n'), ((26927, 26975), 'numpy.real', 'np.real', (['(vtt[:, i_l + 1, 0] - vtt[:, i_l - 1, 0])'], {}), '(vtt[:, i_l + 1, 0] - vtt[:, i_l - 1, 0])\n', (26934, 26975), True, 'import numpy as np\n'), ((27825, 27841), 'numpy.tan', 'np.tan', (['lat[i_l]'], {}), '(lat[i_l])\n', (27831, 27841), True, 'import numpy as np\n'), ((27849, 27872), 'numpy.real', 'np.real', (['utt[:, i_l, 0]'], {}), '(utt[:, i_l, 0])\n', (27856, 27872), True, 'import numpy as np\n'), ((29702, 29764), 'numpy.conj', 'np.conj', (['(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l, np.newaxis])'], {}), '(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l, np.newaxis])\n', (29709, 29764), True, 'import numpy as np\n'), ((34089, 34107), 'numpy.real', 'np.real', (['(c_1 - c_5)'], {}), '(c_1 - c_5)\n', (34096, 34107), True, 'import numpy as np\n'), ((34207, 34218), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (34213, 34218), True, 'import numpy as np\n'), ((21181, 21193), 'numpy.conj', 'np.conj', (['wap'], {}), '(wap)\n', (21188, 21193), True, 'import numpy as np\n'), ((21196, 21208), 'numpy.conj', 'np.conj', (['t_t'], {}), '(t_t)\n', (21203, 21208), True, 'import numpy as np\n'), ((27948, 27964), 'numpy.tan', 'np.tan', (['lat[i_l]'], {}), '(lat[i_l])\n', (27954, 27964), True, 'import numpy as np\n'), ((27972, 27995), 'numpy.real', 'np.real', (['vtt[:, i_l, 0]'], {}), '(vtt[:, i_l, 0])\n', (27979, 27995), True, 'import numpy as np\n'), ((29820, 29843), 'numpy.conj', 'np.conj', (['t_v[:, i_l, :]'], {}), '(t_v[:, i_l, :])\n', (29827, 29843), True, 'import numpy as np\n'), ((30106, 30168), 'numpy.conj', 'np.conj', (['(ttt[:, i_l, np.newaxis] - ttt[:, i_l - 1, np.newaxis])'], {}), '(ttt[:, i_l, np.newaxis] - ttt[:, i_l - 1, np.newaxis])\n', (30113, 30168), True, 'import numpy as np\n'), ((30498, 30564), 'numpy.conj', 'np.conj', (['(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l - 1, 
np.newaxis])'], {}), '(ttt[:, i_l + 1, np.newaxis] - ttt[:, i_l - 1, np.newaxis])\n', (30505, 30564), True, 'import numpy as np\n'), ((34048, 34059), 'numpy.tan', 'np.tan', (['lat'], {}), '(lat)\n', (34054, 34059), True, 'import numpy as np\n'), ((30224, 30247), 'numpy.conj', 'np.conj', (['t_v[:, i_l, :]'], {}), '(t_v[:, i_l, :])\n', (30231, 30247), True, 'import numpy as np\n'), ((30644, 30667), 'numpy.conj', 'np.conj', (['t_v[:, i_l, :]'], {}), '(t_v[:, i_l, :])\n', (30651, 30667), True, 'import numpy as np\n'), ((31694, 31706), 'numpy.imag', 'np.imag', (['c_1'], {}), '(c_1)\n', (31701, 31706), True, 'import numpy as np\n'), ((31724, 31762), 'numpy.cos', 'np.cos', (['lat[np.newaxis, :, np.newaxis]'], {}), '(lat[np.newaxis, :, np.newaxis])\n', (31730, 31762), True, 'import numpy as np\n'), ((31789, 31801), 'numpy.conj', 'np.conj', (['c_5'], {}), '(c_5)\n', (31796, 31801), True, 'import numpy as np\n'), ((31804, 31816), 'numpy.conj', 'np.conj', (['t_w'], {}), '(t_w)\n', (31811, 31816), True, 'import numpy as np\n')] |
import argparse
import collections.abc
import random
import numpy
class ValueRange(collections.abc.Container):
    '''A container describing a numeric value range, with helpers to sample
    from it uniformly, log-uniformly, or exponentially.'''
def __init__(self, min=0., max=1., dtype=float):
self.dtype = dtype
if dtype is not int and dtype is not float:
raise TypeError("data type not understood")
self.min_value = dtype(min)
self.max_value = dtype(max)
def __len__(self):
return self.max_value - self.min_value
def __getitem__(self, key):
# Takes a key value between 0 and 1 and converts that into a
# value in the range
# key = numpy.clip(key, self.min_value, self.max_value)
# return self.__rescale(key, self.min_value, self.max_value)
raise IndexError()
def __contains__(self, value):
return (value >= self.min_value) & (value <= self.max_value)
def __rescale(self, value, min_val, max_val):
return value * (max_val - min_val) + min_val
def min(self):
return self.min_value
def max(self):
return self.max_value
def sample_rand(self, size=None):
if self.dtype is int:
return numpy.random.randint(self.min_value, self.max_value + 1, size=size)
else:
return self.__rescale(numpy.random.random(size=size), self.min_value, self.max_value)
def sample_logrand(self, size=None, tau=1.0):
values = numpy.exp(self.__rescale(numpy.random.random(size=size), -34., 0.) / tau)
values = self.__rescale(values, self.min_value, self.max_value)
if size is None:
values = self.dtype(values)
else:
values = numpy.array(values, dtype=self.dtype)
return values
def sample_exprand(self, size=None):
values = numpy.log(self.__rescale(numpy.random.random(size=size), 1., numpy.exp(1.)))
values = self.__rescale(values, self.min_value, self.max_value)
if size is None:
values = self.dtype(values)
else:
values = numpy.array(values, dtype=self.dtype)
return values
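# Minimal usage sketch for ValueRange (values are illustrative):
def _demo_value_range():
    v_r = ValueRange(1, 10, dtype=int)
    assert 5 in v_r and 11 not in v_r
    draws = v_r.sample_rand(size=4)      # four uniform integer draws in [1, 10]
    skewed = v_r.sample_logrand(size=4)  # biased towards the lower end
    return draws, skewed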
def parameter_set(alg_name, **kwargs):
kwargs['prog'] = alg_name
kwargs['conflict_handler'] = 'resolve'
kwargs['add_help'] = False
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument_group(title="optimizable",
description="Algorithm parameters that should/can be optimized. " + \
"Only these parameters are modified during parameter optimizations.")
return parser
def add_parameter(parser, name, min=0., max=1.0, optimize=True, **kwargs):
# All keyword arguments besides min/max should be
# valid keyword args for add_argument.
# If choices specified, try to guess type from its contents
    if 'nargs' in kwargs and not isinstance(kwargs['nargs'], int):
        raise TypeError("Parameters only allowed to have an integer number of arguments")
    if 'choices' in kwargs:
kwargs.setdefault('type', kwargs['choices'][0].__class__)
else:
# Otherwise, default to float
kwargs.setdefault('type', float)
# No choices specified, so generate them based on type
if kwargs['type'] in [int, float]:
value_range = ValueRange(min, max, dtype=kwargs['type'])
kwargs['choices'] = value_range
kwargs['metavar'] = str(min) + ".." + str(max)
elif kwargs['type'] is not bool:
raise TypeError("String typed parameter requires 'choices' argument")
if optimize:
        i = [grp.title for grp in parser._action_groups].index("optimizable")
parser._action_groups[i].add_argument("--" + name, **kwargs)
else:
parser.add_argument("--" + name, **kwargs)
def remove_parameter(parser, name):
# First find the argument/parameter itself
    argument = parser._actions[[act.dest for act in parser._actions].index(name)]
# Next, remove it from all the _actions lists
parser._remove_action(argument)
# Finally, needs to be removed from the _group_actions list for its group
for grp in parser._action_groups:
try:
grp._group_actions.remove(argument)
        except ValueError:
pass
def get_optimize_group(parser):
    i = [grp.title for grp in parser._action_groups].index("optimizable")
return parser._action_groups[i]
def sample_parameter(param):
if param.type is bool:
return bool(random.getrandbits(1))
    elif param.choices is not None:  # All other cases should be handled by sampling choices
try:
            if isinstance(param.nargs, int) and param.nargs > 1:
return param.choices.sample_rand(size=param.nargs)
else:
return param.choices.sample_rand()
        except AttributeError:
return numpy.random.choice(param.choices)
else:
# This *SHOULDNT* happen, it means it is not a boolean
# but also has empty choices...
raise TypeError("non-boolean parameter must have 'choices' of some container")
def randomize_parameters(parser):
opt_grp = get_optimize_group(parser)
param_samples = []
opt_pnames = set()
for param in opt_grp._group_actions:
param_samples.append((param.dest, sample_parameter(param)))
opt_pnames.add(param.dest)
for param in parser._actions:
if param.dest not in opt_pnames:
opt_pnames.add(param.dest)
param_samples.append((param.dest, param.default))
return param_samples
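# End-to-end sketch (hypothetical algorithm and parameter names): build a
# parameter set, register a few tunables, and draw one random configuration.
def _demo_randomize():
    parser = parameter_set('sgd')
    add_parameter(parser, 'learning_rate', min=1e-4, max=1.0)
    add_parameter(parser, 'momentum', min=0., max=0.99)
    add_parameter(parser, 'nesterov', type=bool)
    return randomize_parameters(parser)  # e.g. [('learning_rate', 0.42), ...]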
| [
"argparse.ArgumentParser",
"numpy.random.random",
"numpy.random.choice",
"numpy.exp",
"numpy.array",
"numpy.random.randint",
"random.getrandbits"
] | [((2323, 2356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '(**kwargs)\n', (2346, 2356), False, 'import argparse\n'), ((1243, 1310), 'numpy.random.randint', 'numpy.random.randint', (['self.min_value', '(self.max_value + 1)'], {'size': 'size'}), '(self.min_value, self.max_value + 1, size=size)\n', (1263, 1310), False, 'import numpy\n'), ((1737, 1774), 'numpy.array', 'numpy.array', (['values'], {'dtype': 'self.dtype'}), '(values, dtype=self.dtype)\n', (1748, 1774), False, 'import numpy\n'), ((2105, 2142), 'numpy.array', 'numpy.array', (['values'], {'dtype': 'self.dtype'}), '(values, dtype=self.dtype)\n', (2116, 2142), False, 'import numpy\n'), ((4541, 4562), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (4559, 4562), False, 'import random\n'), ((1359, 1389), 'numpy.random.random', 'numpy.random.random', ([], {'size': 'size'}), '(size=size)\n', (1378, 1389), False, 'import numpy\n'), ((1881, 1911), 'numpy.random.random', 'numpy.random.random', ([], {'size': 'size'}), '(size=size)\n', (1900, 1911), False, 'import numpy\n'), ((1917, 1931), 'numpy.exp', 'numpy.exp', (['(1.0)'], {}), '(1.0)\n', (1926, 1931), False, 'import numpy\n'), ((1516, 1546), 'numpy.random.random', 'numpy.random.random', ([], {'size': 'size'}), '(size=size)\n', (1535, 1546), False, 'import numpy\n'), ((4861, 4895), 'numpy.random.choice', 'numpy.random.choice', (['param.choices'], {}), '(param.choices)\n', (4880, 4895), False, 'import numpy\n')] |
# predict probabilities for counts, given a set of future features
# execute: python predict.py
# This file can be scheduled to run at any time.
# It will generate hourly predictions for the remaining hours of today and N additional days:
# - current hour - if the job is running at 8:45, the first prediction will be for time between 8AM and 9AM
# - next hours - till the end of the day, so until 23:59
# - and next N-days (so for 2 extra days - 48 additional predictions)
# Import necessary libraries
import logging
import pickle
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from tqdm import tqdm
import requests
import os
import json
# Define constants to use
DATASETS_PATH = '../Datasets'
OBJECT_CLASSES = ['person', 'vehicle']
PICKLED_SCALER_FILE = 'final_scaler__%%OBJECT_NAME%%.pickle'
PICKLED_MODEL_FILE = 'final_model__%%OBJECT_NAME%%.pickle'
PREDICTIONS_FILE = 'final_predictions__%%OBJECT_NAME%%.parquet'
N_SAMPLES = 2000 # how many simulations to execute in a Poisson process
PROBA_THRESH = 0.05 # reject probabilities below this threshold
ANOMALY_RATE = 0.007 # when sampling from Poisson, use this quantile to detect anomalies
# Define weather API parameters
API_BASE_URL = 'https://api.darksky.net/forecast'
API_KEY = '3d8137be1b3eb40d88ba1793e47f7071'
LAT, LONG = 51.802931199999996, -8.302591999999999 # camera coordinates
API_HEADERS = {'Accept-Encoding': 'gzip'}
# Define attributes to pull from the API
cur_keys = list(map(str.strip, """summary, precipIntensity, precipProbability, temperature, apparentTemperature,
humidity, windSpeed, windGust, windBearing, cloudCover, uvIndex, visibility""".split(',')))
daily_keys = list(map(str.strip, """summary, sunriseTime, sunsetTime, temperatureHigh, temperatureLow""".split(',')))
def make_ts(year: int, month: int, day: int, hour: int, minute: int, second: int = 0) -> int:
return int(datetime(year, month, day, hour, minute, second).timestamp())
# pull most recent weather data for all hours
def make_api_url(ts: int) -> str:
return f'{API_BASE_URL}/{API_KEY}/{LAT},{LONG},{ts}?exclude=hourly,flags,minutely&units=ca'
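# Small illustrative check of the URL builder (no literal timestamp in the
# assertions, since make_ts depends on the machine's local timezone):
def _demo_make_api_url():
    url = make_api_url(make_ts(2021, 6, 1, 8, 30))
    assert url.startswith(f'{API_BASE_URL}/{API_KEY}/{LAT},{LONG},')
    assert url.endswith('?exclude=hourly,flags,minutely&units=ca')
    return url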
def main(n_days: int = 3) -> None:
"""
Create predictions for the next N days (including today)
(meaning hours remaining for today + extra 2 days)
:param n_days: int
:return: None
"""
# start date will be 30 minutes after current hour
start_date = datetime.now().replace(minute=30, second=0)
# end date will be + N-days at 23:45
end_date = (start_date + timedelta(days=n_days-1)).replace(hour=23)
# generate hourly date ranges
idx = pd.date_range(start=start_date, end=end_date, freq='1H')
hourly_df = pd.DataFrame({'dt': idx})
# add date-time related features
hourly_df['date'] = hourly_df['dt'].astype(str).str[0:19]
hourly_df['hour'] = hourly_df['dt'].dt.hour
hourly_df['n_month'] = hourly_df['dt'].dt.month
hourly_df['day_of_week'] = hourly_df['dt'].dt.dayofweek
hourly_df['is_weekend_day'] = (hourly_df['dt'].dt.dayofweek // 5 == 1).astype(int)
# check if cached file with weather already exists, and use it
# (to avoid making expensive API calls)
cached_filename = f'{DATASETS_PATH}/weather_cached_{str(start_date)[:16]}_{str(end_date)[:16]}.csv'
if os.path.isfile(cached_filename):
logging.info(f'Using cached weather file {cached_filename}')
weather_df = pd.read_csv(cached_filename)
else:
logging.info(f'Pulling weather data from Dark Sky API for {n_days} day(s)')
# pull weather data for each hour from API
dark_sky = []
prev_cur_obj = {}
prev_daily_obj = {}
for index, d in tqdm(hourly_df.iterrows(), total=hourly_df.shape[0]):
# extract date-time info from dt object
args = (d['dt'].year, d['dt'].month, d['dt'].day, d['dt'].hour, d['dt'].minute)
ts = make_ts(*args)
# make a call to Dark Sky API
url = make_api_url(ts)
sky_data = requests.get(url, headers=API_HEADERS)
if sky_data.status_code != 200:
logging.error(f'Status code {sky_data.status_code} for url {url}')
exit(1)
sky_data = sky_data.json()
currently = sky_data['currently']
daily = sky_data['daily']
# init object to save
cur_obj = {'dt': d['dt']}
# keep only selected data elements
for k in cur_keys:
if k in currently:
cur_obj[f'cur__{k}'] = currently[k]
prev_cur_obj[k] = currently[k]
else:
cur_obj[f'cur__{k}'] = prev_cur_obj[k]
for k in daily_keys:
if k in daily['data'][0]:
cur_obj[f'daily__{k}'] = daily['data'][0][k]
prev_daily_obj[k] = daily['data'][0][k]
else:
cur_obj[f'daily__{k}'] = prev_daily_obj[k]
dark_sky.append(cur_obj)
logging.info(f'Caching weather data')
weather_df = pd.DataFrame(dark_sky)
weather_df.to_csv(cached_filename, index=False)
# add date-time related features, so we can merge the datasets together
weather_df['dt'] = pd.to_datetime(weather_df['dt'])
weather_df['date'] = weather_df['dt'].astype(str).str[0:19]
# join detections and weather data
logging.info(f'Merging hourly and weather datasets')
merged = hourly_df.merge(weather_df, on=['date'])
assert hourly_df.shape[0] == merged.shape[0]
# generate dataset for prediction
logging.info(f'Preparing features for predictions')
use_cols = ['hour', 'n_month', 'day_of_week', 'is_weekend_day', 'cur__precipIntensity',
'cur__precipProbability', 'cur__apparentTemperature', 'cur__humidity', 'cur__windSpeed',
'cur__uvIndex']
X = merged[use_cols]
# load models for object classes and generate predictions
for ob_class in OBJECT_CLASSES:
logging.info(f'Loading scaler for {ob_class}')
with open(f'{DATASETS_PATH}/{PICKLED_SCALER_FILE.replace("%%OBJECT_NAME%%", ob_class)}', 'rb') as f:
scaler = pickle.load(f)
# scale features using pickled scaler
logging.info(f'Scaling features for {ob_class}')
X_scaled = scaler.transform(X)
logging.info(f'Loading model for {ob_class}')
with open(f'{DATASETS_PATH}/{PICKLED_MODEL_FILE.replace("%%OBJECT_NAME%%", ob_class)}', 'rb') as f:
model = pickle.load(f)
logging.info(f'Generating predictions for {ob_class}')
predictions = model.predict(X_scaled)
logging.info(f'Generating probabilities from predictions for {ob_class}')
predictions_probas = []
expected_counts = []
anomaly_thresholds = []
# sample from Poisson probability distribution for each prediction,
# the assumption is that predictions can be interpreted as the rates
# in a Poisson process (https://en.wikipedia.org/wiki/Poisson_distribution)
for event_rate in predictions:
samples = np.random.poisson(lam=event_rate, size=N_SAMPLES)
# count unique counts from the simulation
numbers, counts = np.unique(samples, return_counts=True)
# generate probabilities from the unique counts
probabilities = counts / counts.sum()
            # the sampled value with the highest probability is the count to expect
            expected_count = numbers[np.argmax(probabilities)]
expected_counts.append(expected_count)
# filter out weak probabilities
numbers_filtered = numbers[probabilities > PROBA_THRESH]
probabilities_filtered = probabilities[probabilities > PROBA_THRESH]
predictions_probas.append(json.dumps({
'counts': numbers_filtered.tolist(),
'probas': probabilities_filtered.tolist()
}))
# calculate threshold for anomaly detection
anomaly_thresholds.append(np.quantile(samples, 1 - ANOMALY_RATE))
# create a copy of X with added predictions
X_cp = X.copy()
X_cp['pred'] = predictions
X_cp['expected_count'] = expected_counts
X_cp['pred_proba'] = predictions_probas
X_cp['anom_thresh'] = anomaly_thresholds
        # export to parquet (for now); ideally the results should be persisted
        # in the DB so they can be analyzed later. Parquet is a good interim
        # choice because it preserves the data types, and the pred_proba
        # column holds JSON-encoded dictionaries
logging.info(f'Saving probabilities for {ob_class}')
X_cp.to_parquet(f'{DATASETS_PATH}/{PREDICTIONS_FILE.replace("%%OBJECT_NAME%%", ob_class)}',
index=False)
if __name__ == '__main__':
    logging.basicConfig(format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
                        level=logging.INFO, datefmt="%H:%M:%S")
main()
| [
"logging.basicConfig",
"datetime.datetime",
"numpy.unique",
"pandas.read_csv",
"numpy.random.poisson",
"pickle.load",
"numpy.argmax",
"requests.get",
"os.path.isfile",
"datetime.datetime.now",
"numpy.quantile",
"pandas.date_range",
"pandas.DataFrame",
"datetime.timedelta",
"logging.info"... | [((2627, 2683), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""1H"""'}), "(start=start_date, end=end_date, freq='1H')\n", (2640, 2683), True, 'import pandas as pd\n'), ((2700, 2725), 'pandas.DataFrame', 'pd.DataFrame', (["{'dt': idx}"], {}), "({'dt': idx})\n", (2712, 2725), True, 'import pandas as pd\n'), ((3296, 3327), 'os.path.isfile', 'os.path.isfile', (['cached_filename'], {}), '(cached_filename)\n', (3310, 3327), False, 'import os\n'), ((5285, 5317), 'pandas.to_datetime', 'pd.to_datetime', (["weather_df['dt']"], {}), "(weather_df['dt'])\n", (5299, 5317), True, 'import pandas as pd\n'), ((5426, 5478), 'logging.info', 'logging.info', (['f"""Merging hourly and weather datasets"""'], {}), "(f'Merging hourly and weather datasets')\n", (5438, 5478), False, 'import logging\n'), ((5625, 5676), 'logging.info', 'logging.info', (['f"""Preparing features for predictions"""'], {}), "(f'Preparing features for predictions')\n", (5637, 5676), False, 'import logging\n'), ((8878, 9006), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s.%(msecs)03f %(levelname)s %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%H:%M:%S"""'}), "(format=\n '%(asctime)s.%(msecs)03f %(levelname)s %(message)s', level=logging.INFO,\n datefmt='%H:%M:%S')\n", (8897, 9006), False, 'import logging\n'), ((3337, 3397), 'logging.info', 'logging.info', (['f"""Using cached weather file {cached_filename}"""'], {}), "(f'Using cached weather file {cached_filename}')\n", (3349, 3397), False, 'import logging\n'), ((3419, 3447), 'pandas.read_csv', 'pd.read_csv', (['cached_filename'], {}), '(cached_filename)\n', (3430, 3447), True, 'import pandas as pd\n'), ((3466, 3541), 'logging.info', 'logging.info', (['f"""Pulling weather data from Dark Sky API for {n_days} day(s)"""'], {}), "(f'Pulling weather data from Dark Sky API for {n_days} day(s)')\n", (3478, 3541), False, 'import logging\n'), ((5047, 5084), 'logging.info', 'logging.info', (['f"""Caching weather data"""'], {}), "(f'Caching weather data')\n", (5059, 5084), False, 'import logging\n'), ((5106, 5128), 'pandas.DataFrame', 'pd.DataFrame', (['dark_sky'], {}), '(dark_sky)\n', (5118, 5128), True, 'import pandas as pd\n'), ((6038, 6084), 'logging.info', 'logging.info', (['f"""Loading scaler for {ob_class}"""'], {}), "(f'Loading scaler for {ob_class}')\n", (6050, 6084), False, 'import logging\n'), ((6285, 6333), 'logging.info', 'logging.info', (['f"""Scaling features for {ob_class}"""'], {}), "(f'Scaling features for {ob_class}')\n", (6297, 6333), False, 'import logging\n'), ((6382, 6427), 'logging.info', 'logging.info', (['f"""Loading model for {ob_class}"""'], {}), "(f'Loading model for {ob_class}')\n", (6394, 6427), False, 'import logging\n'), ((6580, 6634), 'logging.info', 'logging.info', (['f"""Generating predictions for {ob_class}"""'], {}), "(f'Generating predictions for {ob_class}')\n", (6592, 6634), False, 'import logging\n'), ((6690, 6763), 'logging.info', 'logging.info', (['f"""Generating probabilities from predictions for {ob_class}"""'], {}), "(f'Generating probabilities from predictions for {ob_class}')\n", (6702, 6763), False, 'import logging\n'), ((8655, 8707), 'logging.info', 'logging.info', (['f"""Saving probabilities for {ob_class}"""'], {}), "(f'Saving probabilities for {ob_class}')\n", (8667, 8707), False, 'import logging\n'), ((2424, 2438), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2436, 2438), False, 'from datetime import datetime, timedelta\n'), 
((4027, 4065), 'requests.get', 'requests.get', (['url'], {'headers': 'API_HEADERS'}), '(url, headers=API_HEADERS)\n', (4039, 4065), False, 'import requests\n'), ((6215, 6229), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6226, 6229), False, 'import pickle\n'), ((6556, 6570), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6567, 6570), False, 'import pickle\n'), ((7156, 7205), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'event_rate', 'size': 'N_SAMPLES'}), '(lam=event_rate, size=N_SAMPLES)\n', (7173, 7205), True, 'import numpy as np\n'), ((7291, 7329), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (7300, 7329), True, 'import numpy as np\n'), ((7545, 7569), 'numpy.argmax', 'np.argmax', (['probabilities'], {}), '(probabilities)\n', (7554, 7569), True, 'import numpy as np\n'), ((1901, 1949), 'datetime.datetime', 'datetime', (['year', 'month', 'day', 'hour', 'minute', 'second'], {}), '(year, month, day, hour, minute, second)\n', (1909, 1949), False, 'from datetime import datetime, timedelta\n'), ((2539, 2565), 'datetime.timedelta', 'timedelta', ([], {'days': '(n_days - 1)'}), '(days=n_days - 1)\n', (2548, 2565), False, 'from datetime import datetime, timedelta\n'), ((4126, 4192), 'logging.error', 'logging.error', (['f"""Status code {sky_data.status_code} for url {url}"""'], {}), "(f'Status code {sky_data.status_code} for url {url}')\n", (4139, 4192), False, 'import logging\n'), ((8089, 8127), 'numpy.quantile', 'np.quantile', (['samples', '(1 - ANOMALY_RATE)'], {}), '(samples, 1 - ANOMALY_RATE)\n', (8100, 8127), True, 'import numpy as np\n')] |
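A minimal, self-contained sketch of the rate-to-probabilities step above: each model prediction is treated as the rate (lambda) of a Poisson process and simulated into a discrete count distribution. The sample size and threshold defaults here are illustrative, not the script's actual configuration.

import numpy as np

def rate_to_count_probas(event_rate, n_samples=10_000, proba_thresh=0.05):
    # simulate counts under a Poisson process with the predicted rate
    samples = np.random.poisson(lam=event_rate, size=n_samples)
    numbers, counts = np.unique(samples, return_counts=True)
    probabilities = counts / counts.sum()
    # keep only counts whose simulated probability clears the threshold
    keep = probabilities > proba_thresh
    return dict(zip(numbers[keep].tolist(), probabilities[keep].tolist()))

print(rate_to_count_probas(3.2))  # counts mapped to their simulated probabilities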
import sys
import unittest
from random import randint
import numpy as np
import crocoddyl
import pinocchio
from crocoddyl.utils import DDPDerived
class SolverAbstractTestCase(unittest.TestCase):
MODEL = None
SOLVER = None
SOLVER_DER = None
def setUp(self):
# Set up the solvers
self.T = randint(1, 21)
state = self.MODEL.state
self.xs = []
self.us = []
self.xs.append(state.rand())
for i in range(self.T):
self.xs.append(state.rand())
self.us.append(pinocchio.utils.rand(self.MODEL.nu))
self.PROBLEM = crocoddyl.ShootingProblem(self.xs[0], [self.MODEL] * self.T, self.MODEL)
self.PROBLEM_DER = crocoddyl.ShootingProblem(self.xs[0], [self.MODEL] * self.T, self.MODEL)
self.solver = self.SOLVER(self.PROBLEM)
self.solver_der = self.SOLVER_DER(self.PROBLEM_DER)
def test_number_of_nodes(self):
# Check the number of nodes
self.assertEqual(self.T, self.solver.problem.T, "Wrong number of nodes in SOLVER")
self.assertEqual(self.T, self.solver_der.problem.T, "Wrong number of nodes in SOLVER_DER")
def test_solve(self):
        # Run at most 10 iterations to keep the test fast
self.solver.solve([], [], 10)
self.solver_der.solve([], [], 10)
for x1, x2 in zip(self.solver.xs, self.solver_der.xs):
self.assertTrue(np.allclose(x1, x2, atol=1e-9), "xs doesn't match.")
for u1, u2 in zip(self.solver.us, self.solver_der.us):
self.assertTrue(np.allclose(u1, u2, atol=1e-9), "us doesn't match.")
for k1, k2 in zip(self.solver.k, self.solver_der.k):
self.assertTrue(np.allclose(k1, k2, atol=1e-9), "k doesn't match.")
def test_compute_search_direction(self):
# Compute the direction
self.solver.computeDirection()
self.solver_der.computeDirection()
# Check the LQ model of the Hamiltonian
for qx1, qx2 in zip(self.solver.Qx, self.solver_der.Qx):
self.assertTrue(np.allclose(qx1, qx2, atol=1e-9), "Qx doesn't match.")
for qu1, qu2 in zip(self.solver.Qu, self.solver_der.Qu):
self.assertTrue(np.allclose(qu1, qu2, atol=1e-9), "Qu doesn't match.")
for qxx1, qxx2 in zip(self.solver.Qxx, self.solver_der.Qxx):
self.assertTrue(np.allclose(qxx1, qxx2, atol=1e-9), "Qxx doesn't match.")
for qxu1, qxu2 in zip(self.solver.Qxu, self.solver_der.Qxu):
            self.assertTrue(np.allclose(qxu1, qxu2, atol=1e-9), "Qxu doesn't match.")
for quu1, quu2 in zip(self.solver.Quu, self.solver_der.Quu):
            self.assertTrue(np.allclose(quu1, quu2, atol=1e-9), "Quu doesn't match.")
for vx1, vx2 in zip(self.solver.Vx, self.solver_der.Vx):
self.assertTrue(np.allclose(vx1, vx2, atol=1e-9), "Vx doesn't match.")
for vxx1, vxx2 in zip(self.solver.Vxx, self.solver_der.Vxx):
self.assertTrue(np.allclose(vxx1, vxx2, atol=1e-9), "Vxx doesn't match.")
def test_try_step(self):
# Try a full step and check the improvement in the cost
self.solver.computeDirection()
self.solver_der.computeDirection()
cost = self.solver.tryStep()
costDer = self.solver_der.tryStep()
self.assertAlmostEqual(cost, costDer, 10, "Wrong cost value for full step")
# Try a half step and check the improvement in the cost
cost = self.solver.tryStep(0.5)
costDer = self.solver_der.tryStep(0.5)
self.assertAlmostEqual(cost, costDer, 10, "Wrong cost value for half step")
def test_stopping_criteria(self):
        # Run 2 iterations to keep the test fast
self.solver.solve([], [], 2)
self.solver_der.solve([], [], 2)
# Compute and check the stopping criteria
stop = self.solver.stoppingCriteria()
stopDer = self.solver_der.stoppingCriteria()
self.assertAlmostEqual(stop, stopDer, 10, "Wrong stopping value")
def test_expected_improvement(self):
        # Run 2 iterations to keep the test fast
self.solver.solve([], [], 2)
self.solver_der.solve([], [], 2)
expImp = self.solver.expectedImprovement()
expImpDer = self.solver_der.expectedImprovement()
self.assertTrue(np.allclose(expImp, expImpDer, atol=1e-9), "Expected improvement doesn't match.")
class UnicycleDDPTest(SolverAbstractTestCase):
MODEL = crocoddyl.ActionModelUnicycle()
SOLVER = crocoddyl.SolverDDP
SOLVER_DER = DDPDerived
class ManipulatorDDPTest(SolverAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelManipulator()
STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
COST_SUM = crocoddyl.CostModelSum(STATE, ROBOT_MODEL.nv)
COST_SUM.addCost('xReg', crocoddyl.CostModelState(STATE), 1e-7)
COST_SUM.addCost('uReg', crocoddyl.CostModelControl(STATE), 1e-7)
COST_SUM.addCost(
'frTrack',
crocoddyl.CostModelFramePlacement(
STATE, crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId("effector_body"), pinocchio.SE3.Random())), 1.)
DIFF_MODEL = crocoddyl.DifferentialActionModelFreeFwdDynamics(STATE, COST_SUM)
    MODEL = crocoddyl.IntegratedActionModelEuler(DIFF_MODEL, 1e-3)
SOLVER = crocoddyl.SolverDDP
SOLVER_DER = DDPDerived
if __name__ == '__main__':
test_classes_to_run = [UnicycleDDPTest, ManipulatorDDPTest]
loader = unittest.TestLoader()
suites_list = []
for test_class in test_classes_to_run:
suite = loader.loadTestsFromTestCase(test_class)
suites_list.append(suite)
big_suite = unittest.TestSuite(suites_list)
runner = unittest.TextTestRunner()
results = runner.run(big_suite)
sys.exit(not results.wasSuccessful())
| [
"unittest.TestSuite",
"random.randint",
"numpy.allclose",
"crocoddyl.ShootingProblem",
"crocoddyl.CostModelControl",
"crocoddyl.CostModelState",
"pinocchio.utils.rand",
"pinocchio.SE3.Random",
"crocoddyl.CostModelSum",
"pinocchio.buildSampleModelManipulator",
"crocoddyl.StateMultibody",
"croco... | [((4457, 4488), 'crocoddyl.ActionModelUnicycle', 'crocoddyl.ActionModelUnicycle', ([], {}), '()\n', (4486, 4488), False, 'import crocoddyl\n'), ((4620, 4659), 'pinocchio.buildSampleModelManipulator', 'pinocchio.buildSampleModelManipulator', ([], {}), '()\n', (4657, 4659), False, 'import pinocchio\n'), ((4672, 4709), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (4696, 4709), False, 'import crocoddyl\n'), ((4725, 4770), 'crocoddyl.CostModelSum', 'crocoddyl.CostModelSum', (['STATE', 'ROBOT_MODEL.nv'], {}), '(STATE, ROBOT_MODEL.nv)\n', (4747, 4770), False, 'import crocoddyl\n'), ((5125, 5190), 'crocoddyl.DifferentialActionModelFreeFwdDynamics', 'crocoddyl.DifferentialActionModelFreeFwdDynamics', (['STATE', 'COST_SUM'], {}), '(STATE, COST_SUM)\n', (5173, 5190), False, 'import crocoddyl\n'), ((5529, 5550), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (5548, 5550), False, 'import unittest\n'), ((5722, 5753), 'unittest.TestSuite', 'unittest.TestSuite', (['suites_list'], {}), '(suites_list)\n', (5740, 5753), False, 'import unittest\n'), ((5767, 5792), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (5790, 5792), False, 'import unittest\n'), ((324, 338), 'random.randint', 'randint', (['(1)', '(21)'], {}), '(1, 21)\n', (331, 338), False, 'from random import randint\n'), ((611, 683), 'crocoddyl.ShootingProblem', 'crocoddyl.ShootingProblem', (['self.xs[0]', '([self.MODEL] * self.T)', 'self.MODEL'], {}), '(self.xs[0], [self.MODEL] * self.T, self.MODEL)\n', (636, 683), False, 'import crocoddyl\n'), ((711, 783), 'crocoddyl.ShootingProblem', 'crocoddyl.ShootingProblem', (['self.xs[0]', '([self.MODEL] * self.T)', 'self.MODEL'], {}), '(self.xs[0], [self.MODEL] * self.T, self.MODEL)\n', (736, 783), False, 'import crocoddyl\n'), ((4800, 4831), 'crocoddyl.CostModelState', 'crocoddyl.CostModelState', (['STATE'], {}), '(STATE)\n', (4824, 4831), False, 'import crocoddyl\n'), ((4868, 4901), 'crocoddyl.CostModelControl', 'crocoddyl.CostModelControl', (['STATE'], {}), '(STATE)\n', (4894, 4901), False, 'import crocoddyl\n'), ((5240, 5305), 'crocoddyl.DifferentialActionModelFreeFwdDynamics', 'crocoddyl.DifferentialActionModelFreeFwdDynamics', (['STATE', 'COST_SUM'], {}), '(STATE, COST_SUM)\n', (5288, 5305), False, 'import crocoddyl\n'), ((4314, 4356), 'numpy.allclose', 'np.allclose', (['expImp', 'expImpDer'], {'atol': '(1e-09)'}), '(expImp, expImpDer, atol=1e-09)\n', (4325, 4356), True, 'import numpy as np\n'), ((551, 586), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['self.MODEL.nu'], {}), '(self.MODEL.nu)\n', (571, 586), False, 'import pinocchio\n'), ((1421, 1452), 'numpy.allclose', 'np.allclose', (['x1', 'x2'], {'atol': '(1e-09)'}), '(x1, x2, atol=1e-09)\n', (1432, 1452), True, 'import numpy as np\n'), ((1565, 1596), 'numpy.allclose', 'np.allclose', (['u1', 'u2'], {'atol': '(1e-09)'}), '(u1, u2, atol=1e-09)\n', (1576, 1596), True, 'import numpy as np\n'), ((1707, 1738), 'numpy.allclose', 'np.allclose', (['k1', 'k2'], {'atol': '(1e-09)'}), '(k1, k2, atol=1e-09)\n', (1718, 1738), True, 'import numpy as np\n'), ((2060, 2093), 'numpy.allclose', 'np.allclose', (['qx1', 'qx2'], {'atol': '(1e-09)'}), '(qx1, qx2, atol=1e-09)\n', (2071, 2093), True, 'import numpy as np\n'), ((2208, 2241), 'numpy.allclose', 'np.allclose', (['qu1', 'qu2'], {'atol': '(1e-09)'}), '(qu1, qu2, atol=1e-09)\n', (2219, 2241), True, 'import numpy as np\n'), ((2360, 2395), 'numpy.allclose', 'np.allclose', (['qxx1', 'qxx2'], {'atol': 
'(1e-09)'}), '(qxx1, qxx2, atol=1e-09)\n', (2371, 2395), True, 'import numpy as np\n'), ((2515, 2550), 'numpy.allclose', 'np.allclose', (['qxu1', 'qxu2'], {'atol': '(1e-09)'}), '(qxu1, qxu2, atol=1e-09)\n', (2526, 2550), True, 'import numpy as np\n'), ((2670, 2703), 'numpy.allclose', 'np.allclose', (['qx1', 'qx2'], {'atol': '(1e-09)'}), '(qx1, qx2, atol=1e-09)\n', (2681, 2703), True, 'import numpy as np\n'), ((2819, 2852), 'numpy.allclose', 'np.allclose', (['vx1', 'vx2'], {'atol': '(1e-09)'}), '(vx1, vx2, atol=1e-09)\n', (2830, 2852), True, 'import numpy as np\n'), ((2971, 3006), 'numpy.allclose', 'np.allclose', (['vxx1', 'vxx2'], {'atol': '(1e-09)'}), '(vxx1, vxx2, atol=1e-09)\n', (2982, 3006), True, 'import numpy as np\n'), ((5078, 5100), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (5098, 5100), False, 'import pinocchio\n')] |
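For context, a standalone sketch of the solver set-up these tests exercise, assuming crocoddyl is installed; the horizon length is illustrative, and the printed attribute names follow crocoddyl's Python API.

import crocoddyl

model = crocoddyl.ActionModelUnicycle()
T = 20
problem = crocoddyl.ShootingProblem(model.state.zero(), [model] * T, model)
solver = crocoddyl.SolverDDP(problem)
solver.solve([], [], 10)           # empty warm start, at most 10 iterations
print(solver.iter, solver.cost)    # iterations used and final cost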
#!/usr/bin/env python3
#title : poisson.py
#description : Poisson distribution.
#author : <NAME>
#date : 2015.06.19
#version : 0.1
#usage : python poisson.py
#=====================================================
import numpy as np
from scipy import special as sp
from scipy import stats
from mpmath import ln
from core import core as co
class Poisson(co.RealDistribution):
"""
Poisson distribution:
Poisson(x) = lambda^x * exp(-lambda) / x!.
If lambda is too small, it is replaced by a delta function.
"""
@staticmethod
def pmf(params, domain=co.DEFAULT_PDF_MAX):
"""
Probability mass function.
:param params: a one element list containing the shape (lambda) parameter.
:param domain: maximum of the domain.
:return: probability mass function.
"""
if params[0] < co.EPSILON:
return co.delta.pmf([0], domain)
else:
return stats.poisson.pmf(np.arange(0, domain+1), params[0])
@staticmethod
def samples(params, size=co.DEFAULT_SAMPLE_SIZE, domain=co.DEFAULT_SAMPLE_MAX):
"""
Returns samples with Poisson distribution.
:param params: a one element list containing the shape (lambda) parameter.
:param size: number of samples.
:param domain: unused.
:return: samples.
"""
if params[0] < co.EPSILON:
return co.delta.samples([0], size)
else:
return np.random.poisson(params[0], size)
@staticmethod
def log_likelihood(params, data, nonzero_only=False):
"""
Calculates the log-likelihood on the data.
The factorial is approximated by using Stirling's formula.
:param params: a one element list containing the shape (lambda) parameter.
:param data: input data as a numpy array.
:param nonzero_only: whether nonzero element should be considered only. This is
used after determining the parameters and comparing to distributions that ignore
zero values.
:return: log-likelihood.
"""
nonzero_samples = data[np.where(data > 0)]
if params[0] < co.EPSILON:
return co.delta.log_likelihood([0], data)
else:
            if nonzero_only:
                _samples = nonzero_samples
            else:
                _samples = data
            return np.sum(_samples)*ln(params[0])\
                - len(_samples)*params[0]\
                - 0.5*len(_samples)*ln(2*np.pi) - np.sum((0.5+nonzero_samples)*np.log(nonzero_samples)-nonzero_samples)\
                - np.sum(np.log(1+1/(12*nonzero_samples)+1/(288*np.power(nonzero_samples, 2))))
@staticmethod
def get_params(params):
return "lambda = %.5f" % params[0]
poisson = Poisson() | [
"core.core.delta.log_likelihood",
"numpy.random.poisson",
"numpy.power",
"numpy.where",
"core.core.delta.pmf",
"numpy.log",
"core.core.delta.samples",
"numpy.sum",
"mpmath.ln",
"numpy.arange"
] | [((932, 957), 'core.core.delta.pmf', 'co.delta.pmf', (['[0]', 'domain'], {}), '([0], domain)\n', (944, 957), True, 'from core import core as co\n'), ((1457, 1484), 'core.core.delta.samples', 'co.delta.samples', (['[0]', 'size'], {}), '([0], size)\n', (1473, 1484), True, 'from core import core as co\n'), ((1518, 1552), 'numpy.random.poisson', 'np.random.poisson', (['params[0]', 'size'], {}), '(params[0], size)\n', (1535, 1552), True, 'import numpy as np\n'), ((2169, 2187), 'numpy.where', 'np.where', (['(data > 0)'], {}), '(data > 0)\n', (2177, 2187), True, 'import numpy as np\n'), ((2243, 2277), 'core.core.delta.log_likelihood', 'co.delta.log_likelihood', (['[0]', 'data'], {}), '([0], data)\n', (2266, 2277), True, 'from core import core as co\n'), ((1009, 1033), 'numpy.arange', 'np.arange', (['(0)', '(domain + 1)'], {}), '(0, domain + 1)\n', (1018, 1033), True, 'import numpy as np\n'), ((2352, 2370), 'numpy.where', 'np.where', (['(data > 0)'], {}), '(data > 0)\n', (2360, 2370), True, 'import numpy as np\n'), ((2548, 2561), 'mpmath.ln', 'ln', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2550, 2561), False, 'from mpmath import ln\n'), ((2440, 2455), 'numpy.sum', 'np.sum', (['_sampes'], {}), '(_sampes)\n', (2446, 2455), True, 'import numpy as np\n'), ((2456, 2469), 'mpmath.ln', 'ln', (['params[0]'], {}), '(params[0])\n', (2458, 2469), False, 'from mpmath import ln\n'), ((2591, 2614), 'numpy.log', 'np.log', (['nonzero_samples'], {}), '(nonzero_samples)\n', (2597, 2614), True, 'import numpy as np\n'), ((2697, 2725), 'numpy.power', 'np.power', (['nonzero_samples', '(2)'], {}), '(nonzero_samples, 2)\n', (2705, 2725), True, 'import numpy as np\n')] |
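A quick, standalone check of the Stirling-based log-factorial that log_likelihood relies on, compared against math.lgamma; purely illustrative.

import math

def log_factorial_stirling(x):
    # ln x! ~ 0.5*ln(2*pi) + (x + 0.5)*ln(x) - x + ln(1 + 1/(12x) + 1/(288x^2))
    return (0.5 * math.log(2 * math.pi) + (x + 0.5) * math.log(x) - x
            + math.log(1 + 1 / (12 * x) + 1 / (288 * x ** 2)))

for x in (1, 5, 20):
    print(x, log_factorial_stirling(x), math.lgamma(x + 1))  # near-identical values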
import cv2  # used by extract_data; not guaranteed by the star import below
import numpy as np
from collections import Counter
from lidar_interpolate_toolbox import *
from path import Path
from PIL import Image
from imageio import imread
from visdom import Visdom
from color_utils import *
vis = Visdom()
def read_raw_calib_file(filepath):
# From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
"""Read in a calibration file and parse into a dictionary."""
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def generate_depth_map(img, velo, Tr, R_rect, P_rect, depth_size_ratio=1, depth_scale=2.5, img_height=256,
img_width=512,
choose_closest=False):
# compute projection matrix velodyne->image plane
velo2cam = np.vstack((Tr, np.array([0, 0, 0, 1.0])))
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = R_rect.reshape(3, 3)
P_rect[0] /= depth_size_ratio
P_rect[1] /= depth_size_ratio
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, -1:]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < img_width / depth_size_ratio)
val_inds = val_inds & (velo_pts_im[:, 1] < img_height / depth_size_ratio)
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros((img_height // depth_size_ratio, img_width // depth_size_ratio)).astype(
np.float32)
    depth[velo_pts_im[:, 1].astype(int), velo_pts_im[:, 0].astype(int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
if choose_closest:
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
depth = np.floor((depth - depth.min()) / (depth.max() - depth.min()) * 255) * depth_scale
depth[depth > 255.] = 255
interpolated_lidar = sd_filter(img, depth)
return interpolated_lidar
def extract_data(root, name, img_height=256, img_width=512):
root = Path(root)
img_file = root / 'image_2' / name + '.png'
img = Image.fromarray(imread(img_file))
zoom_y = img_height / img.size[1]
zoom_x = img_width / img.size[0]
img = img.resize((img_width, img_height))
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
calib_file = root / 'calib' / name + '.txt'
filedata = read_raw_calib_file(calib_file)
P_rect = np.reshape(filedata['P2'], (3, 4))
R_rect = filedata['R0_rect'].reshape(3, 3)
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
Tr = filedata['Tr_velo_to_cam'].reshape(3, 4)
velo_name = root / 'velodyne' / name + '.bin'
velo = np.fromfile(velo_name, dtype=np.float32).reshape(-1, 4)
velo[:, 3] = 1
velo = velo[velo[:, 0] >= 0, :]
depth = generate_depth_map(img, velo, Tr, R_rect, P_rect)
vis.images(
array2color(depth, max_value=None,
colormap='magma'), win='d', opts=dict(title='d'))
return array2color(depth, max_value=None, colormap='magma')
if __name__ == '__main__':
root = Path('./data/object/testing')
imgs = root / 'image_2'
files = imgs.files('*.png')
for f in files:
depth = extract_data(root, f.stem)
if 'float' in str(depth.dtype):
if depth.max() <= 1:
depth = depth * 255.
depth = np.uint8(depth)
depth = np.transpose(depth, (1, 2, 0))
img = Image.fromarray(depth)
img.save(root / 'lidar' / f.stem + '.png')
root = Path('./data/object/training')
imgs = root / 'image_2'
files = imgs.files('*.png')
for f in files:
depth = extract_data(root, f.stem)
if 'float' in str(depth.dtype):
if depth.max() <= 1:
depth = depth * 255.
depth = np.uint8(depth)
depth = np.transpose(depth, (1, 2, 0))
img = Image.fromarray(depth)
img.save(root / 'lidar' / f.stem + '.png')
| [
"numpy.uint8",
"numpy.eye",
"PIL.Image.fromarray",
"numpy.reshape",
"numpy.fromfile",
"numpy.where",
"collections.Counter",
"path.Path",
"numpy.dot",
"numpy.array",
"numpy.zeros",
"imageio.imread",
"numpy.transpose",
"numpy.round",
"visdom.Visdom"
] | [((221, 229), 'visdom.Visdom', 'Visdom', ([], {}), '()\n', (227, 229), False, 'from visdom import Visdom\n'), ((1133, 1142), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1139, 1142), True, 'import numpy as np\n'), ((3074, 3084), 'path.Path', 'Path', (['root'], {}), '(root)\n', (3078, 3084), False, 'from path import Path\n'), ((3466, 3500), 'numpy.reshape', 'np.reshape', (["filedata['P2']", '(3, 4)'], {}), "(filedata['P2'], (3, 4))\n", (3476, 3500), True, 'import numpy as np\n'), ((4116, 4145), 'path.Path', 'Path', (['"""./data/object/testing"""'], {}), "('./data/object/testing')\n", (4120, 4145), False, 'from path import Path\n'), ((4562, 4592), 'path.Path', 'Path', (['"""./data/object/training"""'], {}), "('./data/object/training')\n", (4566, 4592), False, 'from path import Path\n'), ((1281, 1307), 'numpy.dot', 'np.dot', (['P_rect', 'R_cam2rect'], {}), '(P_rect, R_cam2rect)\n', (1287, 1307), True, 'import numpy as np\n'), ((1377, 1402), 'numpy.dot', 'np.dot', (['P_velo2im', 'velo.T'], {}), '(P_velo2im, velo.T)\n', (1383, 1402), True, 'import numpy as np\n'), ((1588, 1615), 'numpy.round', 'np.round', (['velo_pts_im[:, 0]'], {}), '(velo_pts_im[:, 0])\n', (1596, 1615), True, 'import numpy as np\n'), ((1644, 1671), 'numpy.round', 'np.round', (['velo_pts_im[:, 1]'], {}), '(velo_pts_im[:, 1])\n', (1652, 1671), True, 'import numpy as np\n'), ((3159, 3175), 'imageio.imread', 'imread', (['img_file'], {}), '(img_file)\n', (3165, 3175), False, 'from imageio import imread\n'), ((3322, 3335), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3330, 3335), True, 'import numpy as np\n'), ((4431, 4461), 'numpy.transpose', 'np.transpose', (['depth', '(1, 2, 0)'], {}), '(depth, (1, 2, 0))\n', (4443, 4461), True, 'import numpy as np\n'), ((4476, 4498), 'PIL.Image.fromarray', 'Image.fromarray', (['depth'], {}), '(depth)\n', (4491, 4498), False, 'from PIL import Image\n'), ((4878, 4908), 'numpy.transpose', 'np.transpose', (['depth', '(1, 2, 0)'], {}), '(depth, (1, 2, 0))\n', (4890, 4908), True, 'import numpy as np\n'), ((4923, 4945), 'PIL.Image.fromarray', 'Image.fromarray', (['depth'], {}), '(depth)\n', (4938, 4945), False, 'from PIL import Image\n'), ((1088, 1112), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0]'], {}), '([0, 0, 0, 1.0])\n', (1096, 1112), True, 'import numpy as np\n'), ((1977, 2050), 'numpy.zeros', 'np.zeros', (['(img_height // depth_size_ratio, img_width // depth_size_ratio)'], {}), '((img_height // depth_size_ratio, img_width // depth_size_ratio))\n', (1985, 2050), True, 'import numpy as np\n'), ((3710, 3750), 'numpy.fromfile', 'np.fromfile', (['velo_name'], {'dtype': 'np.float32'}), '(velo_name, dtype=np.float32)\n', (3721, 3750), True, 'import numpy as np\n'), ((4399, 4414), 'numpy.uint8', 'np.uint8', (['depth'], {}), '(depth)\n', (4407, 4414), True, 'import numpy as np\n'), ((4846, 4861), 'numpy.uint8', 'np.uint8', (['depth'], {}), '(depth)\n', (4854, 4861), True, 'import numpy as np\n'), ((2593, 2613), 'numpy.where', 'np.where', (['(inds == dd)'], {}), '(inds == dd)\n', (2601, 2613), True, 'import numpy as np\n'), ((2510, 2523), 'collections.Counter', 'Counter', (['inds'], {}), '(inds)\n', (2517, 2523), False, 'from collections import Counter\n')] |
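A toy version of the velodyne-to-image projection above: homogeneous points are multiplied by a 3x4 projection matrix, perspective-divided, then shifted by one pixel to match the KITTI matlab convention. The matrix values are made up for illustration; the real code composes P_rect, R_cam2rect and velo2cam.

import numpy as np

P = np.array([[700.0,   0.0, 320.0, 0.0],
              [  0.0, 700.0, 128.0, 0.0],
              [  0.0,   0.0,   1.0, 0.0]])
velo = np.array([[0.5, 0.3, 10.0, 1.0]])    # one homogeneous point, depth 10
pts = (P @ velo.T).T
pts[:, :2] = pts[:, :2] / pts[:, -1:]      # perspective divide
u, v = np.round(pts[:, 0]) - 1, np.round(pts[:, 1]) - 1
print(u, v, pts[:, 2])                   # pixel coordinates and depth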
"""
Train a new model on one or across multiple GPUs.
"""
from comet_ml import ExistingExperiment, Experiment
import collections
import math
import random
from getpass import getpass
import keyring
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
progress_bar,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging.meters import AverageMeter, StopwatchMeter
from fairseq.trainer import Trainer
def main(args, config=None, init_distributed=False):
utils.import_user_module(args)
experiment = None
if config:
experiment = ExistingExperiment(
api_key=config["api_key"],
previous_experiment=config["experiment_key"],
auto_output_logging=None,
)
assert (
args.max_tokens is not None or args.max_sentences is not None
), "Must specify batch size either with --max-tokens or --max-sentences"
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if init_distributed:
args.distributed_rank = distributed_utils.distributed_init(args)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
print(args)
if experiment:
experiment.log_parameters(
vars(args), prefix="Device {} :: ".format(args.device_id)
)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=0)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print(model)
print("| model {}, criterion {}".format(args.arch, criterion.__class__.__name__))
print(
"| num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
if experiment:
experiment.log_parameters(
{
"criterion": criterion.__class__.__name__,
"num. model params": sum(p.numel() for p in model.parameters()),
"num. trained params": sum(
p.numel() for p in model.parameters() if p.requires_grad
),
},
prefix="Device {} :: ".format(args.device_id),
)
# Build trainer
trainer = Trainer(args, task, model, criterion)
print("| training on {} GPUs".format(args.distributed_world_size))
print(
"| max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.max_sentences
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(",")
while (
lr > args.min_lr
and epoch_itr.epoch < max_epoch
and trainer.get_num_updates() < max_update
):
# train for one epoch
train(args, trainer, task, epoch_itr, experiment)
if (
not args.disable_validation
and epoch_itr.epoch % args.validate_interval == 0
):
valid_losses = validate(
args, trainer, task, epoch_itr, valid_subsets, experiment
)
else:
valid_losses = [None]
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
reload_dataset = ":" in getattr(args, "data", "")
# sharded data: get train iterator for next epoch
epoch_itr = trainer.get_train_iterator(
epoch_itr.epoch, load_dataset=reload_dataset
)
train_meter.stop()
print("| done training in {:.1f} seconds".format(train_meter.sum))
if experiment:
experiment.log_metrics(
{"valid_loss": valid_losses[0], "lr": lr},
prefix="Device {} ".format(args.device_id),
)
def train(args, trainer, task, epoch_itr, experiment=None):
"""Train the model for one epoch."""
# Update parameters every N batches
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.epoch >= args.curriculum),
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch, no_progress_bar="simple"
)
extra_meters = collections.defaultdict(lambda: AverageMeter())
valid_subsets = args.valid_subset.split(",")
max_update = args.max_update or math.inf
for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
log_output = trainer.train_step(samples)
if log_output is None:
continue
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ["loss", "nll_loss", "ntokens", "nsentences", "sample_size"]:
continue # these are already logged above
if "loss" in k or k == "accuracy":
extra_meters[k].update(v, log_output["sample_size"])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats, tag="train", step=stats["num_updates"])
if experiment:
experiment.log_metrics(
stats, step=stats["num_updates"], prefix="mid_epoch_train"
)
# ignore the first mini-batch in words-per-second and updates-per-second calculation
if i == 0:
trainer.get_meter("wps").reset()
trainer.get_meter("ups").reset()
num_updates = trainer.get_num_updates()
if (
not args.disable_validation
and args.save_interval_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates > 0
):
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats, tag="train", step=stats["num_updates"])
if experiment:
experiment.log_metrics(
stats, prefix="end_of_epoch_train", step=stats["num_updates"]
)
# reset training meters
for k in [
"train_loss",
"train_nll_loss",
"wps",
"ups",
"wpb",
"bsz",
"gnorm",
"clip",
]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats["loss"] = trainer.get_meter("train_loss")
if trainer.get_meter("train_nll_loss").count > 0:
nll_loss = trainer.get_meter("train_nll_loss")
stats["nll_loss"] = nll_loss
else:
nll_loss = trainer.get_meter("train_loss")
stats["ppl"] = utils.get_perplexity(nll_loss.avg)
stats["wps"] = trainer.get_meter("wps")
stats["ups"] = trainer.get_meter("ups")
stats["wpb"] = trainer.get_meter("wpb")
stats["bsz"] = trainer.get_meter("bsz")
stats["num_updates"] = trainer.get_num_updates()
stats["lr"] = trainer.get_lr()
stats["gnorm"] = trainer.get_meter("gnorm")
stats["clip"] = trainer.get_meter("clip")
stats["oom"] = trainer.get_meter("oom")
if trainer.get_meter("loss_scale") is not None:
stats["loss_scale"] = trainer.get_meter("loss_scale")
stats["wall"] = round(trainer.get_meter("wall").elapsed_time)
stats["train_wall"] = trainer.get_meter("train_wall")
return stats
def validate(args, trainer, task, epoch_itr, subsets, experiment=None):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens_valid,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(), trainer.get_model().max_positions()
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args,
itr,
epoch_itr.epoch,
prefix="valid on '{}' subset".format(subset),
no_progress_bar="simple",
)
# reset validation loss meters
for k in ["valid_loss", "valid_nll_loss"]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ["loss", "nll_loss", "ntokens", "nsentences", "sample_size"]:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer, args, extra_meters)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats, tag=subset, step=trainer.get_num_updates())
if experiment:
experiment.log_metrics(
stats, prefix="validation_{}".format(subset), step=stats["num_updates"]
)
valid_losses.append(
stats[args.best_checkpoint_metric].avg
if args.best_checkpoint_metric == "loss"
else stats[args.best_checkpoint_metric]
)
return valid_losses
def get_valid_stats(trainer, args, extra_meters=None):
stats = collections.OrderedDict()
stats["loss"] = trainer.get_meter("valid_loss")
if trainer.get_meter("valid_nll_loss").count > 0:
nll_loss = trainer.get_meter("valid_nll_loss")
stats["nll_loss"] = nll_loss
else:
nll_loss = stats["loss"]
stats["ppl"] = utils.get_perplexity(nll_loss.avg)
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
current_metric = None
if args.best_checkpoint_metric == "loss":
current_metric = stats["loss"].avg
elif args.best_checkpoint_metric in extra_meters:
current_metric = extra_meters[args.best_checkpoint_metric].avg
elif args.best_checkpoint_metric in stats:
current_metric = stats[args.best_checkpoint_metric]
else:
raise ValueError("best_checkpoint_metric not found in logs")
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, current_metric
)
return stats
def distributed_main(i, args, config=None, start_rank=0):
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = start_rank + i
main(args, config=config, init_distributed=True)
def cli_main():
parser = options.get_training_parser()
parser.add_argument(
"--comet-logging",
action="store_true",
help="Whether to use Comet.ML for logging",
)
args = options.parse_args_and_arch(parser)
logging = getattr(args, "comet_logging", False)
config = None
if logging:
PROJECT = "machine-translation"
if not keyring.get_password("comet", PROJECT):
comet_ml_api_key = getpass("Please enter the comet.ml API key: ")
keyring.set_password("<PASSWORD>", PROJECT, comet_ml_api_key)
else:
comet_ml_api_key = keyring.get_password("co<PASSWORD>", PROJECT)
experiment = Experiment(
api_key=comet_ml_api_key,
project_name="machine-translation",
workspace="machine-translation",
auto_output_logging=None,
)
config = {"api_key": comet_ml_api_key, "experiment_key": experiment.get_key()}
print("Proceeding with Comet.ML logging...")
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, config, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args.device_id, args, config)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_rank = None # set based on device id
if max(args.update_freq) > 1 and args.ddp_backend != "no_c10d":
print("| NOTE: you may get better performance with: --ddp-backend=no_c10d")
torch.multiprocessing.spawn(
fn=distributed_main, args=(args, config), nprocs=args.distributed_world_size
)
else:
# single GPU training
main(args, config=config)
if config:
experiment.end()
if __name__ == "__main__":
cli_main()
| [
"comet_ml.Experiment",
"fairseq.options.parse_args_and_arch",
"torch.cuda.device_count",
"comet_ml.ExistingExperiment",
"torch.cuda.is_available",
"fairseq.options.get_training_parser",
"fairseq.distributed_utils.infer_init_method",
"fairseq.checkpoint_utils.load_checkpoint",
"keyring.set_password",... | [((550, 580), 'fairseq.utils.import_user_module', 'utils.import_user_module', (['args'], {}), '(args)\n', (574, 580), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((1119, 1144), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1133, 1144), True, 'import numpy as np\n'), ((1149, 1177), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1166, 1177), False, 'import torch\n'), ((1284, 1317), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', (['args'], {}), '(args)\n', (1311, 1317), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((1611, 1633), 'fairseq.tasks.setup_task', 'tasks.setup_task', (['args'], {}), '(args)\n', (1627, 1633), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((2747, 2784), 'fairseq.trainer.Trainer', 'Trainer', (['args', 'task', 'model', 'criterion'], {}), '(args, task, model, criterion)\n', (2754, 2784), False, 'from fairseq.trainer import Trainer\n'), ((3140, 3187), 'fairseq.checkpoint_utils.load_checkpoint', 'checkpoint_utils.load_checkpoint', (['args', 'trainer'], {}), '(args, trainer)\n', (3172, 3187), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((3372, 3388), 'fairseq.logging.meters.StopwatchMeter', 'StopwatchMeter', ([], {}), '()\n', (3386, 3388), False, 'from fairseq.logging.meters import AverageMeter, StopwatchMeter\n'), ((5299, 5342), 'fairseq.data.iterators.GroupedIterator', 'iterators.GroupedIterator', (['itr', 'update_freq'], {}), '(itr, update_freq)\n', (5324, 5342), False, 'from fairseq.data import iterators\n'), ((5358, 5448), 'fairseq.progress_bar.build_progress_bar', 'progress_bar.build_progress_bar', (['args', 'itr', 'epoch_itr.epoch'], {'no_progress_bar': '"""simple"""'}), "(args, itr, epoch_itr.epoch, no_progress_bar\n ='simple')\n", (5389, 5448), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((7867, 7892), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7890, 7892), False, 'import collections\n'), ((8171, 8205), 'fairseq.utils.get_perplexity', 'utils.get_perplexity', (['nll_loss.avg'], {}), '(nll_loss.avg)\n', (8191, 8205), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((11403, 11428), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11426, 11428), False, 'import collections\n'), ((11689, 11723), 'fairseq.utils.get_perplexity', 'utils.get_perplexity', (['nll_loss.avg'], {}), '(nll_loss.avg)\n', (11709, 11723), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((12849, 12878), 'fairseq.options.get_training_parser', 'options.get_training_parser', ([], {}), '()\n', (12876, 12878), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((13029, 13064), 'fairseq.options.parse_args_and_arch', 'options.parse_args_and_arch', (['parser'], {}), '(parser)\n', (13056, 13064), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((640, 762), 'comet_ml.ExistingExperiment', 'ExistingExperiment', ([], {'api_key': "config['api_key']", 'previous_experiment': "config['experiment_key']", 
'auto_output_logging': 'None'}), "(api_key=config['api_key'], previous_experiment=config[\n 'experiment_key'], auto_output_logging=None)\n", (658, 762), False, 'from comet_ml import ExistingExperiment, Experiment\n'), ((1025, 1050), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1048, 1050), False, 'import torch\n'), ((1077, 1114), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.device_id'], {}), '(args.device_id)\n', (1098, 1114), False, 'import torch\n'), ((1235, 1275), 'fairseq.distributed_utils.distributed_init', 'distributed_utils.distributed_init', (['args'], {}), '(args)\n', (1269, 1275), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((1327, 1386), 'fairseq.checkpoint_utils.verify_checkpoint_directory', 'checkpoint_utils.verify_checkpoint_directory', (['args.save_dir'], {}), '(args.save_dir)\n', (1371, 1386), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((9116, 9164), 'fairseq.utils.set_torch_seed', 'utils.set_torch_seed', (['args.fixed_validation_seed'], {}), '(args.fixed_validation_seed)\n', (9136, 9164), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((13512, 13647), 'comet_ml.Experiment', 'Experiment', ([], {'api_key': 'comet_ml_api_key', 'project_name': '"""machine-translation"""', 'workspace': '"""machine-translation"""', 'auto_output_logging': 'None'}), "(api_key=comet_ml_api_key, project_name='machine-translation',\n workspace='machine-translation', auto_output_logging=None)\n", (13522, 13647), False, 'from comet_ml import ExistingExperiment, Experiment\n'), ((13897, 13938), 'fairseq.distributed_utils.infer_init_method', 'distributed_utils.infer_init_method', (['args'], {}), '(args)\n', (13932, 13938), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((4231, 4306), 'fairseq.checkpoint_utils.save_checkpoint', 'checkpoint_utils.save_checkpoint', (['args', 'trainer', 'epoch_itr', 'valid_losses[0]'], {}), '(args, trainer, epoch_itr, valid_losses[0])\n', (4263, 4306), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((5510, 5524), 'fairseq.logging.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5522, 5524), False, 'from fairseq.logging.meters import AverageMeter, StopwatchMeter\n'), ((7060, 7135), 'fairseq.checkpoint_utils.save_checkpoint', 'checkpoint_utils.save_checkpoint', (['args', 'trainer', 'epoch_itr', 'valid_losses[0]'], {}), '(args, trainer, epoch_itr, valid_losses[0])\n', (7092, 7135), False, 'from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\n'), ((13207, 13245), 'keyring.get_password', 'keyring.get_password', (['"""comet"""', 'PROJECT'], {}), "('comet', PROJECT)\n", (13227, 13245), False, 'import keyring\n'), ((13278, 13324), 'getpass.getpass', 'getpass', (['"""Please enter the comet.ml API key: """'], {}), "('Please enter the comet.ml API key: ')\n", (13285, 13324), False, 'from getpass import getpass\n'), ((13337, 13398), 'keyring.set_password', 'keyring.set_password', (['"""<PASSWORD>"""', 'PROJECT', 'comet_ml_api_key'], {}), "('<PASSWORD>', PROJECT, comet_ml_api_key)\n", (13357, 13398), False, 'import keyring\n'), ((13444, 13489), 'keyring.get_password', 'keyring.get_password', (['"""co<PASSWORD>"""', 'PROJECT'], {}), "('co<PASSWORD>', PROJECT)\n", (13464, 13489), False, 'import 
keyring\n'), ((14655, 14683), 'random.randint', 'random.randint', (['(10000)', '(20000)'], {}), '(10000, 20000)\n', (14669, 14683), False, 'import random\n'), ((14997, 15106), 'torch.multiprocessing.spawn', 'torch.multiprocessing.spawn', ([], {'fn': 'distributed_main', 'args': '(args, config)', 'nprocs': 'args.distributed_world_size'}), '(fn=distributed_main, args=(args, config),\n nprocs=args.distributed_world_size)\n', (15024, 15106), False, 'import torch\n'), ((10404, 10418), 'fairseq.logging.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (10416, 10418), False, 'from fairseq.logging.meters import AverageMeter, StopwatchMeter\n'), ((14031, 14056), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14054, 14056), False, 'import torch\n'), ((14614, 14639), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14637, 14639), False, 'import torch\n'), ((14358, 14383), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14381, 14383), False, 'import torch\n')] |
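The credential handling in cli_main() follows a cache-or-prompt pattern that is reusable on its own; a minimal sketch, with illustrative service/account names:

from getpass import getpass
import keyring

def get_api_key(service="comet", account="machine-translation"):
    key = keyring.get_password(service, account)   # None if not cached yet
    if key is None:
        key = getpass("Please enter the API key: ")
        keyring.set_password(service, account, key)  # cache for future runs
    return key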
from typing import List
import numpy as np
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.base.modules import Activation
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets
from torchvision.transforms import transforms
from baal import ActiveLearningDataset
pascal_voc_ids = np.array([
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
])
def active_pascal(
path="/tmp",
*args,
transform=transforms.ToTensor(),
test_transform=transforms.ToTensor(),
**kwargs,
):
"""Get active Pascal-VOC 2102 datasets.
Arguments:
path : str
The root folder for the Pascal dataset
Returns:
ActiveLearningDataset
the active learning dataset, training data
Dataset
the evaluation dataset
"""
return (
ActiveLearningDataset(datasets.VOCSegmentation(
path, image_set='train', transform=transform, download=False, *args, **kwargs
)),
datasets.VOCSegmentation(path, image_set='val', transform=test_transform, download=False,
*args, **kwargs),
)
class SegmentationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1):
dropout = nn.Dropout2d(0.5)
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
padding=kernel_size // 2)
upsampling = nn.UpsamplingBilinear2d(
scale_factor=upsampling) if upsampling > 1 else nn.Identity()
activation = Activation(activation)
super().__init__(dropout, conv2d, upsampling, activation)
def add_dropout(model: smp.Unet, decoder_channels: List[int] = (256, 128, 64, 32, 16),
classes=1, activation=None):
seg_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
model.add_module('segmentation_head', seg_head)
model.initialize()
class FocalLoss(nn.Module):
"""
References:
Author: clcarwin
Site https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
"""
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = logpt.data.exp()
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
select = (target != 0).type(torch.LongTensor).to(self.alpha.device)
at = self.alpha.gather(0, select.data.view(-1))
logpt = logpt * at
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
| [
"segmentation_models_pytorch.base.modules.Activation",
"torch.nn.Dropout2d",
"torch.Tensor",
"torch.nn.Conv2d",
"torch.nn.UpsamplingBilinear2d",
"numpy.array",
"torchvision.transforms.transforms.ToTensor",
"torch.nn.functional.log_softmax",
"torch.nn.Identity",
"torchvision.datasets.VOCSegmentatio... | [((358, 682), 'numpy.array', 'np.array', (['[[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, \n 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128,\n 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, \n 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64,\n 128]]'], {}), '([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],\n [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],\n [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128\n ], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0\n ], [0, 64, 128]])\n', (366, 682), True, 'import numpy as np\n'), ((827, 848), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (846, 848), False, 'from torchvision.transforms import transforms\n'), ((873, 894), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (892, 894), False, 'from torchvision.transforms import transforms\n'), ((1383, 1494), 'torchvision.datasets.VOCSegmentation', 'datasets.VOCSegmentation', (['path', '*args'], {'image_set': '"""val"""', 'transform': 'test_transform', 'download': '(False)'}), "(path, *args, image_set='val', transform=\n test_transform, download=False, **kwargs)\n", (1407, 1494), False, 'from torchvision import datasets\n'), ((1687, 1704), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.5)'], {}), '(0.5)\n', (1699, 1704), False, 'from torch import nn\n'), ((1722, 1814), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)'}), '(in_channels, out_channels, kernel_size=kernel_size, padding=\n kernel_size // 2)\n', (1731, 1814), False, 'from torch import nn\n'), ((1978, 2000), 'segmentation_models_pytorch.base.modules.Activation', 'Activation', (['activation'], {}), '(activation)\n', (1988, 2000), False, 'from segmentation_models_pytorch.base.modules import Activation\n'), ((3330, 3357), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (3343, 3357), True, 'from torch.nn import functional as F\n'), ((1247, 1355), 'torchvision.datasets.VOCSegmentation', 'datasets.VOCSegmentation', (['path', '*args'], {'image_set': '"""train"""', 'transform': 'transform', 'download': '(False)'}), "(path, *args, image_set='train', transform=\n transform, download=False, **kwargs)\n", (1271, 1355), False, 'from torchvision import datasets\n'), ((1858, 1906), 'torch.nn.UpsamplingBilinear2d', 'nn.UpsamplingBilinear2d', ([], {'scale_factor': 'upsampling'}), '(scale_factor=upsampling)\n', (1881, 1906), False, 'from torch import nn\n'), ((1943, 1956), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1954, 1956), False, 'from torch import nn\n'), ((2831, 2863), 'torch.Tensor', 'torch.Tensor', (['[alpha, 1 - alpha]'], {}), '([alpha, 1 - alpha])\n', (2843, 2863), False, 'import torch\n'), ((2913, 2932), 'torch.Tensor', 'torch.Tensor', (['alpha'], {}), '(alpha)\n', (2925, 2932), False, 'import torch\n')] |
import pandas as pd
import numpy as np
import json
import csv
import argparse
import re
from collections import OrderedDict
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Converts Covid-19 data tables provided by the RKI "
                    "to a simpler format to fit the model")
    # note: no nargs=1 here -- with nargs=1 argparse would wrap the value in a
    # one-element list, breaking pd.read_csv/df.to_csv below
    parser.add_argument(
        "--source",
        dest="input_csv",
        default="../data/raw/COVID19.csv",
        help="provide the source csv table")
    parser.add_argument(
        "--destination",
        dest="output_csv",
        default="../data/diseases/covid19.csv",
        help="provide the destination file")
args = parser.parse_args()
counties = OrderedDict()
with open("../data/raw/germany_county_shapes.json", "r") as data_file:
shape_data = json.load(data_file)
for idx, val in enumerate(shape_data["features"]):
id_current = val["properties"]["RKI_ID"]
name_current = val["properties"]["RKI_NameDE"]
counties[name_current] = id_current
covid19_data = pd.read_csv(args.input_csv, sep=',')
# this complicated procedure removes timezone information.
regex = re.compile(r"([0-9]+)-([0-9]+)-([0-9]+)T.*")
start_year, start_month, start_day = regex.search(
covid19_data['Meldedatum'].min()).groups()
end_year, end_month, end_day = regex.search(
covid19_data['Meldedatum'].max()).groups()
start_date = pd.Timestamp(
int(start_year), int(start_month), int(start_day))
end_date = pd.Timestamp(int(end_year), int(end_month), int(end_day))
    dates = list(pd.date_range(start_date, end_date))
df = pd.DataFrame(index=dates)
for county_name in counties:
series = np.zeros(len(df), dtype=np.int32)
lk_data = covid19_data[covid19_data['Landkreis'] == county_name]
for (d_id, day) in enumerate(dates):
day_string = "{:04d}-{:02d}-{:02d}T00:00:00.000Z".format(
day.year, day.month, day.day)
cases = np.sum(lk_data[lk_data['Meldedatum']
== day_string]['AnzahlFall'])
if cases > 0:
series[d_id] = cases
df.insert(len(df.columns), counties[county_name], series)
df.to_csv(args.output_csv, sep=",")
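# Editorial sketch of the output (inferred from the code above): the resulting
# CSV has one row per calendar day between the first and last 'Meldedatum' and
# one column per county RKI_ID, each cell holding that county's reported case
# count ('AnzahlFall') for that day.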
| [
"collections.OrderedDict",
"argparse.ArgumentParser",
"re.compile",
"pandas.read_csv",
"json.load",
"numpy.sum",
"pandas.DataFrame",
"pandas.date_range"
] | [((168, 355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts Covid-19 Data Tabels provided by the RKI to a simpler format to fit the model"""'}), "(description=\n 'Converts Covid-19 Data Tabels provided by the RKI to a simpler format to fit the model'\n )\n", (191, 355), False, 'import argparse\n'), ((761, 774), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (772, 774), False, 'from collections import OrderedDict\n'), ((1117, 1153), 'pandas.read_csv', 'pd.read_csv', (['args.input_csv'], {'sep': '""","""'}), "(args.input_csv, sep=',')\n", (1128, 1153), True, 'import pandas as pd\n'), ((1230, 1273), 're.compile', 're.compile', (['"""([0-9]+)-([0-9]+)-([0-9]+)T.*"""'], {}), "('([0-9]+)-([0-9]+)-([0-9]+)T.*')\n", (1240, 1273), False, 'import re\n'), ((1719, 1744), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dates'}), '(index=dates)\n', (1731, 1744), True, 'import pandas as pd\n'), ((871, 891), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (880, 891), False, 'import json\n'), ((1673, 1708), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date)\n', (1686, 1708), True, 'import pandas as pd\n'), ((2083, 2149), 'numpy.sum', 'np.sum', (["lk_data[lk_data['Meldedatum'] == day_string]['AnzahlFall']"], {}), "(lk_data[lk_data['Meldedatum'] == day_string]['AnzahlFall'])\n", (2089, 2149), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""Usage:
convert.py serial_speakers <input_path> <serie_uri> <annotation_path> <annotated_path> [--map]
"""
from pyannote.core import Annotation, Segment, Timeline
import pyannote.database
from Plumcot import Plumcot
import json
from termcolor import colored
import numpy as np
import re
from docopt import docopt
from pathlib import Path
import affinegap
from scipy.optimize import linear_sum_assignment
import warnings
DISTANCE_THRESHOLD = 0.5
NA_VALUES = {'', '<NA>'}
def update_labels(annotation, distances):
"""Tag labels with "?" depending on their distance to the reference
"""
for segment, track, label in annotation.itertracks(yield_label=True):
distance = distances.get(segment)
distance = distance.get(label) if distance else None
distance = distance if distance != "<NA>" else None
if distance:
if distance > DISTANCE_THRESHOLD:
annotation[segment, track] = f"?{label}"
return annotation
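# Illustration of the thresholding above (hypothetical values): with
# DISTANCE_THRESHOLD = 0.5, a track labelled "alice" whose stored distance is
# 0.7 is relabelled "?alice", while a distance of 0.3 (or a missing/"<NA>"
# distance) leaves the label untouched.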
def serial_speakers_to_RTTM(input_path, serie_uri, annotation_path, annotated_path,
do_mapping=False):
input_path, annotation_path, annotated_path = map(Path, (
input_path, annotation_path, annotated_path))
if annotation_path.exists():
raise ValueError(f"""{annotation_path} already exists.
                         You probably don't want to append any more data to it.""")
if annotated_path.exists():
raise ValueError(f"""{annotated_path} already exists.
                         You probably don't want to append any more data to it.""")
annotation_path.parent.mkdir(exist_ok=True)
annotated_path.parent.mkdir(exist_ok=True)
db = Plumcot()
character_uris = db.get_characters(serie_uri, field="character_uri")
character_names = db.get_characters(serie_uri, field="character_name")
flat_character_uris = {uri for episode in character_uris.values() for uri in episode}
with open(input_path, 'r') as file:
serial_speakers = json.load(file)
unique, counts = get_serial_speaker_names(serial_speakers)
if do_mapping:
mapping = map_names(flat_character_uris, unique, counts)
with open(input_path.parent / f'{serie_uri}.name_mapping.json', 'w') as file:
json.dump(mapping, file, indent=4, sort_keys=True)
return mapping
for season in serial_speakers['seasons']:
season_i = season['id']
for episode in season['episodes']:
episode_i = episode['id']
episode_uri = f"{serie_uri}.Season{season_i:02d}.Episode{episode_i:02d}"
print(f"processing {episode_uri}", end='\r')
annotation, annotated = serial_speaker_to_Annotation(episode, episode_uri,
'speaker')
with open(annotation_path, 'a') as file:
annotation.write_rttm(file)
with open(annotated_path, 'a') as file:
annotated.write_uem(file)
def get_serial_speaker_names(serial_speakers):
"""Get all names in serial speaker annotation.
"""
serial_speaker_names = [segment['speaker'] for season in serial_speakers['seasons']
for episode in season['episodes'] for segment in
episode["data"]["speech_segments"]]
unique, counts = np.unique(serial_speaker_names, return_counts=True)
return unique, counts
def unknown_char(char_name, id_ep):
"""Transforms character name into unknown version."""
return f"{char_name}#unknown#{id_ep}"
def map_names(character_uris, serial_speaker_names, counts):
mapping = {}
for name, count in zip(serial_speaker_names, counts):
norm_name = name.lower().replace(" ", "_")
if norm_name in character_uris:
character_uris.remove(norm_name)
mapping[name] = norm_name
else:
mapping[name] = int(count)
while True:
print(sorted(character_uris), '\n\n')
for name, norm_name in mapping.items():
color = 'green' if isinstance(norm_name, str) else 'white'
print(colored(f"{name} -> {norm_name}", color))
request = input("\nType the name of the character which you want "
"to change normalized name (end to save, stop "
"to skip, unk to unknownize every character that didn't match): ")
# Stop and save
if request == "end" or not request:
break
        # Wrong name
        if request not in mapping:
            warnings.warn("This name doesn't match with any characters.\n")
else:
prompt = ("\nType the new character's name "
"(unk for unknown character): ")
new_name = input(prompt)
# Unknown character
if new_name == "unk" or not new_name:
new_name = 'unknown'
mapping[request] = new_name
return mapping
def serial_speaker_to_Annotation(serial_speaker, uri=None, modality='speaker'):
"""
Parameters:
-----------
serial_speaker : `dict`
loaded from a serial speaker JSON as defined
in https://figshare.com/articles/TV_Series_Corpus/3471839
uri (uniform resource identifier) : `str`, optional
which identifies the annotation (e.g. episode number)
Default : None
modality : `str`, optional
modality of the annotation as defined in https://github.com/pyannote/pyannote-core
Returns:
--------
annotation: pyannote `Annotation`
for speaker identification/diarization as defined
in https://github.com/pyannote/pyannote-core
annotated: pyannote `Timeline`
representing the annotated parts of the serial_speaker file
Unknown speakers are not considered as annotated
"""
annotation = Annotation(uri, modality)
not_annotated = Timeline(uri=uri)
for segment in serial_speaker["data"]["speech_segments"]:
time = Segment(segment["start"], segment["end"])
speaker_id = segment['speaker'].replace(" ", "_")
annotation[time, speaker_id] = speaker_id
if speaker_id == 'unknown':
not_annotated.add(time)
end = serial_speaker.get("duration", segment["end"])
annotated = not_annotated.gaps(support=Segment(0.0, end))
return annotation, annotated
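# Minimal usage sketch (hypothetical input): a dict such as
#   {"duration": 10.0, "data": {"speech_segments": [
#       {"start": 0.5, "end": 2.0, "speaker": "Jane Doe"}]}}
# yields an Annotation with one track labelled "Jane_Doe" on [0.5, 2.0] and,
# since no speaker is 'unknown', an annotated Timeline covering [0.0, 10.0].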
def gecko_JSON_to_Annotation(gecko_JSON, uri=None, modality='speaker'):
"""
Parameters:
-----------
gecko_JSON : `dict`
loaded from a Gecko-compliant JSON as defined in xml_to_GeckoJSON
uri (uniform resource identifier) : `str`
which identifies the annotation (e.g. episode number)
Default : None
modality : `str`
modality of the annotation as defined in https://github.com/pyannote/pyannote-core
Returns:
--------
annotation: pyannote `Annotation`
for speaker identification/diarization as defined in https://github.com/pyannote/pyannote-core
must_link: pyannote `Annotation`
User annotation
cannot_link: pyannote `Annotation`
User annotation of parts were the labelled speakers **do not** speak
annotated: pyannote `Timeline`
representing the annotated parts of the gecko_JSON file
"""
annotation = Annotation(uri, modality)
must_link = Annotation(uri, modality)
cannot_link = Annotation(uri, f"non-{modality}")
for monologue in gecko_JSON["monologues"]:
segment = Segment(monologue["start"], monologue["end"])
# '@' defined in https://github.com/hbredin/pyannote-db-plumcot/blob/develop/CONTRIBUTING.md#idepisodetxt
# '+' defined in https://github.com/gong-io/gecko/blob/master/app/geckoModule/constants.js#L35
        speaker_ids = re.split(r"@|\+", monologue["speaker"]["id"])
speaker_ids = set(speaker_ids) - NA_VALUES
for speaker_id in speaker_ids: # most of the time there's only one
annotation[segment, speaker_id] = speaker_id
if monologue["speaker"]["annotators"] > 0:
must_link[segment, speaker_id] = speaker_id
non_ids = monologue["speaker"]["non_id"]
non_ids = set(non_ids) - NA_VALUES
for speaker_id in non_ids:
cannot_link[segment, speaker_id] = speaker_id
annotated = Timeline([Segment(0.0, monologue["end"])], uri)
return annotation, must_link, cannot_link, annotated
def annotation_to_GeckoJSON(annotation, distances={}, colors={}):
"""
Parameters:
-----------
annotation: `pyannote.core.Annotation`
proper pyannote annotation for speaker identification/diarization
distances: `dict`, optional
in-cluster distances between speech features
see `get_distances_per_speaker`
colors: `dict`, optional
speaker id : consistent color
Returns:
--------
gecko_json : a JSON `dict` based on the demo file of https://github.com/gong-io/gecko/blob/master/samples/demo.json
should be written to a file using json.dump
"""
gecko_json = json.loads("""{
"schemaVersion" : "3.1",
"monologues" : [ ]
}""")
for segment, track, label in annotation.itertracks(yield_label=True):
distance = distances.get(label, {}).get(segment)
color = colors.get(label)
gecko_json["monologues"].append(
{
"speaker": {
"id": label,
"color": color,
"distance": distance,
"non_id": [],
"annotators": 0
},
"start": segment.start,
"end": segment.end
})
return gecko_json
if __name__ == '__main__':
args = docopt(__doc__)
input_path = args['<input_path>']
serie_uri = args['<serie_uri>']
annotation_path = args['<annotation_path>']
annotated_path = args['<annotated_path>']
do_mapping = args['--map']
serial_speakers_to_RTTM(input_path, serie_uri, annotation_path, annotated_path,
do_mapping)
| [
"Plumcot.Plumcot",
"json.loads",
"pyannote.core.Segment",
"re.split",
"numpy.unique",
"termcolor.colored",
"json.dump",
"pyannote.core.Timeline",
"json.load",
"pyannote.core.Annotation",
"docopt.docopt"
] | [((1716, 1725), 'Plumcot.Plumcot', 'Plumcot', ([], {}), '()\n', (1723, 1725), False, 'from Plumcot import Plumcot\n'), ((3381, 3432), 'numpy.unique', 'np.unique', (['serial_speaker_names'], {'return_counts': '(True)'}), '(serial_speaker_names, return_counts=True)\n', (3390, 3432), True, 'import numpy as np\n'), ((5899, 5924), 'pyannote.core.Annotation', 'Annotation', (['uri', 'modality'], {}), '(uri, modality)\n', (5909, 5924), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((5945, 5962), 'pyannote.core.Timeline', 'Timeline', ([], {'uri': 'uri'}), '(uri=uri)\n', (5953, 5962), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((7341, 7366), 'pyannote.core.Annotation', 'Annotation', (['uri', 'modality'], {}), '(uri, modality)\n', (7351, 7366), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((7383, 7408), 'pyannote.core.Annotation', 'Annotation', (['uri', 'modality'], {}), '(uri, modality)\n', (7393, 7408), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((7427, 7461), 'pyannote.core.Annotation', 'Annotation', (['uri', 'f"""non-{modality}"""'], {}), "(uri, f'non-{modality}')\n", (7437, 7461), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((9107, 9194), 'json.loads', 'json.loads', (['"""{\n "schemaVersion" : "3.1",\n "monologues" : [ ]\n }"""'], {}), '(\n """{\n "schemaVersion" : "3.1",\n "monologues" : [ ]\n }""")\n', (9117, 9194), False, 'import json\n'), ((9791, 9806), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (9797, 9806), False, 'from docopt import docopt\n'), ((2030, 2045), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2039, 2045), False, 'import json\n'), ((6041, 6082), 'pyannote.core.Segment', 'Segment', (["segment['start']", "segment['end']"], {}), "(segment['start'], segment['end'])\n", (6048, 6082), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((7528, 7573), 'pyannote.core.Segment', 'Segment', (["monologue['start']", "monologue['end']"], {}), "(monologue['start'], monologue['end'])\n", (7535, 7573), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((7813, 7858), 're.split', 're.split', (['"""@|\\\\+"""', "monologue['speaker']['id']"], {}), "('@|\\\\+', monologue['speaker']['id'])\n", (7821, 7858), False, 'import re\n'), ((2291, 2341), 'json.dump', 'json.dump', (['mapping', 'file'], {'indent': '(4)', 'sort_keys': '(True)'}), '(mapping, file, indent=4, sort_keys=True)\n', (2300, 2341), False, 'import json\n'), ((6364, 6381), 'pyannote.core.Segment', 'Segment', (['(0.0)', 'end'], {}), '(0.0, end)\n', (6371, 6381), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((8370, 8400), 'pyannote.core.Segment', 'Segment', (['(0.0)', "monologue['end']"], {}), "(0.0, monologue['end'])\n", (8377, 8400), False, 'from pyannote.core import Annotation, Segment, Timeline\n'), ((4162, 4203), 'termcolor.colored', 'colored', (['f"""{name} -> {norm_name}"""', 'color'], {}), "(f'{name} -> {norm_name}', color)\n", (4169, 4203), False, 'from termcolor import colored\n')] |
# -*- coding: utf-8 -*-
"""
Simple ANALYSIS driver for GrainHill model
"""
from landlab.io import load_params
import numpy as np
import grain_hill_as_class
from importlib import reload
grain_hill_as_class = reload(grain_hill_as_class)
def two_node_diff(a):
"""Calculate and return diffs over two nodes instead of one."""
N = len(a)
return a[2:] - a[:(N-2)]
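# Worked example (editorial, not part of the driver): for
# a = np.array([0., 1., 4., 9., 16.]), two_node_diff(a) returns
# array([ 4.,  8., 12.]), i.e. a[i+2] - a[i] for i = 0..N-3.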
dx = 0.1 # assumed node spacing, m
#DAKOTA stuff: setting input files
input_file = 'inputs.txt' #DAKOTA creates this
#INPUT VARIABLES
# read parameter values from file
params = load_params(input_file)
domain_length = 10.0 ** params['number_of_node_columns']
num_cols = int(np.round(domain_length / (dx * 0.866) + 1))
num_rows = int(np.round(0.5 * domain_length / dx))
params['number_of_node_columns'] = num_cols
params['number_of_node_rows'] = num_rows
params['disturbance_rate'] = 10.0 ** params['disturbance_rate']
params['uplift_interval'] = 10.0 ** params['uplift_interval']
# Calculate run duration
#
# Time for the domain to rise by L/2, where L is # of node cols
print('Domain length:')
print(domain_length)
t1 = params['uplift_interval'] * num_cols
print('Time for domain rise:')
print(t1)
# Time to generate, on average, L/2 disturbance events per column
t2 = num_cols / params['disturbance_rate']
print('Time for L/2 disturbances per column:')
print(t2)
# Take the minimum
tt = min(t1, t2)
# Time to have at least two uplift events
t3 = 2 * params['uplift_interval']
# Take the max
params['run_duration'] = max(tt, t3)
if params['run_duration'] > 580000.0:
print('WARNING: something is wrong')
params['run_duration'] = 1.0
print('Run duration used:')
print(params['run_duration'])
params['plot_interval'] = 1.1 * params['run_duration']
params['output_interval'] = params['run_duration']
print('Running grainhill, params:')
print(params)
# instantiate a GrainHill model
grain_hill = grain_hill_as_class.GrainHill((num_rows, num_cols), **params)
#run the model
grain_hill.run()
# compute and write the results
(elev_profile, soil) = grain_hill.get_profile_and_soil_thickness(grain_hill.grid,
grain_hill.ca.node_state)
max_elev = np.amax(elev_profile)
N = len(elev_profile)
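# Editorial note: 1.73205 ~ sqrt(3) is the horizontal distance (in node
# spacings) spanned by a two-node difference, since hex-lattice columns sit
# 0.866 ~ sqrt(3)/2 node spacings apart (see the num_cols computation above).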
mean_grad_left = np.mean(two_node_diff(elev_profile[:((N + 1) // 2)]) / 1.73205)
mean_grad_right = np.mean(-two_node_diff(elev_profile[((N + 1) // 2):]) / 1.73205)
mean_grad = (mean_grad_left + mean_grad_right) / 2
myfile = open('results.out', 'w')
myfile.write(str(max_elev) + ' ' + str(mean_grad) + '\n')
myfile.close()
| [
"landlab.io.load_params",
"numpy.amax",
"numpy.round"
] | [((499, 522), 'landlab.io.load_params', 'load_params', (['input_file'], {}), '(input_file)\n', (510, 522), False, 'from landlab.io import load_params\n'), ((2131, 2152), 'numpy.amax', 'np.amax', (['elev_profile'], {}), '(elev_profile)\n', (2138, 2152), True, 'import numpy as np\n'), ((596, 638), 'numpy.round', 'np.round', (['(domain_length / (dx * 0.866) + 1)'], {}), '(domain_length / (dx * 0.866) + 1)\n', (604, 638), True, 'import numpy as np\n'), ((655, 689), 'numpy.round', 'np.round', (['(0.5 * domain_length / dx)'], {}), '(0.5 * domain_length / dx)\n', (663, 689), True, 'import numpy as np\n')] |
from sacred import Experiment
import os.path as osp
import os
import numpy as np
import yaml
import cv2
import torch
from torch.utils.data import DataLoader
from tracktor.config import get_output_dir, get_tb_dir
from tracktor.solver import Solver
from tracktor.datasets.factory import Datasets
from tracktor.resnet import resnet50
ex = Experiment()
ex.add_config('experiments/cfgs/siamese.yaml')
Solver = ex.capture(Solver, prefix='siamese.solver')
@ex.automain
def my_main(_config, siamese):
# set all seeds
torch.manual_seed(siamese['seed'])
torch.cuda.manual_seed(siamese['seed'])
np.random.seed(siamese['seed'])
torch.backends.cudnn.deterministic = True
print(_config)
output_dir = osp.join(get_output_dir(siamese['module_name']), siamese['name'])
tb_dir = osp.join(get_tb_dir(siamese['module_name']), siamese['name'])
sacred_config = osp.join(output_dir, 'sacred_config.yaml')
if not osp.exists(output_dir):
os.makedirs(output_dir)
with open(sacred_config, 'w') as outfile:
yaml.dump(_config, outfile, default_flow_style=False)
#########################
# Initialize dataloader #
#########################
print("[*] Initializing Dataloader")
db_train = Datasets(siamese['db_train'], siamese['dataloader'])
db_train = DataLoader(db_train, batch_size=1, shuffle=True)
if siamese['db_val']:
db_val = None
#db_val = DataLoader(db_val, batch_size=1, shuffle=True)
else:
db_val = None
##########################
# Initialize the modules #
##########################
print("[*] Building CNN")
network = resnet50(pretrained=True, **siamese['cnn'])
network.train()
network.cuda()
##################
# Begin training #
##################
print("[*] Solving ...")
# build scheduling like in "In Defense of the Triplet Loss for Person Re-Identification"
# from Hermans et al.
lr = siamese['solver']['optim_args']['lr']
iters_per_epoch = len(db_train)
    # we want to keep lr until iter 15000 and from there to iter 25000 an exponential decay
    l = (lambda epoch: 1 if epoch * iters_per_epoch < 15000
         else 0.001 ** ((epoch * iters_per_epoch - 15000) / (25000 - 15000)))
#else:
# l = None
max_epochs = 25000 // len(db_train.dataset) + 1 if 25000%len(db_train.dataset) else 25000 // len(db_train.dataset)
solver = Solver(output_dir, tb_dir, lr_scheduler_lambda=l)
solver.train(network, db_train, db_val, max_epochs, 100, model_args=siamese['model_args'])
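    # Schedule sketch (editorial): with the lambda above, the effective
    # learning rate stays at its base value until iteration 15000 and then
    # decays exponentially, reaching base_lr * 0.001 at iteration 25000,
    # following Hermans et al., "In Defense of the Triplet Loss for Person
    # Re-Identification".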
| [
"torch.manual_seed",
"tracktor.datasets.factory.Datasets",
"os.path.exists",
"tracktor.resnet.resnet50",
"os.makedirs",
"yaml.dump",
"tracktor.config.get_tb_dir",
"os.path.join",
"sacred.Experiment",
"tracktor.solver.Solver",
"numpy.random.seed",
"tracktor.config.get_output_dir",
"torch.util... | [((339, 351), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (349, 351), False, 'from sacred import Experiment\n'), ((522, 556), 'torch.manual_seed', 'torch.manual_seed', (["siamese['seed']"], {}), "(siamese['seed'])\n", (539, 556), False, 'import torch\n'), ((561, 600), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["siamese['seed']"], {}), "(siamese['seed'])\n", (583, 600), False, 'import torch\n'), ((605, 636), 'numpy.random.seed', 'np.random.seed', (["siamese['seed']"], {}), "(siamese['seed'])\n", (619, 636), True, 'import numpy as np\n'), ((883, 925), 'os.path.join', 'osp.join', (['output_dir', '"""sacred_config.yaml"""'], {}), "(output_dir, 'sacred_config.yaml')\n", (891, 925), True, 'import os.path as osp\n'), ((1250, 1302), 'tracktor.datasets.factory.Datasets', 'Datasets', (["siamese['db_train']", "siamese['dataloader']"], {}), "(siamese['db_train'], siamese['dataloader'])\n", (1258, 1302), False, 'from tracktor.datasets.factory import Datasets\n'), ((1318, 1366), 'torch.utils.data.DataLoader', 'DataLoader', (['db_train'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(db_train, batch_size=1, shuffle=True)\n', (1328, 1366), False, 'from torch.utils.data import DataLoader\n'), ((1651, 1694), 'tracktor.resnet.resnet50', 'resnet50', ([], {'pretrained': '(True)'}), "(pretrained=True, **siamese['cnn'])\n", (1659, 1694), False, 'from tracktor.resnet import resnet50\n'), ((2493, 2542), 'tracktor.solver.Solver', 'Solver', (['output_dir', 'tb_dir'], {'lr_scheduler_lambda': 'l'}), '(output_dir, tb_dir, lr_scheduler_lambda=l)\n', (2499, 2542), False, 'from tracktor.solver import Solver\n'), ((730, 768), 'tracktor.config.get_output_dir', 'get_output_dir', (["siamese['module_name']"], {}), "(siamese['module_name'])\n", (744, 768), False, 'from tracktor.config import get_output_dir, get_tb_dir\n'), ((809, 843), 'tracktor.config.get_tb_dir', 'get_tb_dir', (["siamese['module_name']"], {}), "(siamese['module_name'])\n", (819, 843), False, 'from tracktor.config import get_output_dir, get_tb_dir\n'), ((938, 960), 'os.path.exists', 'osp.exists', (['output_dir'], {}), '(output_dir)\n', (948, 960), True, 'import os.path as osp\n'), ((970, 993), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (981, 993), False, 'import os\n'), ((1048, 1101), 'yaml.dump', 'yaml.dump', (['_config', 'outfile'], {'default_flow_style': '(False)'}), '(_config, outfile, default_flow_style=False)\n', (1057, 1101), False, 'import yaml\n')] |
######################################################################
# #
# Copyright 2009-2020 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
if sys.version_info.major < 3:
from builtins import zip
from builtins import open
from builtins import int
from builtins import round
from builtins import range
from builtins import super
from future import standard_library
standard_library.install_aliases()
# Python 2 doesn't have the pathlib module.
Path = basestring
else:
from pathlib import Path
# Python 3 doesn't have basestring, as unicode is type string
# Python 2 doesn't equate unicode to string, but both are basestring
# Now isinstance(s, basestring) will be True for any python version
basestring = str
import numpy
import datetime
import struct
import itertools
import colorsys
import warnings
import copy as libcopy
from gdspy.polygon import PolygonSet, Polygon
from gdspy.path import FlexPath, RobustPath
from gdspy.label import Label
from gdspy.gdsiiformat import (
_record_reader,
_raw_record_reader,
_eight_byte_real,
_eight_byte_real_to_float,
)
_mpone = numpy.array((-1.0, 1.0))
use_current_library = True
"""
Globally disable add newly-created cells to the current_library.
"""
class Cell(object):
"""
    Collection of polygons, paths, labels and references to other cells.
.. deprecated:: 1.5
The parameter `exclude_from_current` has been deprecated
alongside the use of a global library. It will be removed in a
future version of Gdspy.
Parameters
----------
name : string
The name of the cell.
Attributes
----------
name : string
The name of this cell.
polygons : list of `PolygonSet`
List of cell polygons.
paths : list of `RobustPath` or `FlexPath`
List of cell paths.
labels : list of `Label`
List of cell labels.
references : list of `CellReference` or `CellArray`
List of cell references.
"""
__slots__ = (
"name",
"polygons",
"paths",
"labels",
"references",
"_bb_valid",
"_bounding_box",
)
def __init__(self, name, exclude_from_current=False):
self.name = name
self.polygons = []
self.paths = []
self.labels = []
self.references = []
self._bb_valid = False
self._bounding_box = None
if use_current_library and not exclude_from_current:
import gdspy
gdspy.current_library.add(self, include_dependencies=False)
def __str__(self):
return 'Cell ("{}", {} polygons, {} paths, {} labels, {} references)'.format(
self.name,
len(self.polygons),
len(self.paths),
len(self.labels),
len(self.references),
)
def __iter__(self):
return itertools.chain(self.polygons, self.paths, self.labels, self.references)
def to_gds(self, outfile, multiplier, timestamp=None):
"""
Convert this cell to a GDSII structure.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
structure.
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
"""
now = datetime.datetime.today() if timestamp is None else timestamp
name = self.name
if len(name) % 2 != 0:
name = name + "\0"
outfile.write(
struct.pack(
">2H12h2H",
28,
0x0502,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
4 + len(name),
0x0606,
)
)
outfile.write(name.encode("ascii"))
for polygon in self.polygons:
polygon.to_gds(outfile, multiplier)
for path in self.paths:
path.to_gds(outfile, multiplier)
for label in self.labels:
label.to_gds(outfile, multiplier)
for reference in self.references:
reference.to_gds(outfile, multiplier)
outfile.write(struct.pack(">2H", 4, 0x0700))
def copy(
self,
name,
deep_copy=False,
translation=None,
rotation=None,
scale=None,
x_reflection=False,
):
"""
Create a copy of this cell.
Parameters
----------
name : string
The name of the cell.
deep_copy : bool
If False, the new cell will contain only references to the
existing elements. If True, copies of all elements are also
created. If any transformation is performed, this argument
is automatically set to True.
translation : Numpy array[2]
Amount to translate the cell contents.
rotation : number
Rotation angle (in *radians*).
scale : number
Scaling factor.
x_reflection : bool
Reflect the geometry accros the x axis.
Returns
-------
out : `Cell`
The new copy of this cell.
"""
new_cell = Cell(name)
transform = False
if (
x_reflection
or scale is not None
or rotation is not None
or translation is not None
):
transform = True
deep_copy = True
if not deep_copy:
new_cell.polygons = list(self.polygons)
new_cell.paths = list(self.paths)
new_cell.labels = list(self.labels)
new_cell.references = list(self.references)
return new_cell
new_cell.polygons = libcopy.deepcopy(self.polygons)
new_cell.paths = libcopy.deepcopy(self.paths)
new_cell.labels = libcopy.deepcopy(self.labels)
new_cell.references = [libcopy.copy(ref) for ref in self.references]
if transform:
r = -1 if x_reflection else 1
s = 1 if scale is None else scale
t = 0 if rotation is None else rotation
dx, dy = (0, 0) if translation is None else translation
ct = numpy.cos(t)
st = numpy.sin(t)
for poly in new_cell.polygons:
if x_reflection:
poly.scale(1, -1)
if scale is not None:
poly.scale(scale)
if rotation is not None:
poly.rotate(rotation)
if translation is not None:
poly.translate(dx, dy)
for path in new_cell.paths:
path.transform(translation, rotation, scale, x_reflection)
for lbl in new_cell.labels:
r0 = -1 if lbl.x_reflection is None else 1
s0 = 1 if lbl.magnification is None else lbl.magnification
t0 = 0 if lbl.rotation is None else (lbl.rotation * numpy.pi / 180)
dx0, dy0 = lbl.position
lbl.position = (
dx + s * (dx0 * ct - r * dy0 * st),
dy + s * (dx0 * st + r * dy0 * ct),
)
lbl.rotation = (r * t0 + t) * 180 / numpy.pi
if lbl.rotation == 0:
lbl.rotation = None
lbl.magnification = s * s0
if lbl.magnification == 1:
lbl.magnification = None
lbl.x_reflection = r * r0 < 0
for ref in new_cell.references:
r0 = -1 if ref.x_reflection is None else 1
s0 = 1 if ref.magnification is None else ref.magnification
t0 = 0 if ref.rotation is None else (ref.rotation * numpy.pi / 180)
dx0, dy0 = ref.origin
ref.origin = (
dx + s * (dx0 * ct - r * dy0 * st),
dy + s * (dx0 * st + r * dy0 * ct),
)
ref.rotation = (r * t0 + t) * 180 / numpy.pi
if ref.rotation == 0:
ref.rotation = None
ref.magnification = s * s0
if ref.magnification == 1:
ref.magnification = None
ref.x_reflection = r * r0 < 0
return new_cell
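    # Usage sketch (editorial, not from the original source): requesting any
    # transformation forces a deep copy, e.g.
    #   new_cell = cell.copy("MAIN_COPY", translation=(10, 0))
    # copies all elements even though deep_copy defaults to False.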
def add(self, element):
"""
Add a new element or list of elements to this cell.
Parameters
----------
element : `PolygonSet`, `CellReference`, `CellArray` or iterable
The element or iterable of elements to be inserted in this
cell.
Returns
-------
out : `Cell`
This cell.
"""
if isinstance(element, PolygonSet):
self.polygons.append(element)
elif isinstance(element, RobustPath) or isinstance(element, FlexPath):
self.paths.append(element)
elif isinstance(element, Label):
self.labels.append(element)
elif isinstance(element, CellReference) or isinstance(element, CellArray):
self.references.append(element)
else:
for e in element:
if isinstance(e, PolygonSet):
self.polygons.append(e)
elif isinstance(e, RobustPath) or isinstance(e, FlexPath):
self.paths.append(e)
elif isinstance(e, Label):
self.labels.append(e)
elif isinstance(e, CellReference) or isinstance(e, CellArray):
self.references.append(e)
else:
raise ValueError(
"[GDSPY] Only instances of `PolygonSet`, `FlexPath`, "
"`RobustPath`, `Label`, `CellReference`, and "
"`CellArray` can be added to `Cell`."
)
self._bb_valid = False
return self
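    # Usage sketch (editorial): since `add` returns the cell itself, calls can
    # be chained, e.g. cell.add(gdspy.Rectangle((0, 0), (1, 1))).add(lbl),
    # where `lbl` is any previously created Label.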
def remove_polygons(self, test):
"""
Remove polygons from this cell.
The function or callable `test` is called for each polygon in
the cell. If its return value evaluates to True, the
corresponding polygon is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a polygon should be removed.
The function is called with arguments:
``(points, layer, datatype)``
Returns
-------
out : `Cell`
This cell.
Examples
--------
Remove polygons in layer 1:
>>> cell.remove_polygons(lambda pts, layer, datatype:
... layer == 1)
Remove polygons with negative x coordinates:
>>> cell.remove_polygons(lambda pts, layer, datatype:
... any(pts[:, 0] < 0))
"""
empty = []
for element in self.polygons:
ii = 0
while ii < len(element.polygons):
if test(
element.polygons[ii], element.layers[ii], element.datatypes[ii]
):
element.polygons.pop(ii)
element.layers.pop(ii)
element.datatypes.pop(ii)
else:
ii += 1
if len(element.polygons) == 0:
empty.append(element)
for element in empty:
self.polygons.remove(element)
return self
def remove_paths(self, test):
"""
Remove paths from this cell.
The function or callable `test` is called for each `FlexPath`
or `RobustPath` in the cell. If its return value evaluates to
True, the corresponding label is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a path should be removed.
The function is called with the path as the only argument.
Returns
-------
out : `Cell`
This cell.
"""
ii = 0
while ii < len(self.paths):
if test(self.paths[ii]):
self.paths.pop(ii)
else:
ii += 1
return self
def remove_labels(self, test):
"""
Remove labels from this cell.
The function or callable `test` is called for each label in
the cell. If its return value evaluates to True, the
corresponding label is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a label should be removed.
The function is called with the label as the only argument.
Returns
-------
out : `Cell`
This cell.
Examples
--------
Remove labels in layer 1:
>>> cell.remove_labels(lambda lbl: lbl.layer == 1)
"""
ii = 0
while ii < len(self.labels):
if test(self.labels[ii]):
self.labels.pop(ii)
else:
ii += 1
return self
def area(self, by_spec=False):
"""
Calculate the total area of the elements on this cell, including
cell references and arrays.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the areas
of each individual pair (layer, datatype).
Returns
-------
out : number, dictionary
Area of this cell.
"""
if by_spec:
cell_area = {}
for element in itertools.chain(self.polygons, self.paths, self.references):
element_area = element.area(True)
for ll in element_area.keys():
if ll in cell_area:
cell_area[ll] += element_area[ll]
else:
cell_area[ll] = element_area[ll]
else:
cell_area = 0
for element in itertools.chain(self.polygons, self.paths, self.references):
cell_area += element.area()
return cell_area
def get_layers(self):
"""
Return the set of layers in this cell.
Returns
-------
out : set
Set of the layers used in this cell.
"""
layers = set()
for element in itertools.chain(self.polygons, self.paths):
layers.update(element.layers)
for reference in self.references:
layers.update(reference.ref_cell.get_layers())
for label in self.labels:
layers.add(label.layer)
return layers
def get_datatypes(self):
"""
Return the set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell.
"""
datatypes = set()
for element in itertools.chain(self.polygons, self.paths):
datatypes.update(element.datatypes)
for reference in self.references:
datatypes.update(reference.ref_cell.get_datatypes())
return datatypes
def get_texttypes(self):
"""
Return the set of texttypes in this cell.
Returns
-------
out : set
Set of the texttypes used in this cell.
"""
texttypes = set()
for reference in self.references:
            texttypes.update(reference.ref_cell.get_texttypes())
        for label in self.labels:
            texttypes.add(label.texttype)
        return texttypes
def get_svg_classes(self):
"""
Return the set of classes for the SVG representation of this
cell.
Returns
-------
out0, out1 : sets of 2-tuples
Sets of (layer, datatype) and (layer, texttype) used in
this cell.
"""
ld = set()
lt = set()
for element in itertools.chain(self.polygons, self.paths):
ld.update(zip(element.layers, element.datatypes))
for label in self.labels:
lt.add((label.layer, label.texttype))
for reference in self.references:
ref = reference.ref_cell.get_svg_classes()
ld.update(ref[0])
lt.update(ref[1])
return ld, lt
def get_bounding_box(self):
"""
Calculate the bounding box for this cell.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or None if the cell is empty.
"""
if (
len(self.polygons) == 0
and len(self.paths) == 0
and len(self.references) == 0
):
return None
if not (
self._bb_valid and all(ref._bb_valid for ref in self.get_dependencies(True))
):
bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))
all_polygons = []
for polygon in self.polygons:
all_polygons.extend(polygon.polygons)
for path in self.paths:
all_polygons.extend(path.to_polygonset().polygons)
for reference in self.references:
reference_bb = reference.get_bounding_box()
if reference_bb is not None:
bb[0, 0] = min(bb[0, 0], reference_bb[0, 0])
bb[0, 1] = min(bb[0, 1], reference_bb[0, 1])
bb[1, 0] = max(bb[1, 0], reference_bb[1, 0])
bb[1, 1] = max(bb[1, 1], reference_bb[1, 1])
if len(all_polygons) > 0:
all_points = numpy.concatenate(all_polygons).transpose()
bb[0, 0] = min(bb[0, 0], all_points[0].min())
bb[0, 1] = min(bb[0, 1], all_points[1].min())
bb[1, 0] = max(bb[1, 0], all_points[0].max())
bb[1, 1] = max(bb[1, 1], all_points[1].max())
self._bb_valid = True
self._bounding_box = bb
return numpy.array(self._bounding_box)
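    # Caching note (editorial): the box is recomputed only when _bb_valid is
    # False for this cell or any of its dependencies; Cell.add resets the
    # flag, so repeated queries on an unchanged cell are cheap.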
def get_polygons(self, by_spec=False, depth=None):
"""
Return a list of polygons in this cell.
Parameters
----------
by_spec : bool or tuple
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
If set to a tuple of (layer, datatype), only polygons
with that specification are returned.
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If `by_spec` is True the key will be the
name of this cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
Note
----
Instances of `FlexPath` and `RobustPath` are also included in
the result by computing their polygonal boundary.
"""
if depth is not None and depth < 0:
if not (by_spec is False or by_spec is True):
return []
bb = self.get_bounding_box()
if bb is None:
return {} if by_spec else []
pts = [
numpy.array(
[
(bb[0, 0], bb[0, 1]),
(bb[0, 0], bb[1, 1]),
(bb[1, 0], bb[1, 1]),
(bb[1, 0], bb[0, 1]),
]
)
]
polygons = {self.name: pts} if by_spec else pts
else:
if by_spec is True:
polygons = {}
for polyset in self.polygons:
for ii in range(len(polyset.polygons)):
key = (polyset.layers[ii], polyset.datatypes[ii])
if key in polygons:
polygons[key].append(numpy.array(polyset.polygons[ii]))
else:
polygons[key] = [numpy.array(polyset.polygons[ii])]
for path in self.paths:
path_polygons = path.get_polygons(True)
for kk in path_polygons.keys():
if kk in polygons:
polygons[kk].extend(path_polygons[kk])
else:
polygons[kk] = path_polygons[kk]
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
cell_polygons = reference.get_polygons(True, next_depth)
for kk in cell_polygons.keys():
if kk in polygons:
polygons[kk].extend(cell_polygons[kk])
else:
polygons[kk] = cell_polygons[kk]
elif by_spec is False:
polygons = []
for polyset in self.polygons:
for points in polyset.polygons:
polygons.append(numpy.array(points))
for path in self.paths:
polygons.extend(path.get_polygons())
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
polygons.extend(reference.get_polygons(depth=next_depth))
else:
polygons = []
layer, datatype = by_spec
polygons.extend(
numpy.array(polyset.polygons[ii])
for polyset in self.polygons
for ii in range(len(polyset.polygons))
if polyset.layers[ii] == layer and polyset.datatypes[ii] == datatype
)
for path in self.paths:
                if any(ld == by_spec for ld in zip(path.layers, path.datatypes)):
path_polygons = path.get_polygons(True)
if by_spec in path_polygons:
                        polygons.extend(path_polygons[by_spec])
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
polygons.extend(reference.get_polygons(by_spec, next_depth))
return polygons
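    # Depth semantics sketch (editorial): depth=0 returns this cell's own
    # polygons and paths while collapsing each referenced cell into its
    # bounding box; depth=None (the default) recurses without limit.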
def get_polygonsets(self, depth=None):
"""
Return a list with a copy of the polygons in this cell.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons from.
Returns
-------
out : list of `PolygonSet`
List containing the polygons in this cell and its
references.
"""
polys = libcopy.deepcopy(self.polygons)
if depth is None or depth > 0:
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
polys.extend(reference.get_polygonsets(next_depth))
return polys
def get_paths(self, depth=None):
"""
Return a list with a copy of the paths in this cell.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve paths from.
Returns
-------
out : list of `FlexPath` or `RobustPath`
List containing the paths in this cell and its references.
"""
paths = libcopy.deepcopy(self.paths)
if depth is None or depth > 0:
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
paths.extend(reference.get_paths(next_depth))
return paths
def get_labels(self, depth=None):
"""
Return a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of `Label`
List containing the labels in this cell and its references.
"""
labels = libcopy.deepcopy(self.labels)
if depth is None or depth > 0:
for reference in self.references:
if depth is None:
next_depth = None
else:
next_depth = depth - 1
labels.extend(reference.get_labels(next_depth))
return labels
def get_dependencies(self, recursive=False):
"""
Return a list of the cells included in this cell as references.
Parameters
----------
recursive : bool
If True returns cascading dependencies.
Returns
-------
out : set of `Cell`
List of the cells referenced by this cell.
"""
dependencies = set()
for reference in self.references:
if isinstance(reference.ref_cell, Cell):
if recursive:
dependencies.update(reference.ref_cell.get_dependencies(True))
dependencies.add(reference.ref_cell)
return dependencies
def flatten(self, single_layer=None, single_datatype=None, single_texttype=None):
"""
Convert all references into polygons, paths and labels.
Parameters
----------
        single_layer : integer or None
            If not None, all polygons will be transferred to the
            layer indicated by this number.
        single_datatype : integer or None
            If not None, all polygons will be transferred to the
            datatype indicated by this number.
        single_texttype : integer or None
            If not None, all labels will be transferred to the
            texttype indicated by this number.
Returns
-------
out : `Cell`
This cell.
"""
self.labels = self.get_labels()
        if single_layer is not None and single_texttype is not None:
for lbl in self.labels:
lbl.layer = single_layer
lbl.texttype = single_texttype
elif single_layer is not None:
for lbl in self.labels:
lbl.layer = single_layer
        elif single_texttype is not None:
for lbl in self.labels:
lbl.texttype = single_texttype
self.polygons = self.get_polygonsets()
self.paths = self.get_paths()
if single_layer is not None and single_datatype is not None:
for poly in self.polygons:
poly.layers = [single_layer] * len(poly.polygons)
poly.datatypes = [single_datatype] * len(poly.polygons)
for path in self.paths:
path.layers = [single_layer] * path.n
path.datatypes = [single_datatype] * path.n
elif single_layer is not None:
for poly in self.polygons:
poly.layers = [single_layer] * len(poly.polygons)
for path in self.paths:
path.layers = [single_layer] * path.n
elif single_datatype is not None:
for poly in self.polygons:
poly.datatypes = [single_datatype] * len(poly.polygons)
for path in self.paths:
path.datatypes = [single_datatype] * path.n
self.references = []
return self
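    # Usage sketch (editorial): cell.flatten(single_layer=2, single_datatype=0)
    # moves every polygon and path onto (layer 2, datatype 0) and empties
    # cell.references, leaving a reference-free cell.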
def to_svg(self, outfile, scaling, attributes=""):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
attributes : string
Additional attributes to set for the cell group.
"""
outfile.write('<g id="')
outfile.write(self.name.replace("#", "_"))
outfile.write('" ')
outfile.write(attributes)
outfile.write(">\n")
for polygon in self.polygons:
polygon.to_svg(outfile, scaling)
for path in self.paths:
path.to_svg(outfile, scaling)
for label in self.labels:
label.to_svg(outfile, scaling)
for reference in self.references:
reference.to_svg(outfile, scaling)
outfile.write("</g>\n")
def write_svg(
self,
outfile,
scaling=10,
style=None,
fontstyle=None,
background="#222",
pad="5%",
):
"""
Export this cell to an SVG file.
The dimensions actually written on the GDSII file will be the
dimensions of the objects created times the ratio
unit/precision. For example, if a circle with radius 1.5 is
created and we set `GdsLibrary.unit` to 1.0e-6 (1 um) and
`GdsLibrary.precision` to 1.0e-9` (1 nm), the radius of the
circle will be 1.5 um and the GDSII file will contain the
dimension 1500 nm.
Parameters
----------
outfile : file, string or Path
The file (or path) where the GDSII stream will be written.
It must be opened for writing operations in binary format.
scaling : number
Scaling factor for the geometry.
style : dict or None
Dictionary indexed by (layer, datatype) tuples. Entries
must be dictionaries with CSS key-value pairs for the
presentation attributes of the geometry in that layer and
datatype.
fontstyle : dict or None
Dictionary indexed by (layer, texttype) tuples. Entries
must be dictionaries with CSS key-value pairs for the
presentation attributes of the labels in that layer and
texttype.
background : string or None
String specifying the background color. If None, no
background is inserted.
pad : number or string
Background margin around the cell bounding box. It can
be specified as a percentage of the width or height,
whichever is the largest.
Examples
--------
>>> cell = gdspy.Cell('MAIN')
>>> cell.add(gdspy.Rectangle((0, 0), (10, 10), layer=1))
>>> # Define fill and stroke for layer 1 and datatype 0
>>> mystyle = {(1, 0): {'fill': '#CC00FF',
'stroke': 'black'}}
>>> cell.write_svg('main.svg', style=mystyle)
"""
bb = self.get_bounding_box()
if bb is None:
return
close = True
if hasattr(outfile, "__fspath__"):
outfile = open(outfile.__fspath__(), "w")
elif isinstance(outfile, (basestring, Path)):
outfile = open(outfile, "w")
else:
close = False
if style is None:
style = {}
if fontstyle is None:
fontstyle = {}
bb *= scaling
x = bb[0, 0]
y = -bb[1, 1]
w = bb[1, 0] - bb[0, 0]
h = bb[1, 1] - bb[0, 1]
if background is not None:
if isinstance(pad, basestring):
if pad[-1] == "%":
pad = max(w, h) * float(pad[:-1]) / 100
else:
pad = float(pad)
x -= pad
y -= pad
w += 2 * pad
h += 2 * pad
outfile.write(
"""<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
width="{}" height="{}" viewBox="{} {} {} {}">
<defs>
<style type="text/css">
""".format(
w, h, x, y, w, h
)
)
ldkeys, ltkeys = self.get_svg_classes()
for k in ldkeys:
l, d = k
if k in style:
style_dict = style[k]
else:
c = "rgb({}, {}, {})".format(
*[
int(255 * c + 0.5)
for c in colorsys.hsv_to_rgb(
(l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,
1 - ((l + d) % 8) / 12.0,
1 - (d % 3) / 4.0,
)
]
)
style_dict = {"stroke": c, "fill": c, "fill-opacity": "0.5"}
outfile.write(".l{}d{} {{".format(l, d))
outfile.write(" ".join("{}: {};".format(*x) for x in style_dict.items()))
outfile.write("}\n")
for k in ltkeys:
l, t = k
if k in fontstyle:
style_dict = fontstyle[k]
else:
c = "rgb({}, {}, {})".format(
*[
int(255 * c + 0.5)
for c in colorsys.hsv_to_rgb(
(l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,
1 - ((l + t) % 8) / 12.0,
1 - (t % 3) / 4.0,
)
]
)
style_dict = {"stroke": "none", "fill": c}
outfile.write(".l{}t{} {{".format(l, t))
outfile.write(" ".join("{}: {};".format(*x) for x in style_dict.items()))
outfile.write("}\n")
outfile.write("</style>\n")
for cell in self.get_dependencies(True):
cell.to_svg(outfile, scaling)
outfile.write("</defs>")
if background is not None:
outfile.write(
'<rect x="{}" y="{}" width="{}" height="{}" fill="{}" stroke="none"/>\n'.format(
x, y, w, h, background
)
)
self.to_svg(outfile, scaling, 'transform="scale(1 -1)"')
outfile.write("</svg>")
if close:
outfile.close()
class CellReference(object):
"""
Simple reference to an existing cell.
Parameters
----------
ref_cell : `Cell` or string
The referenced cell or its name.
origin : array-like[2]
Position where the reference is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
Attributes
----------
ref_cell : `Cell` or string
The referenced cell or its name.
origin : array-like[2]
Position where the reference is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
properties : {integer: string} dictionary
Properties for these elements.
"""
__slots__ = (
"ref_cell",
"origin",
"rotation",
"magnification",
"x_reflection",
"properties",
)
def __init__(
self,
ref_cell,
origin=(0, 0),
rotation=None,
magnification=None,
x_reflection=False,
ignore_missing=False,
):
self.origin = origin
self.ref_cell = ref_cell
self.rotation = rotation
self.magnification = magnification
self.x_reflection = x_reflection
self.properties = {}
if not isinstance(self.ref_cell, Cell) and not ignore_missing:
warnings.warn(
"[GDSPY] Cell {0} not found; operations on this "
"CellReference may not work.".format(self.ref_cell),
stacklevel=2,
)
def __str__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellReference ("{0}", at ({1[0]}, {1[1]}), rotation {2}, magnification {3}, reflection {4})'.format(
name, self.origin, self.rotation, self.magnification, self.x_reflection
)
def __repr__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellReference("{0}", ({1[0]}, {1[1]}), {2}, {3}, {4})'.format(
name, self.origin, self.rotation, self.magnification, self.x_reflection
)
def to_gds(self, outfile, multiplier):
"""
Convert this object to a GDSII element.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
"""
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
if len(name) % 2 != 0:
name = name + "\0"
outfile.write(struct.pack(">4H", 4, 0x0A00, 4 + len(name), 0x1206))
outfile.write(name.encode("ascii"))
if (
(self.rotation is not None)
or (self.magnification is not None)
or self.x_reflection
):
word = 0
values = b""
if self.x_reflection:
word += 0x8000
if not (self.magnification is None):
# This flag indicates that the magnification is absolute, not
# relative (not supported).
# word += 0x0004
values += struct.pack(">2H", 12, 0x1B05) + _eight_byte_real(
self.magnification
)
if not (self.rotation is None):
# This flag indicates that the rotation is absolute, not
# relative (not supported).
# word += 0x0002
values += struct.pack(">2H", 12, 0x1C05) + _eight_byte_real(
self.rotation
)
outfile.write(struct.pack(">3H", 6, 0x1A01, word))
outfile.write(values)
outfile.write(
struct.pack(
">2H2l",
12,
0x1003,
int(round(self.origin[0] * multiplier)),
int(round(self.origin[1] * multiplier)),
)
)
if self.properties is not None and len(self.properties) > 0:
size = 0
for attr, value in self.properties.items():
if len(value) % 2 != 0:
value = value + "\0"
outfile.write(
struct.pack(">5H", 6, 0x2B02, attr, 4 + len(value), 0x2C06)
)
outfile.write(value.encode("ascii"))
size += len(value) + 2
if size > 128:
warnings.warn(
"[GDSPY] Properties with size larger than 128 bytes are not "
"officially supported by the GDSII specification. This file "
"might not be compatible with all readers.",
stacklevel=4,
)
outfile.write(struct.pack(">2H", 4, 0x1100))
def to_svg(self, outfile, scaling):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
"""
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
transform = "translate({} {})".format(
scaling * self.origin[0], scaling * self.origin[1]
)
if self.rotation is not None:
transform += " rotate({})".format(self.rotation)
if self.x_reflection:
transform += " scale(1 -1)"
if self.magnification is not None:
transform += " scale({})".format(self.magnification)
outfile.write('<use transform="')
outfile.write(transform)
outfile.write('" xlink:href="#')
outfile.write(name.replace("#", "_"))
outfile.write('"/>\n')
def area(self, by_spec=False):
"""
Calculate the total area of the referenced cell with the
magnification factor included.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the areas
of each individual pair (layer, datatype).
Returns
-------
out : number, dictionary
Area of this cell.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else 0
if self.magnification is None:
return self.ref_cell.area(by_spec)
else:
if by_spec:
factor = self.magnification ** 2
cell_area = self.ref_cell.area(True)
for kk in cell_area.keys():
cell_area[kk] *= factor
return cell_area
else:
return self.ref_cell.area() * self.magnification ** 2
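    # Editorial note: a reference with magnification m reports m**2 times the
    # area of the referenced cell, since area scales quadratically with the
    # linear magnification.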
def get_polygons(self, by_spec=False, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
by_spec : bool or tuple
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
If set to a tuple of (layer, datatype), only polygons
with that specification are returned.
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If `by_spec` is True the key will be the
name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
Note
----
Instances of `FlexPath` and `RobustPath` are also included in
the result by computing their polygonal boundary.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = numpy.array((1, -1))
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
polygons = self.ref_cell.get_polygons(by_spec, depth)
if by_spec is True:
for kk in polygons.keys():
for ii in range(len(polygons[kk])):
if self.x_reflection:
polygons[kk][ii] = polygons[kk][ii] * xrefl
if self.magnification is not None:
polygons[kk][ii] = polygons[kk][ii] * mag
if self.rotation is not None:
polygons[kk][ii] = (
polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st
)
if self.origin is not None:
polygons[kk][ii] = polygons[kk][ii] + orgn
else:
for ii in range(len(polygons)):
if self.x_reflection:
polygons[ii] = polygons[ii] * xrefl
if self.magnification is not None:
polygons[ii] = polygons[ii] * mag
if self.rotation is not None:
polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st
if self.origin is not None:
polygons[ii] = polygons[ii] + orgn
return polygons
def get_polygonsets(self, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons from.
Returns
-------
out : list of `PolygonSet`
List containing the polygons in this cell and its
references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = numpy.array((1, -1))
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
polygonsets = self.ref_cell.get_polygonsets(depth=depth)
for ps in polygonsets:
for ii in range(len(ps.polygons)):
if self.x_reflection:
ps.polygons[ii] = ps.polygons[ii] * xrefl
if self.magnification is not None:
ps.polygons[ii] = ps.polygons[ii] * mag
if self.rotation is not None:
ps.polygons[ii] = (
ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st
)
if self.origin is not None:
ps.polygons[ii] = ps.polygons[ii] + orgn
return polygonsets
def get_paths(self, depth=None):
"""
Return the list of paths created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve paths from.
Returns
-------
out : list of `FlexPath` or `RobustPath`
List containing the paths in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.origin is not None:
trans = numpy.array(self.origin)
else:
trans = None
if self.rotation is not None:
rot = self.rotation * numpy.pi / 180.0
else:
rot = None
return [
p.transform(trans, rot, self.magnification, self.x_reflection)
for p in self.ref_cell.get_paths(depth=depth)
]
def get_labels(self, depth=None):
"""
Return the list of labels created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve labels.
Returns
-------
out : list of `Label`
List containing the labels in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = numpy.array((1, -1))
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
labels = self.ref_cell.get_labels(depth=depth)
for lbl in labels:
if self.x_reflection:
lbl.position = lbl.position * xrefl
if self.magnification is not None:
lbl.position = lbl.position * mag
if self.rotation is not None:
lbl.position = lbl.position * ct + lbl.position[::-1] * st
if self.origin is not None:
lbl.position = lbl.position + orgn
return labels
def get_bounding_box(self):
"""
Calculate the bounding box for this reference.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or None if the cell is empty.
"""
if not isinstance(self.ref_cell, Cell):
return None
deps = self.ref_cell.get_dependencies(True)
for ref in deps:
ref.get_bounding_box()
self.ref_cell.get_bounding_box()
tmp = self.origin
self.origin = None
polygons = self.get_polygons()
self.origin = tmp
if len(polygons) == 0:
bb = None
else:
all_points = numpy.concatenate(polygons).transpose()
bb = numpy.array(
(
(all_points[0].min(), all_points[1].min()),
(all_points[0].max(), all_points[1].max()),
)
)
if self.origin is None or bb is None:
return bb
else:
return bb + numpy.array(
((self.origin[0], self.origin[1]), (self.origin[0], self.origin[1]))
)
def translate(self, dx, dy):
"""
Translate this reference.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `CellReference`
This object.
"""
self.origin = (self.origin[0] + dx, self.origin[1] + dy)
return self
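# A minimal usage sketch, not part of the library: it assumes the
# CellReference constructor signature (ref_cell, origin, ...) used by
# `_create_reference` below, and uses hypothetical cell names.
def _example_reference_translate():
    cell = Cell("REF_DEMO", exclude_from_current=True)
    cell.add(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], 0, 0))
    ref = CellReference(cell, origin=(0, 0))
    ref.translate(5, 5)  # returns `ref`, so calls can be chained
    return ref.get_bounding_box()  # -> [[5., 5.], [6., 6.]]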
class CellArray(object):
"""
Multiple references to an existing cell in an array format.
Parameters
----------
ref_cell : `Cell` or string
The referenced cell or its name.
columns : positive integer
Number of columns in the array.
rows : positive integer
Number of rows in the array.
spacing : array-like[2]
Distances between adjacent columns and adjacent rows.
origin : array-like[2]
Position where the cell is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True, the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
Attributes
----------
ref_cell : `Cell` or string
The referenced cell or its name.
columns : positive integer
Number of columns in the array.
rows : positive integer
Number of rows in the array.
spacing : array-like[2]
Distances between adjacent columns and adjacent rows.
origin : array-like[2]
Position where the cell is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True, the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
properties : {integer: string} dictionary
Properties for these elements.
"""
__slots__ = (
"ref_cell",
"origin",
"rotation",
"magnification",
"x_reflection",
"columns",
"rows",
"spacing",
"properties",
)
def __init__(
self,
ref_cell,
columns,
rows,
spacing,
origin=(0, 0),
rotation=None,
magnification=None,
x_reflection=False,
ignore_missing=False,
):
self.columns = columns
self.rows = rows
self.spacing = spacing
self.origin = origin
self.ref_cell = ref_cell
self.rotation = rotation
self.magnification = magnification
self.x_reflection = x_reflection
self.properties = {}
if not isinstance(self.ref_cell, Cell) and not ignore_missing:
warnings.warn(
"[GDSPY] Cell {0} not found; operations on this "
"CellArray may not work.".format(self.ref_cell),
stacklevel=2,
)
def __str__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellArray ("{0}", {1} x {2}, at ({3[0]}, {3[1]}), spacing {4[0]} x {4[1]}, rotation {5}, magnification {6}, reflection {7})'.format(
name,
self.columns,
self.rows,
self.origin,
self.spacing,
self.rotation,
self.magnification,
self.x_reflection,
)
def __repr__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellArray("{0}", {1}, {2}, ({4[0]}, {4[1]}), ({3[0]}, {3[1]}), {5}, {6}, {7})'.format(
name,
self.columns,
self.rows,
self.origin,
self.spacing,
self.rotation,
self.magnification,
self.x_reflection,
)
def to_gds(self, outfile, multiplier):
"""
Convert this object to a GDSII element.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
"""
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
if len(name) % 2 != 0:
name = name + "\0"
outfile.write(struct.pack(">4H", 4, 0x0B00, 4 + len(name), 0x1206))
outfile.write(name.encode("ascii"))
x2 = self.origin[0] + self.columns * self.spacing[0]
y2 = self.origin[1]
x3 = self.origin[0]
y3 = self.origin[1] + self.rows * self.spacing[1]
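# GDSII AREF geometry: the XY record holds three lattice points -- the
# array origin, the end of the column axis (origin + columns * spacing
# in x), and the end of the row axis (origin + rows * spacing in y).
# (x2, y2) and (x3, y3) are those axis ends, before the optional
# reflection and rotation applied below.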
if (
(self.rotation is not None)
or (self.magnification is not None)
or self.x_reflection
):
word = 0
values = b""
if self.x_reflection:
word += 0x8000
y3 = 2 * self.origin[1] - y3
if not (self.magnification is None):
# This flag indicates that the magnification is absolute, not
# relative (not supported).
# word += 0x0004
values += struct.pack(">2H", 12, 0x1B05) + _eight_byte_real(
self.magnification
)
if not (self.rotation is None):
# This flag indicates that the rotation is absolute, not
# relative (not supported).
# word += 0x0002
sa = numpy.sin(self.rotation * numpy.pi / 180.0)
ca = numpy.cos(self.rotation * numpy.pi / 180.0)
tmp = (
(x2 - self.origin[0]) * ca
- (y2 - self.origin[1]) * sa
+ self.origin[0]
)
y2 = (
(x2 - self.origin[0]) * sa
+ (y2 - self.origin[1]) * ca
+ self.origin[1]
)
x2 = tmp
tmp = (
(x3 - self.origin[0]) * ca
- (y3 - self.origin[1]) * sa
+ self.origin[0]
)
y3 = (
(x3 - self.origin[0]) * sa
+ (y3 - self.origin[1]) * ca
+ self.origin[1]
)
x3 = tmp
values += struct.pack(">2H", 12, 0x1C05) + _eight_byte_real(
self.rotation
)
outfile.write(struct.pack(">3H", 6, 0x1A01, word))
outfile.write(values)
outfile.write(
struct.pack(
">2H2h2H6l",
8,
0x1302,
self.columns,
self.rows,
28,
0x1003,
int(round(self.origin[0] * multiplier)),
int(round(self.origin[1] * multiplier)),
int(round(x2 * multiplier)),
int(round(y2 * multiplier)),
int(round(x3 * multiplier)),
int(round(y3 * multiplier)),
)
)
if self.properties is not None and len(self.properties) > 0:
size = 0
for attr, value in self.properties.items():
if len(value) % 2 != 0:
value = value + "\0"
outfile.write(
struct.pack(">5H", 6, 0x2B02, attr, 4 + len(value), 0x2C06)
)
outfile.write(value.encode("ascii"))
size += len(value) + 2
if size > 128:
warnings.warn(
"[GDSPY] Properties with size larger than 128 bytes are not "
"officially supported by the GDSII specification. This file "
"might not be compatible with all readers.",
stacklevel=4,
)
outfile.write(struct.pack(">2H", 4, 0x1100))
def to_svg(self, outfile, scaling):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
"""
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
transform = "translate({} {})".format(
scaling * self.origin[0], scaling * self.origin[1]
)
if self.rotation is not None:
transform += " rotate({})".format(self.rotation)
if self.x_reflection:
transform += " scale(1 -1)"
mag = (
""
if self.magnification is None
else " scale({})".format(self.magnification)
)
for ii in range(self.columns):
dx = scaling * self.spacing[0] * ii
for jj in range(self.rows):
dy = scaling * self.spacing[1] * jj
outfile.write('<use transform="')
outfile.write(transform)
outfile.write(" translate({} {})".format(dx, dy))
outfile.write(mag)
outfile.write('" xlink:href="#')
outfile.write(name.replace("#", "_"))
outfile.write('"/>\n')
def area(self, by_spec=False):
"""
Calculate the total area of the cell array with the
magnification factor included.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the areas
of each individual pair (layer, datatype).
Returns
-------
out : number, dictionary
Area of this cell.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else 0
if self.magnification is None:
factor = self.columns * self.rows
else:
factor = self.columns * self.rows * self.magnification ** 2
if by_spec:
cell_area = self.ref_cell.area(True)
for kk in cell_area.keys():
cell_area[kk] *= factor
return cell_area
else:
return self.ref_cell.area() * factor
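# Illustrative sketch (hypothetical cell names): the area scales with
# columns * rows * magnification ** 2, so a unit square in a 2 x 3
# array magnified 2x reports 24 units of area:
#     sq = Cell("SQ", exclude_from_current=True)
#     sq.add(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], 0, 0))
#     CellArray(sq, 2, 3, (2, 2), magnification=2.0).area()  # -> 24.0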
def get_polygons(self, by_spec=False, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
by_spec : bool or tuple
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
If set to a tuple of (layer, datatype), only polygons
with that specification are returned.
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If `by_spec` is True the key will be the
name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
Note
----
Instances of `FlexPath` and `RobustPath` are also included in
the result by computing their polygonal boundary.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = numpy.array((1, -1))
cell_polygons = self.ref_cell.get_polygons(by_spec, depth)
if by_spec is True:
polygons = {}
for kk in cell_polygons.keys():
polygons[kk] = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons[kk]:
if self.magnification is not None:
polygons[kk].append(points * mag + spc)
else:
polygons[kk].append(points + spc)
if self.x_reflection:
polygons[kk][-1] = polygons[kk][-1] * xrefl
if self.rotation is not None:
polygons[kk][-1] = (
polygons[kk][-1] * ct
+ polygons[kk][-1][:, ::-1] * st
)
if self.origin is not None:
polygons[kk][-1] = polygons[kk][-1] + orgn
else:
polygons = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons:
if self.magnification is not None:
polygons.append(points * mag + spc)
else:
polygons.append(points + spc)
if self.x_reflection:
polygons[-1] = polygons[-1] * xrefl
if self.rotation is not None:
polygons[-1] = (
polygons[-1] * ct + polygons[-1][:, ::-1] * st
)
if self.origin is not None:
polygons[-1] = polygons[-1] + orgn
return polygons
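# Illustrative sketch (hypothetical cell names): each referenced polygon
# is replicated once per array site, offset by `spacing`, so a single
# triangle in a 2 x 2 array yields four polygons:
#     tri = Cell("TRI", exclude_from_current=True)
#     tri.add(Polygon([(0, 0), (1, 0), (1, 1)], 0, 0))
#     len(CellArray(tri, 2, 2, (5, 5)).get_polygons())  # -> 4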
def get_polygonsets(self, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons.
Returns
-------
out : list of `PolygonSet`
List containing the polygons in this cell and its
references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = numpy.array((1, -1))
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
polygonsets = self.ref_cell.get_polygonsets(depth=depth)
array = []
for i in range(self.columns):
for j in range(self.rows):
spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])
for polygonset in polygonsets:
ps = libcopy.deepcopy(polygonset)
for ii in range(len(ps.polygons)):
if self.magnification is not None:
ps.polygons[ii] = ps.polygons[ii] * mag + spc
else:
ps.polygons[ii] = ps.polygons[ii] + spc
if self.x_reflection:
ps.polygons[ii] = ps.polygons[ii] * xrefl
if self.rotation is not None:
ps.polygons[ii] = (
ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st
)
if self.origin is not None:
ps.polygons[ii] = ps.polygons[ii] + orgn
array.append(ps)
return array
def get_paths(self, depth=None):
"""
Return the list of paths created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve paths.
Returns
-------
out : list of `FlexPath` or `RobustPath`
List containing the paths in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.origin is not None:
trans = numpy.array(self.origin)
else:
trans = None
if self.rotation is not None:
rot = self.rotation * numpy.pi / 180.0
else:
rot = None
paths = self.ref_cell.get_paths(depth=depth)
array = []
for i in range(self.columns):
for j in range(self.rows):
spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])
for path in paths:
array.append(
libcopy.deepcopy(path).transform(
trans, rot, self.magnification, self.x_reflection, spc
)
)
return array
def get_labels(self, depth=None):
"""
Return the list of labels created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve labels.
Returns
-------
out : list of `Label`
List containing the labels in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = numpy.array((1, -1))
cell_labels = self.ref_cell.get_labels(depth=depth)
labels = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for clbl in cell_labels:
lbl = libcopy.deepcopy(clbl)
if self.magnification is not None:
lbl.position = lbl.position * mag + spc
else:
lbl.position = lbl.position + spc
if self.x_reflection:
lbl.position = lbl.position * xrefl
if self.rotation is not None:
lbl.position = lbl.position * ct + lbl.position[::-1] * st
if self.origin is not None:
lbl.position = lbl.position + orgn
labels.append(lbl)
return labels
def get_bounding_box(self):
"""
Calculate the bounding box for this reference.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or None if the cell is empty.
"""
if not isinstance(self.ref_cell, Cell):
return None
key = (
self.ref_cell,
self.rotation,
self.magnification,
self.x_reflection,
self.columns,
self.rows,
self.spacing[0],
self.spacing[1],
)
deps = self.ref_cell.get_dependencies(True)
for ref in deps:
ref.get_bounding_box()
self.ref_cell.get_bounding_box()
tmp = self.origin
self.origin = None
polygons = self.get_polygons()
self.origin = tmp
if len(polygons) == 0:
bb = None
else:
all_points = numpy.concatenate(polygons).transpose()
bb = numpy.array(
(
(all_points[0].min(), all_points[1].min()),
(all_points[0].max(), all_points[1].max()),
)
)
if self.origin is None or bb is None:
return bb
else:
return bb + numpy.array(
((self.origin[0], self.origin[1]), (self.origin[0], self.origin[1]))
)
def translate(self, dx, dy):
"""
Translate this reference.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `CellArray`
This object.
"""
self.origin = (self.origin[0] + dx, self.origin[1] + dy)
return self
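# A minimal sketch of CellArray in use (hypothetical names): build a
# 3 x 2 lattice of a unit square, shift it, and inspect the bounds.
def _example_cell_array():
    sq = Cell("ARRAY_DEMO", exclude_from_current=True)
    sq.add(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], 0, 0))
    arr = CellArray(sq, 3, 2, spacing=(2, 2)).translate(10, 0)
    return arr.get_bounding_box()  # -> [[10., 0.], [15., 3.]]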
class GdsLibrary(object):
"""
GDSII library (file).
Represent a GDSII library containing a dictionary of cells.
Parameters
----------
name : string
Name of the GDSII library. Ignored if a name is defined in
`infile`.
infile : file or string
GDSII stream file (or path) to be imported. It must be opened
for reading in binary format.
kwargs : keyword arguments
Arguments passed to `read_gds`.
Attributes
----------
name : string
Name of the GDSII library.
cells : dictionary
Dictionary of cells in this library, indexed by name.
unit : number
Unit size for the objects in the library (in *meters*).
precision : number
Precision for the dimensions of the objects in the library (in
*meters*).
"""
_record_name = (
"HEADER",
"BGNLIB",
"LIBNAME",
"UNITS",
"ENDLIB",
"BGNSTR",
"STRNAME",
"ENDSTR",
"BOUNDARY",
"PATH",
"SREF",
"AREF",
"TEXT",
"LAYER",
"DATATYPE",
"WIDTH",
"XY",
"ENDEL",
"SNAME",
"COLROW",
"TEXTNODE",
"NODE",
"TEXTTYPE",
"PRESENTATION",
"SPACING",
"STRING",
"STRANS",
"MAG",
"ANGLE",
"UINTEGER",
"USTRING",
"REFLIBS",
"FONTS",
"PATHTYPE",
"GENERATIONS",
"ATTRTABLE",
"STYPTABLE",
"STRTYPE",
"ELFLAGS",
"ELKEY",
"LINKTYPE",
"LINKKEYS",
"NODETYPE",
"PROPATTR",
"PROPVALUE",
"BOX",
"BOXTYPE",
"PLEX",
"BGNEXTN",
"ENDTEXTN",
"TAPENUM",
"TAPECODE",
"STRCLASS",
"RESERVED",
"FORMAT",
"MASK",
"ENDMASKS",
"LIBDIRSIZE",
"SRFNAME",
"LIBSECUR",
)
_unused_records = (0x05, 0x00, 0x01, 0x02, 0x034, 0x38)
_import_anchors = ["nw", "n", "ne", None, "w", "o", "e", None, "sw", "s", "se"]
_pathtype_dict = {0: "flush", 1: "round", 2: "extended"}
__slots__ = "name", "cells", "unit", "precision", "_references"
def __init__(
self, name="library", infile=None, unit=1e-6, precision=1e-9, **kwargs
):
self.name = name
self.cells = {}
self.unit = unit
self.precision = precision
if infile is not None:
self.read_gds(infile, **kwargs)
def __str__(self):
return "GdsLibrary (" + ", ".join([c for c in self.cells]) + ")"
def __iter__(self):
return iter(self.cells.values())
def new_cell(self, name, overwrite_duplicate=False, update_references=True):
"""
Create a new cell and add it to this library.
Parameters
----------
name : string
Name of the cell.
overwrite_duplicate : bool
If True, an existing cell with the same name in the library
will be overwritten.
update_references : bool
If True, `CellReference` and `CellArray` instances from an
overwritten cell are updated to the new one (used only when
`overwrite_duplicate` is True).
Returns
-------
out : `Cell`
The created cell.
Notes
-----
This is equivalent to:
>>> cell = gdspy.Cell(name)
>>> lib.add(cell, False, overwrite_duplicate, update_references)
"""
cell = Cell(name)
self.add(cell, False, overwrite_duplicate, update_references)
return cell
def add(
self,
cell,
include_dependencies=True,
overwrite_duplicate=False,
update_references=True,
):
"""
Add one or more cells to the library.
Parameters
----------
cell : `Cell` or iterable
Cells to be included in the library.
include_dependencies : bool
If True, also add cells referenced by `cell`, recursively.
overwrite_duplicate : bool
If True, an existing cell with the same name in the library
will be overwritten.
update_references : bool
If True, `CellReference` and `CellArray` instances from an
overwritten cell are updated to the new one (used only when
`overwrite_duplicate` is True).
Returns
-------
out : `GdsLibrary`
This object.
"""
if isinstance(cell, Cell):
cell_set = set([cell])
if include_dependencies:
cell_set.update(cell.get_dependencies(True))
else:
cell_set = set(cell)
if include_dependencies:
for c in cell:
cell_set.update(c.get_dependencies(True))
for c in cell_set:
if (
not overwrite_duplicate
and c.name in self.cells
and self.cells[c.name] is not c
):
raise ValueError(
"[GDSPY] Cell named {0} already present in library.".format(c.name)
)
if (
overwrite_duplicate
and update_references
and c.name in self.cells
and self.cells[c.name] is not c
):
self.replace_references(c.name, c)
self.cells[c.name] = c
return self
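# Illustrative sketch (hypothetical cell names): because
# include_dependencies defaults to True, adding a top cell also
# registers everything it references:
#     lib = GdsLibrary(name="demo")
#     inner = Cell("INNER", exclude_from_current=True)
#     outer = Cell("OUTER", exclude_from_current=True)
#     outer.add(CellReference(inner))
#     lib.add(outer)
#     sorted(lib.cells)  # -> ['INNER', 'OUTER']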
def remove(self, cell, remove_references=True):
"""
Remove a cell from the library.
Parameters
----------
cell : `Cell` or string
Cell to be removed from the library.
remove_references : bool
If True, `CellReference` and `CellArray` using the removed
cell will also be removed.
Returns
-------
out : integer
Number of references removed.
"""
if isinstance(cell, Cell):
name = cell.name
else:
name = cell
if name in self.cells:
del self.cells[name]
removed = 0
if remove_references:
for c in self.cells.values():
removed += len(c.references)
c.references = [
ref
for ref in c.references
if name
!= (
ref.ref_cell.name
if isinstance(ref.ref_cell, Cell)
else ref.ref_cell
)
]
removed -= len(c.references)
return removed
def write_gds(self, outfile, cells=None, timestamp=None, binary_cells=None):
"""
Write the GDSII library to a file.
The dimensions actually written on the GDSII file will be the
dimensions of the objects created times the ratio
unit/precision. For example, if a circle with radius 1.5 is
created and we set `GdsLibrary.unit` to 1.0e-6 (1 um) and
`GdsLibrary.precision` to 1.0e-9 (1 nm), the radius of the
circle will be 1.5 um and the GDSII file will contain the
dimension 1500 nm.
Parameters
----------
outfile : file, string or Path
The file (or path) where the GDSII stream will be written.
It must be opened for writing operations in binary format.
cells : iterable
The cells or cell names to be included in the library. If
None, all cells are used.
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
binary_cells : iterable of bytes
Iterable with binary data for GDSII cells (from
`get_binary_cells`, for example).
Notes
-----
Only the specified cells are written. The user is responsible
for ensuring all cell dependencies are satisfied.
"""
close = True
if hasattr(outfile, "__fspath__"):
outfile = open(outfile.__fspath__(), "wb")
elif isinstance(outfile, (basestring, Path)):
outfile = open(outfile, "wb")
else:
close = False
now = datetime.datetime.today() if timestamp is None else timestamp
name = self.name if len(self.name) % 2 == 0 else (self.name + "\0")
outfile.write(
struct.pack(
">5H12h2H",
6,
0x0002,
0x0258,
28,
0x0102,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
4 + len(name),
0x0206,
)
+ name.encode("ascii")
+ struct.pack(">2H", 20, 0x0305)
+ _eight_byte_real(self.precision / self.unit)
+ _eight_byte_real(self.precision)
)
if cells is None:
cells = self.cells.values()
else:
cells = [self.cells.get(c, c) for c in cells]
if len(cells) == 0:
warnings.warn("[GDSPY] Creating a GDSII file without any cells.")
for cell in cells:
cell.to_gds(outfile, self.unit / self.precision)
if binary_cells is not None:
for bc in binary_cells:
outfile.write(bc)
outfile.write(struct.pack(">2H", 4, 0x0400))
if close:
outfile.close()
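# Illustrative sketch (file name hypothetical): write a single-cell
# library to disk with the default 1 um / 1 nm units:
#     lib = GdsLibrary()
#     top = lib.new_cell("TOP")
#     top.add(Polygon([(0, 0), (2, 0), (2, 1), (0, 1)], 1, 0))
#     lib.write_gds("example.gds")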
def read_gds(
self,
infile,
units="skip",
rename={},
rename_template="{name}",
layers={},
datatypes={},
texttypes={},
):
"""
Read a GDSII file into this library.
Parameters
----------
infile : file, string or Path
GDSII stream file (or path) to be imported. It must be
opened for reading in binary format.
units : {'convert', 'import', 'skip'}
Controls how to scale and use the units in the imported
file. 'convert': the imported geometry is scaled to
this library's units. 'import': the unit and precision in
this library are replaced by those from the imported file.
'skip': the imported geometry is not scaled and units
are not replaced; the geometry is imported in the *user
units* of the file.
rename : dictionary
Dictionary used to rename the imported cells. Keys and
values must be strings.
rename_template : string
Template string used to rename the imported cells. Applied
only if the cell name is not in the `rename` dictionary.
Examples: 'prefix-{name}', '{name}-suffix'
layers : dictionary
Dictionary used to convert the layers in the imported cells.
Keys and values must be integers.
datatypes : dictionary
Dictionary used to convert the datatypes in the imported
cells. Keys and values must be integers.
texttypes : dictionary
Dictionary used to convert the text types in the imported
cells. Keys and values must be integers.
Returns
-------
out : `GdsLibrary`
This object.
Notes
-----
Not all features from the GDSII specification are currently
supported. A warning will be produced if any unsupported
features are found in the imported file.
"""
self._references = []
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
emitted_warnings = []
kwargs = {}
create_element = None
factor = 1
cell = None
properties = {}
attr = -1
for record in _record_reader(infile):
# LAYER
if record[0] == 0x0D:
kwargs["layer"] = layers.get(record[1][0], record[1][0])
# DATATYPE or BOXTYPE
elif record[0] == 0x0E or record[0] == 0x2E:
kwargs["datatype"] = datatypes.get(record[1][0], record[1][0])
# TEXTTYPE
elif record[0] == 0x16:
kwargs["texttype"] = texttypes.get(record[1][0], record[1][0])
# XY
elif record[0] == 0x10:
if "xy" in kwargs:
kwargs["xy"] = numpy.concatenate((kwargs["xy"], factor * record[1]))
else:
kwargs["xy"] = factor * record[1]
# WIDTH
elif record[0] == 0x0F:
kwargs["width"] = factor * abs(record[1][0])
if record[1][0] < 0:
kwargs["width_transform"] = False
# ENDEL
elif record[0] == 0x11:
if create_element is not None:
el = create_element(**kwargs)
if len(properties) > 0:
el.properties = properties
properties = {}
cell.add(el)
create_element = None
kwargs = {}
# BOUNDARY
elif record[0] == 0x08:
create_element = self._create_polygon
# PATH
elif record[0] == 0x09:
create_element = self._create_path
# BOX
elif record[0] == 0x2D:
create_element = self._create_polygon
if record[0] not in emitted_warnings:
warnings.warn(
"[GDSPY] GDSII elements of type BOX are imported as polygons.",
stacklevel=2,
)
emitted_warnings.append(record[0])
# TEXT
elif record[0] == 0x0C:
create_element = self._create_label
# SNAME
elif record[0] == 0x12:
if record[1] in rename:
name = rename[record[1]]
else:
name = rename_template.format(name=record[1])
kwargs["ref_cell"] = name
# COLROW
elif record[0] == 0x13:
kwargs["columns"] = record[1][0]
kwargs["rows"] = record[1][1]
# STRANS
elif record[0] == 0x1A:
kwargs["x_reflection"] = (int(record[1][0]) & 0x8000) > 0
if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings:
warnings.warn(
"[GDSPY] Absolute magnification or rotation of "
"references is not supported. Transformations "
"will be interpreted as relative.",
stacklevel=2,
)
emitted_warnings.append(record[0])
# MAG
elif record[0] == 0x1B:
kwargs["magnification"] = record[1][0]
# ANGLE
elif record[0] == 0x1C:
kwargs["rotation"] = record[1][0]
# SREF
elif record[0] == 0x0A:
create_element = self._create_reference
# AREF
elif record[0] == 0x0B:
create_element = self._create_array
# STRNAME
elif record[0] == 0x06:
if record[1] in rename:
name = rename[record[1]]
else:
name = rename_template.format(name=record[1])
cell = Cell(name, exclude_from_current=True)
self.cells[name] = cell
# STRING
elif record[0] == 0x19:
kwargs["text"] = record[1]
# ENDSTR
elif record[0] == 0x07:
cell = None
# UNITS
elif record[0] == 0x03:
if units == "skip":
factor = record[1][0]
elif units == "import":
self.unit = record[1][1] / record[1][0]
self.precision = record[1][1]
factor = record[1][0]
elif units == "convert":
factor = record[1][1] / self.unit
else:
raise ValueError(
"[GDSPY] units must be one of 'convert', 'import' or 'skip'."
)
# LIBNAME
elif record[0] == 0x02:
self.name = record[1]
# PRESENTATION
elif record[0] == 0x17:
kwargs["anchor"] = GdsLibrary._import_anchors[
int(record[1][0]) & 0x000F
]
# PATHTYPE
elif record[0] == 0x21:
kwargs["ends"] = GdsLibrary._pathtype_dict.get(record[1][0], "extended")
# BGNEXTN
elif record[0] == 0x30:
kwargs["bgnextn"] = factor * record[1][0]
# ENDEXTN
elif record[0] == 0x31:
kwargs["endextn"] = factor * record[1][0]
# ENDLIB
elif record[0] == 0x04:
for ref in self._references:
if ref.ref_cell in self.cells:
ref.ref_cell = self.cells[ref.ref_cell]
# PROPATTR
elif record[0] == 0x2B:
attr = record[1][0]
# PROPVALUE
elif record[0] == 0x2C:
properties[attr] = record[1]
# Not supported
elif (
record[0] not in emitted_warnings
and record[0] not in GdsLibrary._unused_records
):
warnings.warn(
"[GDSPY] Record type {0} ({1:02X}) is not supported.".format(
GdsLibrary._record_name[record[0]], record[0]
),
stacklevel=2,
)
emitted_warnings.append(record[0])
if close:
infile.close()
return self
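# Illustrative sketch (file name hypothetical): import a file, scaling
# its geometry into this library's units and prefixing imported cell
# names to avoid clashes with existing ones:
#     lib = GdsLibrary(unit=1e-6, precision=1e-9)
#     lib.read_gds("example.gds", units="convert",
#                  rename_template="imported_{name}")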
def _create_polygon(self, layer, datatype, xy):
return Polygon(xy[:-2].reshape((xy.size // 2 - 1, 2)), layer, datatype)
def _create_path(self, **kwargs):
xy = kwargs.pop("xy")
if "bgnextn" in kwargs or "endextn" in kwargs:
kwargs["ends"] = (kwargs.pop("bgnextn", 0), kwargs.pop("endextn", 0))
kwargs["points"] = xy.reshape((xy.size // 2, 2))
kwargs["gdsii_path"] = True
return FlexPath(**kwargs)
def _create_label(self, xy, width=None, ends=None, **kwargs):
kwargs["position"] = xy
return Label(**kwargs)
def _create_reference(self, **kwargs):
kwargs["origin"] = kwargs.pop("xy")
kwargs["ignore_missing"] = True
ref = CellReference(**kwargs)
ref.ref_cell = kwargs["ref_cell"]
self._references.append(ref)
return ref
def _create_array(self, **kwargs):
xy = kwargs.pop("xy")
kwargs["origin"] = xy[0:2]
if "x_reflection" in kwargs:
if "rotation" in kwargs:
sa = -numpy.sin(kwargs["rotation"] * numpy.pi / 180.0)
ca = numpy.cos(kwargs["rotation"] * numpy.pi / 180.0)
x2 = (xy[2] - xy[0]) * ca - (xy[3] - xy[1]) * sa + xy[0]
y3 = (xy[4] - xy[0]) * sa + (xy[5] - xy[1]) * ca + xy[1]
else:
x2 = xy[2]
y3 = xy[5]
if kwargs["x_reflection"]:
y3 = 2 * xy[1] - y3
kwargs["spacing"] = (
(x2 - xy[0]) / kwargs["columns"],
(y3 - xy[1]) / kwargs["rows"],
)
else:
kwargs["spacing"] = (
(xy[2] - xy[0]) / kwargs["columns"],
(xy[5] - xy[1]) / kwargs["rows"],
)
kwargs["ignore_missing"] = True
ref = CellArray(**kwargs)
ref.ref_cell = kwargs["ref_cell"]
self._references.append(ref)
return ref
def top_level(self):
"""
Output the top level cells from the GDSII data.
Top level cells are those that are not referenced by any other
cells.
Returns
-------
out : list
List of top level cells.
"""
top = set(self)
for cell in self:
top.difference_update(cell.get_dependencies())
return list(top)
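# Illustrative sketch (hypothetical cell names): only unreferenced
# cells are top level:
#     lib = GdsLibrary()
#     child = lib.new_cell("CHILD")
#     parent = lib.new_cell("PARENT")
#     parent.add(CellReference(child))
#     [c.name for c in lib.top_level()]  # -> ['PARENT']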
def rename_cell(self, cell, name, update_references=True):
"""
Rename an existing cell in the library.
Parameters
----------
cell : `Cell` or string
Cell to be renamed. It must be present in the library.
name : string
New name for the cell. It cannot be present in the library.
update_references : bool
If True, replace references using the old name with the new
cell.
Returns
-------
out : integer
Number of updated references.
"""
if isinstance(cell, Cell):
old_name = cell.name
if old_name not in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} not present in library.".format(old_name)
)
if self.cells[old_name] is not cell:
raise ValueError(
"[GDSPY] Cell named {0} doesn't match library's.".format(old_name)
)
else:
old_name = cell
if old_name not in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} not present in library.".format(old_name)
)
cell = self.cells[old_name]
if name in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} already present in library. "
"Use `add` to overwrite cells.".format(name)
)
del self.cells[old_name]
self.cells[name] = cell
cell.name = name
if update_references:
return self.replace_references(old_name, cell)
return 0
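# Illustrative sketch (hypothetical cell names): a reference held by
# name (a string) is rebound to the renamed cell, so one replacement
# is reported:
#     lib = GdsLibrary()
#     a = lib.new_cell("A")
#     b = lib.new_cell("B")
#     b.add(CellReference("A", ignore_missing=True))
#     lib.rename_cell(a, "A2")  # -> 1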
def replace_references(self, old_cell, new_cell):
"""
Replace cells in all references in the library.
All `CellReference` and `CellArray` using the `old_cell` are
updated to reference `new_cell`. Matching with `old_cell` is
by name only.
Parameters
----------
old_cell : `Cell` or string
Cell to be replaced.
new_cell : `Cell` or string
Replacement cell. If the cell name is passed and it is
present in the library, the actual cell is used instead.
Returns
-------
out : integer
Number of replacements.
"""
if isinstance(old_cell, Cell):
old_name = old_cell.name
else:
old_name = old_cell
if not isinstance(new_cell, Cell) and new_cell in self.cells:
new_cell = self.cells[new_cell]
replacements = 0
for cell in self.cells.values():
for ref in cell.references:
if isinstance(ref.ref_cell, Cell):
if ref.ref_cell.name == old_name:
ref.ref_cell = new_cell
replacements += 1
elif ref.ref_cell == old_name:
ref.ref_cell = new_cell
replacements += 1
return replacements
def extract(self, cell, overwrite_duplicate=False):
"""
Extract a cell from this GDSII file and include it in the
current global library, including referenced dependencies.
.. deprecated:: 1.5
`extract` is deprecated and will be removed in a future
version of Gdspy. Gdspy no longer uses a global library.
Parameters
----------
cell : `Cell` or string
Cell or name of the cell to be extracted from the imported
file. Referenced cells will be automatically extracted as
well.
overwrite_duplicate : bool
If True an existing cell with the same name in the current
global library will be overwritten.
Returns
-------
out : `Cell`
The extracted cell.
Notes
-----
`CellReference` or `CellArray` instances that referred to an
overwritten cell are not automatically updated.
"""
warnings.warn(
"[GDSPY] extract and the use of the global library is deprecated.",
category=DeprecationWarning,
stacklevel=2,
)
import gdspy
cell = self.cells.get(cell, cell)
gdspy.current_library.add(
cell, include_dependencies=True, overwrite_duplicate=overwrite_duplicate
)
return cell
class GdsWriter(object):
"""
GDSII stream library writer.
The dimensions actually written on the GDSII file will be the
dimensions of the objects created times the ratio unit/precision.
For example, if a circle with radius 1.5 is created and we set
`unit` to 1.0e-6 (1 um) and `precision` to 1.0e-9 (1 nm), the radius
of the circle will be 1.5 um and the GDSII file will contain the
dimension 1500 nm.
Parameters
----------
outfile : file, string or Path
The file (or path) where the GDSII stream will be written. It
must be opened for writing operations in binary format.
name : string
Name of the GDSII library (file).
unit : number
Unit size for the objects in the library (in *meters*).
precision : number
Precision for the dimensions of the objects in the library (in
*meters*).
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
Notes
-----
This class can be used for incremental output of the geometry in
case the complete layout is too large to be kept in memory all at
once.
Examples
--------
>>> writer = gdspy.GdsWriter('out-file.gds', unit=1.0e-6,
... precision=1.0e-9)
>>> for i in range(10):
... cell = gdspy.Cell('C{}'.format(i), True)
... # Add the contents of this cell...
... writer.write_cell(cell)
... # Clear the memory: erase Cell objects and any other objects
... # that won't be needed.
... del cell
>>> writer.close()
"""
__slots__ = "_outfile", "_close", "_res"
def __init__(
self, outfile, name="library", unit=1.0e-6, precision=1.0e-9, timestamp=None
):
self._close = True
if hasattr(outfile, "__fspath__"):
self._outfile = open(outfile.__fspath__(), "wb")
elif isinstance(outfile, (basestring, Path)):
self._outfile = open(outfile, "wb")
else:
self._outfile = outfile
self._close = False
self._res = unit / precision
now = datetime.datetime.today() if timestamp is None else timestamp
if len(name) % 2 != 0:
name = name + "\0"
self._outfile.write(
struct.pack(
">5H12h2H",
6,
0x0002,
0x0258,
28,
0x0102,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
4 + len(name),
0x0206,
)
+ name.encode("ascii")
+ struct.pack(">2H", 20, 0x0305)
+ _eight_byte_real(precision / unit)
+ _eight_byte_real(precision)
)
def write_cell(self, cell, timestamp=None):
"""
Write the specified cell to the file.
Parameters
----------
cell : `Cell`
Cell to be written.
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
Notes
-----
Only the specified cell is written. Dependencies must be
manually included.
Returns
-------
out : `GdsWriter`
This object.
"""
cell.to_gds(self._outfile, self._res, timestamp)
return self
def write_binary_cells(self, binary_cells):
"""
Write the specified binary cells to the file.
Parameters
----------
binary_cells : iterable of bytes
Iterable with binary data for GDSII cells (from
`get_binary_cells`, for example).
Returns
-------
out : `GdsWriter`
This object.
"""
for bc in binary_cells:
self._outfile.write(bc)
return self
def close(self):
"""
Finalize the GDSII stream library.
"""
self._outfile.write(struct.pack(">2H", 4, 0x0400))
if self._close:
self._outfile.close()
def get_gds_units(infile):
"""
Return the unit and precision used in the GDS stream file.
Parameters
----------
infile : file, string or Path
GDSII stream file to be queried.
Returns
-------
out : 2-tuple
Return ``(unit, precision)`` from the file.
"""
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
unit = precision = None
for rec_type, data in _raw_record_reader(infile):
# UNITS
if rec_type == 0x03:
db_user = _eight_byte_real_to_float(data[4:12])
db_meters = _eight_byte_real_to_float(data[12:])
unit = db_meters / db_user
precision = db_meters
break
if close:
infile.close()
return (unit, precision)
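# A minimal probe sketch (hypothetical file name): only the UNITS
# record is read, so this is cheap even for large layouts.
def _example_probe_units(path="layout.gds"):
    unit, precision = get_gds_units(path)
    return unit, precision  # e.g. (1e-06, 1e-09)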
def get_binary_cells(infile):
"""
Load all cells from a GDSII stream file in binary format.
Parameters
----------
infile : file, string, or Path
GDSII stream file (or path) to be loaded. It must be opened for
reading in binary format.
Returns
-------
out : dictionary
Dictionary of binary cell representations indexed by name.
Notes
-----
The returned cells inherit the units of the loaded file. If they
are used in a new library, the new library must use compatible
units.
"""
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
cells = {}
name = None
cell_data = None
for rec_type, data in _raw_record_reader(infile):
# BGNSTR
if rec_type == 0x05:
cell_data = [data]
# STRNAME
elif rec_type == 0x06:
cell_data.append(data)
if str is not bytes:
if data[-1] == 0:
name = data[4:-1].decode("ascii")
else:
name = data[4:].decode("ascii")
else:
if data[-1] == "\0":
name = data[4:-1]
else:
name = data[4:]
# ENDSTR
elif rec_type == 0x07:
cell_data.append(data)
cells[name] = b"".join(cell_data)
cell_data = None
elif cell_data is not None:
cell_data.append(data)
if close:
infile.close()
return cells
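# A minimal merge sketch (hypothetical file names): binary cells are
# copied verbatim, so the destination library must keep the source
# units; write_gds warns about having no regular cells, which is
# expected here.
def _example_merge_binary(src="in.gds", dst="merged.gds"):
    unit, precision = get_gds_units(src)
    lib = GdsLibrary(unit=unit, precision=precision)
    lib.write_gds(dst, binary_cells=get_binary_cells(src).values())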
| [
"itertools.chain",
"gdspy.path.FlexPath",
"colorsys.hsv_to_rgb",
"numpy.array",
"builtins.range",
"copy.deepcopy",
"numpy.sin",
"datetime.datetime.today",
"copy.copy",
"gdspy.label.Label",
"gdspy.gdsiiformat._eight_byte_real",
"gdspy.gdsiiformat._raw_record_reader",
"numpy.concatenate",
"w... | [((1723, 1747), 'numpy.array', 'numpy.array', (['(-1.0, 1.0)'], {}), '((-1.0, 1.0))\n', (1734, 1747), False, 'import numpy\n'), ((970, 1004), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (1002, 1004), False, 'from future import standard_library\n'), ((103314, 103340), 'gdspy.gdsiiformat._raw_record_reader', '_raw_record_reader', (['infile'], {}), '(infile)\n', (103332, 103340), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((104529, 104555), 'gdspy.gdsiiformat._raw_record_reader', '_raw_record_reader', (['infile'], {}), '(infile)\n', (104547, 104555), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((3485, 3557), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths', 'self.labels', 'self.references'], {}), '(self.polygons, self.paths, self.labels, self.references)\n', (3500, 3557), False, 'import itertools\n'), ((6705, 6736), 'copy.deepcopy', 'libcopy.deepcopy', (['self.polygons'], {}), '(self.polygons)\n', (6721, 6736), True, 'import copy as libcopy\n'), ((6762, 6790), 'copy.deepcopy', 'libcopy.deepcopy', (['self.paths'], {}), '(self.paths)\n', (6778, 6790), True, 'import copy as libcopy\n'), ((6817, 6846), 'copy.deepcopy', 'libcopy.deepcopy', (['self.labels'], {}), '(self.labels)\n', (6833, 6846), True, 'import copy as libcopy\n'), ((15388, 15430), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths'], {}), '(self.polygons, self.paths)\n', (15403, 15430), False, 'import itertools\n'), ((15923, 15965), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths'], {}), '(self.polygons, self.paths)\n', (15938, 15965), False, 'import itertools\n'), ((16946, 16988), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths'], {}), '(self.polygons, self.paths)\n', (16961, 16988), False, 'import itertools\n'), ((19038, 19069), 'numpy.array', 'numpy.array', (['self._bounding_box'], {}), '(self._bounding_box)\n', (19049, 19069), False, 'import numpy\n'), ((24268, 24299), 'copy.deepcopy', 'libcopy.deepcopy', (['self.polygons'], {}), '(self.polygons)\n', (24284, 24299), True, 'import copy as libcopy\n'), ((25074, 25102), 'copy.deepcopy', 'libcopy.deepcopy', (['self.paths'], {}), '(self.paths)\n', (25090, 25102), True, 'import copy as libcopy\n'), ((25857, 25886), 'copy.deepcopy', 'libcopy.deepcopy', (['self.labels'], {}), '(self.labels)\n', (25873, 25886), True, 'import copy as libcopy\n'), ((60620, 60639), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (60625, 60639), False, 'from builtins import range\n'), ((66943, 66962), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (66948, 66962), False, 'from builtins import range\n'), ((68831, 68850), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (68836, 68850), False, 'from builtins import range\n'), ((70295, 70314), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (70300, 70314), False, 'from builtins import range\n'), ((85340, 85362), 'gdspy.gdsiiformat._record_reader', '_record_reader', (['infile'], {}), '(infile)\n', (85354, 85362), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((91994, 92012), 'gdspy.path.FlexPath', 'FlexPath', ([], {}), '(**kwargs)\n', (92002, 92012), False, 'from gdspy.path import FlexPath, 
RobustPath\n'), ((92127, 92142), 'gdspy.label.Label', 'Label', ([], {}), '(**kwargs)\n', (92132, 92142), False, 'from gdspy.label import Label\n'), ((97988, 98121), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] extract and the use of the global library is deprecated."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), "(\n '[GDSPY] extract and the use of the global library is deprecated.',\n category=DeprecationWarning, stacklevel=2)\n", (98001, 98121), False, 'import warnings\n'), ((98232, 98335), 'gdspy.current_library.add', 'gdspy.current_library.add', (['cell'], {'include_dependencies': '(True)', 'overwrite_duplicate': 'overwrite_duplicate'}), '(cell, include_dependencies=True,\n overwrite_duplicate=overwrite_duplicate)\n', (98257, 98335), False, 'import gdspy\n'), ((3117, 3176), 'gdspy.current_library.add', 'gdspy.current_library.add', (['self'], {'include_dependencies': '(False)'}), '(self, include_dependencies=False)\n', (3142, 3176), False, 'import gdspy\n'), ((4056, 4081), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4079, 4081), False, 'import datetime\n'), ((5125, 5152), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(1792)'], {}), "('>2H', 4, 1792)\n", (5136, 5152), False, 'import struct\n'), ((6878, 6895), 'copy.copy', 'libcopy.copy', (['ref'], {}), '(ref)\n', (6890, 6895), True, 'import copy as libcopy\n'), ((7172, 7184), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (7181, 7184), False, 'import numpy\n'), ((7202, 7214), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (7211, 7214), False, 'import numpy\n'), ((14608, 14667), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths', 'self.references'], {}), '(self.polygons, self.paths, self.references)\n', (14623, 14667), False, 'import itertools\n'), ((15014, 15073), 'itertools.chain', 'itertools.chain', (['self.polygons', 'self.paths', 'self.references'], {}), '(self.polygons, self.paths, self.references)\n', (15029, 15073), False, 'import itertools\n'), ((17906, 17957), 'numpy.array', 'numpy.array', (['((1e+300, 1e+300), (-1e+300, -1e+300))'], {}), '(((1e+300, 1e+300), (-1e+300, -1e+300)))\n', (17917, 17957), False, 'import numpy\n'), ((41164, 41191), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(4352)'], {}), "('>2H', 4, 4352)\n", (41175, 41191), False, 'import struct\n'), ((44499, 44542), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (44508, 44542), False, 'import numpy\n'), ((44663, 44683), 'numpy.array', 'numpy.array', (['(1, -1)'], {}), '((1, -1))\n', (44674, 44683), False, 'import numpy\n'), ((44745, 44811), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (44756, 44811), False, 'import numpy\n'), ((44867, 44891), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (44878, 44891), False, 'import numpy\n'), ((46715, 46758), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (46724, 46758), False, 'import numpy\n'), ((46879, 46899), 'numpy.array', 'numpy.array', (['(1, -1)'], {}), '((1, -1))\n', (46890, 46899), False, 'import numpy\n'), ((46961, 47027), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (46972, 47027), False, 'import numpy\n'), ((47083, 47107), 'numpy.array', 'numpy.array', 
(['self.origin'], {}), '(self.origin)\n', (47094, 47107), False, 'import numpy\n'), ((48351, 48375), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (48362, 48375), False, 'import numpy\n'), ((49257, 49300), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (49266, 49300), False, 'import numpy\n'), ((49421, 49441), 'numpy.array', 'numpy.array', (['(1, -1)'], {}), '((1, -1))\n', (49432, 49441), False, 'import numpy\n'), ((49503, 49569), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (49514, 49569), False, 'import numpy\n'), ((49625, 49649), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (49636, 49649), False, 'import numpy\n'), ((59698, 59725), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(4352)'], {}), "('>2H', 4, 4352)\n", (59709, 59725), False, 'import struct\n'), ((60711, 60727), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (60716, 60727), False, 'from builtins import range\n'), ((63353, 63396), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (63362, 63396), False, 'import numpy\n'), ((63528, 63594), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (63539, 63594), False, 'import numpy\n'), ((63650, 63674), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (63661, 63674), False, 'import numpy\n'), ((63725, 63745), 'numpy.array', 'numpy.array', (['(1, -1)'], {}), '((1, -1))\n', (63736, 63745), False, 'import numpy\n'), ((64999, 65018), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (65004, 65018), False, 'from builtins import range\n'), ((66449, 66492), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (66458, 66492), False, 'import numpy\n'), ((66613, 66633), 'numpy.array', 'numpy.array', (['(1, -1)'], {}), '((1, -1))\n', (66624, 66633), False, 'import numpy\n'), ((66695, 66761), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (66706, 66761), False, 'import numpy\n'), ((66817, 66841), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (66828, 66841), False, 'import numpy\n'), ((66985, 67001), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (66990, 67001), False, 'from builtins import range\n'), ((68552, 68576), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (68563, 68576), False, 'import numpy\n'), ((68873, 68889), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (68878, 68889), False, 'from builtins import range\n'), ((69804, 69847), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (69813, 69847), False, 'import numpy\n'), ((69979, 70045), 'numpy.array', 'numpy.array', (['(self.magnification, self.magnification)'], {'dtype': 'float'}), '((self.magnification, self.magnification), dtype=float)\n', (69990, 70045), False, 'import numpy\n'), ((70101, 70125), 'numpy.array', 'numpy.array', (['self.origin'], {}), '(self.origin)\n', (70112, 70125), False, 'import numpy\n'), ((70176, 70196), 'numpy.array', 'numpy.array', 
(['(1, -1)'], {}), '((1, -1))\n', (70187, 70196), False, 'import numpy\n'), ((70338, 70354), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (70343, 70354), False, 'from builtins import range\n'), ((81376, 81401), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (81399, 81401), False, 'import datetime\n'), ((82464, 82529), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Creating a GDSII file without any cells."""'], {}), "('[GDSPY] Creating a GDSII file without any cells.')\n", (82477, 82529), False, 'import warnings\n'), ((82747, 82774), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(1024)'], {}), "('>2H', 4, 1024)\n", (82758, 82774), False, 'import struct\n'), ((100535, 100560), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (100558, 100560), False, 'import datetime\n'), ((102641, 102668), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(1024)'], {}), "('>2H', 4, 1024)\n", (102652, 102668), False, 'import struct\n'), ((103209, 103227), 'builtins.open', 'open', (['infile', '"""rb"""'], {}), "(infile, 'rb')\n", (103213, 103227), False, 'from builtins import open\n'), ((103409, 103446), 'gdspy.gdsiiformat._eight_byte_real_to_float', '_eight_byte_real_to_float', (['data[4:12]'], {}), '(data[4:12])\n', (103434, 103446), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((103471, 103507), 'gdspy.gdsiiformat._eight_byte_real_to_float', '_eight_byte_real_to_float', (['data[12:]'], {}), '(data[12:])\n', (103496, 103507), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((104400, 104418), 'builtins.open', 'open', (['infile', '"""rb"""'], {}), "(infile, 'rb')\n", (104404, 104418), False, 'from builtins import open\n'), ((17012, 17050), 'builtins.zip', 'zip', (['element.layers', 'element.datatypes'], {}), '(element.layers, element.datatypes)\n', (17015, 17050), False, 'from builtins import zip\n'), ((20470, 20576), 'numpy.array', 'numpy.array', (['[(bb[0, 0], bb[0, 1]), (bb[0, 0], bb[1, 1]), (bb[1, 0], bb[1, 1]), (bb[1, 0\n ], bb[0, 1])]'], {}), '([(bb[0, 0], bb[0, 1]), (bb[0, 0], bb[1, 1]), (bb[1, 0], bb[1, 1\n ]), (bb[1, 0], bb[0, 1])])\n', (20481, 20576), False, 'import numpy\n'), ((32501, 32519), 'builtins.open', 'open', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (32505, 32519), False, 'from builtins import open\n'), ((40028, 40061), 'struct.pack', 'struct.pack', (['""">3H"""', '(6)', '(6657)', 'word'], {}), "('>3H', 6, 6657, word)\n", (40039, 40061), False, 'import struct\n'), ((40845, 41046), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. This file might not be compatible with all readers."""'], {'stacklevel': '(4)'}), "(\n '[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. 
This file might not be compatible with all readers.'\n , stacklevel=4)\n", (40858, 41046), False, 'import warnings\n'), ((44560, 44603), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (44569, 44603), False, 'import numpy\n'), ((46776, 46819), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (46785, 46819), False, 'import numpy\n'), ((49318, 49361), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (49327, 49361), False, 'import numpy\n'), ((51233, 51319), 'numpy.array', 'numpy.array', (['((self.origin[0], self.origin[1]), (self.origin[0], self.origin[1]))'], {}), '(((self.origin[0], self.origin[1]), (self.origin[0], self.origin\n [1])))\n', (51244, 51319), False, 'import numpy\n'), ((57266, 57309), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (57275, 57309), False, 'import numpy\n'), ((57331, 57374), 'numpy.cos', 'numpy.cos', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (57340, 57374), False, 'import numpy\n'), ((58278, 58311), 'struct.pack', 'struct.pack', (['""">3H"""', '(6)', '(6657)', 'word'], {}), "('>3H', 6, 6657, word)\n", (58289, 58311), False, 'import struct\n'), ((59379, 59580), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. This file might not be compatible with all readers."""'], {'stacklevel': '(4)'}), "(\n '[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. This file might not be compatible with all readers.'\n , stacklevel=4)\n", (59392, 59580), False, 'import warnings\n'), ((63414, 63457), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (63423, 63457), False, 'import numpy\n'), ((63971, 63990), 'builtins.range', 'range', (['self.columns'], {}), '(self.columns)\n', (63976, 63990), False, 'from builtins import range\n'), ((65046, 65062), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (65051, 65062), False, 'from builtins import range\n'), ((66510, 66553), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (66519, 66553), False, 'import numpy\n'), ((67025, 67080), 'numpy.array', 'numpy.array', (['[self.spacing[0] * i, self.spacing[1] * j]'], {}), '([self.spacing[0] * i, self.spacing[1] * j])\n', (67036, 67080), False, 'import numpy\n'), ((68913, 68968), 'numpy.array', 'numpy.array', (['[self.spacing[0] * i, self.spacing[1] * j]'], {}), '([self.spacing[0] * i, self.spacing[1] * j])\n', (68924, 68968), False, 'import numpy\n'), ((69865, 69908), 'numpy.sin', 'numpy.sin', (['(self.rotation * numpy.pi / 180.0)'], {}), '(self.rotation * numpy.pi / 180.0)\n', (69874, 69908), False, 'import numpy\n'), ((70378, 70435), 'numpy.array', 'numpy.array', (['[self.spacing[0] * ii, self.spacing[1] * jj]'], {}), '([self.spacing[0] * ii, self.spacing[1] * jj])\n', (70389, 70435), False, 'import numpy\n'), ((72458, 72544), 'numpy.array', 'numpy.array', (['((self.origin[0], self.origin[1]), (self.origin[0], self.origin[1]))'], {}), '(((self.origin[0], self.origin[1]), (self.origin[0], self.origin\n [1])))\n', (72469, 72544), False, 'import numpy\n'), ((81302, 81321), 'builtins.open', 'open', (['outfile', 
'"""wb"""'], {}), "(outfile, 'wb')\n", (81306, 81321), False, 'from builtins import open\n'), ((82243, 82275), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['self.precision'], {}), '(self.precision)\n', (82259, 82275), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((85098, 85116), 'builtins.open', 'open', (['infile', '"""rb"""'], {}), "(infile, 'rb')\n", (85102, 85116), False, 'from builtins import open\n'), ((92678, 92726), 'numpy.cos', 'numpy.cos', (["(kwargs['rotation'] * numpy.pi / 180.0)"], {}), "(kwargs['rotation'] * numpy.pi / 180.0)\n", (92687, 92726), False, 'import numpy\n'), ((100382, 100401), 'builtins.open', 'open', (['outfile', '"""wb"""'], {}), "(outfile, 'wb')\n", (100386, 100401), False, 'from builtins import open\n'), ((101384, 101411), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['precision'], {}), '(precision)\n', (101400, 101411), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((39571, 39599), 'struct.pack', 'struct.pack', (['""">2H"""', '(12)', '(6917)'], {}), "('>2H', 12, 6917)\n", (39582, 39599), False, 'import struct\n'), ((39604, 39640), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['self.magnification'], {}), '(self.magnification)\n', (39620, 39640), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((39899, 39927), 'struct.pack', 'struct.pack', (['""">2H"""', '(12)', '(7173)'], {}), "('>2H', 12, 7173)\n", (39910, 39927), False, 'import struct\n'), ((39932, 39963), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['self.rotation'], {}), '(self.rotation)\n', (39948, 39963), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((40236, 40270), 'builtins.round', 'round', (['(self.origin[0] * multiplier)'], {}), '(self.origin[0] * multiplier)\n', (40241, 40270), False, 'from builtins import round\n'), ((40293, 40327), 'builtins.round', 'round', (['(self.origin[1] * multiplier)'], {}), '(self.origin[1] * multiplier)\n', (40298, 40327), False, 'from builtins import round\n'), ((50879, 50906), 'numpy.concatenate', 'numpy.concatenate', (['polygons'], {}), '(polygons)\n', (50896, 50906), False, 'import numpy\n'), ((56943, 56971), 'struct.pack', 'struct.pack', (['""">2H"""', '(12)', '(6917)'], {}), "('>2H', 12, 6917)\n", (56954, 56971), False, 'import struct\n'), ((56976, 57012), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['self.magnification'], {}), '(self.magnification)\n', (56992, 57012), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((58149, 58177), 'struct.pack', 'struct.pack', (['""">2H"""', '(12)', '(7173)'], {}), "('>2H', 12, 7173)\n", (58160, 58177), False, 'import struct\n'), ((58182, 58213), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['self.rotation'], {}), '(self.rotation)\n', (58198, 58213), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((58590, 58624), 'builtins.round', 'round', (['(self.origin[0] * multiplier)'], {}), '(self.origin[0] * multiplier)\n', (58595, 58624), False, 'from builtins import round\n'), ((58647, 58681), 'builtins.round', 'round', (['(self.origin[1] * multiplier)'], {}), '(self.origin[1] * multiplier)\n', (58652, 
58681), False, 'from builtins import round\n'), ((58704, 58726), 'builtins.round', 'round', (['(x2 * multiplier)'], {}), '(x2 * multiplier)\n', (58709, 58726), False, 'from builtins import round\n'), ((58749, 58771), 'builtins.round', 'round', (['(y2 * multiplier)'], {}), '(y2 * multiplier)\n', (58754, 58771), False, 'from builtins import round\n'), ((58794, 58816), 'builtins.round', 'round', (['(x3 * multiplier)'], {}), '(x3 * multiplier)\n', (58799, 58816), False, 'from builtins import round\n'), ((58839, 58861), 'builtins.round', 'round', (['(y3 * multiplier)'], {}), '(y3 * multiplier)\n', (58844, 58861), False, 'from builtins import round\n'), ((64022, 64038), 'builtins.range', 'range', (['self.rows'], {}), '(self.rows)\n', (64027, 64038), False, 'from builtins import range\n'), ((65090, 65147), 'numpy.array', 'numpy.array', (['[self.spacing[0] * ii, self.spacing[1] * jj]'], {}), '([self.spacing[0] * ii, self.spacing[1] * jj])\n', (65101, 65147), False, 'import numpy\n'), ((67153, 67181), 'copy.deepcopy', 'libcopy.deepcopy', (['polygonset'], {}), '(polygonset)\n', (67169, 67181), True, 'import copy as libcopy\n'), ((70503, 70525), 'copy.deepcopy', 'libcopy.deepcopy', (['clbl'], {}), '(clbl)\n', (70519, 70525), True, 'import copy as libcopy\n'), ((72104, 72131), 'numpy.concatenate', 'numpy.concatenate', (['polygons'], {}), '(polygons)\n', (72121, 72131), False, 'import numpy\n'), ((82184, 82228), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['(self.precision / self.unit)'], {}), '(self.precision / self.unit)\n', (82200, 82228), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((92608, 92656), 'numpy.sin', 'numpy.sin', (["(kwargs['rotation'] * numpy.pi / 180.0)"], {}), "(kwargs['rotation'] * numpy.pi / 180.0)\n", (92617, 92656), False, 'import numpy\n'), ((101335, 101369), 'gdspy.gdsiiformat._eight_byte_real', '_eight_byte_real', (['(precision / unit)'], {}), '(precision / unit)\n', (101351, 101369), False, 'from gdspy.gdsiiformat import _record_reader, _raw_record_reader, _eight_byte_real, _eight_byte_real_to_float\n'), ((18661, 18692), 'numpy.concatenate', 'numpy.concatenate', (['all_polygons'], {}), '(all_polygons)\n', (18678, 18692), False, 'import numpy\n'), ((64070, 64127), 'numpy.array', 'numpy.array', (['[self.spacing[0] * ii, self.spacing[1] * jj]'], {}), '([self.spacing[0] * ii, self.spacing[1] * jj])\n', (64081, 64127), False, 'import numpy\n'), ((82139, 82166), 'struct.pack', 'struct.pack', (['""">2H"""', '(20)', '(773)'], {}), "('>2H', 20, 773)\n", (82150, 82166), False, 'import struct\n'), ((101290, 101317), 'struct.pack', 'struct.pack', (['""">2H"""', '(20)', '(773)'], {}), "('>2H', 20, 773)\n", (101301, 101317), False, 'import struct\n'), ((22928, 22961), 'numpy.array', 'numpy.array', (['polyset.polygons[ii]'], {}), '(polyset.polygons[ii])\n', (22939, 22961), False, 'import numpy\n'), ((33701, 33719), 'builtins.int', 'int', (['(255 * c + 0.5)'], {}), '(255 * c + 0.5)\n', (33704, 33719), False, 'from builtins import int\n'), ((34502, 34520), 'builtins.int', 'int', (['(255 * c + 0.5)'], {}), '(255 * c + 0.5)\n', (34505, 34520), False, 'from builtins import int\n'), ((21152, 21185), 'numpy.array', 'numpy.array', (['polyset.polygons[ii]'], {}), '(polyset.polygons[ii])\n', (21163, 21185), False, 'import numpy\n'), ((21262, 21295), 'numpy.array', 'numpy.array', (['polyset.polygons[ii]'], {}), '(polyset.polygons[ii])\n', (21273, 21295), False, 'import numpy\n'), ((22386, 22405), 
'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (22397, 22405), False, 'import numpy\n'), ((33753, 33865), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['(l % 3 / 3.0 + l % 6 // 3 / 6.0 + l // 6 / 11.0)', '(1 - (l + d) % 8 / 12.0)', '(1 - d % 3 / 4.0)'], {}), '(l % 3 / 3.0 + l % 6 // 3 / 6.0 + l // 6 / 11.0, 1 - (l +\n d) % 8 / 12.0, 1 - d % 3 / 4.0)\n', (33772, 33865), False, 'import colorsys\n'), ((34554, 34666), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['(l % 3 / 3.0 + l % 6 // 3 / 6.0 + l // 6 / 11.0)', '(1 - (l + t) % 8 / 12.0)', '(1 - t % 3 / 4.0)'], {}), '(l % 3 / 3.0 + l % 6 // 3 / 6.0 + l // 6 / 11.0, 1 - (l +\n t) % 8 / 12.0, 1 - t % 3 / 4.0)\n', (34573, 34666), False, 'import colorsys\n'), ((69062, 69084), 'copy.deepcopy', 'libcopy.deepcopy', (['path'], {}), '(path)\n', (69078, 69084), True, 'import copy as libcopy\n'), ((85922, 85975), 'numpy.concatenate', 'numpy.concatenate', (["(kwargs['xy'], factor * record[1])"], {}), "((kwargs['xy'], factor * record[1]))\n", (85939, 85975), False, 'import numpy\n'), ((23269, 23300), 'builtins.zip', 'zip', (['path.layers', 'path.datatype'], {}), '(path.layers, path.datatype)\n', (23272, 23300), False, 'from builtins import zip\n'), ((87052, 87147), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] GDSII elements of type BOX are imported as polygons."""'], {'stacklevel': '(2)'}), "('[GDSPY] GDSII elements of type BOX are imported as polygons.',\n stacklevel=2)\n", (87065, 87147), False, 'import warnings\n'), ((88038, 88203), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Absolute magnification or rotation of references is not supported. Transformations will be interpreted as relative."""'], {'stacklevel': '(2)'}), "(\n '[GDSPY] Absolute magnification or rotation of references is not supported. Transformations will be interpreted as relative.'\n , stacklevel=2)\n", (88051, 88203), False, 'import warnings\n'), ((87899, 87916), 'builtins.int', 'int', (['record[1][0]'], {}), '(record[1][0])\n', (87902, 87916), False, 'from builtins import int\n'), ((87951, 87968), 'builtins.int', 'int', (['record[1][0]'], {}), '(record[1][0])\n', (87954, 87968), False, 'from builtins import int\n'), ((90155, 90172), 'builtins.int', 'int', (['record[1][0]'], {}), '(record[1][0])\n', (90158, 90172), False, 'from builtins import int\n')] |
# Reference: https://www.kaggle.com/CVxTz/audio-data-augmentation
import numpy as np
from nlpaug.model.audio import Audio
class Shift(Audio):
def __init__(self, sampling_rate, shift_max=2, shift_direction='both'):
"""
        :param sampling_rate: Sampling rate of the audio
        :param shift_max: Maximum shift, in seconds
        :param shift_direction: Direction to shift the segment: 'left', 'right' or 'both'
"""
super(Shift, self).__init__()
self.sampling_rate = sampling_rate
self.shift_max = shift_max
if shift_direction in ['left', 'right', 'both']:
self.shift_direction = shift_direction
else:
raise ValueError(
'shift_direction should be either left, right or both while {} is passed.'.format(shift_direction))
def manipulate(self, data):
shift = np.random.randint(self.sampling_rate * self.shift_max)
if self.shift_direction == 'right':
shift = -shift
elif self.shift_direction == 'both':
direction = np.random.randint(0, 2)
if direction == 1:
shift = -shift
        augmented_data = np.roll(data, shift)
        # Silence the samples that wrapped around the array boundary
        # (no-op when shift == 0; an unguarded `else` would wipe the signal).
        if shift > 0:
            augmented_data[:shift] = 0
        elif shift < 0:
            augmented_data[shift:] = 0
return augmented_data
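# Minimal usage sketch (illustrative values): shifting a toy ramp makes the
# roll-and-silence behaviour visible.
if __name__ == '__main__':
    aug = Shift(sampling_rate=4, shift_max=1, shift_direction='left')
    print(aug.manipulate(np.arange(8, dtype=float)))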
| [
"numpy.random.randint",
"numpy.roll"
] | [((855, 909), 'numpy.random.randint', 'np.random.randint', (['(self.sampling_rate * self.shift_max)'], {}), '(self.sampling_rate * self.shift_max)\n', (872, 909), True, 'import numpy as np\n'), ((1162, 1182), 'numpy.roll', 'np.roll', (['data', 'shift'], {}), '(data, shift)\n', (1169, 1182), True, 'import numpy as np\n'), ((1050, 1073), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1067, 1073), True, 'import numpy as np\n')] |
from itertools import product
from random import choice
from sys import stdout
from numpy import all, full, zeros, uint8
symbol_dict = {
'hunt': ('S', 'P'),
'harvest': ('p', 'P'),
'escalation': 'M'
}
A_AGENT = 0 # base
B_AGENT = 1
STAG = 2 # hunt
PLANT = 3
Y_PLANT = 2 # harvest
M_PLANT = 3
MARK = 2 # escalation
def print_matrix(obs, game, grid_size):
if game == 'escalation':
matrix = full((grid_size[0], grid_size[1], 3), False, dtype=bool)
else:
matrix = full((grid_size[0], grid_size[1], 4), False, dtype=bool)
if game == 'hunt':
a, b, stag = (obs[0], obs[1]), (obs[2], obs[3]), (obs[4], obs[5])
matrix[a[0]][a[1]][A_AGENT] = True
matrix[b[0]][b[1]][B_AGENT] = True
matrix[stag[0]][stag[1]][STAG] = True
for i in range(6, len(obs), 2):
plant = obs[i], obs[i + 1]
matrix[plant[0]][plant[1]][PLANT] = True
elif game == 'harvest':
a, b = (obs[0], obs[1]), (obs[2], obs[3])
matrix[a[0]][a[1]][A_AGENT] = True
matrix[b[0]][b[1]][B_AGENT] = True
for i in range(4, len(obs), 3):
plant_age = M_PLANT if obs[i + 2] else Y_PLANT
matrix[obs[i]][obs[i + 1]][plant_age] = True
elif game == 'escalation':
a, b, mark = (obs[0], obs[1]), (obs[2], obs[3]), (obs[4], obs[5])
matrix[a[0]][a[1]][A_AGENT] = True
matrix[b[0]][b[1]][B_AGENT] = True
matrix[mark[0]][mark[1]][MARK] = True
symbols = symbol_dict[game]
stdout.write('╔════════════════════════════╗\n')
for row in matrix:
stdout.write('║ ·')
for col in row:
cell = []
            cell.append('A' if col[0] == 1 else ' ')
            cell.append('B' if col[1] == 1 else ' ')
            cell.append(symbols[0] if col[2] == 1 else ' ')
            if game != 'escalation':
                cell.append(symbols[1] if col[3] == 1 else ' ')
            else:
                cell.append(' ')
stdout.write(''.join(cell) + '·')
stdout.write(' ║')
stdout.write('\n')
stdout.write('╚════════════════════════════╝\n\r')
stdout.flush()
def overlaps_entity(a, b):
"""
:param a: (X, Y) tuple for entity 1
:param b: (X, Y) tuple for entity 2
:return: True if they are on the same cell, False otherwise
"""
return (a == b).all()
def place_entity_in_unoccupied_cell(used_coordinates, grid_dims):
"""
Returns a random unused coordinate.
:param used_coordinates: a list of already used coordinates
:param grid_dims: dimensions of the grid so we know what a valid coordinate is
:return: the chosen x, y coordinate
"""
all_coords = list(product(list(range(grid_dims[0])), list(range(grid_dims[1]))))
    for coord in used_coordinates:
        for test in all_coords:
            if all(test == coord):
                # Each coordinate occurs at most once, so stop scanning after
                # the removal instead of mutating the list mid-iteration.
                all_coords.remove(test)
                break
return choice(all_coords)
def spawn_plants(grid_dims, how_many, used_coordinates):
new_plants = []
    for _ in range(how_many):
new_plant = zeros(2, dtype=uint8)
new_pos = place_entity_in_unoccupied_cell(grid_dims=grid_dims,
used_coordinates=new_plants + used_coordinates)
new_plant[0], new_plant[1] = new_pos
new_plants.append(new_plant)
return new_plants
def respawn_plants(plants, tagged_plants, grid_dims, used_coordinates):
for tagged_plant in tagged_plants:
new_plant = zeros(2, dtype=uint8)
new_pos = place_entity_in_unoccupied_cell(grid_dims=grid_dims,
used_coordinates=plants + used_coordinates)
new_plant[0], new_plant[1] = new_pos
plants[tagged_plant] = new_plant
return plants
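# Minimal usage sketch (illustrative grid size): spawn two plants on a 5x5
# grid while avoiding the two cells already occupied by the agents.
if __name__ == '__main__':
    agent_a = zeros(2, dtype=uint8)
    agent_b = zeros(2, dtype=uint8)
    agent_b[0], agent_b[1] = 4, 4
    plants = spawn_plants(grid_dims=(5, 5), how_many=2,
                          used_coordinates=[agent_a, agent_b])
    print([tuple(int(c) for c in plant) for plant in plants])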
| [
"numpy.all",
"random.choice",
"numpy.zeros",
"numpy.full",
"sys.stdout.flush",
"sys.stdout.write"
] | [((1549, 1597), 'sys.stdout.write', 'stdout.write', (['"""╔════════════════════════════╗\n"""'], {}), "('╔════════════════════════════╗\\n')\n", (1561, 1597), False, 'from sys import stdout\n'), ((2169, 2219), 'sys.stdout.write', 'stdout.write', (["'╚════════════════════════════╝\\n\\r'"], {}), "('╚════════════════════════════╝\\n\\r')\n", (2181, 2219), False, 'from sys import stdout\n'), ((2224, 2238), 'sys.stdout.flush', 'stdout.flush', ([], {}), '()\n', (2236, 2238), False, 'from sys import stdout\n'), ((3005, 3023), 'random.choice', 'choice', (['all_coords'], {}), '(all_coords)\n', (3011, 3023), False, 'from random import choice\n'), ((448, 504), 'numpy.full', 'full', (['(grid_size[0], grid_size[1], 3)', '(False)'], {'dtype': 'bool'}), '((grid_size[0], grid_size[1], 3), False, dtype=bool)\n', (452, 504), False, 'from numpy import all, full, zeros, uint8\n'), ((532, 588), 'numpy.full', 'full', (['(grid_size[0], grid_size[1], 4)', '(False)'], {'dtype': 'bool'}), '((grid_size[0], grid_size[1], 4), False, dtype=bool)\n', (536, 588), False, 'from numpy import all, full, zeros, uint8\n'), ((1629, 1648), 'sys.stdout.write', 'stdout.write', (['"""║ ·"""'], {}), "('║ ·')\n", (1641, 1648), False, 'from sys import stdout\n'), ((2119, 2137), 'sys.stdout.write', 'stdout.write', (['""" ║"""'], {}), "(' ║')\n", (2131, 2137), False, 'from sys import stdout\n'), ((2146, 2164), 'sys.stdout.write', 'stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2158, 2164), False, 'from sys import stdout\n'), ((3153, 3174), 'numpy.zeros', 'zeros', (['(2)'], {'dtype': 'uint8'}), '(2, dtype=uint8)\n', (3158, 3174), False, 'from numpy import all, full, zeros, uint8\n'), ((3581, 3602), 'numpy.zeros', 'zeros', (['(2)'], {'dtype': 'uint8'}), '(2, dtype=uint8)\n', (3586, 3602), False, 'from numpy import all, full, zeros, uint8\n'), ((2933, 2951), 'numpy.all', 'all', (['(test == coord)'], {}), '(test == coord)\n', (2936, 2951), False, 'from numpy import all, full, zeros, uint8\n')] |
__author__='lhq'
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
def get_train_img():
"""
    Load the training images and their labels.
    :return: image array of shape (N, 64, 64, 3) and label array
"""
cwd=os.getcwd()
mi_dir=cwd+'/dataset/train/mi/'
shu_dir=cwd+'/dataset/train/shu/'
file1=os.listdir(mi_dir)
file0=os.listdir(shu_dir)
# file1.sort(key=lambda x: int(x[:-4]))
# file0.sort(key=lambda x: int(x[:-4]))
mi_img_list=np.empty((1, 64, 64, 3))
shu_img_list = np.empty((1, 64, 64, 3))
label=[]
for mi_img_content in file1:
mi_img=Image.open(mi_dir+mi_img_content)
mi_img = np.array(mi_img).reshape((1, 64, 64, 3))
mi_img_list = np.concatenate((mi_img_list, mi_img), axis=0)
label.append(1)
for shu_img_content in file0:
shu_img=Image.open(shu_dir+shu_img_content)
shu_img = np.array(shu_img).reshape((1, 64, 64, 3))
shu_img_list = np.concatenate((shu_img_list, shu_img), axis=0)
label.append(0)
mi_img_list = np.delete(mi_img_list, 0, 0)
shu_img_list = np.delete(shu_img_list, 0, 0)
label=[int(i) for i in label]
label=np.array(label)
img_list=np.concatenate((mi_img_list, shu_img_list), axis=0)
return img_list,label
def get_test_img():
"""
    Load the test images and their labels.
    :return: image array of shape (N, 64, 64, 3) and label array
"""
cwd=os.getcwd()
mi_dir=cwd+'/dataset/test/mi/'
shu_dir=cwd+'/dataset/test/shu/'
file1=os.listdir(mi_dir)
file0=os.listdir(shu_dir)
# file1.sort(key=lambda x: int(x[:-4]))
# file0.sort(key=lambda x: int(x[:-4]))
mi_img_list=np.empty((1, 64, 64, 3))
shu_img_list = np.empty((1, 64, 64, 3))
label=[]
for mi_img_content in file1:
mi_img=Image.open(mi_dir+mi_img_content)
mi_img = np.array(mi_img).reshape((1, 64, 64, 3))
mi_img_list = np.concatenate((mi_img_list, mi_img), axis=0)
label.append(1)
for shu_img_content in file0:
shu_img=Image.open(shu_dir+shu_img_content)
shu_img = np.array(shu_img).reshape((1, 64, 64, 3))
shu_img_list = np.concatenate((shu_img_list, shu_img), axis=0)
label.append(0)
mi_img_list = np.delete(mi_img_list, 0, 0)
shu_img_list = np.delete(shu_img_list, 0, 0)
label=[int(i) for i in label]
label=np.array(label)
img_list=np.concatenate((mi_img_list, shu_img_list), axis=0)
return img_list,label
class MyTrainDataset(Dataset):
def __init__(self, transform=None, target_transform=None):
imgs,label = get_train_img()
self.imgs = imgs
self.label=label
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img = self.imgs[index]
label=self.label[index]
if self.transform is not None:
img = self.transform(img)
return img,label
def __len__(self):
return len(self.imgs)
class MyTestDataset(Dataset):
def __init__(self, transform=None, target_transform=None):
imgs,label = get_test_img()
self.imgs = imgs
self.label=label
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img = self.imgs[index]
label=self.label[index]
if self.transform is not None:
img = self.transform(img)
return img,label
def __len__(self):
return len(self.imgs)
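if __name__ == '__main__':
    # Minimal usage sketch; assumes dataset/train/{mi,shu} exist under the
    # working directory and contain 64x64 RGB images, as the loaders require.
    from torch.utils.data import DataLoader
    loader = DataLoader(MyTrainDataset(), batch_size=8, shuffle=True)
    imgs, labels = next(iter(loader))
    print(imgs.shape, labels.shape)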
| [
"os.listdir",
"PIL.Image.open",
"numpy.delete",
"os.getcwd",
"numpy.array",
"numpy.empty",
"numpy.concatenate"
] | [((176, 187), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (185, 187), False, 'import os\n'), ((272, 290), 'os.listdir', 'os.listdir', (['mi_dir'], {}), '(mi_dir)\n', (282, 290), False, 'import os\n'), ((301, 320), 'os.listdir', 'os.listdir', (['shu_dir'], {}), '(shu_dir)\n', (311, 320), False, 'import os\n'), ((425, 449), 'numpy.empty', 'np.empty', (['(1, 64, 64, 3)'], {}), '((1, 64, 64, 3))\n', (433, 449), True, 'import numpy as np\n'), ((469, 493), 'numpy.empty', 'np.empty', (['(1, 64, 64, 3)'], {}), '((1, 64, 64, 3))\n', (477, 493), True, 'import numpy as np\n'), ((998, 1026), 'numpy.delete', 'np.delete', (['mi_img_list', '(0)', '(0)'], {}), '(mi_img_list, 0, 0)\n', (1007, 1026), True, 'import numpy as np\n'), ((1046, 1075), 'numpy.delete', 'np.delete', (['shu_img_list', '(0)', '(0)'], {}), '(shu_img_list, 0, 0)\n', (1055, 1075), True, 'import numpy as np\n'), ((1120, 1135), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1128, 1135), True, 'import numpy as np\n'), ((1149, 1200), 'numpy.concatenate', 'np.concatenate', (['(mi_img_list, shu_img_list)'], {'axis': '(0)'}), '((mi_img_list, shu_img_list), axis=0)\n', (1163, 1200), True, 'import numpy as np\n'), ((1296, 1307), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1305, 1307), False, 'import os\n'), ((1390, 1408), 'os.listdir', 'os.listdir', (['mi_dir'], {}), '(mi_dir)\n', (1400, 1408), False, 'import os\n'), ((1419, 1438), 'os.listdir', 'os.listdir', (['shu_dir'], {}), '(shu_dir)\n', (1429, 1438), False, 'import os\n'), ((1543, 1567), 'numpy.empty', 'np.empty', (['(1, 64, 64, 3)'], {}), '((1, 64, 64, 3))\n', (1551, 1567), True, 'import numpy as np\n'), ((1587, 1611), 'numpy.empty', 'np.empty', (['(1, 64, 64, 3)'], {}), '((1, 64, 64, 3))\n', (1595, 1611), True, 'import numpy as np\n'), ((2116, 2144), 'numpy.delete', 'np.delete', (['mi_img_list', '(0)', '(0)'], {}), '(mi_img_list, 0, 0)\n', (2125, 2144), True, 'import numpy as np\n'), ((2164, 2193), 'numpy.delete', 'np.delete', (['shu_img_list', '(0)', '(0)'], {}), '(shu_img_list, 0, 0)\n', (2173, 2193), True, 'import numpy as np\n'), ((2238, 2253), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2246, 2253), True, 'import numpy as np\n'), ((2267, 2318), 'numpy.concatenate', 'np.concatenate', (['(mi_img_list, shu_img_list)'], {'axis': '(0)'}), '((mi_img_list, shu_img_list), axis=0)\n', (2281, 2318), True, 'import numpy as np\n'), ((555, 590), 'PIL.Image.open', 'Image.open', (['(mi_dir + mi_img_content)'], {}), '(mi_dir + mi_img_content)\n', (565, 590), False, 'from PIL import Image\n'), ((669, 714), 'numpy.concatenate', 'np.concatenate', (['(mi_img_list, mi_img)'], {'axis': '(0)'}), '((mi_img_list, mi_img), axis=0)\n', (683, 714), True, 'import numpy as np\n'), ((789, 826), 'PIL.Image.open', 'Image.open', (['(shu_dir + shu_img_content)'], {}), '(shu_dir + shu_img_content)\n', (799, 826), False, 'from PIL import Image\n'), ((908, 955), 'numpy.concatenate', 'np.concatenate', (['(shu_img_list, shu_img)'], {'axis': '(0)'}), '((shu_img_list, shu_img), axis=0)\n', (922, 955), True, 'import numpy as np\n'), ((1673, 1708), 'PIL.Image.open', 'Image.open', (['(mi_dir + mi_img_content)'], {}), '(mi_dir + mi_img_content)\n', (1683, 1708), False, 'from PIL import Image\n'), ((1787, 1832), 'numpy.concatenate', 'np.concatenate', (['(mi_img_list, mi_img)'], {'axis': '(0)'}), '((mi_img_list, mi_img), axis=0)\n', (1801, 1832), True, 'import numpy as np\n'), ((1907, 1944), 'PIL.Image.open', 'Image.open', (['(shu_dir + shu_img_content)'], {}), '(shu_dir + shu_img_content)\n', (1917, 
1944), False, 'from PIL import Image\n'), ((2026, 2073), 'numpy.concatenate', 'np.concatenate', (['(shu_img_list, shu_img)'], {'axis': '(0)'}), '((shu_img_list, shu_img), axis=0)\n', (2040, 2073), True, 'import numpy as np\n'), ((606, 622), 'numpy.array', 'np.array', (['mi_img'], {}), '(mi_img)\n', (614, 622), True, 'import numpy as np\n'), ((843, 860), 'numpy.array', 'np.array', (['shu_img'], {}), '(shu_img)\n', (851, 860), True, 'import numpy as np\n'), ((1724, 1740), 'numpy.array', 'np.array', (['mi_img'], {}), '(mi_img)\n', (1732, 1740), True, 'import numpy as np\n'), ((1961, 1978), 'numpy.array', 'np.array', (['shu_img'], {}), '(shu_img)\n', (1969, 1978), True, 'import numpy as np\n')] |
"""Conjugate Gradient Optimizer.
Computes the decent direction using the conjugate gradient method, and then
computes the optimal step size that will satisfy the KL divergence constraint.
Finally, it performs a backtracking line search to optimize the objective.
"""
import warnings
from dowel import logger
import numpy as np
import torch
from torch.optim import Optimizer
from metarl.misc.tensor_utils import unflatten_tensors
def _build_hessian_vector_product(func, params, reg_coeff=1e-5):
"""Computes Hessian-vector product using Pearlmutter's algorithm.
    `Pearlmutter, Barak A. "Fast exact multiplication by the Hessian." Neural
computation 6.1 (1994): 147-160.`
Args:
func (callable): A function that returns a torch.Tensor. Hessian of
the return value will be computed.
params (list[torch.Tensor]): A list of function parameters.
reg_coeff (float): A small value so that A -> A + reg*I.
Returns:
function: It can be called to get the final result.
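    Example:
        A quick sanity check (illustrative): the Hessian of ``sum(w**2)`` is
        ``2 * I``, so the product with any vector is approximately ``2 * v``
        (up to the small ``reg_coeff`` term).
        >>> w = torch.zeros(3, requires_grad=True)
        >>> f_Ax = _build_hessian_vector_product(lambda: (w ** 2).sum(), [w])
        >>> torch.allclose(f_Ax(torch.ones(3)), 2.0 * torch.ones(3), atol=1e-3)
        True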
"""
param_shapes = [p.shape or torch.Size([1]) for p in params]
f = func()
f_grads = torch.autograd.grad(f, params, create_graph=True)
def _eval(vector):
"""The evaluation function.
Args:
vector (torch.Tensor): The vector to be multiplied with
Hessian.
Returns:
torch.Tensor: The product of Hessian of function f and v.
"""
unflatten_vector = unflatten_tensors(vector, param_shapes)
assert len(f_grads) == len(unflatten_vector)
grad_vector_product = torch.sum(
torch.stack(
[torch.sum(g * x) for g, x in zip(f_grads, unflatten_vector)]))
hvp = list(
torch.autograd.grad(grad_vector_product, params,
retain_graph=True))
for i, (hx, p) in enumerate(zip(hvp, params)):
if hx is None:
hvp[i] = torch.zeros_like(p)
flat_output = torch.cat([h.reshape(-1) for h in hvp])
return flat_output + reg_coeff * vector
return _eval
def _conjugate_gradient(f_Ax, b, cg_iters, residual_tol=1e-10):
"""Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.
Args:
f_Ax (callable): A function to compute Hessian vector product.
b (torch.Tensor): Right hand side of the equation to solve.
cg_iters (int): Number of iterations to run conjugate gradient
algorithm.
        residual_tol (float): Tolerance for convergence.
Returns:
torch.Tensor: Solution x* for equation Ax = b.
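    Example:
        A small sanity check (illustrative): solve a 2x2 symmetric positive
        definite system and verify the residual.
        >>> A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])
        >>> b = torch.tensor([9.0, 8.0])
        >>> x = _conjugate_gradient(lambda v: A @ v, b, cg_iters=10)
        >>> torch.allclose(A @ x, b, atol=1e-4)
        True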
"""
p = b.clone()
r = b.clone()
x = torch.zeros_like(b)
rdotr = torch.dot(r, r)
for _ in range(cg_iters):
z = f_Ax(p)
v = rdotr / torch.dot(p, z)
x += v * p
r -= v * z
newrdotr = torch.dot(r, r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
class ConjugateGradientOptimizer(Optimizer):
"""Performs constrained optimization via backtracking line search.
The search direction is computed using a conjugate gradient algorithm,
which gives x = A^{-1}g, where A is a second order approximation of the
constraint and g is the gradient of the loss function.
Args:
params (iterable): Iterable of parameters to optimize.
max_constraint_value (float): Maximum constraint value.
cg_iters (int): The number of CG iterations used to calculate A^-1 g
max_backtracks (int): Max number of iterations for backtrack
linesearch.
backtrack_ratio (float): backtrack ratio for backtracking line search.
hvp_reg_coeff (float): A small value so that A -> A + reg*I. It is
used by Hessian Vector Product calculation.
accept_violation (bool): whether to accept the descent step if it
violates the line search condition after exhausting all
backtracking budgets.
"""
def __init__(self,
params,
max_constraint_value,
cg_iters=10,
max_backtracks=15,
backtrack_ratio=0.8,
hvp_reg_coeff=1e-5,
accept_violation=False):
super().__init__(params, {})
self._max_constraint_value = max_constraint_value
self._cg_iters = cg_iters
self._max_backtracks = max_backtracks
self._backtrack_ratio = backtrack_ratio
self._hvp_reg_coeff = hvp_reg_coeff
self._accept_violation = accept_violation
def step(self, f_loss, f_constraint): # pylint: disable=arguments-differ
"""Take an optimization step.
Args:
f_loss (callable): Function to compute the loss.
f_constraint (callable): Function to compute the constraint value.
"""
# Collect trainable parameters and gradients
params = []
grads = []
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
params.append(p)
grads.append(p.grad.reshape(-1))
flat_loss_grads = torch.cat(grads)
# Build Hessian-vector-product function
f_Ax = _build_hessian_vector_product(f_constraint, params,
self._hvp_reg_coeff)
# Compute step direction
step_dir = _conjugate_gradient(f_Ax, flat_loss_grads, self._cg_iters)
# Replace nan with 0.
step_dir[step_dir.ne(step_dir)] = 0.
# Compute step size
step_size = np.sqrt(2.0 * self._max_constraint_value *
(1. /
(torch.dot(step_dir, f_Ax(step_dir)) + 1e-8)))
if np.isnan(step_size):
step_size = 1.
descent_step = step_size * step_dir
# Update parameters using backtracking line search
self._backtracking_line_search(params, descent_step, f_loss,
f_constraint)
@property
def state(self):
"""dict: The hyper-parameters of the optimizer."""
return {
'max_constraint_value': self._max_constraint_value,
'cg_iters': self._cg_iters,
'max_backtracks': self._max_backtracks,
'backtrack_ratio': self._backtrack_ratio,
'hvp_reg_coeff': self._hvp_reg_coeff,
'accept_violation': self._accept_violation,
}
@state.setter
def state(self, state):
        # _max_constraint_value doesn't have a default value in __init__.
        # The rest of these should match those default values.
        # These values should only actually get used when unpickling an
        # optimizer whose state was lost (see `__setstate__` below).
self._max_constraint_value = state.get('max_constraint_value', 0.01)
self._cg_iters = state.get('cg_iters', 10)
self._max_backtracks = state.get('max_backtracks', 15)
self._backtrack_ratio = state.get('backtrack_ratio', 0.8)
self._hvp_reg_coeff = state.get('hvp_reg_coeff', 1e-5)
self._accept_violation = state.get('accept_violation', False)
def __setstate__(self, state):
"""Restore the optimizer state.
Args:
state (dict): State dictionary.
"""
if 'hvp_reg_coeff' not in state['state']:
warnings.warn(
'Resuming ConjugateGradientOptimizer with lost state. '
'This behavior is fixed if pickling from metarl>=2020.02.0.')
self.defaults = state['defaults']
# Set the fields manually so that the setter gets called.
self.state = state['state']
self.param_groups = state['param_groups']
    def _backtracking_line_search(self, params, descent_step, f_loss,
                                  f_constraint):
        """Backtrack along the descent direction until the loss improves and
        the constraint is satisfied, or the backtracking budget runs out."""
prev_params = [p.clone() for p in params]
ratio_list = self._backtrack_ratio**np.arange(self._max_backtracks)
loss_before = f_loss()
param_shapes = [p.shape or torch.Size([1]) for p in params]
descent_step = unflatten_tensors(descent_step, param_shapes)
assert len(descent_step) == len(params)
for ratio in ratio_list:
for step, prev_param, param in zip(descent_step, prev_params,
params):
step = ratio * step
new_param = prev_param.data - step
param.data = new_param.data
loss = f_loss()
constraint_val = f_constraint()
if (loss < loss_before
and constraint_val <= self._max_constraint_value):
break
if ((torch.isnan(loss) or torch.isnan(constraint_val)
or loss >= loss_before
or constraint_val >= self._max_constraint_value)
and not self._accept_violation):
logger.log('Line search condition violated. Rejecting the step!')
if torch.isnan(loss):
logger.log('Violated because loss is NaN')
if torch.isnan(constraint_val):
logger.log('Violated because constraint is NaN')
if loss >= loss_before:
logger.log('Violated because loss not improving')
if constraint_val >= self._max_constraint_value:
logger.log('Violated because constraint is violated')
for prev, cur in zip(prev_params, params):
cur.data = prev.data
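# Minimal usage sketch with a toy quadratic loss. The surrogate constraint
# mimics a KL divergence: it evaluates to zero at the current parameters but
# has a non-degenerate Hessian, which is what the optimizer differentiates.
if __name__ == '__main__':
    theta = torch.tensor([1.0, -1.0], requires_grad=True)
    optimizer = ConjugateGradientOptimizer([theta], max_constraint_value=0.01)
    def f_loss():
        return (theta ** 2).sum()
    def f_constraint():
        return ((theta - theta.detach()) ** 2).sum()
    optimizer.zero_grad()
    f_loss().backward()
    optimizer.step(f_loss, f_constraint)
    print(theta.data)  # nudged slightly toward the origin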
| [
"torch.isnan",
"numpy.arange",
"metarl.misc.tensor_utils.unflatten_tensors",
"dowel.logger.log",
"torch.autograd.grad",
"numpy.isnan",
"torch.sum",
"warnings.warn",
"torch.zeros_like",
"torch.Size",
"torch.cat",
"torch.dot"
] | [((1129, 1178), 'torch.autograd.grad', 'torch.autograd.grad', (['f', 'params'], {'create_graph': '(True)'}), '(f, params, create_graph=True)\n', (1148, 1178), False, 'import torch\n'), ((2665, 2684), 'torch.zeros_like', 'torch.zeros_like', (['b'], {}), '(b)\n', (2681, 2684), False, 'import torch\n'), ((2697, 2712), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2706, 2712), False, 'import torch\n'), ((1475, 1514), 'metarl.misc.tensor_utils.unflatten_tensors', 'unflatten_tensors', (['vector', 'param_shapes'], {}), '(vector, param_shapes)\n', (1492, 1514), False, 'from metarl.misc.tensor_utils import unflatten_tensors\n'), ((2857, 2872), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2866, 2872), False, 'import torch\n'), ((5246, 5262), 'torch.cat', 'torch.cat', (['grads'], {}), '(grads)\n', (5255, 5262), False, 'import torch\n'), ((5847, 5866), 'numpy.isnan', 'np.isnan', (['step_size'], {}), '(step_size)\n', (5855, 5866), True, 'import numpy as np\n'), ((8144, 8189), 'metarl.misc.tensor_utils.unflatten_tensors', 'unflatten_tensors', (['descent_step', 'param_shapes'], {}), '(descent_step, param_shapes)\n', (8161, 8189), False, 'from metarl.misc.tensor_utils import unflatten_tensors\n'), ((1067, 1082), 'torch.Size', 'torch.Size', (['[1]'], {}), '([1])\n', (1077, 1082), False, 'import torch\n'), ((1748, 1815), 'torch.autograd.grad', 'torch.autograd.grad', (['grad_vector_product', 'params'], {'retain_graph': '(True)'}), '(grad_vector_product, params, retain_graph=True)\n', (1767, 1815), False, 'import torch\n'), ((2784, 2799), 'torch.dot', 'torch.dot', (['p', 'z'], {}), '(p, z)\n', (2793, 2799), False, 'import torch\n'), ((7416, 7554), 'warnings.warn', 'warnings.warn', (['"""Resuming ConjugateGradientOptimizer with lost state. This behavior is fixed if pickling from metarl>=2020.02.0."""'], {}), "(\n 'Resuming ConjugateGradientOptimizer with lost state. This behavior is fixed if pickling from metarl>=2020.02.0.'\n )\n", (7429, 7554), False, 'import warnings\n'), ((7989, 8020), 'numpy.arange', 'np.arange', (['self._max_backtracks'], {}), '(self._max_backtracks)\n', (7998, 8020), True, 'import numpy as np\n'), ((8956, 9021), 'dowel.logger.log', 'logger.log', (['"""Line search condition violated. Rejecting the step!"""'], {}), "('Line search condition violated. 
Rejecting the step!')\n", (8966, 9021), False, 'from dowel import logger\n'), ((9037, 9054), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (9048, 9054), False, 'import torch\n'), ((9130, 9157), 'torch.isnan', 'torch.isnan', (['constraint_val'], {}), '(constraint_val)\n', (9141, 9157), False, 'import torch\n'), ((1956, 1975), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (1972, 1975), False, 'import torch\n'), ((8088, 8103), 'torch.Size', 'torch.Size', (['[1]'], {}), '([1])\n', (8098, 8103), False, 'import torch\n'), ((8748, 8765), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (8759, 8765), False, 'import torch\n'), ((8769, 8796), 'torch.isnan', 'torch.isnan', (['constraint_val'], {}), '(constraint_val)\n', (8780, 8796), False, 'import torch\n'), ((9072, 9114), 'dowel.logger.log', 'logger.log', (['"""Violated because loss is NaN"""'], {}), "('Violated because loss is NaN')\n", (9082, 9114), False, 'from dowel import logger\n'), ((9175, 9223), 'dowel.logger.log', 'logger.log', (['"""Violated because constraint is NaN"""'], {}), "('Violated because constraint is NaN')\n", (9185, 9223), False, 'from dowel import logger\n'), ((9276, 9325), 'dowel.logger.log', 'logger.log', (['"""Violated because loss not improving"""'], {}), "('Violated because loss not improving')\n", (9286, 9325), False, 'from dowel import logger\n'), ((9403, 9456), 'dowel.logger.log', 'logger.log', (['"""Violated because constraint is violated"""'], {}), "('Violated because constraint is violated')\n", (9413, 9456), False, 'from dowel import logger\n'), ((1652, 1668), 'torch.sum', 'torch.sum', (['(g * x)'], {}), '(g * x)\n', (1661, 1668), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import tensorflow as tf
import numpy as np
import math
try:
import cPickle as pickle
except:
import pickle
import os
class BlockScramble:
def __init__( self, blockSize_filename ):
if( isinstance( blockSize_filename, str ) ):
self.load( blockSize_filename )
else:
self.blockSize = blockSize_filename
key = self.genKey()
self.setKey( key )
def setKey( self, key ):
self.key = key
self.rev = ( key > key.size/2 )
self.invKey = np.argsort(key)
def load( self, filename ):
fin = open(filename, 'rb')
self.blockSize, self.key = pickle.load( fin )
fin.close()
self.setKey( self.key )
def save( self, filename ): # pkl
fout = open(filename, 'wb')
pickle.dump( [self.blockSize, self.key], fout )
fout.close()
    def genKey( self ):
        # Two key entries per pixel value in a block: one for each 4-bit half
        # (doScramble splits every byte into two nibbles before permuting).
        num = self.blockSize[0] * self.blockSize[1] * self.blockSize[2]
        key = np.arange(num * 2, dtype=np.uint32)
        np.random.shuffle(key)
        return key
def padding( self, X ): # X is [datanum, width, height, channel]
s = X.shape
t = s[1] / self.blockSize[0]
d = t - math.floor(t)
if( d > 0 ):
paddingSize = self.blockSize[0] * ( math.floor(t) + 1 ) - s[1]
padding = X[:,-1:,:,:]
padding = np.tile( padding, (1, paddingSize, 1, 1 ) )
X = np.concatenate( (X, padding), axis = 1 )
t = s[2] / self.blockSize[1]
d = t - math.floor(t)
if( d > 0 ):
paddingSize = self.blockSize[1] * ( math.floor(t) + 1 ) - s[2]
padding = X[:,:,-1:,:]
padding = np.tile( padding, (1, 1, paddingSize, 1 ) )
X = np.concatenate( (X, padding), axis = 2 )
return X
def Scramble(self, X):
XX = (X * 255).astype(np.uint8)
XX = self.doScramble(XX, self.key, self.rev)
return XX.astype('float32')/255.0
def Decramble(self, X):
XX = (X * 255).astype(np.uint8)
XX = self.doScramble(XX, self.invKey, self.rev)
return XX.astype('float32')/255.0
def doScramble(self, X, ord, rev): # X should be uint8
s = X.shape
assert( X.dtype == np.uint8 )
assert( s[1] % self.blockSize[0] == 0 )
assert( s[2] % self.blockSize[1] == 0 )
assert( s[3] == self.blockSize[2] )
        numBlock = np.int32( [ s[1] / self.blockSize[0], s[2] / self.blockSize[1] ] )
        numCh = self.blockSize[2]
        X = np.reshape( X, ( s[0], numBlock[0], self.blockSize[0], numBlock[1], self.blockSize[1], numCh ) )
        X = np.transpose( X, (0, 1, 3, 2, 4, 5) )
        X = np.reshape( X, ( s[0], numBlock[0], numBlock[1], self.blockSize[0] * self.blockSize[1] * numCh ) )
        d = self.blockSize[0] * self.blockSize[1] * numCh
        # Split every byte into its two 4-bit halves so the key permutes
        # nibbles rather than whole pixels (hence the doubled key length).
        X0 = X & 0xF  # lower 4 bits (remainder of division by 16)
        X1 = X >> 4   # upper 4 bits (quotient of division by 16)
        X = np.concatenate( (X0, X1), axis=3 )
        # Flip the intensity of the nibbles selected by `rev` before and
        # after applying the permutation `ord`.
        X[:,:,:,rev] = ( 15 - X[:,:,:,rev].astype(np.int32) ).astype(np.uint8)
        X = X[:,:,:,ord]
        X[:,:,:,rev] = ( 15 - X[:,:,:,rev].astype(np.int32) ).astype(np.uint8)
        # Recombine the two halves into full bytes.
        X0 = X[:,:,:,:d]
        X1 = X[:,:,:,d:]
        X = ( X1 << 4 ) + X0
        X = np.reshape( X, ( s[0], numBlock[0], numBlock[1], self.blockSize[0], self.blockSize[1], numCh ) )
        X = np.transpose( X, ( 0, 1, 3, 2, 4, 5) )
        X = np.reshape( X, ( s[0], numBlock[0] * self.blockSize[0], numBlock[1] * self.blockSize[1], numCh ) )
return X
if( __name__ == '__main__' ):
from PIL import Image
    import scipy.misc
    from matplotlib import cm
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'  # use GPU with ID=1
    # TensorFlow session setup, disabled along with the commented-out
    # `import tensorflow as tf` at the top of the file:
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5  # allocate at most 50% of GPU MEM
    # config.gpu_options.allow_growth = True  # allocate dynamically
    # sess = tf.Session(config = config)
im = Image.open('lena.png')
data = np.asarray(im, dtype=np.uint8)
data = np.reshape( data, (1,)+data.shape )
print(data.shape)
key_file = 'key16/keys1.pkl'
if( os.path.exists(key_file) ):
bs = BlockScramble( key_file )
else:
bs = BlockScramble( [16,16,3] )
bs.save(key_file)
data = bs.padding( data )
print(data.shape)
im = Image.fromarray( data[0,:,:,:] )
im.save('test_bs1.png')
print(data.shape)
data = bs.Scramble( data )
print(data.shape)
#array_resized_image = data[0,:,:,:]
#scipy.misc.imsave("test_bs2.png", array_resized_image)
#im = Image.fromarray( data[0,:,:,:] ,mode='F')
im = Image.fromarray(np.uint8(cm.gist_earth(data[0,:,:,:],bytes=True))*255)
im.save('test_bs2.png')
data = bs.Decramble( data )
print(data.shape)
#array_resized_image = data[0,:,:,:]
#scipy.misc.imsave("test_bs3.png", array_resized_image)
im = Image.fromarray(np.uint8(cm.gist_earth(data[0,:,:,:],bytes=True))*255)
im.save('test_bs3.png')
| [
"os.path.exists",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.reshape",
"pickle.dump",
"numpy.tile",
"math.floor",
"numpy.int32",
"numpy.asarray",
"pickle.load",
"matplotlib.cm.gist_earth",
"numpy.argsort",
"numpy.concatenate",
"numpy.transpose",
"numpy.arange",
"numpy.random.shuff... | [((4260, 4282), 'PIL.Image.open', 'Image.open', (['"""lena.png"""'], {}), "('lena.png')\n", (4270, 4282), False, 'from PIL import Image\n'), ((4294, 4324), 'numpy.asarray', 'np.asarray', (['im'], {'dtype': 'np.uint8'}), '(im, dtype=np.uint8)\n', (4304, 4324), True, 'import numpy as np\n'), ((4336, 4371), 'numpy.reshape', 'np.reshape', (['data', '((1,) + data.shape)'], {}), '(data, (1,) + data.shape)\n', (4346, 4371), True, 'import numpy as np\n'), ((4445, 4469), 'os.path.exists', 'os.path.exists', (['key_file'], {}), '(key_file)\n', (4459, 4469), False, 'import os\n'), ((4659, 4692), 'PIL.Image.fromarray', 'Image.fromarray', (['data[0, :, :, :]'], {}), '(data[0, :, :, :])\n', (4674, 4692), False, 'from PIL import Image\n'), ((576, 591), 'numpy.argsort', 'np.argsort', (['key'], {}), '(key)\n', (586, 591), True, 'import numpy as np\n'), ((699, 715), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (710, 715), False, 'import pickle\n'), ((866, 911), 'pickle.dump', 'pickle.dump', (['[self.blockSize, self.key]', 'fout'], {}), '([self.blockSize, self.key], fout)\n', (877, 911), False, 'import pickle\n'), ((1056, 1091), 'numpy.arange', 'np.arange', (['(key * 2)'], {'dtype': 'np.uint32'}), '(key * 2, dtype=np.uint32)\n', (1065, 1091), True, 'import numpy as np\n'), ((1098, 1120), 'numpy.random.shuffle', 'np.random.shuffle', (['key'], {}), '(key)\n', (1115, 1120), True, 'import numpy as np\n'), ((2588, 2650), 'numpy.int32', 'np.int32', (['[s[1] / self.blockSize[0], s[2] / self.blockSize[1]]'], {}), '([s[1] / self.blockSize[0], s[2] / self.blockSize[1]])\n', (2596, 2650), True, 'import numpy as np\n'), ((2712, 2809), 'numpy.reshape', 'np.reshape', (['X', '(s[0], numBlock[0], self.blockSize[0], numBlock[1], self.blockSize[1], numCh)'], {}), '(X, (s[0], numBlock[0], self.blockSize[0], numBlock[1], self.\n blockSize[1], numCh))\n', (2722, 2809), True, 'import numpy as np\n'), ((2822, 2857), 'numpy.transpose', 'np.transpose', (['X', '(0, 1, 3, 2, 4, 5)'], {}), '(X, (0, 1, 3, 2, 4, 5))\n', (2834, 2857), True, 'import numpy as np\n'), ((2872, 2971), 'numpy.reshape', 'np.reshape', (['X', '(s[0], numBlock[0], numBlock[1], self.blockSize[0] * self.blockSize[1] * numCh)'], {}), '(X, (s[0], numBlock[0], numBlock[1], self.blockSize[0] * self.\n blockSize[1] * numCh))\n', (2882, 2971), True, 'import numpy as np\n'), ((3190, 3222), 'numpy.concatenate', 'np.concatenate', (['(X0, X1)'], {'axis': '(3)'}), '((X0, X1), axis=3)\n', (3204, 3222), True, 'import numpy as np\n'), ((3537, 3634), 'numpy.reshape', 'np.reshape', (['X', '(s[0], numBlock[0], numBlock[1], self.blockSize[0], self.blockSize[1], numCh)'], {}), '(X, (s[0], numBlock[0], numBlock[1], self.blockSize[0], self.\n blockSize[1], numCh))\n', (3547, 3634), True, 'import numpy as np\n'), ((3646, 3681), 'numpy.transpose', 'np.transpose', (['X', '(0, 1, 3, 2, 4, 5)'], {}), '(X, (0, 1, 3, 2, 4, 5))\n', (3658, 3681), True, 'import numpy as np\n'), ((3697, 3796), 'numpy.reshape', 'np.reshape', (['X', '(s[0], numBlock[0] * self.blockSize[0], numBlock[1] * self.blockSize[1], numCh)'], {}), '(X, (s[0], numBlock[0] * self.blockSize[0], numBlock[1] * self.\n blockSize[1], numCh))\n', (3707, 3796), True, 'import numpy as np\n'), ((1300, 1313), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (1310, 1313), False, 'import math\n'), ((1467, 1507), 'numpy.tile', 'np.tile', (['padding', '(1, paddingSize, 1, 1)'], {}), '(padding, (1, paddingSize, 1, 1))\n', (1474, 1507), True, 'import numpy as np\n'), ((1527, 1563), 'numpy.concatenate', 
'np.concatenate', (['(X, padding)'], {'axis': '(1)'}), '((X, padding), axis=1)\n', (1541, 1563), True, 'import numpy as np\n'), ((1622, 1635), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (1632, 1635), False, 'import math\n'), ((1789, 1829), 'numpy.tile', 'np.tile', (['padding', '(1, 1, paddingSize, 1)'], {}), '(padding, (1, 1, paddingSize, 1))\n', (1796, 1829), True, 'import numpy as np\n'), ((1849, 1885), 'numpy.concatenate', 'np.concatenate', (['(X, padding)'], {'axis': '(2)'}), '((X, padding), axis=2)\n', (1863, 1885), True, 'import numpy as np\n'), ((4982, 5025), 'matplotlib.cm.gist_earth', 'cm.gist_earth', (['data[0, :, :, :]'], {'bytes': '(True)'}), '(data[0, :, :, :], bytes=True)\n', (4995, 5025), False, 'from matplotlib import cm\n'), ((5246, 5289), 'matplotlib.cm.gist_earth', 'cm.gist_earth', (['data[0, :, :, :]'], {'bytes': '(True)'}), '(data[0, :, :, :], bytes=True)\n', (5259, 5289), False, 'from matplotlib import cm\n'), ((1383, 1396), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (1393, 1396), False, 'import math\n'), ((1705, 1718), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (1715, 1718), False, 'import math\n')] |
"""Abstract baseclass for all distributions."""
import logging
import numpy
import chaospy
from .utils import check_dependencies
class Distribution(object):
"""Baseclass for all probability distributions."""
__array_priority__ = 9000
"""Numpy override variable."""
interpret_as_integer = False
"""
Flag indicating that return value from the methods sample, and inv
should be interpreted as integers instead of floating point.
"""
@property
def stochastic_dependent(self):
"""True if distribution contains stochastically dependent components."""
return any(len(deps) > 1 for deps in self._dependencies)
def __init__(
self,
parameters,
dependencies,
rotation=None,
exclusion=None,
repr_args=None,
):
"""
Distribution initializer.
In addition to assigning some object variables, also checks for
some consistency issues.
Args:
            parameters (Optional[Dict[str, Union[ndarray, Distribution]]]):
Collection of model parameters.
dependencies (Optional[Sequence[Set[int]]]):
Dependency identifiers. One collection for each dimension.
rotation (Optional[Sequence[int]]):
The order of which to resolve dependencies.
exclusion (Optional[Sequence[int]]):
Distributions that has been "taken out of play" and
therefore can not be reused other places in the
dependency hierarchy.
repr_args (Optional[Sequence[str]]):
Positional arguments to place in the object string
representation. The repr output will then be:
`<class name>(<arg1>, <arg2>, ...)`.
Raises:
StochasticallyDependentError:
For dependency structures that can not later be
                rectified. This includes under-defined
                distributions, and inclusion of distributions that
                should have been excluded.
"""
assert isinstance(parameters, dict)
self._parameters = parameters
self._dependencies = list(dependencies)
if rotation is None:
rotation = sorted(enumerate(self._dependencies), key=lambda x: len(x[1]))
rotation = [key for key, _ in rotation]
rotation = list(rotation)
assert len(set(rotation)) == len(dependencies)
assert min(rotation) == 0
assert max(rotation) == len(dependencies)-1
self._rotation = rotation
if exclusion is None:
exclusion = set()
self._exclusion = set(exclusion)
if repr_args is None:
repr_args = ("{}={}".format(key, self._parameters[key])
for key in sorted(self._parameters))
self._repr_args = list(repr_args)
self._mom_cache = {(0,)*len(dependencies): 1.}
self._ttr_cache = {}
self._indices = {}
self._all_dependencies = {dep for deps in self._dependencies for dep in deps}
if len(self._all_dependencies) < len(dependencies):
raise chaospy.StochasticallyDependentError(
"%s is an under-defined probability distribution." % self)
for key, param in list(parameters.items()):
if isinstance(param, Distribution):
if self._all_dependencies.intersection(param._exclusion):
raise chaospy.StochasticallyDependentError((
"%s contains dependencies that can not also exist "
"other places in the dependency hierarchy") % param)
self._exclusion.update(param._exclusion)
else:
self._parameters[key] = numpy.asarray(param)
def get_parameters(self, idx, cache, assert_numerical=True):
"""Get distribution parameters."""
del assert_numerical
out = self._parameters.copy()
assert isinstance(cache, dict)
if idx is not None:
assert not isinstance(idx, dict), idx
assert idx == int(idx), idx
assert "idx" not in out
assert "cache" not in out
out["cache"] = cache
out["idx"] = idx
return out
@property
def lower(self):
"""Lower bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_lower(idx, cache=cache)
return out
def _get_lower(self, idx, cache):
"""In-processes function for getting lower bounds."""
if (idx, self) in cache:
return cache[idx, self][0]
if hasattr(self, "get_lower_parameters"):
parameters = self.get_lower_parameters(idx, cache)
else:
parameters = self.get_parameters(idx, cache, assert_numerical=False)
out = self._lower(**parameters)
assert not isinstance(out, Distribution), (self, out)
out = numpy.atleast_1d(out)
assert out.ndim == 1, (self, out, cache)
cache[idx, self] = (out, None)
return out
def _lower(self, **kwargs): # pragma: no cover
"""Backend lower bound."""
raise chaospy.UnsupportedFeature("lower not supported")
@property
def upper(self):
"""Upper bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_upper(idx, cache=cache)
return out
def _get_upper(self, idx, cache):
"""In-processes function for getting upper bounds."""
if (idx, self) in cache:
return cache[idx, self][0]
if hasattr(self, "get_upper_parameters"):
parameters = self.get_upper_parameters(idx, cache)
else:
parameters = self.get_parameters(idx, cache, assert_numerical=False)
out = self._upper(**parameters)
assert not isinstance(out, Distribution), (self, out)
out = numpy.atleast_1d(out)
assert out.ndim == 1, (self, out, cache)
cache[idx, self] = (out, None)
size = max([elem[0].size for elem in cache.values()])
assert all([elem[0].size in (1, size) for elem in cache.values()])
return out
def _upper(self, **kwargs): # pragma: no cover
"""Backend upper bound."""
        raise chaospy.UnsupportedFeature("upper not supported")
def fwd(self, x_data):
"""
Forward Rosenblatt transformation.
Args:
x_data (numpy.ndarray):
Location for the distribution function. ``x_data.shape`` must
be compatible with distribution shape.
Returns:
(numpy.ndarray):
Evaluated distribution function values, where
``out.shape==x_data.shape``.
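        Example:
            For the standard normal distribution, the forward mapping
            coincides with the Gaussian cumulative distribution function:
            >>> chaospy.Normal(0, 1).fwd([-1, 0, 1]).round(3)
            array([0.159, 0.5  , 0.841])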
"""
logger = logging.getLogger(__name__)
check_dependencies(self)
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)
cache = {}
q_data = numpy.zeros(x_data.shape)
for idx in self._rotation:
q_data[idx] = self._get_fwd(x_data[idx], idx, cache)
indices = (q_data > 1) | (q_data < 0)
if numpy.any(indices): # pragma: no cover
logger.debug("%s.fwd: %d/%d outputs out of bounds",
self, numpy.sum(indices), len(indices))
q_data = numpy.clip(q_data, a_min=0, a_max=1)
q_data = q_data.reshape(shape)
return q_data
def _get_fwd(self, x_data, idx, cache):
"""In-process function for getting cdf-values."""
logger = logging.getLogger(__name__)
assert (idx, self) not in cache, "repeated evaluation"
lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), x_data.shape)
upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), x_data.shape)
parameters = self.get_parameters(idx, cache, assert_numerical=True)
ret_val = self._cdf(x_data, **parameters)
assert not isinstance(ret_val, Distribution), (self, ret_val)
out = numpy.zeros(x_data.shape)
out[:] = ret_val
indices = x_data < lower
if numpy.any(indices):
logger.debug("%s.fwd: %d/%d inputs below bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, 0, out)
indices = x_data > upper
if numpy.any(indices):
logger.debug("%s.fwd: %d/%d inputs above bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, 1, out)
        # Note: `|` makes this assert vacuous; out-of-range values are
        # instead logged and clipped by `fwd` after this call.
        assert numpy.all((out >= 0) | (out <= 1))
cache[idx, self] = (x_data, out)
assert out.ndim == 1, (self, out, cache)
return out
def cdf(self, x_data):
"""
Cumulative distribution function.
Note that chaospy only supports cumulative distribution functions for
stochastically independent distributions.
Args:
x_data (numpy.ndarray):
Location for the distribution function. Assumes that
``len(x_data) == len(distribution)``.
Returns:
(numpy.ndarray):
Evaluated distribution function values, where output has shape
``x_data.shape`` in one dimension and ``x_data.shape[1:]`` in
higher dimensions.
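        Example:
            For a multivariate distribution the component probabilities are
            multiplied together:
            >>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
            >>> dist.cdf([[0.5, 0.5], [0.2, 0.8]]).round(2)
            array([0.1, 0.4])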
"""
check_dependencies(self)
if self.stochastic_dependent:
raise chaospy.StochasticallyDependentError(
"Cumulative distribution does not support dependencies.")
x_data = numpy.asarray(x_data)
if self.interpret_as_integer:
x_data = x_data+0.5
q_data = self.fwd(x_data)
if len(self) > 1:
q_data = numpy.prod(q_data, 0)
return q_data
def inv(self, q_data, max_iterations=100, tollerance=1e-5):
"""
Inverse Rosenblatt transformation.
If possible the transformation is done analytically. If not possible,
transformation is approximated using an algorithm that alternates
between Newton-Raphson and binary search.
Args:
q_data (numpy.ndarray):
                Probabilities to be inverted. If any values are outside ``[0,
                1]``, an error will be raised. ``q_data.shape`` must be compatible
with distribution shape.
max_iterations (int):
If approximation is used, this sets the maximum number of
allowed iterations in the Newton-Raphson algorithm.
tollerance (float):
                If approximation is used, this sets the error tolerance level
required to define a sample as converged.
Returns:
(numpy.ndarray):
Inverted probability values where
``out.shape == q_data.shape``.
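        Example:
            For the uniform distribution on the unit interval, the inverse
            transformation is the identity:
            >>> chaospy.Uniform(0, 1).inv([0.1, 0.5, 0.9]).round(2)
            array([0.1, 0.5, 0.9])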
"""
        check_dependencies(self)
q_data = numpy.asfarray(q_data)
assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!"
shape = q_data.shape
q_data = q_data.reshape(len(self), -1)
cache = {}
x_data = numpy.zeros(q_data.shape)
for idx in self._rotation:
x_data[idx] = self._get_inv(q_data[idx], idx, cache)
x_data = x_data.reshape(shape)
return x_data
def _get_inv(self, q_data, idx, cache):
"""In-process function for getting ppf-values."""
logger = logging.getLogger(__name__)
assert numpy.all(q_data <= 1) and numpy.all(q_data >= 0)
assert q_data.ndim == 1
if (idx, self) in cache:
return cache[idx, self][0]
lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), q_data.shape)
upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), q_data.shape)
try:
parameters = self.get_parameters(idx, cache, assert_numerical=True)
ret_val = self._ppf(q_data, **parameters)
except chaospy.UnsupportedFeature:
ret_val = chaospy.approximate_inverse(
self, idx, q_data, cache=cache)
assert not isinstance(ret_val, Distribution), (self, ret_val)
out = numpy.zeros(q_data.shape)
out[:] = ret_val
indices = out < lower
if numpy.any(indices):
logger.debug("%s.inv: %d/%d outputs below bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, lower, out)
indices = out > upper
if numpy.any(indices):
logger.debug("%s.inv: %d/%d outputs above bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, upper, out)
assert out.ndim == 1
cache[idx, self] = (out, q_data)
assert out.ndim == 1, (self, out, cache)
return out
def _ppf(self, xloc, **kwargs):
raise chaospy.UnsupportedFeature(
"%s: does not support analytical ppf." % self)
def pdf(self, x_data, decompose=False, allow_approx=True, step_size=1e-7):
"""
Probability density function.
If possible the density will be calculated analytically. If not
possible, it will be approximated by approximating the one-dimensional
derivative of the forward Rosenblatt transformation and multiplying the
component parts. Note that even if the distribution is multivariate,
each component of the Rosenblatt is one-dimensional.
Args:
x_data (numpy.ndarray):
Location for the density function. If multivariate,
`len(x_data) == len(self)` is required.
decompose (bool):
Decompose multivariate probability density `p(x), p(y|x), ...`
instead of multiplying them together into `p(x, y, ...)`.
allow_approx (bool):
Allow the density to be estimated using numerical derivative of
forward mapping if analytical approach fails. Raises error
instead if false.
step_size (float):
The relative step size between two points used to calculate the
derivative, assuming approximation is being used.
Raises:
chaospy.UnsupportedFeature:
If analytical calculation is not possible and `allow_approx` is
false.
Returns:
(numpy.ndarray):
Evaluated density function evaluated in `x_data`. If decompose,
`output.shape == x_data.shape`, else if multivariate the first
dimension is multiplied together.
Example:
>>> chaospy.Gamma(2).pdf([1, 2, 3, 4, 5]).round(3)
array([0.368, 0.271, 0.149, 0.073, 0.034])
>>> dist = chaospy.Iid(chaospy.Normal(0, 1), 2)
>>> grid = numpy.mgrid[-1.5:2, -1.5:2]
>>> dist.pdf(grid).round(3)
array([[0.017, 0.046, 0.046, 0.017],
[0.046, 0.124, 0.124, 0.046],
[0.046, 0.124, 0.124, 0.046],
[0.017, 0.046, 0.046, 0.017]])
>>> dist.pdf(grid, decompose=True).round(3)
array([[[0.13 , 0.13 , 0.13 , 0.13 ],
[0.352, 0.352, 0.352, 0.352],
[0.352, 0.352, 0.352, 0.352],
[0.13 , 0.13 , 0.13 , 0.13 ]],
<BLANKLINE>
[[0.13 , 0.352, 0.352, 0.13 ],
[0.13 , 0.352, 0.352, 0.13 ],
[0.13 , 0.352, 0.352, 0.13 ],
[0.13 , 0.352, 0.352, 0.13 ]]])
"""
logger = logging.getLogger(__name__)
check_dependencies(self)
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)
f_data = numpy.zeros(x_data.shape)
cache = {}
for idx in self._rotation:
try:
cache_ = cache.copy()
f_data[idx] = self._get_pdf(x_data[idx], idx, cache)
except chaospy.UnsupportedFeature:
if allow_approx:
logger.info(
"%s: has stochastic dependencies; "
"Approximating density with numerical derivative.", str(self)
)
cache = cache_
f_data[idx] = chaospy.approximate_density(
self, idx, x_data[idx], cache=cache, step_size=step_size)
else:
raise
f_data = f_data.reshape(shape)
if len(self) > 1 and not decompose:
f_data = numpy.prod(f_data, 0)
return f_data
def _get_pdf(self, x_data, idx, cache):
"""In-process function for getting pdf-values."""
logger = logging.getLogger(__name__)
assert x_data.ndim == 1
if (idx, self) in cache:
return cache[idx, self][1]
lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), x_data.shape)
upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), x_data.shape)
parameters = self.get_parameters(idx, cache, assert_numerical=True)
ret_val = self._pdf(x_data, **parameters)
assert not isinstance(ret_val, Distribution), (self, ret_val)
out = numpy.zeros(x_data.shape)
out[:] = ret_val
indices = (x_data < lower) | (x_data > upper)
if numpy.any(indices):
logger.debug("%s.fwd: %d/%d inputs out of bounds",
self, numpy.sum(indices), len(indices))
logger.debug("%s[%s]: %s - %s - %s", self, idx, lower, x_data, upper)
out = numpy.where(indices, 0, ret_val)
if self in cache:
out = numpy.where(x_data == cache[self][0], out, 0)
cache[idx, self] = (x_data, out)
assert out.ndim == 1, (self, out, cache)
return out
def _pdf(self, xloc, **kwargs):
raise chaospy.UnsupportedFeature(
"%s: does not support analytical pdf." % self)
def sample(self, size=(), rule="random", antithetic=None, include_axis_dim=False):
"""
Create pseudo-random generated samples.
By default, the samples are created using standard (pseudo-)random
samples. However, if needed, the samples can also be created by either
low-discrepancy sequences, and/or variance reduction techniques.
Changing the sampling scheme, use the following ``rule`` flag:
---------------------- -------------------------------------------
key description
---------------------- -------------------------------------------
``additive_recursion`` Modulus of golden ratio samples.
``chebyshev`` Roots of first order Chebyshev polynomials.
``grid`` Regular spaced grid.
``halton`` Halton low-discrepancy sequence.
``hammersley`` Hammersley low-discrepancy sequence.
``korobov`` Korobov lattice.
``latin_hypercube`` Latin hypercube samples.
``nested_chebyshev`` Chebyshev nodes adjusted to ensure nested.
``nested_grid`` Nested regular spaced grid.
``random`` Classical (Pseudo-)Random samples.
``sobol`` Sobol low-discrepancy sequence.
---------------------- -------------------------------------------
All samples are created on the ``[0, 1]``-hypercube, which then is
mapped into the domain of the distribution using the inverse Rosenblatt
transformation.
Args:
size (numpy.ndarray):
The size of the samples to generate.
rule (str):
Indicator defining the sampling scheme.
antithetic (bool, numpy.ndarray):
If provided, will be used to setup antithetic variables. If
array, defines the axes to mirror.
include_axis_dim (bool):
By default an extra dimension even if the number of dimensions
is 1.
Returns:
(numpy.ndarray):
Random samples with ``self.shape``. An extra dimension might be
added to the front if either ``len(dist) > 1`` or
``include_axis_dim=True``.
"""
check_dependencies(self)
size_ = numpy.prod(size, dtype=int)
dim = len(self)
shape = ((size,) if isinstance(size, (int, float, numpy.number)) else tuple(size))
shape = (-1,)+shape[1:]
shape = shape if dim == 1 and not include_axis_dim else (dim,)+shape
from chaospy.distributions import sampler
out = sampler.generator.generate_samples(
order=size_, domain=self, rule=rule, antithetic=antithetic)
for idx, dist in enumerate(self):
if dist.interpret_as_integer:
out[idx] = numpy.round(out[idx])
if self.interpret_as_integer:
out = numpy.round(out).astype(int)
out = out.reshape(shape)
return out
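    # Added sketch (hedged, not part of the original source): the ``rule``
    # flag selects the sampler backend, e.g. ``dist.sample(100, rule="sobol")``
    # draws a Sobol sequence and ``dist.sample(100, rule="latin_hypercube")``
    # a Latin hypercube; both are mapped through the inverse Rosenblatt
    # transformation afterwards.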
def mom(self, K, allow_approx=True, **kwargs):
"""
Raw statistical moments.
        Creates non-centralized raw moments from the random variable. If
        analytical options can not be utilized, quadrature integration
        will be used as a fallback.
        Args:
            K (numpy.ndarray):
                Index of the raw moments. ``K.shape`` must be compatible
                with the distribution shape.
allow_approx (bool):
Allow the moments to be calculated using quadrature integration
if analytical approach fails. Raises error instead if false.
kwargs (Any):
Arguments passed to :func:`chaospy.approximate_moment` if
approximation is used.
Raises:
chaospy.UnsupportedFeature:
If analytical calculation is not possible and `allow_approx` is
false.
Returns:
(numpy.ndarray):
Shapes are related through the identity
``k.shape == dist.shape+k.shape``.
"""
logger = logging.getLogger(__name__)
K = numpy.asarray(K, dtype=int)
assert numpy.all(K >= 0)
shape = K.shape
dim = len(self)
if dim > 1:
assert len(self) == shape[0]
shape = shape[1:]
size = int(K.size/dim)
K = K.reshape(dim, size)
try:
out = [self._get_mom(kdata) for kdata in K.T]
logger.debug("%s: moment calculated successfully", str(self))
except chaospy.UnsupportedFeature:
if allow_approx:
logger.info(
"%s: has stochastic dependencies; "
"Approximating moments with quadrature.", str(self))
out = [chaospy.approximate_moment(self, kdata) for kdata in K.T]
            else:
                raise
out = numpy.array(out)
assert out.size == numpy.prod(shape), (out, shape)
return out.reshape(shape)
def _get_mom(self, kdata):
"""In-process function for getting moments."""
if tuple(kdata) in self._mom_cache:
return self._mom_cache[tuple(kdata)]
if hasattr(self, "get_mom_parameters"):
parameters = self.get_mom_parameters()
else:
parameters = self.get_parameters(idx=None, cache={}, assert_numerical=False)
assert "idx" not in parameters, (self, parameters)
ret_val = float(self._mom(kdata, **parameters))
assert not isinstance(ret_val, Distribution), (self, ret_val)
self._mom_cache[tuple(kdata)] = ret_val
return ret_val
def _mom(self, kloc, **kwargs):
raise chaospy.UnsupportedFeature(
"moments not supported for this distribution")
def ttr(self, kloc):
"""
Three terms relation's coefficient generator.
Args:
            kloc (numpy.ndarray, int):
The order of the coefficients.
Returns:
            (Recurrence coefficients):
                Where out[0] is the first (A) and out[1] is the second (B)
                coefficient, with ``out.shape == (2,)+kloc.shape``.
"""
check_dependencies(self)
kloc = numpy.asarray(kloc, dtype=int)
shape = kloc.shape
kloc = kloc.reshape(len(self), -1)
out = numpy.zeros((2,)+kloc.shape)
for idy, kloc_ in enumerate(kloc.T):
for idx in range(len(self)):
out[:, idx, idy] = self._get_ttr(kloc_[idx], idx)
return out.reshape((2,)+shape)
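    # Added note (hedged): the returned pairs are the coefficients of the
    # monic three-terms recurrence, i.e. the orthogonal polynomials satisfy
    # ``P[k+1](x) = (x - A[k])*P[k](x) - B[k]*P[k-1](x)``.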
def _get_ttr(self, kdata, idx):
"""In-process function for getting TTR-values."""
if (idx, kdata) in self._ttr_cache:
return self._ttr_cache[idx, kdata]
if hasattr(self, "get_ttr_parameters"):
parameters = self.get_ttr_parameters(idx)
else:
parameters = self.get_parameters(idx, cache={}, assert_numerical=True)
alpha, beta = self._ttr(kdata, **parameters)
assert not isinstance(alpha, Distribution), (self, alpha)
assert not isinstance(beta, Distribution), (self, beta)
alpha = numpy.asfarray(alpha).item()
beta = numpy.asfarray(beta).item()
self._ttr_cache[idx, kdata] = (alpha, beta)
return alpha, beta
def _ttr(self, kloc, **kwargs):
raise chaospy.UnsupportedFeature(
"three terms recursion not supported for this distribution")
def _get_cache(self, idx, cache, get=None):
"""
In-process function for getting cached values.
Each time a distribution has been processed, the input and output
values are stored in the cache.
This checks if a distribution has been processed before and return a
cache value if it is.
The cached values are as follows:
----------- ------------- -------------
Context Get 0 Get 1
----------- ------------- -------------
pdf Input values Output values
cdf/fwd Input values Output values
ppf/inv Output values Input values
lower/upper Output values N/A
----------- ------------- -------------
Args:
idx (int):
Which dimension to get cache from.
            cache (Dict[Distribution, Tuple[numpy.ndarray, numpy.ndarray]]):
                Collection of cached values. Keys are distributions that have
                been processed earlier; values consist of up to two cached
                values.
get (int):
Which cache to retrieve.
Returns:
(numpy.ndarray, Distribution):
The content of the cache, if any. Else return self.
"""
if (idx, self) in cache:
assert get in (0, 1)
out = cache[idx, self][get]
else:
out = self._cache(idx=idx, cache=cache, get=get)
return out
def _cache(self, idx, cache, get):
"""Backend function of retrieving cache values."""
return self
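    # Added example (hedged): after a pdf evaluation, ``cache[idx, dist]``
    # holds the pair ``(x_values, pdf_values)``; following the table in
    # ``_get_cache``, ``get=0`` then retrieves the inputs and ``get=1`` the
    # outputs.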
def __getitem__(self, index):
if isinstance(index, numpy.number):
assert index.dtype == int
index = int(index)
if isinstance(index, int):
if not -len(self) < index < len(self):
raise IndexError("index out of bounds: %s" % index)
if index < 0:
index += len(self)
return chaospy.ItemDistribution(int(index), self)
if isinstance(index, slice):
start = 0 if index.start is None else index.start
stop = len(self) if index.stop is None else index.stop
step = 1 if index.step is None else index.step
return chaospy.J(*[self[idx] for idx in range(start, stop, step)])
raise IndexError("unrecognized key: %s" % repr(index))
def __iter__(self):
for idx in range(len(self)):
yield self[idx]
def __len__(self):
"""Distribution length."""
return len(self._dependencies)
def __repr__(self):
"""Distribution repr function."""
args = ", ".join([str(arg) for arg in self._repr_args])
return "{}({})".format(self.__class__.__name__, args)
def __str__(self):
"""Distribution str function."""
return repr(self)
def __add__(self, X):
"""Y.__add__(X) <==> X+Y"""
return chaospy.Add(self, X)
def __radd__(self, X):
"""Y.__radd__(X) <==> Y+X"""
return chaospy.Add(self, X)
def __sub__(self, X):
"""Y.__sub__(X) <==> X-Y"""
return chaospy.Add(self, -X)
def __rsub__(self, X):
"""Y.__rsub__(X) <==> Y-X"""
return chaospy.Add(X, -self)
def __neg__(self):
"""X.__neg__() <==> -X"""
return chaospy.Negative(self)
def __mul__(self, X):
"""Y.__mul__(X) <==> X*Y"""
return chaospy.Multiply(self, X)
def __rmul__(self, X):
"""Y.__rmul__(X) <==> Y*X"""
return chaospy.Multiply(X, self)
def __div__(self, X):
"""Y.__div__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rdiv__(self, X):
"""Y.__rdiv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __floordiv__(self, X):
"""Y.__floordiv__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rfloordiv__(self, X):
"""Y.__rfloordiv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __truediv__(self, X):
"""Y.__truediv__(X) <==> Y/X"""
return chaospy.Multiply(self, X**-1)
def __rtruediv__(self, X):
"""Y.__rtruediv__(X) <==> X/Y"""
return chaospy.Multiply(X, self**-1)
def __pow__(self, X):
"""Y.__pow__(X) <==> Y**X"""
return chaospy.Power(self, X)
def __rpow__(self, X):
"""Y.__rpow__(X) <==> X**Y"""
return chaospy.Power(X, self)
def __eq__(self, other):
if not isinstance(other, Distribution):
return False
if len(other) != len(self):
return False
if len(self) > 1:
            return all(dist1 == dist2 for dist1, dist2 in zip(self, other))
if isinstance(self, chaospy.ItemDistribution) and isinstance(other, chaospy.ItemDistribution):
return (self._parameters["index"] == other._parameters["index"] and
self._parameters["parent"] is other._parameters["parent"])
return self is other
def __hash__(self):
return id(self)
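# Added usage sketch (hedged, not part of the original file): assuming the
# chaospy package is installed, the public methods above can be exercised as:
if __name__ == "__main__":
    import chaospy
    dist = chaospy.Normal(0, 1)
    samples = dist.sample(4, rule="halton")  # low-discrepancy samples
    second_moment = dist.mom(2)              # E[X**2] == 1 for N(0, 1)
    alpha, beta = dist.ttr(0)                # first recurrence coefficients
    print(samples, second_moment, alpha, beta)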
| [
"logging.getLogger",
"numpy.prod",
"numpy.clip",
"chaospy.StochasticallyDependentError",
"numpy.asfarray",
"numpy.array",
"chaospy.approximate_moment",
"chaospy.Negative",
"chaospy.Power",
"chaospy.UnsupportedFeature",
"chaospy.approximate_inverse",
"numpy.where",
"numpy.asarray",
"numpy.r... | [((5076, 5097), 'numpy.atleast_1d', 'numpy.atleast_1d', (['out'], {}), '(out)\n', (5092, 5097), False, 'import numpy\n'), ((5307, 5356), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (['"""lower not supported"""'], {}), "('lower not supported')\n", (5333, 5356), False, 'import chaospy\n'), ((6105, 6126), 'numpy.atleast_1d', 'numpy.atleast_1d', (['out'], {}), '(out)\n', (6121, 6126), False, 'import numpy\n'), ((6473, 6522), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (['"""lower not supported"""'], {}), "('lower not supported')\n", (6499, 6522), False, 'import chaospy\n'), ((6973, 7000), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6990, 7000), False, 'import logging\n'), ((7051, 7073), 'numpy.asfarray', 'numpy.asfarray', (['x_data'], {}), '(x_data)\n', (7065, 7073), False, 'import numpy\n'), ((7186, 7211), 'numpy.zeros', 'numpy.zeros', (['x_data.shape'], {}), '(x_data.shape)\n', (7197, 7211), False, 'import numpy\n'), ((7370, 7388), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (7379, 7388), False, 'import numpy\n'), ((7779, 7806), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7796, 7806), False, 'import logging\n'), ((8262, 8287), 'numpy.zeros', 'numpy.zeros', (['x_data.shape'], {}), '(x_data.shape)\n', (8273, 8287), False, 'import numpy\n'), ((8357, 8375), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (8366, 8375), False, 'import numpy\n'), ((8595, 8613), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (8604, 8613), False, 'import numpy\n'), ((8804, 8838), 'numpy.all', 'numpy.all', (['((out >= 0) | (out <= 1))'], {}), '((out >= 0) | (out <= 1))\n', (8813, 8838), False, 'import numpy\n'), ((9803, 9824), 'numpy.asarray', 'numpy.asarray', (['x_data'], {}), '(x_data)\n', (9816, 9824), False, 'import numpy\n'), ((11108, 11135), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11125, 11135), False, 'import logging\n'), ((11186, 11208), 'numpy.asfarray', 'numpy.asfarray', (['q_data'], {}), '(q_data)\n', (11200, 11208), False, 'import numpy\n'), ((11224, 11264), 'numpy.all', 'numpy.all', (['((q_data >= 0) & (q_data <= 1))'], {}), '((q_data >= 0) & (q_data <= 1))\n', (11233, 11264), False, 'import numpy\n'), ((11402, 11427), 'numpy.zeros', 'numpy.zeros', (['q_data.shape'], {}), '(q_data.shape)\n', (11413, 11427), False, 'import numpy\n'), ((11710, 11737), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11727, 11737), False, 'import logging\n'), ((12463, 12488), 'numpy.zeros', 'numpy.zeros', (['q_data.shape'], {}), '(q_data.shape)\n', (12474, 12488), False, 'import numpy\n'), ((12556, 12574), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (12565, 12574), False, 'import numpy\n'), ((12797, 12815), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (12806, 12815), False, 'import numpy\n'), ((13186, 13259), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (["('%s: does not support analytical ppf.' % self)"], {}), "('%s: does not support analytical ppf.' 
% self)\n", (13212, 13259), False, 'import chaospy\n'), ((15943, 15970), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (15960, 15970), False, 'import logging\n'), ((16021, 16043), 'numpy.asfarray', 'numpy.asfarray', (['x_data'], {}), '(x_data)\n', (16035, 16043), False, 'import numpy\n'), ((16137, 16162), 'numpy.zeros', 'numpy.zeros', (['x_data.shape'], {}), '(x_data.shape)\n', (16148, 16162), False, 'import numpy\n'), ((17119, 17146), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (17136, 17146), False, 'import logging\n'), ((17644, 17669), 'numpy.zeros', 'numpy.zeros', (['x_data.shape'], {}), '(x_data.shape)\n', (17655, 17669), False, 'import numpy\n'), ((17761, 17779), 'numpy.any', 'numpy.any', (['indices'], {}), '(indices)\n', (17770, 17779), False, 'import numpy\n'), ((18293, 18366), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (["('%s: does not support analytical pdf.' % self)"], {}), "('%s: does not support analytical pdf.' % self)\n", (18319, 18366), False, 'import chaospy\n'), ((20772, 20799), 'numpy.prod', 'numpy.prod', (['size'], {'dtype': 'int'}), '(size, dtype=int)\n', (20782, 20799), False, 'import numpy\n'), ((21089, 21187), 'chaospy.distributions.sampler.generator.generate_samples', 'sampler.generator.generate_samples', ([], {'order': 'size_', 'domain': 'self', 'rule': 'rule', 'antithetic': 'antithetic'}), '(order=size_, domain=self, rule=rule,\n antithetic=antithetic)\n', (21123, 21187), False, 'from chaospy.distributions import sampler\n'), ((22622, 22649), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (22639, 22649), False, 'import logging\n'), ((22662, 22689), 'numpy.asarray', 'numpy.asarray', (['K'], {'dtype': 'int'}), '(K, dtype=int)\n', (22675, 22689), False, 'import numpy\n'), ((22705, 22722), 'numpy.all', 'numpy.all', (['(K >= 0)'], {}), '(K >= 0)\n', (22714, 22722), False, 'import numpy\n'), ((23478, 23494), 'numpy.array', 'numpy.array', (['out'], {}), '(out)\n', (23489, 23494), False, 'import numpy\n'), ((24277, 24350), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (['"""moments not supported for this distribution"""'], {}), "('moments not supported for this distribution')\n", (24303, 24350), False, 'import chaospy\n'), ((24804, 24834), 'numpy.asarray', 'numpy.asarray', (['kloc'], {'dtype': 'int'}), '(kloc, dtype=int)\n', (24817, 24834), False, 'import numpy\n'), ((24919, 24949), 'numpy.zeros', 'numpy.zeros', (['((2,) + kloc.shape)'], {}), '((2,) + kloc.shape)\n', (24930, 24949), False, 'import numpy\n'), ((25925, 26017), 'chaospy.UnsupportedFeature', 'chaospy.UnsupportedFeature', (['"""three terms recursion not supported for this distribution"""'], {}), "(\n 'three terms recursion not supported for this distribution')\n", (25951, 26017), False, 'import chaospy\n'), ((28996, 29016), 'chaospy.Add', 'chaospy.Add', (['self', 'X'], {}), '(self, X)\n', (29007, 29016), False, 'import chaospy\n'), ((29097, 29117), 'chaospy.Add', 'chaospy.Add', (['self', 'X'], {}), '(self, X)\n', (29108, 29117), False, 'import chaospy\n'), ((29196, 29217), 'chaospy.Add', 'chaospy.Add', (['self', '(-X)'], {}), '(self, -X)\n', (29207, 29217), False, 'import chaospy\n'), ((29298, 29319), 'chaospy.Add', 'chaospy.Add', (['X', '(-self)'], {}), '(X, -self)\n', (29309, 29319), False, 'import chaospy\n'), ((29393, 29415), 'chaospy.Negative', 'chaospy.Negative', (['self'], {}), '(self)\n', (29409, 29415), False, 'import chaospy\n'), ((29494, 29519), 'chaospy.Multiply', 
'chaospy.Multiply', (['self', 'X'], {}), '(self, X)\n', (29510, 29519), False, 'import chaospy\n'), ((29600, 29625), 'chaospy.Multiply', 'chaospy.Multiply', (['X', 'self'], {}), '(X, self)\n', (29616, 29625), False, 'import chaospy\n'), ((29704, 29735), 'chaospy.Multiply', 'chaospy.Multiply', (['self', '(X ** -1)'], {}), '(self, X ** -1)\n', (29720, 29735), False, 'import chaospy\n'), ((29814, 29845), 'chaospy.Multiply', 'chaospy.Multiply', (['X', '(self ** -1)'], {}), '(X, self ** -1)\n', (29830, 29845), False, 'import chaospy\n'), ((29932, 29963), 'chaospy.Multiply', 'chaospy.Multiply', (['self', '(X ** -1)'], {}), '(self, X ** -1)\n', (29948, 29963), False, 'import chaospy\n'), ((30052, 30083), 'chaospy.Multiply', 'chaospy.Multiply', (['X', '(self ** -1)'], {}), '(X, self ** -1)\n', (30068, 30083), False, 'import chaospy\n'), ((30168, 30199), 'chaospy.Multiply', 'chaospy.Multiply', (['self', '(X ** -1)'], {}), '(self, X ** -1)\n', (30184, 30199), False, 'import chaospy\n'), ((30286, 30317), 'chaospy.Multiply', 'chaospy.Multiply', (['X', '(self ** -1)'], {}), '(X, self ** -1)\n', (30302, 30317), False, 'import chaospy\n'), ((30395, 30417), 'chaospy.Power', 'chaospy.Power', (['self', 'X'], {}), '(self, X)\n', (30408, 30417), False, 'import chaospy\n'), ((30499, 30521), 'chaospy.Power', 'chaospy.Power', (['X', 'self'], {}), '(X, self)\n', (30512, 30521), False, 'import chaospy\n'), ((3210, 3310), 'chaospy.StochasticallyDependentError', 'chaospy.StochasticallyDependentError', (["('%s is an under-defined probability distribution.' % self)"], {}), "(\n '%s is an under-defined probability distribution.' % self)\n", (3246, 3310), False, 'import chaospy\n'), ((7560, 7596), 'numpy.clip', 'numpy.clip', (['q_data'], {'a_min': '(0)', 'a_max': '(1)'}), '(q_data, a_min=0, a_max=1)\n', (7570, 7596), False, 'import numpy\n'), ((8522, 8550), 'numpy.where', 'numpy.where', (['indices', '(0)', 'out'], {}), '(indices, 0, out)\n', (8533, 8550), False, 'import numpy\n'), ((8760, 8788), 'numpy.where', 'numpy.where', (['indices', '(1)', 'out'], {}), '(indices, 1, out)\n', (8771, 8788), False, 'import numpy\n'), ((9674, 9773), 'chaospy.StochasticallyDependentError', 'chaospy.StochasticallyDependentError', (['"""Cumulative distribution does not support dependencies."""'], {}), "(\n 'Cumulative distribution does not support dependencies.')\n", (9710, 9773), False, 'import chaospy\n'), ((9976, 9997), 'numpy.prod', 'numpy.prod', (['q_data', '(0)'], {}), '(q_data, 0)\n', (9986, 9997), False, 'import numpy\n'), ((11753, 11775), 'numpy.all', 'numpy.all', (['(q_data <= 1)'], {}), '(q_data <= 1)\n', (11762, 11775), False, 'import numpy\n'), ((11780, 11802), 'numpy.all', 'numpy.all', (['(q_data >= 0)'], {}), '(q_data >= 0)\n', (11789, 11802), False, 'import numpy\n'), ((12722, 12754), 'numpy.where', 'numpy.where', (['indices', 'lower', 'out'], {}), '(indices, lower, out)\n', (12733, 12754), False, 'import numpy\n'), ((12963, 12995), 'numpy.where', 'numpy.where', (['indices', 'upper', 'out'], {}), '(indices, upper, out)\n', (12974, 12995), False, 'import numpy\n'), ((16955, 16976), 'numpy.prod', 'numpy.prod', (['f_data', '(0)'], {}), '(f_data, 0)\n', (16965, 16976), False, 'import numpy\n'), ((18009, 18041), 'numpy.where', 'numpy.where', (['indices', '(0)', 'ret_val'], {}), '(indices, 0, ret_val)\n', (18020, 18041), False, 'import numpy\n'), ((18087, 18132), 'numpy.where', 'numpy.where', (['(x_data == cache[self][0])', 'out', '(0)'], {}), '(x_data == cache[self][0], out, 0)\n', (18098, 18132), False, 'import numpy\n'), 
((23522, 23539), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (23532, 23539), False, 'import numpy\n'), ((3831, 3851), 'numpy.asarray', 'numpy.asarray', (['param'], {}), '(param)\n', (3844, 3851), False, 'import numpy\n'), ((7505, 7523), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (7514, 7523), False, 'import numpy\n'), ((8470, 8488), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (8479, 8488), False, 'import numpy\n'), ((8708, 8726), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (8717, 8726), False, 'import numpy\n'), ((12301, 12360), 'chaospy.approximate_inverse', 'chaospy.approximate_inverse', (['self', 'idx', 'q_data'], {'cache': 'cache'}), '(self, idx, q_data, cache=cache)\n', (12328, 12360), False, 'import chaospy\n'), ((12670, 12688), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (12679, 12688), False, 'import numpy\n'), ((12911, 12929), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (12920, 12929), False, 'import numpy\n'), ((17875, 17893), 'numpy.sum', 'numpy.sum', (['indices'], {}), '(indices)\n', (17884, 17893), False, 'import numpy\n'), ((21309, 21330), 'numpy.round', 'numpy.round', (['out[idx]'], {}), '(out[idx])\n', (21320, 21330), False, 'import numpy\n'), ((25723, 25744), 'numpy.asfarray', 'numpy.asfarray', (['alpha'], {}), '(alpha)\n', (25737, 25744), False, 'import numpy\n'), ((25767, 25787), 'numpy.asfarray', 'numpy.asfarray', (['beta'], {}), '(beta)\n', (25781, 25787), False, 'import numpy\n'), ((3524, 3671), 'chaospy.StochasticallyDependentError', 'chaospy.StochasticallyDependentError', (["('%s contains dependencies that can not also exist other places in the dependency hierarchy'\n % param)"], {}), "(\n '%s contains dependencies that can not also exist other places in the dependency hierarchy'\n % param)\n", (3560, 3671), False, 'import chaospy\n'), ((21387, 21403), 'numpy.round', 'numpy.round', (['out'], {}), '(out)\n', (21398, 21403), False, 'import numpy\n'), ((16691, 16781), 'chaospy.approximate_density', 'chaospy.approximate_density', (['self', 'idx', 'x_data[idx]'], {'cache': 'cache', 'step_size': 'step_size'}), '(self, idx, x_data[idx], cache=cache, step_size=\n step_size)\n', (16718, 16781), False, 'import chaospy\n'), ((23326, 23365), 'chaospy.approximate_moment', 'chaospy.approximate_moment', (['self', 'kdata'], {}), '(self, kdata)\n', (23352, 23365), False, 'import chaospy\n')] |
"""Basic filtering of garbage and perplexity sampling for OSCAR v1."""
import gzip
import multiprocessing
import os
from random import sample
import fsspec
import kenlm # pip install https://github.com/kpu/kenlm/archive/master.zip
import langid
import numpy as np
from datasets import load_dataset
from nltk.corpus import stopwords
from numpy.random import default_rng
from transformers import AutoTokenizer
class OscarSampler:
"""Based on bertin/mc4/mc4.py.
This code does not use HF's datasets for efficiency reasons."""
langs = {
"af": "Afrikaans",
"als": "Tosk Albanian",
"am": "Amharic",
"an": "Aragonese",
"ar": "Arabic",
"arz": "Egyptian Arabic",
"ast": "Asturian",
"as": "Assamese",
"av": "Avaric",
"azb": "South Azerbaijani",
"az": "Azerbaijani",
"bar": "Bavarian",
"ba": "Bashkir",
"bcl": "Central Bikol",
"be": "Belarusian",
"bg": "Bulgarian",
"bh": "Bihari",
"bn": "Bengali",
"bo": "Tibetan",
"bpy": "Bishnupriya",
"br": "Breton",
"bs": "Bosnian",
"bxr": "Russia Buriat",
"ca": "Catalan",
"cbk": "Chavacano",
"ceb": "Cebuano",
"ce": "Chechen",
"ckb": "Central Kurdish",
"cs": "Czech",
"cv": "Chuvash",
"cy": "Welsh",
"da": "Danish",
"de": "German",
"diq": "Dimli",
"dsb": "Lower Sorbian",
"dv": "Dhivehi",
"el": "Modern Greek",
"eml": "Emilian-Romagnol",
"en": "English",
"eo": "Esperanto",
"es": "Spanish",
"et": "Estonian",
"eu": "Basque",
"fa": "Persian",
"fi": "Finnish",
"frr": "Northern Frisian",
"fr": "French",
"fy": "Western Frisian",
"ga": "Irish",
"gd": "Scottish Gaelic",
"gl": "Galician",
"gn": "Guarani",
"gom": "Goan Konkani",
"gu": "Gujarati",
"he": "Hebrew",
"hi": "Hindi",
"hr": "Croatian",
"hsb": "Upper Sorbian",
"ht": "Haitian",
"hu": "Hungarian",
"hy": "Armenian",
"ia": "Interlingua",
"id": "Indonesian",
"ie": "Interlingue",
"ilo": "Iloko",
"io": "Ido",
"is": "Icelandic",
"it": "Italian",
"ja": "Japanese",
"jbo": "Lojban",
"jv": "Javanese",
"ka": "Georgian",
"kk": "Kazakh",
"km": "Central Khmer",
"kn": "Kannada",
"ko": "Korean",
"krc": "Karachay-Balkar",
"ku": "Kurdish",
"kv": "Komi",
"kw": "Cornish",
"ky": "Kirghiz",
"la": "Latin",
"lb": "Luxembourgish",
"lez": "Lezghian",
"li": "Limburgan",
"lmo": "Lombard",
"lo": "Lao",
"lrc": "Northern Luri",
"lt": "Lithuanian",
"lv": "Latvian",
"mai": "Maithili",
"mg": "Malagasy",
"mhr": "Eastern Mari",
"min": "Minangkabau",
"mk": "Macedonian",
"ml": "Malayalam",
"mn": "Mongolian",
"mrj": "Western Mari",
"mr": "Marathi",
"ms": "Malay",
"mt": "Maltese",
"mwl": "Mirandese",
"my": "Burmese",
"myv": "Erzya",
"mzn": "Mazanderani",
"nah": "Nahuatl", # languages
"nap": "Neapolitan",
"nds": "Low German",
"ne": "Nepali",
"new": "Newari",
"nl": "Dutch",
"nn": "Norwegian Nynorsk",
"no": "Norwegian",
"oc": "Occitan",
"or": "Oriya",
"os": "Ossetian",
"pam": "Pampanga",
"pa": "Panjabi",
"pl": "Polish",
"pms": "Piemontese",
"pnb": "Western Panjabi",
"ps": "Pushto",
"pt": "Portuguese",
"qu": "Quechua",
"rm": "Romansh",
"ro": "Romanian",
"ru": "Russian",
"sah": "Yakut",
"sa": "Sanskrit",
"scn": "Sicilian",
"sd": "Sindhi",
"sh": "Serbo-Croatian",
"si": "Sinhala",
"sk": "Slovak",
"sl": "Slovenian",
"so": "Somali",
"sq": "Albanian",
"sr": "Serbian",
"su": "Sundanese",
"sv": "Swedish",
"sw": "Swahili",
"ta": "Tamil",
"te": "Telugu",
"tg": "Tajik",
"th": "Thai",
"tk": "Turkmen",
"tl": "Tagalog",
"tr": "Turkish",
"tt": "Tatar",
"tyv": "Tuvinian",
"ug": "Uighur",
"uk": "Ukrainian",
"ur": "Urdu",
"uz": "Uzbek",
"vec": "Venetian",
"vi": "Vietnamese",
"vo": "Volapük",
"war": "Waray",
"wa": "Walloon",
"wuu": "Wu Chinese",
"xal": "Kalmyk",
"xmf": "Mingrelian",
"yi": "Yiddish",
"yo": "Yoruba",
"yue": "Yue Chinese",
"zh": "Chinese",
}
stopwords_cutoff = 0.1
junk_ratio = 0.5
stopword_check = True
special_characters = (
"' 0123456789¯_%$§½¼¾×|†—~\"—±′–'°−{}[]·-'?,./<>!@#^&*()+-‑=:;`→¶'"
)
# TODO - add params for other languages
params = {
"en": {
"stopwords_cutoff": stopwords_cutoff,
"junk_ratio": junk_ratio,
"stopword_check": stopword_check,
"strip_chars": special_characters,
"junk_chars": special_characters,
}
}
def __init__(self, **kwargs):
self.sampling_method = kwargs.pop("sampling_method", "random")
self.perplexity_model = kwargs.pop("perplexity_model", None)
self.sampling_factor = kwargs.pop("sampling_factor", None)
self.boundaries = kwargs.pop("boundaries", None)
if self.sampling_method:
if self.sampling_method == "random":
self.should_keep_doc = self._should_keep_doc_random
else:
# Loading 5-gram model
# http://dl.fbaipublicfiles.com/cc_net/lm/es.arpa.bin
print("loading model = %s", self.perplexity_model)
if self.sampling_method == "gaussian":
self.should_keep_doc = self._should_keep_doc_gaussian
else:
self.should_keep_doc = self._should_keep_doc_step
self.seed = kwargs.pop("seed", None)
self.kwargs = kwargs
@staticmethod
def get_oscar_urls(language, shuffled="unshuffled", deduplicated="deduplicated"):
_BASE_DATA_URL_FORMAT_STR = "https://s3.amazonaws.com/datasets.huggingface.co/oscar/1.0/{shuffled}/{deduplicated}/{language}/"
_BASE_CHECKSUM_FILE_NAME = "{language}_sha256.txt"
base_data_url = _BASE_DATA_URL_FORMAT_STR.format(
shuffled=shuffled, language=language, deduplicated=deduplicated
)
checksum_url = base_data_url + _BASE_CHECKSUM_FILE_NAME.format(
language=language
)
with fsspec.open(checksum_url, encoding="utf-8") as f:
data_filenames = [line.decode().split("\t")[0] for line in f if line]
return [base_data_url + data_filename for data_filename in data_filenames]
@staticmethod
def _download_urls(urls):
for url in urls:
if not os.path.exists(url.split("/")[-1]):
os.system(f"wget {url}")
@staticmethod
def check_good_sentence(
sentence,
stopwords,
junk_dict,
strip_chars,
target_lang,
stopwords_cutoff,
junk_ratio,
stopword_check,
):
# basic dejunk
sent = sentence.lower().strip()
if not sent:
return False
jr = len([char for char in sent if char in junk_dict]) / len(sent)
if jr >= junk_ratio:
return False
words = [word.strip(strip_chars) for word in sent.split()]
if len(words) == 0:
return False
# stopword check
if stopword_check:
stopword_cond = (
len([word for word in words if word in stopwords]) / len(words)
< stopwords_cutoff
)
if stopword_cond:
return False
else:
# langid check
try:
lang = langid.classify(sent)[0]
            except Exception:
lang = ""
return lang == target_lang
@staticmethod
def filter_and_tok_cjk(
url,
target_lang,
sampling_factor,
boundaries,
should_keep_doc,
perplexity_model,
seed,
stopwords_cutoff,
junk_ratio,
stopword_check,
strip_chars,
junk_chars,
):
        mt5_underscore = "_"  # note: mT5's SentencePiece marker is "▁" (U+2581); plain "_" may not match
if seed is not None:
rng = default_rng(seed)
else:
rng = default_rng()
if perplexity_model:
pp_model = kenlm.Model(perplexity_model)
else:
pp_model = None
stopwords = set(stopwords.words(OscarSampler.langs[target_lang].lower()))
junk_dict = {a: 1 for a in junk_chars}
if target_lang in ("ja", "zh", "ko"):
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
OscarSampler._download_urls([url])
file = url.split("/")[-1]
with open(
file.replace("txt.gz", "") + ".sample_filtered.txt", "w", encoding="utf8"
) as f:
with gzip.open(file, "rb") as f2:
for id_, line in enumerate(f2):
line = line.decode().strip()
if target_lang in ("ja", "zh", "ko"):
line = " ".join(tokenizer.tokenize(line)).replace(
mt5_underscore + " ", mt5_underscore
)
if OscarSampler.check_good_sentence(
line,
stopwords,
junk_dict,
strip_chars,
target_lang,
stopwords_cutoff,
junk_ratio,
stopword_check,
):
# now do perplexity sampling
if should_keep_doc(
line,
rng=rng,
factor=sampling_factor,
boundaries=boundaries,
pp_model=pp_model,
):
f.write(line + "\n")
os.unlink(file)
def sample_filter(self, target_lang, sample_shard=5):
if target_lang in self.params:
param = self.params[target_lang]
else:
param = self.params["en"]
stopwords_cutoff = param["stopwords_cutoff"]
junk_ratio = param["junk_ratio"]
stopword_check = param["stopword_check"]
strip_chars = param["strip_chars"]
junk_chars = param["junk_chars"]
if target_lang in self.langs:
lst = self.get_oscar_urls(target_lang)
if sample_shard and len(lst) > sample_shard:
lst = sample(lst, sample_shard)
# TODO, we should create
processes = [
multiprocessing.Process(
target=OscarSampler.filter_and_tok_cjk,
args=(
url,
target_lang,
self.sampling_factor,
self.boundaries,
self.should_keep_doc,
self.perplexity_model,
self.seed,
stopwords_cutoff,
junk_ratio,
stopword_check,
strip_chars,
junk_chars,
),
)
for url in lst
]
for process in processes:
process.start()
for process in processes:
process.join()
os.system(
f"cat {target_lang}_*.sample_filtered.txt > {target_lang}.sample_filtered.txt"
)
os.system(f"gzip {target_lang}.sample_filtered.txt")
return f"{target_lang}.sample_filtered.txt.gz" # TODO put this in a data folder.
else:
print(f"{target_lang} not supported")
return ""
@staticmethod
def create_knlm_model(lang="pt"):
if not os.path.exists("/content/lmplz"):
os.system(
"cp /content/drive/Shareddrives/BigScience/kenlm/bin/lmplz /content/"
)
os.system("chmod ugo+x /content/lmplz")
file = tokenize_oscar_subset(lang, force=False)
file2 = os.path.split(file)[-1]
if not os.path.exists(file2) and not os.path.exists(file2.replace(".gz", "")):
os.system(f"cp {file} ./{file2}")
if os.path.exists(file2):
os.system(f"gunzip ./{file2}")
file2 = file2.replace(".gz", "")
os.system(
f"/content/lmplz --discount_fallback --skip_symbols -o 5 --prune 5 --collapse_values --arpa {lang}.arpa < ./{file2}"
)
os.system(f"mv {lang}.arpa /content/drive/Shareddrives/BigScience")
@staticmethod
def get_perplexity(doc, pp_model):
doc_log_score, doc_length = 0, 0
for line in doc.split("\n"):
log_score = pp_model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
return 10.0 ** (-doc_log_score / doc_length)
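    # Added worked example (hedged): kenlm scores are log10 probabilities, so
    # a one-line doc with log_score == -12 over 5 words (+1 end token) gives
    # a perplexity of 10 ** (12 / 6) == 100.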
@staticmethod
def _should_keep_doc_step(doc, rng, factor=1.5e5, boundaries=None, **kwargs):
        pp_model = kwargs.get("pp_model")
perplexity = OscarSampler.get_perplexity(doc, pp_model)
if boundaries is None:
boundaries = [536394.99320948, 662247.50212365, 919250.87225178]
if perplexity <= boundaries[0]:
quartile_range = boundaries[0]
elif boundaries[0] < perplexity < boundaries[1]:
quartile_range = boundaries[1] - boundaries[0]
elif boundaries[1] < perplexity < boundaries[2]:
quartile_range = boundaries[2] - boundaries[1]
elif perplexity >= boundaries[2]:
quartile_range = 10 * boundaries[2]
probability = factor / quartile_range
return rng.uniform() < probability
@staticmethod
def _should_keep_doc_gaussian(doc, rng, factor=0.78, boundaries=None, **kwargs):
        pp_model = kwargs.get("pp_model")
width = kwargs.get("width", 9 / 2) # width (spread) of the exponential curve
perplexity = OscarSampler.get_perplexity(doc, pp_model)
if boundaries is not None:
m = boundaries[1]
else:
m = 662247.50212365
exponential = np.exp((-1 / width) * ((perplexity - m) / m) ** 2)
weighted_perplexity = factor * exponential
return rng.uniform() < weighted_perplexity
@staticmethod
def _should_keep_doc_random(doc, rng, factor=None, boundaries=None, **kwargs):
if factor is None:
factor = 0.5
return rng.uniform() <= factor
| [
"os.path.exists",
"kenlm.Model",
"random.sample",
"numpy.random.default_rng",
"gzip.open",
"multiprocessing.Process",
"os.path.split",
"numpy.exp",
"langid.classify",
"os.unlink",
"transformers.AutoTokenizer.from_pretrained",
"os.system",
"fsspec.open"
] | [((10563, 10578), 'os.unlink', 'os.unlink', (['file'], {}), '(file)\n', (10572, 10578), False, 'import os\n'), ((12981, 13002), 'os.path.exists', 'os.path.exists', (['file2'], {}), '(file2)\n', (12995, 13002), False, 'import os\n'), ((13096, 13235), 'os.system', 'os.system', (['f"""/content/lmplz --discount_fallback --skip_symbols -o 5 --prune 5 --collapse_values --arpa {lang}.arpa < ./{file2}"""'], {}), "(\n f'/content/lmplz --discount_fallback --skip_symbols -o 5 --prune 5 --collapse_values --arpa {lang}.arpa < ./{file2}'\n )\n", (13105, 13235), False, 'import os\n'), ((13256, 13323), 'os.system', 'os.system', (['f"""mv {lang}.arpa /content/drive/Shareddrives/BigScience"""'], {}), "(f'mv {lang}.arpa /content/drive/Shareddrives/BigScience')\n", (13265, 13323), False, 'import os\n'), ((14927, 14975), 'numpy.exp', 'np.exp', (['(-1 / width * ((perplexity - m) / m) ** 2)'], {}), '(-1 / width * ((perplexity - m) / m) ** 2)\n', (14933, 14975), True, 'import numpy as np\n'), ((6975, 7018), 'fsspec.open', 'fsspec.open', (['checksum_url'], {'encoding': '"""utf-8"""'}), "(checksum_url, encoding='utf-8')\n", (6986, 7018), False, 'import fsspec\n'), ((8787, 8804), 'numpy.random.default_rng', 'default_rng', (['seed'], {}), '(seed)\n', (8798, 8804), False, 'from numpy.random import default_rng\n'), ((8837, 8850), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (8848, 8850), False, 'from numpy.random import default_rng\n'), ((8903, 8932), 'kenlm.Model', 'kenlm.Model', (['perplexity_model'], {}), '(perplexity_model)\n', (8914, 8932), False, 'import kenlm\n'), ((9174, 9223), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""google/mt5-small"""'], {}), "('google/mt5-small')\n", (9203, 9223), False, 'from transformers import AutoTokenizer\n'), ((12095, 12194), 'os.system', 'os.system', (['f"""cat {target_lang}_*.sample_filtered.txt > {target_lang}.sample_filtered.txt"""'], {}), "(\n f'cat {target_lang}_*.sample_filtered.txt > {target_lang}.sample_filtered.txt'\n )\n", (12104, 12194), False, 'import os\n'), ((12227, 12279), 'os.system', 'os.system', (['f"""gzip {target_lang}.sample_filtered.txt"""'], {}), "(f'gzip {target_lang}.sample_filtered.txt')\n", (12236, 12279), False, 'import os\n'), ((12532, 12564), 'os.path.exists', 'os.path.exists', (['"""/content/lmplz"""'], {}), "('/content/lmplz')\n", (12546, 12564), False, 'import os\n'), ((12578, 12663), 'os.system', 'os.system', (['"""cp /content/drive/Shareddrives/BigScience/kenlm/bin/lmplz /content/"""'], {}), "('cp /content/drive/Shareddrives/BigScience/kenlm/bin/lmplz /content/'\n )\n", (12587, 12663), False, 'import os\n'), ((12701, 12740), 'os.system', 'os.system', (['"""chmod ugo+x /content/lmplz"""'], {}), "('chmod ugo+x /content/lmplz')\n", (12710, 12740), False, 'import os\n'), ((12813, 12832), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (12826, 12832), False, 'import os\n'), ((12936, 12969), 'os.system', 'os.system', (['f"""cp {file} ./{file2}"""'], {}), "(f'cp {file} ./{file2}')\n", (12945, 12969), False, 'import os\n'), ((13016, 13046), 'os.system', 'os.system', (['f"""gunzip ./{file2}"""'], {}), "(f'gunzip ./{file2}')\n", (13025, 13046), False, 'import os\n'), ((7335, 7359), 'os.system', 'os.system', (['f"""wget {url}"""'], {}), "(f'wget {url}')\n", (7344, 7359), False, 'import os\n'), ((9439, 9460), 'gzip.open', 'gzip.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (9448, 9460), False, 'import gzip\n'), ((11169, 11194), 'random.sample', 'sample', (['lst', 
'sample_shard'], {}), '(lst, sample_shard)\n', (11175, 11194), False, 'from random import sample\n'), ((11274, 11541), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'OscarSampler.filter_and_tok_cjk', 'args': '(url, target_lang, self.sampling_factor, self.boundaries, self.\n should_keep_doc, self.perplexity_model, self.seed, stopwords_cutoff,\n junk_ratio, stopword_check, strip_chars, junk_chars)'}), '(target=OscarSampler.filter_and_tok_cjk, args=(url,\n target_lang, self.sampling_factor, self.boundaries, self.\n should_keep_doc, self.perplexity_model, self.seed, stopwords_cutoff,\n junk_ratio, stopword_check, strip_chars, junk_chars))\n', (11297, 11541), False, 'import multiprocessing\n'), ((12852, 12873), 'os.path.exists', 'os.path.exists', (['file2'], {}), '(file2)\n', (12866, 12873), False, 'import os\n'), ((8292, 8313), 'langid.classify', 'langid.classify', (['sent'], {}), '(sent)\n', (8307, 8313), False, 'import langid\n')] |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import pytest
import test_util as tu
@tu.requires_ipu_model
def test_ipu_copy_bca1():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i2])
o2 = builder.aiOnnx.add([i1, i2])
o = builder.aiOnnx.add([o1, o2])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o2, 0)
builder.virtualGraph(o, 1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
s.prepareDevice()
# Will fail due to an invalid virtual graph
@tu.requires_ipu_model
def test_ipu_copy_aca1():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i2])
o2 = builder.aiOnnx.add([i1, i2])
o = builder.aiOnnx.add([o1, o2])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o2, 0)
builder.virtualGraph(o, 10) # << Invalid virtual graph
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
with pytest.raises(popart.popart_exception) as e_info:
s.prepareDevice()
assert (("inputs=[{}, {}], outputs=[{}]) " +
"has been assigned to an invalid virtual graph 10").format(
o1 + "_c10", o2 + "_c10", o) in e_info.value.args[0])
# Test that an input stream tensor is correctly mapped to multiple ipus
@tu.requires_ipu_model
def test_ipu_copy_bca4():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i2])
o2 = builder.aiOnnx.add([i1, i2])
t1 = builder.aiOnnx.transpose([i1], [])
o3 = builder.aiOnnx.add([o1, o2])
o = builder.aiOnnx.add([o3, t1])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o2, 2)
builder.virtualGraph(t1, 2)
builder.virtualGraph(o3, 1)
builder.virtualGraph(o, 1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
s.prepareDevice()
# Test to ensure that the same tensor is not copied multiple times to the same IPU
@tu.requires_ipu_model
def test_ipu_copy_bca2():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i2])
o2 = builder.aiOnnx.add([i1, i2])
o3 = builder.aiOnnx.add([o1, o2])
o4 = builder.aiOnnx.add([o1, o2])
o = builder.aiOnnx.add([o3, o4])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o2, 0)
builder.virtualGraph(o3, 1)
builder.virtualGraph(o4, 1)
builder.virtualGraph(o, 2)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
s.prepareDevice()
# Test to make sure that if a single tensor is mapped to multiple inputs of one
# op, then the copy does the right thing
@tu.requires_ipu_model
def test_ipu_copy_bca3():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i2])
o = builder.aiOnnx.add([o1, o1])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o, 1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=2))
s.prepareDevice()
@tu.requires_ipu_model
def test_ipu_copy_bca5():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
constData = np.random.rand(2, 2).astype(np.float32)
c1 = builder.aiOnnx.constant(constData, "constShapeData")
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", [2, 2]))
o1 = builder.aiOnnx.add([c1, i2])
o2 = builder.aiOnnx.add([c1, i2])
t1 = builder.aiOnnx.transpose([c1], [])
o3 = builder.aiOnnx.add([o1, o2])
o = builder.aiOnnx.add([o3, t1])
builder.addOutputTensor(o)
builder.virtualGraph(o1, 0)
builder.virtualGraph(o2, 2)
builder.virtualGraph(t1, 2)
builder.virtualGraph(o3, 1)
builder.virtualGraph(o, 1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
s.prepareDevice()
# IPU 0 * IPU 1
# =========================================
# *
# i1 -----> copy ---> mul
# | * |
# v * v
# add -----> copy --> add
# * |
# * v
# * output
@tu.requires_ipu_model
def test_copy_to_op_with_duplicate_inputs():
popart.getLogger().setLevel("TRACE")
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", [1]))
o1 = builder.aiOnnx.add([i1, i1])
builder.virtualGraph(o1, 0)
o2 = builder.aiOnnx.mul([i1, i1])
builder.virtualGraph(o2, 1)
o3 = builder.aiOnnx.add([o1, o2])
builder.virtualGraph(o3, 1)
o = o3
builder.addOutputTensor(o)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
s = popart.InferenceSession(fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
deviceInfo=tu.create_test_device(numIpus=3))
s.prepareDevice()
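# Added note (hedged): with pytest installed, a single case above can be run
# as, e.g., ``python -m pytest <this_file> -k test_ipu_copy_bca1``.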
| [
"popart.Builder",
"test_util.create_test_device",
"popart.AnchorReturnType",
"numpy.random.rand",
"popart.SessionOptions",
"popart.TensorInfo",
"pytest.raises",
"popart.getLogger"
] | [((235, 251), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (249, 251), False, 'import popart\n'), ((744, 767), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (765, 767), False, 'import popart\n'), ((1228, 1244), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (1242, 1244), False, 'import popart\n'), ((1766, 1789), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (1787, 1789), False, 'import popart\n'), ((2535, 2551), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (2549, 2551), False, 'import popart\n'), ((3190, 3213), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (3211, 3213), False, 'import popart\n'), ((3709, 3725), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (3723, 3725), False, 'import popart\n'), ((4361, 4384), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (4382, 4384), False, 'import popart\n'), ((4920, 4936), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (4934, 4936), False, 'import popart\n'), ((5359, 5382), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (5380, 5382), False, 'import popart\n'), ((5799, 5815), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (5813, 5815), False, 'import popart\n'), ((6510, 6533), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (6531, 6533), False, 'import popart\n'), ((7265, 7281), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (7279, 7281), False, 'import popart\n'), ((7725, 7748), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (7746, 7748), False, 'import popart\n'), ((285, 316), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (302, 316), False, 'import popart\n'), ((350, 381), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (367, 381), False, 'import popart\n'), ((1278, 1309), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (1295, 1309), False, 'import popart\n'), ((1343, 1374), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (1360, 1374), False, 'import popart\n'), ((2085, 2123), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (2098, 2123), False, 'import pytest\n'), ((2585, 2616), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (2602, 2616), False, 'import popart\n'), ((2650, 2681), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (2667, 2681), False, 'import popart\n'), ((3759, 3790), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (3776, 3790), False, 'import popart\n'), ((3824, 3855), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (3841, 3855), False, 'import popart\n'), ((4970, 5001), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (4987, 5001), False, 'import popart\n'), ((5035, 5066), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (5052, 5066), False, 'import popart\n'), ((5967, 6001), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[2, 2]'], {}), "('FLOAT', [2, 2])\n", (5984, 6001), False, 'import popart\n'), ((7315, 7346), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1]'], {}), "('FLOAT', [1])\n", (7332, 7346), False, 'import 
popart\n'), ((183, 201), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (199, 201), False, 'import popart\n'), ((699, 729), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (722, 729), False, 'import popart\n'), ((1019, 1051), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (1040, 1051), True, 'import test_util as tu\n'), ((1176, 1194), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (1192, 1194), False, 'import popart\n'), ((1721, 1751), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (1744, 1751), False, 'import popart\n'), ((2041, 2073), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (2062, 2073), True, 'import test_util as tu\n'), ((2483, 2501), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (2499, 2501), False, 'import popart\n'), ((3145, 3175), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (3168, 3175), False, 'import popart\n'), ((3465, 3497), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (3486, 3497), True, 'import test_util as tu\n'), ((3657, 3675), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (3673, 3675), False, 'import popart\n'), ((4316, 4346), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (4339, 4346), False, 'import popart\n'), ((4636, 4668), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (4657, 4668), True, 'import test_util as tu\n'), ((4868, 4886), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (4884, 4886), False, 'import popart\n'), ((5314, 5344), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (5337, 5344), False, 'import popart\n'), ((5634, 5666), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(2)'}), '(numIpus=2)\n', (5655, 5666), True, 'import test_util as tu\n'), ((5747, 5765), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (5763, 5765), False, 'import popart\n'), ((5833, 5853), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (5847, 5853), True, 'import numpy as np\n'), ((6465, 6495), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (6488, 6495), False, 'import popart\n'), ((6785, 6817), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (6806, 6817), True, 'import test_util as tu\n'), ((7213, 7231), 'popart.getLogger', 'popart.getLogger', ([], {}), '()\n', (7229, 7231), False, 'import popart\n'), ((7680, 7710), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (7703, 7710), False, 'import popart\n'), ((8000, 8032), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)'}), '(numIpus=3)\n', (8021, 8032), True, 'import test_util as tu\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Homework 1 Solution Question 2
# In[1]:
#importing libraries
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# In[2]:
#opening the data files for data 1
with open('data_1.csv') as csvfile:
x_1=[]
y_1=[]
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
        if row[0].isdigit() == True: #skip the header row ("x", "y")
x_1.append(float(row[0]))
y_1.append(float(row[1]))
# In[3]:
#opening the data files for data 2
with open('data_2.csv') as csvfile:
x_2=[]
y_2=[]
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
        if row[0].isdigit() == True: #skip the header row ("x", "y")
x_2.append(float(row[0]))
y_2.append(float(row[1]))
# In[4]:
#plotting graph 1
fig = plt.figure(figsize=(15,4))
plt.subplot(121)
plt.xlabel('X Axis')
plt.ylabel('Y Axis')
plt.scatter(x_1,y_1,c='red')
plt.title('Data 1')
#plotting graph 2
plt.subplot(122)
plt.xlabel('X Axis')
plt.ylabel('Y Axis')
plt.title('Data 2')
plt.scatter(x_2,y_2,c='pink')
# In[5]:
#Using total least square error
# In[6]:
#error calculation
#all the tables that are necessary are written here
x_1_p2 = []
xy = []
x_p2_y = []
x_1_p3 = []
x_1_p4 = []
# In[7]:
#adding all elements to the list
for i in range(len(x_1)):
x_1_p2.append(float(x_1[i]**2))
xy.append(float(x_1[i]*y_1[i]))
x_p2_y.append(float((x_1[i]**2)*y_1[i]))
x_1_p3.append(float(x_1[i]**3))
x_1_p4.append(float(x_1[i]**4))
# In[8]:
#initializing all sums to zero
sum_n = 0.0
sum_x_1=0.0
sum_y_1=0.0
sum_x_1_p2 =0.0
sum_xy =0.0
sum_x_p2_y =0.0
sum_x_1_p3 = 0.0
sum_x_1_p4 =0.0
#finding all sums in the table
for i in range(len(x_1)):
    sum_n = sum_n + 1  # count of data points (the n in the normal equations)
sum_x_1=sum_x_1+x_1[i]
sum_y_1=sum_y_1+y_1[i]
sum_x_1_p2 =sum_x_1_p2 + x_1_p2[i]
sum_xy =sum_xy+xy[i]
sum_x_p2_y =sum_x_p2_y+x_p2_y[i]
sum_x_1_p3 = sum_x_1_p3+x_1_p3[i]
sum_x_1_p4 = sum_x_1_p4 + x_1_p4[i]
# In[9]:
#three equations to find a b and c are >>>
# sum_y_1 = a*sum_x_1_p2 + b*sum_x_1 + c*sum_n
# sum_xy = a*sum_x_1_p3 + b*sum_x_1_p2 + c*sum_x_1
# sum_x_p2_y = a*sum_x_1_p4 + b*sum_x_1_p3 + c*sum_x_1_p2
#just for showcasing the theory
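# Added cross-check (hedged, not in the original notebook): numpy solves the
# same least-squares fit directly, which validates the SVD route used below.
coeff_check = np.polyfit(x_1, y_1, 2)  # returns [a, b, c] for a*x**2 + b*x + c
print("polyfit check =", coeff_check)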
# In[10]:
#finding the solution for the above set of linear equations
a= np.array([[sum_x_1_p2,sum_x_1,sum_n],[sum_x_1_p3,sum_x_1_p2,sum_x_1],[sum_x_1_p4,sum_x_1_p3,sum_x_1_p2]])
b=np.array([sum_y_1,sum_xy,sum_x_p2_y])
# In[11]:
from numpy import linalg as LA
def Compute_Svd(A): #function definition for svd calculation
AT=A.T #transpose
AAT=A.dot(AT)
eigenvalue_U,eigenvector_U=LA.eig(AAT)
sort_val1 = eigenvalue_U.argsort()[::-1]
new_eigenvalue_U = eigenvalue_U[sort_val1]
new_eigenvector_U = eigenvector_U[:,sort_val1]
#temp = np.diag((np.sqrt(new_eigenvalue_U))) #compute sigma matrix as a diagonal matrix with elements as square root of eigen values of U
#sigma = np.zeros_like(A).astype(np.float64)
#sigma[:temp.shape[0],:temp.shape[1]]=temp
    temp_1 = np.diag(np.sqrt(1/new_eigenvalue_U)) #compute the inverse sigma matrix: diagonal elements are 1/sqrt(eigenvalues of A*A^T)
sigma_1 = np.zeros_like(A).astype(np.float64)
sigma_1[:temp_1.shape[0],:temp_1.shape[1]]=temp_1
VT_inter = sigma_1.dot(new_eigenvector_U.T)
VT = VT_inter.dot(A)
return new_eigenvector_U,sigma_1,VT
# In[12]:
U,S_inv,VT = Compute_Svd(a)
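# Added sanity check (hedged): Compute_Svd returns the *inverse* singular
# values, so the matrix should be recovered as U @ inv(S_inv) @ VT.
print("reconstruction ok:", np.allclose(U.dot(np.linalg.inv(S_inv)).dot(VT), a))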
# In[13]:
sol_1 = VT.T.dot(S_inv.dot(U.T.dot(b)))
print("Co-efficients=",sol_1)
a = sol_1[0]
b = sol_1[1]
c = sol_1[2]
# In[14]:
y_new=[]#empty list
for i in range(0,len(x_1)):
y = (a*(x_1[i]**2))+(b*x_1[i]) + c
y_new.append(y)
fig = plt.figure(figsize=(10,4))
plt.xlabel('X Axis')
plt.ylabel('Y Axis')
plt.title('Data 1')
plt.scatter(x_1,y_1,c='red')
plt.text(400,100,'TLS Algorithm')
plt.plot(x_1,y_new,'k')
plt.show()
# In[15]:
#the end
# In[ ]:
| [
"matplotlib.pyplot.text",
"numpy.sqrt",
"numpy.linalg.eig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"csv.reader",
"matplotlib.pyplot.title",
"matplotlib.pyplo... | [((844, 871), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (854, 871), True, 'import matplotlib.pyplot as plt\n'), ((871, 887), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (882, 887), True, 'import matplotlib.pyplot as plt\n'), ((888, 908), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Axis"""'], {}), "('X Axis')\n", (898, 908), True, 'import matplotlib.pyplot as plt\n'), ((909, 929), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Axis"""'], {}), "('Y Axis')\n", (919, 929), True, 'import matplotlib.pyplot as plt\n'), ((930, 960), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_1', 'y_1'], {'c': '"""red"""'}), "(x_1, y_1, c='red')\n", (941, 960), True, 'import matplotlib.pyplot as plt\n'), ((959, 978), 'matplotlib.pyplot.title', 'plt.title', (['"""Data 1"""'], {}), "('Data 1')\n", (968, 978), True, 'import matplotlib.pyplot as plt\n'), ((997, 1013), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1008, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Axis"""'], {}), "('X Axis')\n", (1024, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Axis"""'], {}), "('Y Axis')\n", (1045, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1075), 'matplotlib.pyplot.title', 'plt.title', (['"""Data 2"""'], {}), "('Data 2')\n", (1065, 1075), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1107), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_2', 'y_2'], {'c': '"""pink"""'}), "(x_2, y_2, c='pink')\n", (1087, 1107), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2464), 'numpy.array', 'np.array', (['[[sum_x_1_p2, sum_x_1, sum_n], [sum_x_1_p3, sum_x_1_p2, sum_x_1], [\n sum_x_1_p4, sum_x_1_p3, sum_x_1_p2]]'], {}), '([[sum_x_1_p2, sum_x_1, sum_n], [sum_x_1_p3, sum_x_1_p2, sum_x_1],\n [sum_x_1_p4, sum_x_1_p3, sum_x_1_p2]])\n', (2355, 2464), True, 'import numpy as np\n'), ((2455, 2494), 'numpy.array', 'np.array', (['[sum_y_1, sum_xy, sum_x_p2_y]'], {}), '([sum_y_1, sum_xy, sum_x_p2_y])\n', (2463, 2494), True, 'import numpy as np\n'), ((3800, 3827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (3810, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3827, 3847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Axis"""'], {}), "('X Axis')\n", (3837, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3848, 3868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Axis"""'], {}), "('Y Axis')\n", (3858, 3868), True, 'import matplotlib.pyplot as plt\n'), ((3869, 3888), 'matplotlib.pyplot.title', 'plt.title', (['"""Data 1"""'], {}), "('Data 1')\n", (3878, 3888), True, 'import matplotlib.pyplot as plt\n'), ((3889, 3919), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_1', 'y_1'], {'c': '"""red"""'}), "(x_1, y_1, c='red')\n", (3900, 3919), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3953), 'matplotlib.pyplot.text', 'plt.text', (['(400)', '(100)', '"""TLS Algorithm"""'], {}), "(400, 100, 'TLS Algorithm')\n", (3926, 3953), True, 'import matplotlib.pyplot as plt\n'), ((3952, 3977), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'y_new', '"""k"""'], {}), "(x_1, y_new, 'k')\n", (3960, 3977), True, 'import matplotlib.pyplot as plt\n'), ((3976, 3986), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3984, 3986), True, 'import matplotlib.pyplot as plt\n'), ((307, 341), 
'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (317, 341), False, 'import csv\n'), ((617, 651), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (627, 651), False, 'import csv\n'), ((2737, 2748), 'numpy.linalg.eig', 'LA.eig', (['AAT'], {}), '(AAT)\n', (2743, 2748), True, 'from numpy import linalg as LA\n'), ((3153, 3182), 'numpy.sqrt', 'np.sqrt', (['(1 / new_eigenvalue_U)'], {}), '(1 / new_eigenvalue_U)\n', (3160, 3182), True, 'import numpy as np\n'), ((3293, 3309), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (3306, 3309), True, 'import numpy as np\n')] |
import json
import logging
import math
import socket
import time
import numpy as np
import tables
import torch
import torch.nn.functional as F
import transformers
from torch import nn
from torch.nn import DataParallel
from torchtext import data
from torchtext.data import Iterator, Batch
from tqdm import tqdm
from transformers import AutoTokenizer, AdamW
from ..dataset.irrelevant_psg_dataset import IrrelevantPassageDataset, IrrelevantPassagePredictionDataset
from ..models.transformer_binary_cls import TransformerBinaryClassifier
from ...common.utility.utility import report_parameters, count_parameters, get_timestamp, mkdir
class IRRDocClassifier:
def __init__(self, config, device, force_local=False):
self.config = config
self.device = device
self.update_it = 0
self.tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_type"], cache_dir=config["cache_dir"],
local_files_only=force_local, use_fast=True)
# preprocessing fields
self.INPUT_field = \
data.Field(use_vocab=False, batch_first=True, sequential=True, pad_token=self.tokenizer.pad_token_id)
self.SEGMENT_field = self.PADDING_field = \
data.Field(use_vocab=False, batch_first=True, sequential=True, pad_token=0)
def fit(self):
logging.debug(json.dumps(self.config, indent=4, sort_keys=True))
train_iter, val_iter, test_iter = self.get_data()
logging.info("Creating/Loading model")
if not self.config["test_only"]:
model = self.init_model()
optimizer = self.init_optimizer(model)
total_steps = (len(train_iter.data()) // train_iter.batch_size + 1) // \
(self.config["true_batch_size"] // self.config["batch_size"]) \
* self.config["epochs"]
warmup_steps = round(
self.config["warmup_proportion"] * total_steps) if "warmup_proportion" in self.config else \
self.config["warmup_steps"]
print(f"TOTAL STEPS: {total_steps}, WARMUP STEPS: {warmup_steps}")
scheduler = self.init_scheduler(optimizer, total_steps, warmup_steps)
else:
model = self.load_model(self.config["model_to_validate"])
self.log_model_info(model)
start_time = time.time()
self.best_accuracy = 0
self.best_model_p = ""
if not self.config["test_only"]:
try:
for it in range(self.config["epochs"]):
logging.info(f"Epoch {it}")
self.train_epoch(model=model,
optimizer=optimizer,
train_iter=train_iter,
val_iter=val_iter,
scheduler=scheduler)
val_loss, accuracy = self.validate(model, val_iter)
self.save_if_best(accuracy, model)
except KeyboardInterrupt:
logging.info('-' * 120)
logging.info('Exit from training early.')
except BaseException as be:
logging.error(be)
raise be
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
if self.best_model_p != "":
logging.info(f"Loading best model {self.best_model_p}")
model = torch.load(self.best_model_p, map_location=self.device)
if self.config.get("hyperparameter_tuning_mode", False):
return self.best_accuracy, self.best_model_p
logging.info(f"Obtaining results on test data...")
test_loss, test_accuracy = self.validate(model, test_iter)
logging.info(f"Test loss: {test_loss}")
logging.info(f"Test accuracy: {test_accuracy}")
def get_data(self):
train_iter, val_iter = None, None
test = IrrelevantPassageDataset(self.config["test_data"], self.tokenizer,
cache_dir=self.config["data_cache_dir"])
if not self.config["test_only"]:
val = IrrelevantPassageDataset(self.config["validation_data"], self.tokenizer,
cache_dir=self.config["data_cache_dir"])
train = IrrelevantPassageDataset(self.config["training_data"], self.tokenizer,
cache_dir=self.config["data_cache_dir"])
train_iter = Iterator(train,
shuffle=True,
batch_size=self.config["batch_size"], train=True,
repeat=False,
device=self.device)
val_iter = Iterator(val,
batch_size=self.config["validation_batch_size"],
repeat=False, shuffle=False,
device=self.device)
test_iter = Iterator(test,
batch_size=self.config["validation_batch_size"],
repeat=False, shuffle=False,
device=self.device)
return train_iter, val_iter, test_iter
def save_if_best(self, accuracy, model):
if accuracy > self.best_accuracy:
self.best_accuracy = accuracy
if self.best_accuracy >= self.config["min_p_to_save"]:
mkdir(self.config['save_dir'])
model_p = f"{self.config['save_dir']}/" \
f"irrelevant_doc_cls_{self.config['model_type'].replace('/', '_')}_" \
f"acc_{accuracy:.4f}_" \
f"{get_timestamp()}_" \
f"{socket.gethostname()}.pt"
self.best_model_p = model_p
torch.save(model, model_p)
def train_epoch(self, model: TransformerBinaryClassifier,
optimizer: torch.optim.Optimizer,
train_iter: Iterator,
val_iter: Iterator,
scheduler: torch.optim.lr_scheduler.LambdaLR):
model.train()
total_losses = []
update_ratio = self.config["true_batch_size"] // self.config["batch_size"]
optimizer.zero_grad()
updated = False
tr_loss = math.inf
iter = tqdm(enumerate(train_iter), total=len(train_iter.data()) // train_iter.batch_size + 1)
pred_hits = 0
total_preds = 0
WEIGHT = torch.Tensor([1. / self.config["x-negatives"]]).to(self.device)
logging.info(f"Weight for positives: {WEIGHT.item():.3f}")
for current_it, raw_batch in iter:
inputs, segments, input_masks = self.prepare_batch(raw_batch)
targets = raw_batch.label
# detokenized_inps = [self.tokenizer.convert_ids_to_tokens(x.tolist()) for x in inputs]
scores = model(input_ids=inputs, token_type_ids=segments, attention_mask=input_masks)
loss = F.binary_cross_entropy_with_logits(scores, targets, pos_weight=WEIGHT, reduction="mean")
# this is not entirely correct, as the weights are not the same in all mini-batches
# but we neglect this
loss = loss / update_ratio
loss.backward()
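            # Accumulation sketch: update_ratio = true_batch_size / batch_size, so
            # summing `update_ratio` losses, each scaled by 1/update_ratio, before
            # optimizer.step() approximates the gradient of one mean-reduced batch
            # of true_batch_size examples (up to the pos_weight caveat above).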
pred_hits += ((scores > 0) == targets.bool()).sum().item()
total_preds += len(scores)
total_losses.append(loss)
if (current_it + 1) % update_ratio == 0:
self.update_parameters(model, optimizer, scheduler)
updated = True
if (current_it + 1) % (update_ratio * 10) == 0:
iter.set_description(
f"Steps: {self.update_it}, Tr loss: {sum(total_losses) / len(total_losses):.3f}, Acc {pred_hits / total_preds:.3f}")
total_losses = []
pred_hits, total_preds = 0, 0
if "validate_update_steps" in self.config and \
(current_it + 1) % (update_ratio * self.config["validate_update_steps"]) == 0:
val_loss, accuracy = self.validate(model, val_iter)
self.save_if_best(accuracy, model)
logging.info("Training validation:")
logging.info(f"loss: {val_loss}")
logging.info(f"accuracy: {accuracy}")
if not updated:
self.update_parameters(model, optimizer, scheduler)
@torch.no_grad()
def validate(self, model: TransformerBinaryClassifier,
val_iter: Iterator):
model.eval()
iter = tqdm(enumerate(val_iter), total=len(val_iter.data()) // val_iter.batch_size + 1)
total_losses = []
total_elements = 0
total_hits = 0
for current_it, raw_batch in iter:
inputs, segments, input_masks = self.prepare_batch(raw_batch)
targets = raw_batch.label
scores = model(input_ids=inputs, token_type_ids=segments, attention_mask=input_masks)
losses = F.binary_cross_entropy_with_logits(scores, targets,
reduction="none")
total_hits += ((scores > 0) == targets.bool()).sum().item()
total_elements += len(scores)
total_losses += losses.tolist()
if (current_it + 1) % 10 == 0:
iter.set_description(
f"Val loss: {sum(total_losses) / len(total_losses):.3f}, Acc {total_hits / total_elements:.3f}")
val_loss = sum(total_losses) / len(total_losses)
accuracy = total_hits / total_elements
logging.info(f"Validation loss: {val_loss}")
logging.info(f"Accuracy: {accuracy}")
return val_loss, accuracy
def init_model(self):
return self.make_parallel(TransformerBinaryClassifier(self.config))
def make_parallel(self, model):
"""
Wrap model in dataparallel, if possible
"""
if self.config["multi_gpu"] and torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
logging.info("DataParallel active!")
logging.info(f"Using device ids: {model.device_ids}")
model = model.to(self.device)
return model
def log_model_info(self, model):
logging.info(f"Models has {count_parameters(model)} parameters")
param_sizes, param_shapes = report_parameters(model)
param_sizes = "\n'".join(str(param_sizes).split(", '"))
param_shapes = "\n'".join(str(param_shapes).split(", '"))
logging.debug(f"Model structure:\n{param_sizes}\n{param_shapes}\n")
def init_optimizer(self, model):
optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
return optimizer
def init_scheduler(self, optimizer, total_steps, warmup_steps):
"""
Initialization of lr scheduler.
:param optimizer: The optimizer that is used for the training.
:type optimizer: Optimizer
:return: Created scheduler.
:rtype: LambdaLR
"""
lastEpoch = -1
if self.config["scheduler"] == "linear":
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=total_steps,
last_epoch=lastEpoch)
elif self.config["scheduler"] == "cosine":
scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=total_steps,
num_cycles=0.5,
last_epoch=lastEpoch)
elif self.config["scheduler"] == "constant":
scheduler = transformers.get_constant_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=warmup_steps,
last_epoch=lastEpoch)
else:
scheduler = None
return scheduler
def load_model(self, path):
model = torch.load(path, map_location=self.device)
        if isinstance(model, DataParallel):
model = model.module
return self.make_parallel(model)
def update_parameters(self, model, optimizer, scheduler):
torch.nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()),
self.config["max_grad_norm"])
self.update_it += 1
optimizer.step()
optimizer.zero_grad()
if scheduler is not None:
scheduler.step()
@torch.no_grad()
# @profile
def predict(self, infile, outfile, total_infile_len=21_015_324):
logging.debug(json.dumps(self.config, indent=4, sort_keys=True))
test = IrrelevantPassagePredictionDataset(infile, self.tokenizer)
test_iter = Iterator(test,
batch_size=self.config["inference_batch_size"],
repeat=False, shuffle=False,
device=self.device)
model = self.load_model(self.config["cls_checkpoint"])
self.log_model_info(model)
model.eval()
start_time = time.time()
assert model.training is False
f = tables.open_file(outfile, mode='w')
try:
atom = tables.Float32Atom()
array_c = f.create_earray(f.root, 'data', atom, (0, 2))
for raw_batch in tqdm(test_iter, total=total_infile_len // test_iter.batch_size + 1):
inputs, segments, input_masks = self.prepare_batch(raw_batch)
# detokenized_inps = [self.tokenizer.convert_ids_to_tokens(x.tolist()) for x in inputs]
scores = model(input_ids=inputs, token_type_ids=segments, attention_mask=input_masks)
probs = torch.sigmoid(scores).cpu().unsqueeze(1).numpy()
scores = scores.cpu().unsqueeze(1).numpy()
d = np.concatenate((scores, probs), 1)
array_c.append(d)
except KeyboardInterrupt:
logging.info('-' * 120)
            logging.info('Exit from prediction early.')
except BaseException as be:
logging.error(be)
raise be
finally:
logging.info(f'Finished after {(time.time() - start_time) / 60} minutes.')
def prepare_batch(self, raw_batch: Batch, max_len: int = 512):
include_title = self.config["use_title"]
inputs = []
input_segments = []
input_paddings = []
title_batch, psg_batch = raw_batch.title, raw_batch.psg
assert len(title_batch) == len(psg_batch)
for title, passage in zip(title_batch, psg_batch):
if include_title:
preprocessed = self.tokenizer.encode_plus(title, passage, add_special_tokens=True,
return_token_type_ids=True, truncation=True,
max_length=max_len)
else:
preprocessed = self.tokenizer.encode_plus(passage, add_special_tokens=True,
return_token_type_ids=True, truncation=True,
max_length=max_len)
input_ids, segment_mask = preprocessed['input_ids'], preprocessed['token_type_ids']
inputs.append(input_ids)
input_segments.append(segment_mask)
input_paddings.append([1] * len(input_ids))
lt = lambda x: torch.LongTensor(x).to(self.device)
inputs = self.INPUT_field.pad(inputs)
segments = self.SEGMENT_field.pad(input_segments)
input_masks = self.PADDING_field.pad(input_paddings)
return lt(inputs), lt(segments), lt(input_masks)
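
# Hedged usage sketch (not part of the original module; all paths and values
# below are hypothetical, and the config keys mirror those read above):
#
# config = {"tokenizer_type": "bert-base-uncased", "cache_dir": ".cache",
#           "data_cache_dir": ".data", "model_type": "bert-base-uncased",
#           "test_only": False, "batch_size": 8, "true_batch_size": 32,
#           "validation_batch_size": 32, "epochs": 2, "warmup_proportion": 0.1,
#           "learning_rate": 2e-5, "weight_decay": 0.01, "max_grad_norm": 1.0,
#           "scheduler": "linear", "x-negatives": 4, "min_p_to_save": 0.9,
#           "save_dir": "checkpoints", "use_title": True, "multi_gpu": False,
#           "training_data": "train.jsonl", "validation_data": "val.jsonl",
#           "test_data": "test.jsonl"}
# classifier = IRRDocClassifier(config, device=torch.device("cpu"))
# classifier.fit()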
| [
"logging.debug",
"torch.LongTensor",
"transformers.get_constant_schedule_with_warmup",
"torch.cuda.device_count",
"transformers.AutoTokenizer.from_pretrained",
"logging.info",
"logging.error",
"json.dumps",
"numpy.concatenate",
"socket.gethostname",
"tables.Float32Atom",
"torchtext.data.Field"... | [((8610, 8625), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8623, 8625), False, 'import torch\n'), ((12921, 12936), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12934, 12936), False, 'import torch\n'), ((827, 963), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["config['tokenizer_type']"], {'cache_dir': "config['cache_dir']", 'local_files_only': 'force_local', 'use_fast': '(True)'}), "(config['tokenizer_type'], cache_dir=config[\n 'cache_dir'], local_files_only=force_local, use_fast=True)\n", (856, 963), False, 'from transformers import AutoTokenizer, AdamW\n'), ((1086, 1192), 'torchtext.data.Field', 'data.Field', ([], {'use_vocab': '(False)', 'batch_first': '(True)', 'sequential': '(True)', 'pad_token': 'self.tokenizer.pad_token_id'}), '(use_vocab=False, batch_first=True, sequential=True, pad_token=\n self.tokenizer.pad_token_id)\n', (1096, 1192), False, 'from torchtext import data\n'), ((1252, 1327), 'torchtext.data.Field', 'data.Field', ([], {'use_vocab': '(False)', 'batch_first': '(True)', 'sequential': '(True)', 'pad_token': '(0)'}), '(use_vocab=False, batch_first=True, sequential=True, pad_token=0)\n', (1262, 1327), False, 'from torchtext import data\n'), ((1489, 1527), 'logging.info', 'logging.info', (['"""Creating/Loading model"""'], {}), "('Creating/Loading model')\n", (1501, 1527), False, 'import logging\n'), ((2373, 2384), 'time.time', 'time.time', ([], {}), '()\n', (2382, 2384), False, 'import time\n'), ((3719, 3769), 'logging.info', 'logging.info', (['f"""Obtaining results on test data..."""'], {}), "(f'Obtaining results on test data...')\n", (3731, 3769), False, 'import logging\n'), ((3845, 3884), 'logging.info', 'logging.info', (['f"""Test loss: {test_loss}"""'], {}), "(f'Test loss: {test_loss}')\n", (3857, 3884), False, 'import logging\n'), ((3893, 3940), 'logging.info', 'logging.info', (['f"""Test accuracy: {test_accuracy}"""'], {}), "(f'Test accuracy: {test_accuracy}')\n", (3905, 3940), False, 'import logging\n'), ((5091, 5208), 'torchtext.data.Iterator', 'Iterator', (['test'], {'batch_size': "self.config['validation_batch_size']", 'repeat': '(False)', 'shuffle': '(False)', 'device': 'self.device'}), "(test, batch_size=self.config['validation_batch_size'], repeat=\n False, shuffle=False, device=self.device)\n", (5099, 5208), False, 'from torchtext.data import Iterator, Batch\n'), ((9787, 9831), 'logging.info', 'logging.info', (['f"""Validation loss: {val_loss}"""'], {}), "(f'Validation loss: {val_loss}')\n", (9799, 9831), False, 'import logging\n'), ((9840, 9877), 'logging.info', 'logging.info', (['f"""Accuracy: {accuracy}"""'], {}), "(f'Accuracy: {accuracy}')\n", (9852, 9877), False, 'import logging\n'), ((10723, 10791), 'logging.debug', 'logging.debug', (['f"""Model structure:\n{param_sizes}\n{param_shapes}\n"""'], {}), '(f"""Model structure:\n{param_sizes}\n{param_shapes}\n""")\n', (10736, 10791), False, 'import logging\n'), ((12386, 12428), 'torch.load', 'torch.load', (['path'], {'map_location': 'self.device'}), '(path, map_location=self.device)\n', (12396, 12428), False, 'import torch\n'), ((13189, 13304), 'torchtext.data.Iterator', 'Iterator', (['test'], {'batch_size': "self.config['inference_batch_size']", 'repeat': '(False)', 'shuffle': '(False)', 'device': 'self.device'}), "(test, batch_size=self.config['inference_batch_size'], repeat=False,\n shuffle=False, device=self.device)\n", (13197, 13304), False, 'from torchtext.data import Iterator, Batch\n'), ((13531, 13542), 'time.time', 
'time.time', ([], {}), '()\n', (13540, 13542), False, 'import time\n'), ((13594, 13629), 'tables.open_file', 'tables.open_file', (['outfile'], {'mode': '"""w"""'}), "(outfile, mode='w')\n", (13610, 13629), False, 'import tables\n'), ((1370, 1419), 'json.dumps', 'json.dumps', (['self.config'], {'indent': '(4)', 'sort_keys': '(True)'}), '(self.config, indent=4, sort_keys=True)\n', (1380, 1419), False, 'import json\n'), ((4589, 4707), 'torchtext.data.Iterator', 'Iterator', (['train'], {'shuffle': '(True)', 'batch_size': "self.config['batch_size']", 'train': '(True)', 'repeat': '(False)', 'device': 'self.device'}), "(train, shuffle=True, batch_size=self.config['batch_size'], train=\n True, repeat=False, device=self.device)\n", (4597, 4707), False, 'from torchtext.data import Iterator, Batch\n'), ((4863, 4978), 'torchtext.data.Iterator', 'Iterator', (['val'], {'batch_size': "self.config['validation_batch_size']", 'repeat': '(False)', 'shuffle': '(False)', 'device': 'self.device'}), "(val, batch_size=self.config['validation_batch_size'], repeat=False,\n shuffle=False, device=self.device)\n", (4871, 4978), False, 'from torchtext.data import Iterator, Batch\n'), ((7128, 7220), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['scores', 'targets'], {'pos_weight': 'WEIGHT', 'reduction': '"""mean"""'}), "(scores, targets, pos_weight=WEIGHT,\n reduction='mean')\n", (7162, 7220), True, 'import torch.nn.functional as F\n'), ((9191, 9260), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['scores', 'targets'], {'reduction': '"""none"""'}), "(scores, targets, reduction='none')\n", (9225, 9260), True, 'import torch.nn.functional as F\n'), ((10216, 10238), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (10231, 10238), False, 'from torch import nn\n'), ((10251, 10287), 'logging.info', 'logging.info', (['"""DataParallel active!"""'], {}), "('DataParallel active!')\n", (10263, 10287), False, 'import logging\n'), ((10300, 10353), 'logging.info', 'logging.info', (['f"""Using device ids: {model.device_ids}"""'], {}), "(f'Using device ids: {model.device_ids}')\n", (10312, 10353), False, 'import logging\n'), ((11482, 11640), 'transformers.get_linear_schedule_with_warmup', 'transformers.get_linear_schedule_with_warmup', ([], {'optimizer': 'optimizer', 'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'total_steps', 'last_epoch': 'lastEpoch'}), '(optimizer=optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=total_steps,\n last_epoch=lastEpoch)\n', (11526, 11640), False, 'import transformers\n'), ((13043, 13092), 'json.dumps', 'json.dumps', (['self.config'], {'indent': '(4)', 'sort_keys': '(True)'}), '(self.config, indent=4, sort_keys=True)\n', (13053, 13092), False, 'import json\n'), ((13662, 13682), 'tables.Float32Atom', 'tables.Float32Atom', ([], {}), '()\n', (13680, 13682), False, 'import tables\n'), ((13781, 13848), 'tqdm.tqdm', 'tqdm', (['test_iter'], {'total': '(total_infile_len // test_iter.batch_size + 1)'}), '(test_iter, total=total_infile_len // test_iter.batch_size + 1)\n', (13785, 13848), False, 'from tqdm import tqdm\n'), ((5953, 5979), 'torch.save', 'torch.save', (['model', 'model_p'], {}), '(model, model_p)\n', (5963, 5979), False, 'import torch\n'), ((6624, 6672), 'torch.Tensor', 'torch.Tensor', (["[1.0 / self.config['x-negatives']]"], {}), "([1.0 / self.config['x-negatives']])\n", (6636, 6672), False, 'import torch\n'), ((10165, 10190), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (10188, 10190), False, 'import torch\n'), ((11773, 11947), 'transformers.get_cosine_schedule_with_warmup', 'transformers.get_cosine_schedule_with_warmup', ([], {'optimizer': 'optimizer', 'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'total_steps', 'num_cycles': '(0.5)', 'last_epoch': 'lastEpoch'}), '(optimizer=optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=total_steps,\n num_cycles=0.5, last_epoch=lastEpoch)\n', (11817, 11947), False, 'import transformers\n'), ((14286, 14320), 'numpy.concatenate', 'np.concatenate', (['(scores, probs)', '(1)'], {}), '((scores, probs), 1)\n', (14300, 14320), True, 'import numpy as np\n'), ((14402, 14425), 'logging.info', 'logging.info', (["('-' * 120)"], {}), "('-' * 120)\n", (14414, 14425), False, 'import logging\n'), ((14438, 14479), 'logging.info', 'logging.info', (['"""Exit from training early."""'], {}), "('Exit from training early.')\n", (14450, 14479), False, 'import logging\n'), ((14528, 14545), 'logging.error', 'logging.error', (['be'], {}), '(be)\n', (14541, 14545), False, 'import logging\n'), ((2582, 2609), 'logging.info', 'logging.info', (['f"""Epoch {it}"""'], {}), "(f'Epoch {it}')\n", (2594, 2609), False, 'import logging\n'), ((3074, 3097), 'logging.info', 'logging.info', (["('-' * 120)"], {}), "('-' * 120)\n", (3086, 3097), False, 'import logging\n'), ((3114, 3155), 'logging.info', 'logging.info', (['"""Exit from training early."""'], {}), "('Exit from training early.')\n", (3126, 3155), False, 'import logging\n'), ((3212, 3229), 'logging.error', 'logging.error', (['be'], {}), '(be)\n', (3225, 3229), False, 'import logging\n'), ((3431, 3486), 'logging.info', 'logging.info', (['f"""Loading best model {self.best_model_p}"""'], {}), "(f'Loading best model {self.best_model_p}')\n", (3443, 3486), False, 'import logging\n'), ((3515, 3570), 'torch.load', 'torch.load', (['self.best_model_p'], {'map_location': 'self.device'}), '(self.best_model_p, map_location=self.device)\n', (3525, 3570), False, 'import torch\n'), ((8366, 8402), 'logging.info', 'logging.info', (['"""Training validation:"""'], {}), "('Training validation:')\n", (8378, 8402), False, 'import logging\n'), ((8423, 8456), 'logging.info', 'logging.info', (['f"""loss: {val_loss}"""'], {}), "(f'loss: {val_loss}')\n", (8435, 8456), False, 'import logging\n'), ((8477, 8514), 'logging.info', 'logging.info', (['f"""accuracy: {accuracy}"""'], {}), "(f'accuracy: {accuracy}')\n", (8489, 8514), False, 'import logging\n'), ((12098, 12222), 'transformers.get_constant_schedule_with_warmup', 'transformers.get_constant_schedule_with_warmup', ([], {'optimizer': 'optimizer', 'num_warmup_steps': 'warmup_steps', 'last_epoch': 'lastEpoch'}), '(optimizer=optimizer,\n num_warmup_steps=warmup_steps, last_epoch=lastEpoch)\n', (12144, 12222), False, 'import transformers\n'), ((15899, 15918), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (15915, 15918), False, 'import torch\n'), ((5867, 5887), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (5885, 5887), False, 'import socket\n'), ((14628, 14639), 'time.time', 'time.time', ([], {}), '()\n', (14637, 14639), False, 'import time\n'), ((3324, 3335), 'time.time', 'time.time', ([], {}), '()\n', (3333, 3335), False, 'import time\n'), ((14158, 14179), 'torch.sigmoid', 'torch.sigmoid', (['scores'], {}), '(scores)\n', (14171, 14179), False, 'import torch\n')] |
import os
import sys
import numpy as np
from joblib import Parallel, delayed
import joblib
import argparse
import importlib
from itertools import product
import collections
from copy import deepcopy
from mcpy.utils import filesafe
from mcpy import plotting
def _get(opts, key, default):
return opts[key] if (key in opts) else default
def _check_valid_config(config):
assert 'dgps' in config, "config dict must contain dgps"
assert 'dgp_opts' in config, "config dict must contain dgp_opts"
assert 'method_opts' in config, "config dict must contain method_opts"
assert 'mc_opts' in config, "config dict must contain mc_opts"
assert 'metrics' in config, "config dict must contain metrics"
assert 'methods' in config, "config dict must contain methods"
assert 'plots' in config, "config dict must contain plots"
assert 'target_dir' in config, "config must contain target_dir"
assert 'reload_results' in config, "config must contain reload_results"
assert 'n_experiments' in config['mc_opts'], "config[mc_opts] must contain n_experiments"
assert 'seed' in config['mc_opts'], "config[mc_opts] must contain seed"
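# Hedged sketch of a minimal valid config (``my_dgp``/``my_method``/``my_metric``
# are hypothetical user-supplied callables, not part of mcpy):
#
# CONFIG = {
#     'target_dir': 'results', 'reload_results': False,
#     'mc_opts': {'n_experiments': 10, 'seed': 123},
#     'dgp_opts': {'n_samples': 100},
#     'method_opts': {'alpha': 0.1},
#     'dgps': {'dgp1': my_dgp},        # fn(dgp_opts) -> (data, true_param)
#     'methods': {'m1': my_method},    # fn(data, method_opts) -> estimate
#     'metrics': {'bias': my_metric},  # fn(estimate, true_param) -> value
#     'plots': {},
# }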
class MonteCarlo:
def __init__(self, config):
self.config = config
_check_valid_config(self.config)
config['param_str'] = '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['method_opts'].items()])
return
def experiment(self, exp_id):
''' Runs an experiment on a single randomly generated instance and sample and returns
the parameter estimates for each method and the evaluated metrics for each method
'''
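        # seed with exp_id so parallel experiments are reproducible yet distinct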
np.random.seed(exp_id)
param_estimates = {}
true_params = {}
for dgp_name, dgp_fn in self.config['dgps'].items():
data, true_param = dgp_fn(self.config['dgp_opts'])
true_params[dgp_name] = true_param
param_estimates[dgp_name] = {}
for method_name, method in self.config['methods'].items():
param_estimates[dgp_name][method_name] = method(
data, self.config['method_opts'])
return param_estimates, true_params
def run(self):
''' Runs multiple experiments in parallel on randomly generated instances and samples and returns
the parameter estimates for each method and the evaluated metrics for each method across all
experiments
'''
random_seed = self.config['mc_opts']['seed']
if not os.path.exists(self.config['target_dir']):
os.makedirs(self.config['target_dir'])
results_file = os.path.join(
self.config['target_dir'], 'results_{}.jbl'.format(self.config['param_str']))
if self.config['reload_results'] and os.path.exists(results_file):
results = joblib.load(results_file)
else:
results = Parallel(n_jobs=_get(self.config['mc_opts'], 'n_jobs', -1), verbose=1)(
delayed(self.experiment)(random_seed + exp_id)
for exp_id in range(self.config['mc_opts']['n_experiments']))
joblib.dump(results, results_file)
param_estimates = {}
metric_results = {}
for dgp_name in self.config['dgps'].keys():
param_estimates[dgp_name] = {}
metric_results[dgp_name] = {}
for method_name in self.config['methods'].keys():
param_estimates[dgp_name][method_name] = np.array(
[results[i][0][dgp_name][method_name] for i in range(self.config['mc_opts']['n_experiments'])])
metric_results[dgp_name][method_name] = {}
for metric_name, metric_fn in self.config['metrics'].items():
metric_results[dgp_name][method_name][metric_name] = np.array([metric_fn(results[i][0][dgp_name][method_name], results[i][1][dgp_name])
for i in range(self.config['mc_opts']['n_experiments'])])
for plot_name, plot_fn in self.config['plots'].items():
if isinstance(plot_fn, dict):
plotting.instance_plot(
plot_name, param_estimates, metric_results, self.config, plot_fn)
else:
plot_fn(param_estimates, metric_results, self.config)
return param_estimates, metric_results
class MonteCarloSweep:
def __init__(self, config):
self.config = config
_check_valid_config(self.config)
config['param_str'] = '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['method_opts'].items()])
return
def _stringify_param(self, param):
if hasattr(param, "__len__"):
return '{}_to_{}'.format(np.min(param), np.max(param))
else:
return param
def run(self):
dgp_sweep_params = []
dgp_sweep_param_vals = []
for dgp_key, dgp_val in self.config['dgp_opts'].items():
if hasattr(dgp_val, "__len__"):
dgp_sweep_params.append(dgp_key)
dgp_sweep_param_vals.append(dgp_val)
n_sweeps = len(list(product(*dgp_sweep_param_vals)))
if 'cluster_opts' in self.config:
n_nodes = _get(self.config['cluster_opts'], 'n_nodes', 1)
node_id = _get(self.config['cluster_opts'], 'node_id', 0)
else:
n_nodes = 1
node_id = 0
start_sweep, end_sweep = 0, 0
if node_id < n_nodes - 1:
node_splits = np.array_split(np.arange(n_sweeps), n_nodes - 1)
start_sweep, end_sweep = node_splits[node_id][0], node_splits[node_id][-1]
sweep_keys = []
sweep_params = []
sweep_metrics = []
inst_config = deepcopy(self.config)
# This is the node that loads results and plots sweep plots
if (n_nodes > 1) and (node_id == n_nodes - 1):
inst_config['reload_results'] = True
inst_config['plots'] = {}
for it, vec in enumerate(product(*dgp_sweep_param_vals)):
if (node_id == n_nodes - 1) or ((it >= start_sweep) and (it <= end_sweep)):
setting = list(zip(dgp_sweep_params, vec))
for k, v in setting:
inst_config['dgp_opts'][k] = v
params, metrics = MonteCarlo(inst_config).run()
sweep_keys.append(setting)
sweep_params.append(params)
sweep_metrics.append(metrics)
if node_id == n_nodes - 1:
for plot_key, plot_fn in self.config['sweep_plots'].items():
if isinstance(plot_fn, dict):
plotting.sweep_plot(
plot_key, sweep_keys, sweep_params, sweep_metrics, self.config, plot_fn)
else:
plot_fn(plot_key, sweep_keys, sweep_params,
sweep_metrics, self.config)
return sweep_keys, sweep_params, sweep_metrics
def monte_carlo_main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--config', type=str, help='config file')
args = parser.parse_args(sys.argv[1:])
config = importlib.import_module(args.config)
MonteCarlo(config.CONFIG).run()
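
# Hedged usage note: --config takes an importable module path, e.g.
#   python run_mc.py --config experiments.my_config
# where experiments/my_config.py defines a module-level CONFIG dict as above.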
if __name__ == "__main__":
monte_carlo_main()
| [
"os.path.exists",
"mcpy.utils.filesafe",
"mcpy.plotting.sweep_plot",
"importlib.import_module",
"argparse.ArgumentParser",
"os.makedirs",
"joblib.load",
"itertools.product",
"mcpy.plotting.instance_plot",
"numpy.min",
"numpy.max",
"numpy.random.seed",
"copy.deepcopy",
"joblib.delayed",
"... | [((7760, 7821), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (7783, 7821), False, 'import argparse\n'), ((7945, 7981), 'importlib.import_module', 'importlib.import_module', (['args.config'], {}), '(args.config)\n', (7968, 7981), False, 'import importlib\n'), ((2024, 2046), 'numpy.random.seed', 'np.random.seed', (['exp_id'], {}), '(exp_id)\n', (2038, 2046), True, 'import numpy as np\n'), ((6500, 6521), 'copy.deepcopy', 'deepcopy', (['self.config'], {}), '(self.config)\n', (6508, 6521), False, 'from copy import deepcopy\n'), ((2879, 2920), 'os.path.exists', 'os.path.exists', (["self.config['target_dir']"], {}), "(self.config['target_dir'])\n", (2893, 2920), False, 'import os\n'), ((2934, 2972), 'os.makedirs', 'os.makedirs', (["self.config['target_dir']"], {}), "(self.config['target_dir'])\n", (2945, 2972), False, 'import os\n'), ((3146, 3174), 'os.path.exists', 'os.path.exists', (['results_file'], {}), '(results_file)\n', (3160, 3174), False, 'import os\n'), ((3198, 3223), 'joblib.load', 'joblib.load', (['results_file'], {}), '(results_file)\n', (3209, 3223), False, 'import joblib\n'), ((3485, 3519), 'joblib.dump', 'joblib.dump', (['results', 'results_file'], {}), '(results, results_file)\n', (3496, 3519), False, 'import joblib\n'), ((6765, 6795), 'itertools.product', 'product', (['*dgp_sweep_param_vals'], {}), '(*dgp_sweep_param_vals)\n', (6772, 6795), False, 'from itertools import product\n'), ((4517, 4610), 'mcpy.plotting.instance_plot', 'plotting.instance_plot', (['plot_name', 'param_estimates', 'metric_results', 'self.config', 'plot_fn'], {}), '(plot_name, param_estimates, metric_results, self.\n config, plot_fn)\n', (4539, 4610), False, 'from mcpy import plotting\n'), ((5496, 5509), 'numpy.min', 'np.min', (['param'], {}), '(param)\n', (5502, 5509), True, 'import numpy as np\n'), ((5511, 5524), 'numpy.max', 'np.max', (['param'], {}), '(param)\n', (5517, 5524), True, 'import numpy as np\n'), ((5889, 5919), 'itertools.product', 'product', (['*dgp_sweep_param_vals'], {}), '(*dgp_sweep_param_vals)\n', (5896, 5919), False, 'from itertools import product\n'), ((6279, 6298), 'numpy.arange', 'np.arange', (['n_sweeps'], {}), '(n_sweeps)\n', (6288, 6298), True, 'import numpy as np\n'), ((1432, 1443), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (1440, 1443), False, 'from mcpy.utils import filesafe\n'), ((4946, 4957), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (4954, 4957), False, 'from mcpy.utils import filesafe\n'), ((7405, 7502), 'mcpy.plotting.sweep_plot', 'plotting.sweep_plot', (['plot_key', 'sweep_keys', 'sweep_params', 'sweep_metrics', 'self.config', 'plot_fn'], {}), '(plot_key, sweep_keys, sweep_params, sweep_metrics, self\n .config, plot_fn)\n', (7424, 7502), False, 'from mcpy import plotting\n'), ((1568, 1579), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (1576, 1579), False, 'from mcpy.utils import filesafe\n'), ((1705, 1716), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (1713, 1716), False, 'from mcpy.utils import filesafe\n'), ((3348, 3372), 'joblib.delayed', 'delayed', (['self.experiment'], {}), '(self.experiment)\n', (3355, 3372), False, 'from joblib import Parallel, delayed\n'), ((5105, 5116), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (5113, 5116), False, 'from mcpy.utils import filesafe\n'), ((5265, 5276), 'mcpy.utils.filesafe', 'filesafe', (['k'], {}), '(k)\n', (5273, 5276), False, 'from mcpy.utils 
import filesafe\n')] |
import pytest
import pyvibdmc as pv
import os
import numpy as np
def test_imp_samp_derivs():
water_coord = np.array([[1.81005599, 0., 0.],
[-0.45344658, 1.75233806, 0.],
[0., 0., 0.]]) * 1.01
water_coord = np.tile(water_coord, (1000, 1, 1))
ohs = [[0,2],[1,2]]
hoh = [0,2,1]
crh = pv.ChainRuleHelper(water_coord,np)
dr_dxs = [crh.dr_dx(oh) for oh in ohs]
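    # each quantity below is computed twice: once from scratch and once reusing
    # precomputed lower-order derivatives; the two call paths should agree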
d2r_dx2s = [crh.d2r_dx2(oh) for oh in ohs]
d2r_dx2s = [crh.d2r_dx2(oh,dr_dx=dr_dxs[num]) for num, oh in enumerate(ohs)]
dcth_dx = crh.dcth_dx(hoh)
dcth_dx = crh.dcth_dx(hoh,
dr_da=dr_dxs[0],
dr_dc=dr_dxs[1])
dth_dx = crh.dth_dx(hoh)
dth_dx = crh.dth_dx(hoh,
dcth_dx=dcth_dx,
dr_da=dr_dxs[0],
dr_dc=dr_dxs[1])
d2th_dx2 = crh.d2th_dx2(hoh)
d2th_dx2 = crh.d2th_dx2(hoh,
dcth_dx=dcth_dx,
dr_da=dr_dxs[0],
dr_dc=dr_dxs[1],
d2r_da2=d2r_dx2s[0],
                            d2r_dc2=d2r_dx2s[1])  # second derivative of the second O-H bond
    # smoke test: passes as long as every chain-rule call above completes
    assert True
#
# def test_dpsi_dx():
# sim_ex_dir = "imp_samp_results"
# # initialize potential
# potDir = os.path.join(os.path.dirname(__file__), '../sample_potentials/FortPots/Partridge_Schwenke_H2O/')
# pyFile = 'h2o_potential.py'
# potFunc = 'water_pot'
# harm_pot = pv.Potential(potential_function=potFunc,
# python_file=pyFile,
# potential_directory=potDir,
# num_cores=8)
#
# water_coord = np.array([[1.81005599, 0., 0.],
# [-0.45344658, 1.75233806, 0.],
# [0., 0., 0.]]) * 1.01
# water_coord = np.expand_dims(water_coord, 0)
# start_cds = np.tile(water_coord, (2000, 1, 1))
# impo = pv.ImpSampManager(trial_function='trial_wavefunction',
# trial_directory=potDir,
# python_file='h2o_trial.py',
# pot_manager=harm_pot,
# deriv_function='dpsi_dx')
#
# myDMC = pv.DMC_Sim(sim_name="water_impsamp_test_ana",
# output_folder=sim_ex_dir,
# weighting='discrete',
# num_walkers=2000,
# num_timesteps=200,
# equil_steps=5,
# chkpt_every=10,
# wfn_every=10,
# desc_wt_steps=5,
# atoms=["H", "H", "O"],
# delta_t=1,
# potential=harm_pot,
# imp_samp=impo,
# log_every=1,
# start_structures=start_cds,
# )
# myDMC.run()
#
# def test_hex():
# sim_ex_dir = "imp_samp_results"
# # initialize potential
# potDir = '/home/netid.washington.edu/rjdiri/Documents/Potentials/legacy_mbpol'
# pyFile = 'call_mbpol.py'
# potFunc = 'call_hexamer'
# harm_pot = pv.Potential(potential_function=potFunc,
# python_file=pyFile,
# potential_directory=potDir,
# num_cores=8)
#
# hex = np.array([[0.80559297, 1.82637417, 0.19044583],
# [1.64546268, 1.33062728, 0.20230004],
# [1.03131975, 2.74531261, 0.3303837],
# [-0.86971419, -0.05280485, 1.64663647],
# [-0.40947453, 0.75209702, 1.37618396],
# [-1.70683682, -0.02424652, 1.15831962],
# [0.65167739, -1.73597316, 0.2335045],
# [0.05821864, -1.2362209, 0.84210027],
# [0.569203, -2.6591634, 0.4706903],
# [-0.51396268, 0.08861126, -1.76674358],
# [-0.09074241, 0.82334616, -1.30525568],
# [-0.09916254, -0.6895166, -1.37517223],
# [2.81742948, -0.01780752, 0.18363679],
# [2.20422291, -0.77223806, 0.20893524],
# [3.38891525, -0.17263024, -0.5686021],
# [-2.86669414, -0.14282213, -0.31653989],
# [-2.17356321, -0.01889467, -0.98894102],
# [-3.61843908, 0.36974668, -0.61083718]])
# hex = pv.Constants.convert(hex, 'angstroms', to_AU=True)
# water_coord = np.expand_dims(hex, 0)
# start_cds = np.tile(water_coord, (20000, 1, 1))
# impo = pv.ImpSampManager(trial_function='trial_wavefunction',
# trial_directory=potDir,
# python_file='call_trial.py',
# pot_manager=harm_pot,
# deriv_function='dpsi_dx')
#
# myDMC = pv.DMC_Sim(sim_name="hex_impsamp_test_ana",
# output_folder=sim_ex_dir,
# weighting='discrete',
# num_walkers=20000,
# num_timesteps=100,
# equil_steps=5,
# chkpt_every=10,
# wfn_every=10,
# desc_wt_steps=5,
# atoms=["O", "H", "H"]*6,
# delta_t=1,
# potential=harm_pot,
# imp_samp=impo,
# log_every=1,
# start_structures=start_cds,
# )
# myDMC.run()
| [
"numpy.tile",
"numpy.array",
"pyvibdmc.ChainRuleHelper"
] | [((272, 306), 'numpy.tile', 'np.tile', (['water_coord', '(1000, 1, 1)'], {}), '(water_coord, (1000, 1, 1))\n', (279, 306), True, 'import numpy as np\n'), ((360, 395), 'pyvibdmc.ChainRuleHelper', 'pv.ChainRuleHelper', (['water_coord', 'np'], {}), '(water_coord, np)\n', (378, 395), True, 'import pyvibdmc as pv\n'), ((113, 200), 'numpy.array', 'np.array', (['[[1.81005599, 0.0, 0.0], [-0.45344658, 1.75233806, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.81005599, 0.0, 0.0], [-0.45344658, 1.75233806, 0.0], [0.0, 0.0,\n 0.0]])\n', (121, 200), True, 'import numpy as np\n')] |
import numpy as np
from scipy.signal import kaiserord, lfilter, firwin, freqz, lfilter_zi
class fir_filter(object):
def __init__(self, fs, cutoff, ripple_db):
self.fs = fs # sample_rate
# The Nyquist rate of the signal.
nyq_rate = self.fs / 2.0
# The desired width of the transition from pass to stop,
# relative to the Nyquist rate. We'll design the filter
# with a 5 Hz transition width.
width = 5.0 / nyq_rate
# The desired attenuation in the stop band, in dB.
        self.ripple_db = ripple_db
# Compute the order and Kaiser parameter for the FIR filter.
N, beta = kaiserord(ripple_db, width)
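        # higher attenuation (ripple_db) or a narrower transition width both
        # increase the required number of taps N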
#print('N = ', N)
# The cutoff frequency of the filter.
self.cutoff = cutoff
# Use firwin with a Kaiser window to create a lowpass FIR filter.
self.taps = firwin(N, self.cutoff / nyq_rate, window=('kaiser', beta))
self.N = N
self.x_buffer = []
self.index = 0
self.zi = lfilter_zi(self.taps, 1.0)
self.init_done = False
def apply(self, x):
self.x_buffer.append(x)
if not self.init_done:
if self.index < self.N-1:
filtered_x = x
self.index += 1
else:
self.init_done = True
                self.index = 0
if self.init_done:
y = lfilter(self.taps, 1.0, np.array(self.x_buffer))
filtered_x = y[-1]
#
#
# if self.index < self.N - 1:
# filtered_x = x
# else:
# self.init_done = True
# # Use lfilter to filter x with the FIR filter.
# #y = lfilter(self.taps, 1.0, np.array(self.x_buffer))
# y, _ = lfilter(self.taps, 1.0, np.array(self.x_buffer), zi=self.zi * self.x_buffer[0])
# filtered_x = y[-1]
#
# if len(self.x_buffer) > self.N:
# del self.x_buffer[0]
# self.index += 1
return filtered_x
def reset(self):
self.x_buffer = []
self.index = 0
self.init_done = False
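
# Sanity sketch (hedged, not in the original script): past the warm-up
# passthrough, the streaming apply() should match a one-shot lfilter run,
# since both start from zero initial conditions:
#
# f = fir_filter(100, 10, 10)
# x_test = np.random.randn(500)
# y_stream = np.array([f.apply(v) for v in x_test])
# y_batch = lfilter(f.taps, 1.0, x_test)
# assert np.allclose(y_stream[f.N:], y_batch[f.N:])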
from utils.Logger import IO
from matplotlib.pylab import plt
rlt = IO('RewardDuty/fitness5_param.pkl').read_pickle()
(rewards, commands, v_e) = rlt
x = rewards[:,0]
T = 1000
fir = fir_filter(100,10,10)
filter_x_list = []
for i in range(T):
state = x[i]
state_e = fir.apply(state)
filter_x_list.append(state_e)
filter_x_list = np.array(filter_x_list)
N = fir.N
t = np.arange(0,10,0.01)
plt.plot(t, x[:T], 'b--')
#plot(y)
plt.plot(t, filter_x_list, 'r')
#plt.plot(t[N-1:] , filter_x_list[N-1:], 'g', linewidth=4)
plt.show()
| [
"utils.Logger.IO",
"scipy.signal.firwin",
"scipy.signal.lfilter_zi",
"numpy.array",
"matplotlib.pylab.plt.plot",
"matplotlib.pylab.plt.show",
"numpy.arange",
"scipy.signal.kaiserord"
] | [((2478, 2501), 'numpy.array', 'np.array', (['filter_x_list'], {}), '(filter_x_list)\n', (2486, 2501), True, 'import numpy as np\n'), ((2516, 2538), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.01)'], {}), '(0, 10, 0.01)\n', (2525, 2538), True, 'import numpy as np\n'), ((2537, 2562), 'matplotlib.pylab.plt.plot', 'plt.plot', (['t', 'x[:T]', '"""b--"""'], {}), "(t, x[:T], 'b--')\n", (2545, 2562), False, 'from matplotlib.pylab import plt\n'), ((2572, 2603), 'matplotlib.pylab.plt.plot', 'plt.plot', (['t', 'filter_x_list', '"""r"""'], {}), "(t, filter_x_list, 'r')\n", (2580, 2603), False, 'from matplotlib.pylab import plt\n'), ((2663, 2673), 'matplotlib.pylab.plt.show', 'plt.show', ([], {}), '()\n', (2671, 2673), False, 'from matplotlib.pylab import plt\n'), ((651, 678), 'scipy.signal.kaiserord', 'kaiserord', (['ripple_db', 'width'], {}), '(ripple_db, width)\n', (660, 678), False, 'from scipy.signal import kaiserord, lfilter, firwin, freqz, lfilter_zi\n'), ((874, 932), 'scipy.signal.firwin', 'firwin', (['N', '(self.cutoff / nyq_rate)'], {'window': "('kaiser', beta)"}), "(N, self.cutoff / nyq_rate, window=('kaiser', beta))\n", (880, 932), False, 'from scipy.signal import kaiserord, lfilter, firwin, freqz, lfilter_zi\n'), ((1023, 1049), 'scipy.signal.lfilter_zi', 'lfilter_zi', (['self.taps', '(1.0)'], {}), '(self.taps, 1.0)\n', (1033, 1049), False, 'from scipy.signal import kaiserord, lfilter, firwin, freqz, lfilter_zi\n'), ((2199, 2234), 'utils.Logger.IO', 'IO', (['"""RewardDuty/fitness5_param.pkl"""'], {}), "('RewardDuty/fitness5_param.pkl')\n", (2201, 2234), False, 'from utils.Logger import IO\n'), ((1426, 1449), 'numpy.array', 'np.array', (['self.x_buffer'], {}), '(self.x_buffer)\n', (1434, 1449), True, 'import numpy as np\n')] |
"""
Application class for the model 'im_3_kW' of an induction machine
https://gitlab.onelab.info/doc/models/-/wikis/Electric-machines
"""
from typing import Dict
import os
import subprocess
from subprocess import PIPE
import tempfile
import time
import numpy as np
from pymgrit.core.application import Application
from pymgrit.induction_machine.vector_machine import VectorMachine
from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, \
set_resolution, get_preresolution
class InductionMachine(Application):
"""
Simulating an induction machine using the model 'im_3_kW' from
https://gitlab.onelab.info/doc/models/-/wikis/Electric-machines
"""
def __init__(self, grid: str, path_im3kw: str, path_getdp: str, imposed_speed: int = 1, nb_trelax: int = 2,
analysis_type: int = 1, nb_max_iter: int = 60, relaxation_factor: float = 0.5,
stop_criterion: float = 1e-6, nonlinear: bool = False, pwm: bool = False, pro_file: str = 'im_3kW.pro',
verbose: bool = False, *args, **kwargs):
"""
Constructor
:param nonlinear: Nonlinear or linear model
:param pwm: pwm or sin rhs
:param grid: mesh
:param path_im3kw: path to im_3kW data
:param path_getdp: path to getdp
:param imposed_speed: imposed speed
"""
super().__init__(*args, **kwargs)
self.pro_path = path_im3kw + pro_file
if not os.path.isfile(self.pro_path):
raise Exception('Found no valid .pro file in', self.pro_path)
self.getdp_path = path_getdp
if not os.path.isfile(self.getdp_path):
raise Exception('Getdp not found (http://getdp.info/)')
self.nl = int(nonlinear)
self.pwm = int(pwm)
self.mesh = grid + '.msh'
self.pre = grid + '.pre'
self.further_unknowns_front = 8
self.further_unknowns_back = 15
cor_to_un, un_to_cor, boundary = pre_file(path_im3kw + self.pre)
self.nx = len(un_to_cor) + self.further_unknowns_front + self.further_unknowns_back
self.gopt = {'Verbose': int(verbose), 'TimeStep': self.t[1] - self.t[0], 'Executable': self.getdp_path,
'PreProcessing': '#1'}
self.fopt = ['Flag_AnalysisType', analysis_type, 'Flag_NL', self.nl, 'Flag_ImposedSpeed', imposed_speed,
'Nb_max_iter', nb_max_iter, 'relaxation_factor', relaxation_factor, 'stop_criterion',
stop_criterion, 'NbTrelax', nb_trelax, 'Flag_PWM', self.pwm]
version_test = subprocess.run([self.gopt['Executable'], '--version'], stdout=PIPE, stderr=PIPE)
if version_test.returncode:
raise Exception('getdp not found.')
self.vector_template = VectorMachine(u_front_size=self.further_unknowns_front,
u_back_size=self.further_unknowns_back,
u_middle_size=len(un_to_cor))
self.vector_t_start = VectorMachine(u_front_size=self.further_unknowns_front,
u_back_size=self.further_unknowns_back,
u_middle_size=len(un_to_cor))
def step(self, u_start: VectorMachine, t_start: float, t_stop: float) -> VectorMachine:
"""
Time integration routine
:param u_start: approximate solution for the input time t_start
:param t_start: time associated with the input approximate solution u_start
:param t_stop: time to evolve the input approximate solution to
:return: approximate solution at input time t_stop
"""
soli = self.run_getdp(u_start=u_start.get_values(), t_start=t_start, t_stop=t_stop)
ret = VectorMachine(u_front_size=u_start.u_front_size,
u_back_size=u_start.u_back_size, u_middle_size=u_start.u_middle_size)
ret.set_values(values=soli['y'][-1], jl=soli['jl'][-1], ia=soli['ia'][-1], ib=soli['ib'][-1], ic=soli['ic'][-1],
ua=soli['ua'][-1], ub=soli['ub'][-1], uc=soli['uc'][-1], tr=soli['tr'][-1])
return ret
def run_getdp(self, u_start: np.ndarray, t_start: float, t_stop: float) -> Dict:
"""
Calls getdp
:param u_start: approximate solution for the input time t_start
:param t_start: time associated with the input approximate solution u_start
:param t_stop: time to evolve the input approximate solution to
:return: approximate solution at input time t_stop
"""
        if np.any(np.isnan(u_start)):
raise Exception('Approximation contains nan')
fdir, file = os.path.split(self.pro_path)
fname, fext = os.path.splitext(file)
funargstr = ''
for i in range(0, len(self.fopt), 2):
if is_numeric(self.fopt[i + 1]):
funargstr = funargstr + ' -setnumber ' + str(self.fopt[i]) + ' ' + str(self.fopt[i + 1])
else:
funargstr = funargstr + ' -setstring ' + str(self.fopt[i]) + ' ' + self.fopt[i + 1]
funargstr = funargstr[1:]
mshfile = os.path.join(fdir, self.mesh)
with tempfile.TemporaryDirectory() as tmpdir:
tmp_name = os.path.join(tmpdir, fname)
resdir = os.path.join(tmpdir, 'res')
prefile = os.path.join(tmpdir, fname + '.pre')
resfile = os.path.join(tmpdir, fname + '.res')
joule_file = os.path.join(tmpdir, 'resJL.dat')
ua_file = os.path.join(tmpdir, 'resUa.dat')
ub_file = os.path.join(tmpdir, 'resUb.dat')
uc_file = os.path.join(tmpdir, 'resUc.dat')
ia_file = os.path.join(tmpdir, 'resIa.dat')
ib_file = os.path.join(tmpdir, 'resIb.dat')
ic_file = os.path.join(tmpdir, 'resIc.dat')
tr_file = os.path.join(tmpdir, 'resTr.dat')
# Preprocessing
exe_string = [self.gopt['Executable'],
self.pro_path,
'-pre "' + self.gopt['PreProcessing'] + '"',
'-msh', mshfile,
'-name', tmp_name,
'-res', resfile,
'-setnumber timemax', str(t_stop),
'-setnumber dtime', str(self.gopt['TimeStep']),
'-setstring ResDir', resdir,
funargstr]
if self.gopt['Verbose'] == 1:
status = subprocess.run(' '.join(exe_string), shell=True)
else:
status = subprocess.run(' '.join(exe_string), shell=True, stdout=PIPE, stderr=PIPE)
if status.returncode:
raise Exception('preprocessing failed')
num_dofs = np.size(u_start)
num_pres = get_preresolution(file=prefile)
if num_dofs != np.sum(num_pres):
raise Exception(
'u_start has wrong size: ' + str(num_dofs) + ' instead of ' + str(num_pres) + ': ' + str(prefile))
# Create initial data
set_resolution(file=resfile, t_start=t_start, u_start=u_start, num_dofs=num_dofs)
# Compute solution
exe_string = [self.gopt['Executable'],
self.pro_path,
'-restart',
'-msh', mshfile,
'-name', tmp_name,
'-res', resfile,
'-setnumber timemax', str(t_stop),
'-setnumber dtime', str(self.gopt['TimeStep']),
'-setstring ResDir', resdir,
funargstr]
if self.gopt['Verbose'] == 1:
status = subprocess.run(' '.join(exe_string), shell=True)
else:
status = subprocess.run(' '.join(exe_string), shell=True, stdout=PIPE, stderr=PIPE)
if status.returncode:
raise Exception('getdp solving failed')
# Read results
t, y = getdp_read_resolution(file=resfile, num_dofs=num_dofs)
jl = get_values_from(file=joule_file)
ia = get_values_from(file=ia_file)
ib = get_values_from(file=ib_file)
ic = get_values_from(file=ic_file)
ua = get_values_from(file=ua_file)
ub = get_values_from(file=ub_file)
uc = get_values_from(file=uc_file)
tr = get_values_from(file=tr_file)
return {'x': t, 'y': y, 'jl': jl, 'ia': ia, 'ib': ib, 'ic': ic, 'ua': ua, 'ub': ub, 'uc': uc, 'tr': tr}
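
# Hedged usage sketch (paths are hypothetical; requires the im_3kW model files,
# a local getdp binary, and pymgrit's time-grid arguments t_start/t_stop/nt):
#
# machine = InductionMachine(grid='im_3kW', path_im3kw='models/im_3kW/',
#                            path_getdp='/usr/bin/getdp',
#                            t_start=0.0, t_stop=0.03, nt=31)
# u1 = machine.step(machine.vector_t_start, t_start=machine.t[0],
#                   t_stop=machine.t[1])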
| [
"pymgrit.induction_machine.helper.is_numeric",
"tempfile.TemporaryDirectory",
"pymgrit.induction_machine.helper.set_resolution",
"pymgrit.induction_machine.helper.get_values_from",
"numpy.size",
"subprocess.run",
"os.path.splitext",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.sum",... | [((2013, 2044), 'pymgrit.induction_machine.helper.pre_file', 'pre_file', (['(path_im3kw + self.pre)'], {}), '(path_im3kw + self.pre)\n', (2021, 2044), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((2621, 2706), 'subprocess.run', 'subprocess.run', (["[self.gopt['Executable'], '--version']"], {'stdout': 'PIPE', 'stderr': 'PIPE'}), "([self.gopt['Executable'], '--version'], stdout=PIPE, stderr=PIPE\n )\n", (2635, 2706), False, 'import subprocess\n'), ((3822, 3945), 'pymgrit.induction_machine.vector_machine.VectorMachine', 'VectorMachine', ([], {'u_front_size': 'u_start.u_front_size', 'u_back_size': 'u_start.u_back_size', 'u_middle_size': 'u_start.u_middle_size'}), '(u_front_size=u_start.u_front_size, u_back_size=u_start.\n u_back_size, u_middle_size=u_start.u_middle_size)\n', (3835, 3945), False, 'from pymgrit.induction_machine.vector_machine import VectorMachine\n'), ((4745, 4773), 'os.path.split', 'os.path.split', (['self.pro_path'], {}), '(self.pro_path)\n', (4758, 4773), False, 'import os\n'), ((4796, 4818), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (4812, 4818), False, 'import os\n'), ((5210, 5239), 'os.path.join', 'os.path.join', (['fdir', 'self.mesh'], {}), '(fdir, self.mesh)\n', (5222, 5239), False, 'import os\n'), ((1503, 1532), 'os.path.isfile', 'os.path.isfile', (['self.pro_path'], {}), '(self.pro_path)\n', (1517, 1532), False, 'import os\n'), ((1661, 1692), 'os.path.isfile', 'os.path.isfile', (['self.getdp_path'], {}), '(self.getdp_path)\n', (1675, 1692), False, 'import os\n'), ((4645, 4662), 'numpy.isnan', 'np.isnan', (['u_start'], {}), '(u_start)\n', (4653, 4662), True, 'import numpy as np\n'), ((4904, 4932), 'pymgrit.induction_machine.helper.is_numeric', 'is_numeric', (['self.fopt[i + 1]'], {}), '(self.fopt[i + 1])\n', (4914, 4932), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((5254, 5283), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5281, 5283), False, 'import tempfile\n'), ((5318, 5345), 'os.path.join', 'os.path.join', (['tmpdir', 'fname'], {}), '(tmpdir, fname)\n', (5330, 5345), False, 'import os\n'), ((5367, 5394), 'os.path.join', 'os.path.join', (['tmpdir', '"""res"""'], {}), "(tmpdir, 'res')\n", (5379, 5394), False, 'import os\n'), ((5417, 5453), 'os.path.join', 'os.path.join', (['tmpdir', "(fname + '.pre')"], {}), "(tmpdir, fname + '.pre')\n", (5429, 5453), False, 'import os\n'), ((5476, 5512), 'os.path.join', 'os.path.join', (['tmpdir', "(fname + '.res')"], {}), "(tmpdir, fname + '.res')\n", (5488, 5512), False, 'import os\n'), ((5538, 5571), 'os.path.join', 'os.path.join', (['tmpdir', '"""resJL.dat"""'], {}), "(tmpdir, 'resJL.dat')\n", (5550, 5571), False, 'import os\n'), ((5594, 5627), 'os.path.join', 'os.path.join', (['tmpdir', '"""resUa.dat"""'], {}), "(tmpdir, 'resUa.dat')\n", (5606, 5627), False, 'import os\n'), ((5650, 5683), 'os.path.join', 'os.path.join', (['tmpdir', '"""resUb.dat"""'], {}), "(tmpdir, 'resUb.dat')\n", (5662, 5683), False, 'import os\n'), ((5706, 5739), 'os.path.join', 'os.path.join', (['tmpdir', '"""resUc.dat"""'], {}), "(tmpdir, 'resUc.dat')\n", (5718, 5739), False, 'import os\n'), ((5762, 5795), 'os.path.join', 'os.path.join', (['tmpdir', '"""resIa.dat"""'], {}), "(tmpdir, 'resIa.dat')\n", (5774, 5795), False, 'import os\n'), ((5818, 5851), 
'os.path.join', 'os.path.join', (['tmpdir', '"""resIb.dat"""'], {}), "(tmpdir, 'resIb.dat')\n", (5830, 5851), False, 'import os\n'), ((5874, 5907), 'os.path.join', 'os.path.join', (['tmpdir', '"""resIc.dat"""'], {}), "(tmpdir, 'resIc.dat')\n", (5886, 5907), False, 'import os\n'), ((5930, 5963), 'os.path.join', 'os.path.join', (['tmpdir', '"""resTr.dat"""'], {}), "(tmpdir, 'resTr.dat')\n", (5942, 5963), False, 'import os\n'), ((6864, 6880), 'numpy.size', 'np.size', (['u_start'], {}), '(u_start)\n', (6871, 6880), True, 'import numpy as np\n'), ((6904, 6935), 'pymgrit.induction_machine.helper.get_preresolution', 'get_preresolution', ([], {'file': 'prefile'}), '(file=prefile)\n', (6921, 6935), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((7181, 7267), 'pymgrit.induction_machine.helper.set_resolution', 'set_resolution', ([], {'file': 'resfile', 't_start': 't_start', 'u_start': 'u_start', 'num_dofs': 'num_dofs'}), '(file=resfile, t_start=t_start, u_start=u_start, num_dofs=\n num_dofs)\n', (7195, 7267), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8155, 8209), 'pymgrit.induction_machine.helper.getdp_read_resolution', 'getdp_read_resolution', ([], {'file': 'resfile', 'num_dofs': 'num_dofs'}), '(file=resfile, num_dofs=num_dofs)\n', (8176, 8209), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8227, 8259), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'joule_file'}), '(file=joule_file)\n', (8242, 8259), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8277, 8306), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'ia_file'}), '(file=ia_file)\n', (8292, 8306), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8324, 8353), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'ib_file'}), '(file=ib_file)\n', (8339, 8353), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8371, 8400), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'ic_file'}), '(file=ic_file)\n', (8386, 8400), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8418, 8447), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'ua_file'}), '(file=ua_file)\n', (8433, 8447), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8465, 8494), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'ub_file'}), '(file=ub_file)\n', (8480, 8494), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8512, 8541), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 
'uc_file'}), '(file=uc_file)\n', (8527, 8541), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((8559, 8588), 'pymgrit.induction_machine.helper.get_values_from', 'get_values_from', ([], {'file': 'tr_file'}), '(file=tr_file)\n', (8574, 8588), False, 'from pymgrit.induction_machine.helper import is_numeric, pre_file, get_values_from, getdp_read_resolution, set_resolution, get_preresolution\n'), ((6964, 6980), 'numpy.sum', 'np.sum', (['num_pres'], {}), '(num_pres)\n', (6970, 6980), True, 'import numpy as np\n')] |
"""
Reduced 3-body Problem testing script
====================================
Testing the reduced 3-body problem solvers with different numerical algorithms.
"""
import os
import time
from math import pi,cos,sin
import numpy as np
import matplotlib.pyplot as plt
from const import *
import reduced3body as r3b
try:
threads = int(os.environ["OMP_NUM_THREADS"])
except KeyError:
threads = 1
runtime = time.time()
# Precalculated initial conditions
#demo = 'earth_orbit'
#demo = 'lunar_orbit'
#demo = 'hohmann'
#demo = '3_day_hohmann'
#demo = '1_day_hohmann'
#demo = 'reverse_hohmann'
demo = 'low_energy_short'
#demo = 'low_energy_long'
#demo = 'earth_to_L1'
# or Search for trajectories
#demo = 'search_hohmann'
#demo = 'search_low_energy'
#demo = 'search_low_energy_parts8'
#demo = 'search_refine'
n = 1000000
# Set coordinates
if demo == 'earth_orbit':
duration = (2.0*pi*leo_orbit/leo_orbit_vel)/(unit_time*day)
r = leo_orbit/unit_len
v = 0.99732*leo_orbit_vel/unit_vel
theta = 0
x = r*cos(theta)
y = r*sin(theta)
vx = -v*y/r
vy = v*x/r
pos = 0
ang = 0
burn = 0
x0 = earth_pos_x+x
y0 = y
px0 = vx-y0
py0 = vy+x0
elif demo == 'lunar_orbit':
duration = (2.0*pi*lunar_orbit/lunar_orbit_vel)/(unit_time*day)
r = lunar_orbit/unit_len
v = 0.99732*lunar_orbit_vel/unit_vel
theta = 0
x = r*cos(theta)
y = r*sin(theta)
vx = -v*y/r
vy = v*x/r
pos = 0
ang = 0
burn = 0
x0 = moon_pos_x+x
y0 = y
px0 = vx-y0
py0 = vy+x0
elif demo == 'hohmann':
#demo = 'search_refine'
# --------------------------------------------------------------------------
duration = 5/unit_time
pos = -2.086814820119193
ang = -0.000122173047640
burn = 3.111181716545691/unit_vel
x0 = -0.020532317163607
y0 = -0.014769797663479
px0 = 9.302400979050308
py0 = -5.289712560652044
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.111182 km/s
# dV(moon-capture) = 0.800682 km/s
# dV(total) = 3.911863 km/s
# Flight-time = 4.300078 days
# --------------------------------------------------------------------------
elif demo == 'reverse_hohmann':
# --------------------------------------------------------------------------
duration = 4/unit_time
pos = -2.282942228154665
ang = 0.000000000000000
burn = -3.149483130653266/unit_vel
x0 = -0.023249912090507
y0 = -0.012853859046429
px0 = -8.098481905534163
py0 = 6.978997254692934
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.149483 km/s
# dV(moon-capture) = 0.968488 km/s
# dV(total) = 4.117971 km/s
# Flight-time = 3.875497 days
# --------------------------------------------------------------------------
elif demo == 'low_energy_long':
# --------------------------------------------------------------------------
duration = 195/unit_time
pos = 3.794182930145708
ang = 0.023901745288554
burn = 3.090702702702703/unit_vel
x0 = -0.025645129237870
y0 = -0.010311570301966
px0 = 6.539303578815582
py0 = -8.449205705334165
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.090703 km/s
# dV(moon-capture) = 0.704114 km/s
# dV(total) = 3.794816 km/s
# Flight-time = 194.275480 days
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
#demo = 'search_refine'
# duration = 195/unit_time
# pos = 3.794182930145708
# ang = 0.023901745288554
# burn = 3.090702702702703/unit_vel
# x0 = -0.025645129237870
# y0 = -0.010311570301966
# px0 = 6.539303578815583
# py0 = -8.449205705334164
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.090703 km/s
# dV(moon-capture) = 0.704114 km/s
# dV(total) = 3.794817 km/s
# Flight-time = 194.275480 days
# --------------------------------------------------------------------------
elif demo == 'low_energy_short':
#demo = 'search_refine'
# --------------------------------------------------------------------------
duration = 41/unit_time
pos = -0.138042744751570
ang = -0.144259374836607
burn = 3.127288444444444/unit_vel
x0 = 0.004665728429046
y0 = -0.002336647636098
px0 = 1.904735175752430
py0 = 10.504985512873279
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.127288 km/s
# dV(moon-capture) = 0.768534 km/s
# dV(total) = 3.895822 km/s
# Flight-time = 40.617871 days
# --------------------------------------------------------------------------
elif demo == '3_day_hohmann':
#demo = 'search_refine'
# --------------------------------------------------------------------------
duration = 3/unit_time
pos = -2.272183066647597
ang = -0.075821466029764
burn = 3.135519748743719/unit_vel
x0 = -0.023110975767437
y0 = -0.012972499765730
px0 = 8.032228991913522
py0 = -7.100537706154897
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.135520 km/s
# dV(moon-capture) = 0.879826 km/s
# dV(total) = 4.015346 km/s
# Flight-time = 2.999939 days
# --------------------------------------------------------------------------
elif demo == '1_day_hohmann':
#demo = 'search_refine'
duration = 1/unit_time
pos = -2.277654673852600
ang = 0.047996554429844
burn = 3.810000000000000/unit_vel
x0 = -0.023181791813268
y0 = -0.012912351430812
px0 = 8.764829132987316
py0 = -7.263069305305378
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.810000 km/s
# dV(moon-capture) = 3.319455 km/s
# dV(total) = 7.129455 km/s
# Flight-time = 0.997234 days
# --------------------------------------------------------------------------
elif demo == 'earth_to_L1':
demo = 'search_refine'
# --------------------------------------------------------------------------
duration = 191/unit_time
pos = 2.843432239707429
ang = 0.000000000000000
burn = 3.091851851851852/unit_vel
x0 = -0.028385246222264
y0 = 0.004988337832881
px0 = -3.136296304910217
py0 = -10.217405925499762
# --------------------------------------------------------------------------
# dV(earth-escape) = 3.091852 km/s
# dV(at L1) = 0.676226 km/s
# dV(total) = 3.768078 km/s
# Flight-time = 190.001881 days
# --------------------------------------------------------------------------
#################### FUNCTION CALLS ####################
if demo == 'search_hohmann':
tlist,xlist,ylist,pxlist,pylist,errlist,hlist = r3b.hohmann(threads,n)
elif demo == 'search_low_energy':
tlist,xlist,ylist,pxlist,pylist,errlist,hlist = r3b.low_energy(threads,n)
elif demo == 'search_low_energy_parts8':
tlist,xlist,ylist,pxlist,pylist,errlist,hlist = r3b.low_energy_parts8(threads,n)
elif demo == 'search_refine':
tlist,xlist,ylist,pxlist,pylist,errlist,hlist = r3b.refine(threads,n,duration,pos,ang,burn,x0,y0,px0,py0)
else:
tlist,xlist,ylist,pxlist,pylist,errlist,hlist = r3b.trajectory(n,duration,pos,ang,burn,x0,y0,px0,py0)
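# Rotating-frame Hamiltonian of the planar circular restricted 3-body problem:
#   H = (px^2 + py^2)/2 + y*px - x*py - (1-mu)/r1 - mu/r2
# with r1 = sqrt((x+mu)^2 + y^2) and r2 = sqrt((x-1+mu)^2 + y^2), the distances
# to the primaries at (-mu, 0) and (1-mu, 0). H is conserved along an exact
# trajectory, so its relative drift (plotted below) measures integrator error.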
Hlist = pxlist**2/2 + pylist**2/2 + ylist*pxlist - xlist*pylist - (1-mu)/np.sqrt(np.power(mu+xlist,2)+np.power(ylist,2)) - mu/np.sqrt(np.power(1-mu-xlist,2)+np.power(ylist,2))
print("# Final position: %f %f" %(xlist[n-1],ylist[n-1]))
print("# Final impulse: %f %f" % (pxlist[n-1],pylist[n-1]))
print("# Final H: %f" % (Hlist[n-1]))
runtime = time.time()-runtime
print("# Total runtime = %3.2fs" % (runtime))
print("# --------------------------------------------------------------------------")
print("# --- Done with FUNCTION CALLS")
#exit()
#################### PLOTS: POSITION ####################
xlist1 = xlist[:n//2]
ylist1 = ylist[:n//2]
xlist2 = xlist[n//2:]
ylist2 = ylist[n//2:]
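# Rotate from the co-rotating frame (x, y) into the inertial frame (X, Y) through
# the accumulated angle t (the frame rotates with unit angular velocity):
#   X = x*cos(t) - y*sin(t),   Y = x*sin(t) + y*cos(t)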
Xlist1 = xlist[:n//2]*np.cos(tlist[:n//2]) - ylist[:n//2]*np.sin(tlist[:n//2])
Ylist1 = xlist[:n//2]*np.sin(tlist[:n//2]) + ylist[:n//2]*np.cos(tlist[:n//2])
Xlist2 = xlist[n//2:]*np.cos(tlist[n//2:]) - ylist[n//2:]*np.sin(tlist[n//2:])
Ylist2 = xlist[n//2:]*np.sin(tlist[n//2:]) + ylist[n//2:]*np.cos(tlist[n//2:])
Xlist_earth = earth_pos_x*np.cos(tlist)
Ylist_earth = -earth_pos_x*np.sin(tlist)
Xlist_moon = moon_pos_x*np.cos(tlist)
Ylist_moon = moon_pos_x*np.sin(tlist)
# Rel. err
plt.figure()
plt.plot(tlist*unit_time, errlist)
plt.xlabel("time (days)")
plt.ylabel("step error")
plt.yscale('log')
# Step sizes
plt.figure()
plt.plot(tlist*unit_time, hlist)
plt.xlabel("time (days)")
plt.ylabel("step size")
plt.yscale('log')
# Total energy
havg = np.sum(Hlist)/n
hrelerr = (Hlist-havg)/havg
plt.figure()
plt.plot(tlist*unit_time, hrelerr)
plt.xlabel("time (days)")
plt.ylabel("Hamiltonian rel. err (arbitrary units)")
# Zoom earth
xlim = 0.02
ylim = 0.02
xmin = earth_pos_x-xlim
xmax = earth_pos_x+xlim
ymin = -ylim
ymax = ylim
plt.figure()
earth=plt.Circle((earth_pos_x,0),earth_radius/unit_len,color='blue')
earthorbit1=plt.Circle((earth_pos_x,0),(leo_orbit-orbit_range)/unit_len,color='g',fill=False)
earthorbit2=plt.Circle((earth_pos_x,0),(leo_orbit+orbit_range)/unit_len,color='g',fill=False)
plt.gcf().gca().add_artist(earth)
plt.gcf().gca().add_artist(earthorbit1)
plt.gcf().gca().add_artist(earthorbit2)
plt.plot(xlist1,ylist1,'r-')
plt.plot(xlist2,ylist2,'k-')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel("x-position (arbitrary units)")
plt.ylabel("y-position (arbitrary units)")
# Zoom moon
xlim = 0.0055
ylim = 0.0055
xmin = moon_pos_x-xlim
xmax = moon_pos_x+xlim
ymin = -ylim
ymax = ylim
plt.figure()
moon=plt.Circle((moon_pos_x,0),moon_radius/unit_len,color='grey')
moonorbit1=plt.Circle((moon_pos_x,0),(lunar_orbit-orbit_range)/unit_len,color='g',fill=False)
moonorbit2=plt.Circle((moon_pos_x,0),(lunar_orbit+orbit_range)/unit_len,color='g',fill=False)
plt.gcf().gca().add_artist(moon)
plt.gcf().gca().add_artist(moonorbit1)
plt.gcf().gca().add_artist(moonorbit2)
plt.plot(xlist1,ylist1,'r-')
plt.plot(xlist2,ylist2,'k-')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel("x-position (arbitrary units)")
plt.ylabel("y-position (arbitrary units)")
# View center of mass
xlim = 1.3
ylim = 1.3
xmin = -xlim
xmax = xlim
ymin = -ylim
ymax = ylim
# Position plot (X,Y)
plt.figure()
plt.plot(Xlist1,Ylist1,'r')
plt.plot(Xlist2,Ylist2,'k')
plt.plot(Xlist_earth, Ylist_earth, 'blue')
plt.plot(Xlist_moon, Ylist_moon, 'grey')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel("x-position (arbitrary units)")
plt.ylabel("y-position (arbitrary units)")
# Position plot (x,y)
plt.figure()
plt.plot(xlist1,ylist1,'r-')
plt.plot(xlist2,ylist2,'k-')
earth=plt.Circle((earth_pos_x,0),earth_radius/unit_len,color='blue')
earthorbit1=plt.Circle((earth_pos_x,0),(leo_orbit-orbit_range)/unit_len,color='g',fill=False)
earthorbit2=plt.Circle((earth_pos_x,0),(leo_orbit+orbit_range)/unit_len,color='g',fill=False)
moon=plt.Circle((moon_pos_x,0),moon_radius/unit_len,color='grey')
moonorbit1=plt.Circle((moon_pos_x,0),(lunar_orbit-orbit_range)/unit_len,color='g',fill=False)
moonorbit2=plt.Circle((moon_pos_x,0),(lunar_orbit+orbit_range)/unit_len,color='g',fill=False)
plt.gcf().gca().add_artist(earth)
plt.gcf().gca().add_artist(earthorbit1)
plt.gcf().gca().add_artist(earthorbit2)
plt.gcf().gca().add_artist(moon)
plt.gcf().gca().add_artist(moonorbit1)
plt.gcf().gca().add_artist(moonorbit2)
plt.plot(L1_pos_x,0,'gx')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel("x-position (arbitrary units)")
plt.ylabel("y-position (arbitrary units)")
#plt.savefig('fig/r3b/r3b_y(x)_euler_symplectic.pdf',bbox_inches='tight')
plt.show()
plt.close()
print("# --- Done with PLOTS")
# # #################### PLOTS: VELOCITY ####################
# plt.figure()
# plt.plot(tlist, omegalist_e)
# plt.xlabel("time (arbitrary units)")
# plt.ylabel("velocity (arbitrary units)")
# plt.savefig('fig/r3b/r3b_omega(t)_euler_explicit.pdf')
# # plt.show()
# plt.close()
# #################### PHASE-SPACE TRAJECTORY PLOTS ####################
# # Explicit Euler phase-space trajectory
# plt.figure()
# plt.plot(thetalist_e[:len(thetalist_e)/2], omegalist_e[:len(omegalist_e)/2], 'r')
# plt.plot(thetalist_e[len(thetalist_e)/2:], omegalist_e[len(omegalist_e)/2:], 'b')
# plt.xlabel("position (arbitrary units)")
# plt.ylabel("velocity (arbitrary units)")
# plt.savefig('fig/r3b/r3b_phase-space_euler_explicit.pdf',bbox_inches='tight')
# #plt.show()
# plt.close()
# # Implicit Euler phase-space trajectory
# plt.figure()
# plt.plot(thetalist_i[:len(thetalist_i)/2], omegalist_i[:len(omegalist_i)/2], 'r')
# plt.plot(thetalist_i[len(thetalist_i)/2:], omegalist_i[len(omegalist_i)/2:], 'b')
# plt.xlabel("position (arbitrary units)")
# plt.ylabel("velocity (arbitrary units)")
# plt.savefig('fig/r3b/r3b_phase-space_euler_implicit.pdf',bbox_inches='tight')
# #plt.show()
# plt.close()
# # Symplectic Euler phase-space trajectory
# plt.figure()
# plt.plot(thetalist[:len(thetalist)/2], omegalist[:len(omegalist)/2], 'r')
# plt.plot(thetalist[len(thetalist)/2:], omegalist[len(omegalist)/2:], 'b')
# plt.xlabel("position (arbitrary units)")
# plt.ylabel("velocity (arbitrary units)")
# plt.savefig('fig/r3b/r3b_phase-space_euler_symplectic.pdf',bbox_inches='tight')
# #plt.show()
# plt.close()
# print("--- Done with PHASE-SPACE TRAJETORY PLOTS")
| [
"matplotlib.pyplot.ylabel",
"math.cos",
"reduced3body.hohmann",
"reduced3body.low_energy_parts8",
"numpy.sin",
"reduced3body.trajectory",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"reduced3body.refine",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale"... | [((414, 425), 'time.time', 'time.time', ([], {}), '()\n', (423, 425), False, 'import time\n'), ((8816, 8828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8826, 8828), True, 'import matplotlib.pyplot as plt\n'), ((8829, 8865), 'matplotlib.pyplot.plot', 'plt.plot', (['(tlist * unit_time)', 'errlist'], {}), '(tlist * unit_time, errlist)\n', (8837, 8865), True, 'import matplotlib.pyplot as plt\n'), ((8864, 8889), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (days)"""'], {}), "('time (days)')\n", (8874, 8889), True, 'import matplotlib.pyplot as plt\n'), ((8890, 8914), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""step error"""'], {}), "('step error')\n", (8900, 8914), True, 'import matplotlib.pyplot as plt\n'), ((8915, 8932), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8925, 8932), True, 'import matplotlib.pyplot as plt\n'), ((8947, 8959), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8957, 8959), True, 'import matplotlib.pyplot as plt\n'), ((8960, 8994), 'matplotlib.pyplot.plot', 'plt.plot', (['(tlist * unit_time)', 'hlist'], {}), '(tlist * unit_time, hlist)\n', (8968, 8994), True, 'import matplotlib.pyplot as plt\n'), ((8993, 9018), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (days)"""'], {}), "('time (days)')\n", (9003, 9018), True, 'import matplotlib.pyplot as plt\n'), ((9019, 9042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""step size"""'], {}), "('step size')\n", (9029, 9042), True, 'import matplotlib.pyplot as plt\n'), ((9043, 9060), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (9053, 9060), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((9141, 9177), 'matplotlib.pyplot.plot', 'plt.plot', (['(tlist * unit_time)', 'hrelerr'], {}), '(tlist * unit_time, hrelerr)\n', (9149, 9177), True, 'import matplotlib.pyplot as plt\n'), ((9176, 9201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (days)"""'], {}), "('time (days)')\n", (9186, 9201), True, 'import matplotlib.pyplot as plt\n'), ((9202, 9254), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hamiltonian rel. err (arbitrary units)"""'], {}), "('Hamiltonian rel. 
err (arbitrary units)')\n", (9212, 9254), True, 'import matplotlib.pyplot as plt\n'), ((9366, 9378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9376, 9378), True, 'import matplotlib.pyplot as plt\n'), ((9385, 9452), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '(earth_radius / unit_len)'], {'color': '"""blue"""'}), "((earth_pos_x, 0), earth_radius / unit_len, color='blue')\n", (9395, 9452), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9554), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '((leo_orbit - orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((earth_pos_x, 0), (leo_orbit - orbit_range) / unit_len, color=\n 'g', fill=False)\n", (9470, 9554), True, 'import matplotlib.pyplot as plt\n'), ((9554, 9648), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '((leo_orbit + orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((earth_pos_x, 0), (leo_orbit + orbit_range) / unit_len, color=\n 'g', fill=False)\n", (9564, 9648), True, 'import matplotlib.pyplot as plt\n'), ((9750, 9780), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist1', 'ylist1', '"""r-"""'], {}), "(xlist1, ylist1, 'r-')\n", (9758, 9780), True, 'import matplotlib.pyplot as plt\n'), ((9779, 9809), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist2', 'ylist2', '"""k-"""'], {}), "(xlist2, ylist2, 'k-')\n", (9787, 9809), True, 'import matplotlib.pyplot as plt\n'), ((9808, 9828), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (9816, 9828), True, 'import matplotlib.pyplot as plt\n'), ((9828, 9848), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (9836, 9848), True, 'import matplotlib.pyplot as plt\n'), ((9896, 9938), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-position (arbitrary units)"""'], {}), "('x-position (arbitrary units)')\n", (9906, 9938), True, 'import matplotlib.pyplot as plt\n'), ((9939, 9981), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-position (arbitrary units)"""'], {}), "('y-position (arbitrary units)')\n", (9949, 9981), True, 'import matplotlib.pyplot as plt\n'), ((10094, 10106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10104, 10106), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10177), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '(moon_radius / unit_len)'], {'color': '"""grey"""'}), "((moon_pos_x, 0), moon_radius / unit_len, color='grey')\n", (10122, 10177), True, 'import matplotlib.pyplot as plt\n'), ((10184, 10279), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '((lunar_orbit - orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((moon_pos_x, 0), (lunar_orbit - orbit_range) / unit_len, color=\n 'g', fill=False)\n", (10194, 10279), True, 'import matplotlib.pyplot as plt\n'), ((10278, 10373), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '((lunar_orbit + orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((moon_pos_x, 0), (lunar_orbit + orbit_range) / unit_len, color=\n 'g', fill=False)\n", (10288, 10373), True, 'import matplotlib.pyplot as plt\n'), ((10472, 10502), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist1', 'ylist1', '"""r-"""'], {}), "(xlist1, ylist1, 'r-')\n", (10480, 10502), True, 'import matplotlib.pyplot as plt\n'), ((10501, 10531), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist2', 'ylist2', '"""k-"""'], {}), "(xlist2, ylist2, 'k-')\n", (10509, 10531), True, 'import matplotlib.pyplot as plt\n'), 
((10530, 10550), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (10538, 10550), True, 'import matplotlib.pyplot as plt\n'), ((10550, 10570), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (10558, 10570), True, 'import matplotlib.pyplot as plt\n'), ((10618, 10660), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-position (arbitrary units)"""'], {}), "('x-position (arbitrary units)')\n", (10628, 10660), True, 'import matplotlib.pyplot as plt\n'), ((10661, 10703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-position (arbitrary units)"""'], {}), "('y-position (arbitrary units)')\n", (10671, 10703), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10834), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10832, 10834), True, 'import matplotlib.pyplot as plt\n'), ((10835, 10864), 'matplotlib.pyplot.plot', 'plt.plot', (['Xlist1', 'Ylist1', '"""r"""'], {}), "(Xlist1, Ylist1, 'r')\n", (10843, 10864), True, 'import matplotlib.pyplot as plt\n'), ((10863, 10892), 'matplotlib.pyplot.plot', 'plt.plot', (['Xlist2', 'Ylist2', '"""k"""'], {}), "(Xlist2, Ylist2, 'k')\n", (10871, 10892), True, 'import matplotlib.pyplot as plt\n'), ((10891, 10933), 'matplotlib.pyplot.plot', 'plt.plot', (['Xlist_earth', 'Ylist_earth', '"""blue"""'], {}), "(Xlist_earth, Ylist_earth, 'blue')\n", (10899, 10933), True, 'import matplotlib.pyplot as plt\n'), ((10934, 10974), 'matplotlib.pyplot.plot', 'plt.plot', (['Xlist_moon', 'Ylist_moon', '"""grey"""'], {}), "(Xlist_moon, Ylist_moon, 'grey')\n", (10942, 10974), True, 'import matplotlib.pyplot as plt\n'), ((10975, 10995), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (10983, 10995), True, 'import matplotlib.pyplot as plt\n'), ((10995, 11015), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (11003, 11015), True, 'import matplotlib.pyplot as plt\n'), ((11063, 11105), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-position (arbitrary units)"""'], {}), "('x-position (arbitrary units)')\n", (11073, 11105), True, 'import matplotlib.pyplot as plt\n'), ((11106, 11148), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-position (arbitrary units)"""'], {}), "('y-position (arbitrary units)')\n", (11116, 11148), True, 'import matplotlib.pyplot as plt\n'), ((11172, 11184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11182, 11184), True, 'import matplotlib.pyplot as plt\n'), ((11185, 11215), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist1', 'ylist1', '"""r-"""'], {}), "(xlist1, ylist1, 'r-')\n", (11193, 11215), True, 'import matplotlib.pyplot as plt\n'), ((11214, 11244), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist2', 'ylist2', '"""k-"""'], {}), "(xlist2, ylist2, 'k-')\n", (11222, 11244), True, 'import matplotlib.pyplot as plt\n'), ((11249, 11316), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '(earth_radius / unit_len)'], {'color': '"""blue"""'}), "((earth_pos_x, 0), earth_radius / unit_len, color='blue')\n", (11259, 11316), True, 'import matplotlib.pyplot as plt\n'), ((11324, 11418), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '((leo_orbit - orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((earth_pos_x, 0), (leo_orbit - orbit_range) / unit_len, color=\n 'g', fill=False)\n", (11334, 11418), True, 'import matplotlib.pyplot as plt\n'), ((11418, 11512), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(earth_pos_x, 0)', '((leo_orbit + orbit_range) / unit_len)'], 
{'color': '"""g"""', 'fill': '(False)'}), "((earth_pos_x, 0), (leo_orbit + orbit_range) / unit_len, color=\n 'g', fill=False)\n", (11428, 11512), True, 'import matplotlib.pyplot as plt\n'), ((11505, 11570), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '(moon_radius / unit_len)'], {'color': '"""grey"""'}), "((moon_pos_x, 0), moon_radius / unit_len, color='grey')\n", (11515, 11570), True, 'import matplotlib.pyplot as plt\n'), ((11577, 11672), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '((lunar_orbit - orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((moon_pos_x, 0), (lunar_orbit - orbit_range) / unit_len, color=\n 'g', fill=False)\n", (11587, 11672), True, 'import matplotlib.pyplot as plt\n'), ((11671, 11766), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(moon_pos_x, 0)', '((lunar_orbit + orbit_range) / unit_len)'], {'color': '"""g"""', 'fill': '(False)'}), "((moon_pos_x, 0), (lunar_orbit + orbit_range) / unit_len, color=\n 'g', fill=False)\n", (11681, 11766), True, 'import matplotlib.pyplot as plt\n'), ((11979, 12006), 'matplotlib.pyplot.plot', 'plt.plot', (['L1_pos_x', '(0)', '"""gx"""'], {}), "(L1_pos_x, 0, 'gx')\n", (11987, 12006), True, 'import matplotlib.pyplot as plt\n'), ((12005, 12025), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (12013, 12025), True, 'import matplotlib.pyplot as plt\n'), ((12025, 12045), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (12033, 12045), True, 'import matplotlib.pyplot as plt\n'), ((12093, 12135), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-position (arbitrary units)"""'], {}), "('x-position (arbitrary units)')\n", (12103, 12135), True, 'import matplotlib.pyplot as plt\n'), ((12136, 12178), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-position (arbitrary units)"""'], {}), "('y-position (arbitrary units)')\n", (12146, 12178), True, 'import matplotlib.pyplot as plt\n'), ((12253, 12263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12261, 12263), True, 'import matplotlib.pyplot as plt\n'), ((12264, 12275), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12273, 12275), True, 'import matplotlib.pyplot as plt\n'), ((7145, 7168), 'reduced3body.hohmann', 'r3b.hohmann', (['threads', 'n'], {}), '(threads, n)\n', (7156, 7168), True, 'import reduced3body as r3b\n'), ((8000, 8011), 'time.time', 'time.time', ([], {}), '()\n', (8009, 8011), False, 'import time\n'), ((8672, 8685), 'numpy.cos', 'np.cos', (['tlist'], {}), '(tlist)\n', (8678, 8685), True, 'import numpy as np\n'), ((8713, 8726), 'numpy.sin', 'np.sin', (['tlist'], {}), '(tlist)\n', (8719, 8726), True, 'import numpy as np\n'), ((8752, 8765), 'numpy.cos', 'np.cos', (['tlist'], {}), '(tlist)\n', (8758, 8765), True, 'import numpy as np\n'), ((8790, 8803), 'numpy.sin', 'np.sin', (['tlist'], {}), '(tlist)\n', (8796, 8803), True, 'import numpy as np\n'), ((9084, 9097), 'numpy.sum', 'np.sum', (['Hlist'], {}), '(Hlist)\n', (9090, 9097), True, 'import numpy as np\n'), ((1026, 1036), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1029, 1036), False, 'from math import pi, cos, sin\n'), ((1047, 1057), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1050, 1057), False, 'from math import pi, cos, sin\n'), ((7254, 7280), 'reduced3body.low_energy', 'r3b.low_energy', (['threads', 'n'], {}), '(threads, n)\n', (7268, 7280), True, 'import reduced3body as r3b\n'), ((8366, 8387), 'numpy.cos', 'np.cos', (['tlist[:n / 2]'], {}), '(tlist[:n / 2])\n', (8372, 8387), 
True, 'import numpy as np\n'), ((8400, 8421), 'numpy.sin', 'np.sin', (['tlist[:n / 2]'], {}), '(tlist[:n / 2])\n', (8406, 8421), True, 'import numpy as np\n'), ((8441, 8462), 'numpy.sin', 'np.sin', (['tlist[:n / 2]'], {}), '(tlist[:n / 2])\n', (8447, 8462), True, 'import numpy as np\n'), ((8475, 8496), 'numpy.cos', 'np.cos', (['tlist[:n / 2]'], {}), '(tlist[:n / 2])\n', (8481, 8496), True, 'import numpy as np\n'), ((8516, 8537), 'numpy.cos', 'np.cos', (['tlist[n / 2:]'], {}), '(tlist[n / 2:])\n', (8522, 8537), True, 'import numpy as np\n'), ((8550, 8571), 'numpy.sin', 'np.sin', (['tlist[n / 2:]'], {}), '(tlist[n / 2:])\n', (8556, 8571), True, 'import numpy as np\n'), ((8591, 8612), 'numpy.sin', 'np.sin', (['tlist[n / 2:]'], {}), '(tlist[n / 2:])\n', (8597, 8612), True, 'import numpy as np\n'), ((8625, 8646), 'numpy.cos', 'np.cos', (['tlist[n / 2:]'], {}), '(tlist[n / 2:])\n', (8631, 8646), True, 'import numpy as np\n'), ((9848, 9857), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9855, 9857), True, 'import matplotlib.pyplot as plt\n'), ((10570, 10579), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10577, 10579), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11024), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11022, 11024), True, 'import matplotlib.pyplot as plt\n'), ((12045, 12054), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12052, 12054), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1392), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1385, 1392), False, 'from math import pi, cos, sin\n'), ((1403, 1413), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1406, 1413), False, 'from math import pi, cos, sin\n'), ((7373, 7406), 'reduced3body.low_energy_parts8', 'r3b.low_energy_parts8', (['threads', 'n'], {}), '(threads, n)\n', (7394, 7406), True, 'import reduced3body as r3b\n'), ((7488, 7554), 'reduced3body.refine', 'r3b.refine', (['threads', 'n', 'duration', 'pos', 'ang', 'burn', 'x0', 'y0', 'px0', 'py0'], {}), '(threads, n, duration, pos, ang, burn, x0, y0, px0, py0)\n', (7498, 7554), True, 'import reduced3body as r3b\n'), ((7604, 7665), 'reduced3body.trajectory', 'r3b.trajectory', (['n', 'duration', 'pos', 'ang', 'burn', 'x0', 'y0', 'px0', 'py0'], {}), '(n, duration, pos, ang, burn, x0, y0, px0, py0)\n', (7618, 7665), True, 'import reduced3body as r3b\n'), ((7792, 7819), 'numpy.power', 'np.power', (['(1 - mu - xlist)', '(2)'], {}), '(1 - mu - xlist, 2)\n', (7800, 7819), True, 'import numpy as np\n'), ((7815, 7833), 'numpy.power', 'np.power', (['ylist', '(2)'], {}), '(ylist, 2)\n', (7823, 7833), True, 'import numpy as np\n'), ((9636, 9645), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9643, 9645), True, 'import matplotlib.pyplot as plt\n'), ((9670, 9679), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9677, 9679), True, 'import matplotlib.pyplot as plt\n'), ((9710, 9719), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9717, 9719), True, 'import matplotlib.pyplot as plt\n'), ((10361, 10370), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10368, 10370), True, 'import matplotlib.pyplot as plt\n'), ((10394, 10403), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10401, 10403), True, 'import matplotlib.pyplot as plt\n'), ((10433, 10442), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10440, 10442), True, 'import matplotlib.pyplot as plt\n'), ((11754, 11763), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11761, 11763), True, 'import matplotlib.pyplot as plt\n'), ((11788, 11797), 
'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11795, 11797), True, 'import matplotlib.pyplot as plt\n'), ((11828, 11837), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11835, 11837), True, 'import matplotlib.pyplot as plt\n'), ((11868, 11877), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11875, 11877), True, 'import matplotlib.pyplot as plt\n'), ((11901, 11910), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11908, 11910), True, 'import matplotlib.pyplot as plt\n'), ((11940, 11949), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11947, 11949), True, 'import matplotlib.pyplot as plt\n'), ((7739, 7762), 'numpy.power', 'np.power', (['(mu + xlist)', '(2)'], {}), '(mu + xlist, 2)\n', (7747, 7762), True, 'import numpy as np\n'), ((7760, 7778), 'numpy.power', 'np.power', (['ylist', '(2)'], {}), '(ylist, 2)\n', (7768, 7778), True, 'import numpy as np\n')] |
import logging
import os
import warnings
from collections import defaultdict
from typing import Union, Tuple
from multiprocessing import Pool
import h5py
import numpy as np
import torch
from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, average_precision_score, auc
from deepethogram import utils
from deepethogram.postprocessing import remove_low_thresholds
log = logging.getLogger(__name__)
# small epsilon to prevent divide by zero
EPS = 1e-7
# using multiprocessing on slurm causes a termination signal
try:
slurm_job_id = os.environ['SLURM_JOB_ID']
slurm = True
except KeyError:
slurm = False
def index_to_onehot(index: np.ndarray, n_classes: int) -> np.ndarray:
""" Convert an array if indices to one-hot vectors.
Parameters
----------
index: np.ndarray. shape (N,)
each element is the class of the correct label for that example
n_classes: int
Total number of classes. Necessary because this batch of indices might not have examples from all classes
Returns
-------
onehot: shape (N, n_classes)
        Binary array with a single 1 per row, in the column given by that row's index
Examples
-------
index_to_onehot(np.array([0, 1, 2, 3, 0]).astype(int), 4)
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0]], dtype=uint16)
"""
onehot = np.zeros((index.shape[0], n_classes), dtype=np.uint16)
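    # fancy indexing: for each row i, set the single column index[i] to 1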
onehot[np.arange(onehot.shape[0]), index] = 1
return onehot
def hardmax(probabilities: np.ndarray) -> np.ndarray:
""" Convert probability array to prediction by converting the max of each row to 1
Parameters
----------
probabilities: np.ndarray. Shape (N, K)
probabilities output by some model. Floats between 0 and 1
Returns
-------
array: np.ndarray. Shape (N, K)
binary
Examples
-------
# generate random array
logits = np.random.uniform(size=(6,3))
    # naively normalize to probabilities
probabilities = logits / logits.sum(axis=1)[:, np.newaxis]
print(probabilities)
array([[0.2600106 , 0.32258024, 0.41740916],
[0.28634918, 0.4161426 , 0.29750822],
[0.19937796, 0.32040531, 0.48021672],
[0.70646227, 0.01531493, 0.2782228 ],
[0.19636778, 0.35528756, 0.44834465],
[0.78139017, 0.10704456, 0.11156526]])
print(hardmax(probabilities))
[[0 0 1]
[0 1 0]
[0 0 1]
[1 0 0]
[0 0 1]
[1 0 0]]
"""
# make an array of zeros
array = np.zeros(probabilities.shape, dtype=np.uint16)
    # index into the array in the column with max probability, and set it to 1
array[np.arange(array.shape[0]), np.argmax(probabilities, axis=1)] = 1
return array
def onehot_to_index(onehot: np.ndarray) -> np.ndarray:
"""Convert one-hot array to index by taking the argmax"""
return np.argmax(onehot, axis=1)
def f1(predictions: np.ndarray, labels: np.ndarray, average: str = 'macro') -> np.ndarray:
""" simple wrapper around sklearn.metrics.f1_score
References
-------
[1]: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
[2]: https://en.wikipedia.org/wiki/F1_score
"""
# check to see if predictions are probabilities
if predictions.dtype != np.int64 or predictions.ndim > 1:
predictions = np.argmax(predictions, axis=1)
if labels.ndim > 1:
labels = onehot_to_index(labels)
F1 = f1_score(labels, predictions, average=average)
return F1
def roc_auc(predictions: np.ndarray, labels: np.ndarray, average: str = 'macro') -> np.ndarray:
""" simple wrapper around sklearn.metrics.roc_auc_score
References
-------
.. [1] https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
.. [2] https://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
if predictions.ndim == 1:
raise ValueError('Predictions must be class probabilities before max!')
if labels.ndim == 1:
labels = index_to_onehot(labels, predictions.shape[1])
score = roc_auc_score(labels, predictions, average=average)
return score
def accuracy(predictions: np.ndarray, labels: np.ndarray):
""" Return the fraction of elements in predictions that are equal to labels """
return np.mean(predictions == labels)
def confusion(predictions: np.ndarray, labels: np.ndarray, K: int = None) -> np.ndarray:
""" Computes confusion matrix. Much faster than sklearn.metrics.confusion_matrix for large numbers of predictions
Parameters
----------
predictions: np.ndarray. shape (N, ) or (N, K)
can be probabilities, hardmax, or indicators
labels: np.ndarray. shape (N,) or (N,K)
can be one-hot or indicator
K: int
number of classes
Returns
-------
cm: np.ndarray. shape (K, K)
confusion matrix
"""
if predictions.ndim > 1:
K = predictions.shape[1]
predictions = hardmax(predictions) # prob -> onehot
predictions = onehot_to_index(predictions) # onehot -> index where 1
if labels.ndim > 1:
K = labels.shape[1]
labels = onehot_to_index(labels) # make sure labels are index
if K is None:
K = max(predictions.max() + 1, labels.max() + 1)
cm = np.zeros((K, K)).astype(int)
for i in range(K):
for j in range(K):
# these_inds = labels==i
# cm[i, j] = np.sum((labels==i)*(predictions==j))
cm[i, j] = np.sum(np.logical_and(labels == i, predictions == j))
return cm
def binary_confusion_matrix(predictions, labels) -> np.ndarray:
# behaviors x thresholds x 2 x 2
# cms = np.zeros((K, N, 2, 2), dtype=int)
ndim = predictions.ndim
if ndim == 3:
# 2 x 2 x K x N
cms = np.zeros((2, 2, predictions.shape[1], predictions.shape[2]), dtype=int)
elif ndim == 2:
# 2 x 2 x K
cms = np.zeros((2, 2, predictions.shape[1]), dtype=int)
elif ndim == 1:
# 2 x 2
cms = np.zeros((2, 2), dtype=int)
else:
raise ValueError('unknown input shape: {}'.format(predictions.shape))
neg_lab = np.logical_not(labels)
neg_pred = np.logical_not(predictions)
cms[0, 0] = (neg_lab * neg_pred).sum(axis=0)
cms[0, 1] = (neg_lab * predictions).sum(axis=0)
cms[1, 0] = (labels * neg_pred).sum(axis=0)
cms[1, 1] = (labels * predictions).sum(axis=0)
if ndim == 3:
# output of shape 2 x 2 x N x K
return cms.transpose(0, 1, 3, 2)
# either 2 x 2 x K or just 2 x 2
return cms
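# Illustrative sketch (toy inputs, not from any dataset) of the 2x2 layout above,
# where rows index the true class and columns the predicted class:
#   preds  = np.array([1, 0, 1, 1, 0], dtype=bool)
#   labels = np.array([1, 0, 0, 1, 1], dtype=bool)
#   binary_confusion_matrix(preds, labels)
#   -> array([[1, 1],    # [[TN, FP],
#             [1, 2]])   #  [FN, TP]]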
def binary_confusion_matrix_multiple_thresholds(probabilities, labels, thresholds):
# this is the fastest I could possibly write it
K = probabilities.shape[1]
N = len(thresholds)
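    # broadcast every probability against every threshold in a single comparison:
    # (num_frames*K, 1) > (1, N) -> (num_frames*K, N), reshaped to (num_frames, K, N)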
pred = np.greater(probabilities.reshape(-1, 1), thresholds.reshape(1, -1)).reshape(-1, K, N)
lab = labels.reshape(-1, 1).repeat(N, 1).reshape(-1, K, N)
return binary_confusion_matrix(pred, lab)
def confusion_multiple_thresholds_alias(inp):
# alias so that binary_confusion_matrix_multiple_thresholds only needs one tuple as input
return binary_confusion_matrix_multiple_thresholds(*inp)
def confusion_alias(inp):
return binary_confusion_matrix(*inp)
def binary_confusion_matrix_parallel(probs_or_preds,
labels,
thresholds=None,
chunk_size: int = 100,
num_workers: int = 4,
parallel_chunk: int = 100):
# log.info('num workers binary confusion parallel: {}'.format(num_workers))
if slurm:
parallel_chunk = 1
num_workers = 1
N = probs_or_preds.shape[0]
starts = np.arange(0, N, chunk_size)
ends = np.concatenate((starts[1:], [N]))
if thresholds is not None:
# probabilities
iterator = ((probs_or_preds[start:end], labels[start:end], thresholds) for start, end in zip(starts, ends))
cm = np.zeros((2, 2, len(thresholds), probs_or_preds.shape[1]), dtype=int)
func = confusion_multiple_thresholds_alias
else:
# predictions
iterator = ((probs_or_preds[start:end], labels[start:end]) for start, end in zip(starts, ends))
if probs_or_preds.ndim == 2:
cm = np.zeros((2, 2, probs_or_preds.shape[1]), dtype=int)
elif probs_or_preds.ndim == 1:
cm = np.zeros((2, 2), dtype=int)
else:
raise ValueError('weird shape in probs_or_preds: {}'.format(probs_or_preds.shape))
func = confusion_alias
# log.info('parallel start')
if num_workers > 1:
with Pool(num_workers) as pool:
for res in pool.imap_unordered(func, iterator, parallel_chunk):
cm += res
else:
for args in iterator:
cm += func(args)
# log.info('parallel end')
return cm
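# Confusion counts are additive, so each chunk of rows yields an independent
# partial 2x2 matrix; the partial matrices are simply summed, whether computed
# serially or by the worker pool above.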
def compute_binary_confusion(predictions: np.ndarray, labels: np.ndarray,
thresholds: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" compute binary confusion matrices for input probabilities, labels, and thresholds. See confusion """
estimates = postprocess(predictions, thresholds, valid_bg=False)
K = predictions.shape[1]
cms = []
for i in range(K):
cm = confusion(estimates[:, i], labels[:, i], K=2)
cms.append(cm)
estimates = postprocess(predictions, thresholds, valid_bg=True)
cms_valid_bg = []
for i in range(K):
cm = confusion(estimates[:, i], labels[:, i], K=2)
cms_valid_bg.append(cm)
return np.stack(cms), np.stack(cms_valid_bg)
def mean_class_accuracy(predictions, labels):
""" computes the mean of diagonal elements of a confusion matrix """
if predictions.ndim > 1:
predictions = onehot_to_index(hardmax(predictions))
if labels.ndim > 1:
labels = onehot_to_index(labels)
cm = confusion_matrix(labels, predictions)
cm = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis] + 1e-9)
on_diag = cm[np.where(np.eye(cm.shape[0], dtype=np.uint32))]
return on_diag.mean()
def remove_invalid_values_predictions_and_labels(predictions: np.ndarray, labels: np.ndarray,
invalid_value: Union[int, float] = -1) -> \
Tuple[np.ndarray, np.ndarray]:
""" remove any rows where labels are equal to invalid_value.
Used when (for example) the last sequence in a video is padded to have the proper sequence length. the padded inputs
are paired with -1 labels, indicating that loss and metrics should not be applied there
"""
is_invalid = labels == invalid_value
valid_rows = np.logical_not(np.any(is_invalid, axis=1))
predictions = predictions[valid_rows, :]
labels = labels[valid_rows, :]
return predictions, labels
def auc_on_array(x, y):
if x.ndim < 2 or y.ndim < 2:
return np.nan
K = x.shape[1]
assert K == y.shape[1]
area_under_curve = np.zeros((K,), dtype=np.float32)
for i in range(K):
area_under_curve[i] = auc(x[:, i], y[:, i])
return area_under_curve
def compute_metrics_by_threshold(probabilities, labels, thresholds, num_workers: int = 4, cm=None):
# if we've computed cms elsewhere
if cm is None:
cm = binary_confusion_matrix_parallel(probabilities, labels, thresholds, num_workers=num_workers)
acc = (cm[0, 0] + cm[1, 1]) / cm.sum(axis=0).sum(axis=0)
p, r = compute_precision_recall(cm)
tp, fp = compute_tpr_fpr(cm)
info = compute_informedness(cm)
f1 = compute_f1(p, r)
fbeta_2 = compute_f1(p, r, beta=2.0)
auroc = auc_on_array(fp, tp)
mAP = auc_on_array(r, p)
metrics_by_threshold = {
'thresholds': thresholds,
'accuracy': acc,
'f1': f1,
'precision': p,
'recall': r,
'fbeta_2': fbeta_2,
'informedness': info,
'tpr': tp,
'fpr': fp,
'auroc': auroc,
'mAP': mAP,
'confusion': cm
}
return metrics_by_threshold
def fast_auc(y_true, y_prob):
if y_true.ndim == 2:
return np.array([fast_auc(y_true[:, i], y_prob[:, i]) for i in range(y_true.shape[1])])
# https://www.kaggle.com/c/microsoft-malware-prediction/discussion/76013
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
n = len(y_true)
nfalse = np.cumsum(1 - y_true)
    area = np.cumsum(y_true * nfalse)[-1]
    area /= nfalse[-1] * (n - nfalse[-1])
    return area
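# Identity used above: after sorting by predicted probability, nfalse[i] is the
# number of negatives at or below rank i, so cumsum(y_true * nfalse)[-1] counts
# the correctly ordered (negative, positive) pairs; dividing by n_neg * n_pos
# gives the AUROC, i.e. the probability that a random positive outscores a
# random negative.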
# @profile
def evaluate_thresholds(probabilities: np.ndarray,
labels: np.ndarray,
thresholds: np.ndarray = None,
num_workers: int = 4) -> Tuple[dict, dict]:
""" Given probabilities and labels, compute a bunch of metrics at each possible threshold value
Also computes a number of metrics for which there is a single value for the input predictions / labels, something
like the maximum F1 score across thresholds.
    Metrics computed for each threshold: accuracy, precision, recall, F1, F-beta(2),
    informedness, TPR, FPR, AUROC, mAP, and the binary confusion matrix.
Parameters
----------
probabilities: np.ndarray. shape (N,K)
output probabilities from some classifier
labels: np.ndarray. shape (N, K) or (N,)
binary or indicator labels. indicator labels will be converted to one-hot
thresholds: np.ndarray. shape (M, )
thresholds at which to convert probabilities into binary predictions.
        default value: np.linspace(1e-4, 1, 200)
Returns
-------
metrics_by_threshold: dict
each value is an array of shape (M, ) or (M,K), with a value (or set of values) computed for each threshold
epoch_metrics: dict
each value is only a single float for the entire prediction / label set.
"""
# log.info('evaluating thresholds. P: {} lab: {} n_workers: {}'.format(probabilities.shape, labels.shape, num_workers))
# log.info('SLURM in metrics file: {}'.format(slurm))
if slurm and num_workers != 1:
warnings.warn('using multiprocessing on slurm can cause issues. setting num_workers to 1')
num_workers = 1
if thresholds is None:
# using 200 means that approximated mAP, AUROC is almost exactly the same as exact
thresholds = np.linspace(1e-4, 1, 200)
# log.info('num workers in evaluate thresholds: {}'.format(num_workers))
# log.debug('probabilities shape in metrics calc: {}'.format(probabilities.shape))
metrics_by_threshold = {}
if probabilities.ndim == 1:
raise ValueError('To calc threshold, predictions must be probabilities, not classes')
K = probabilities.shape[1]
if labels.ndim == 1:
labels = index_to_onehot(labels, K)
probabilities, labels = remove_invalid_values_predictions_and_labels(probabilities, labels)
# log.info('first metrics call')
metrics_by_threshold = compute_metrics_by_threshold(probabilities, labels, thresholds, num_workers)
# log.info('first metrics call finished')
# log.info('finished computing binary confusion matrices')
# optimum threshold: one that maximizes F1
optimum_indices = np.argmax(metrics_by_threshold['f1'], axis=0)
optimum_thresholds = thresholds[optimum_indices]
# if the threshold or the F1 is very low, these are erroneous: set to 0.5
optimum_f1s = metrics_by_threshold['f1'][optimum_indices, range(len(optimum_indices))]
optimum_thresholds = remove_low_thresholds(optimum_thresholds, f1s=optimum_f1s)
# optimum info: maximizes informedness
optimum_indices_info = np.argmax(metrics_by_threshold['informedness'], axis=0)
optimum_thresholds_info = thresholds[optimum_indices_info]
optimum_info = metrics_by_threshold['informedness'][optimum_indices_info, range(len(optimum_indices_info))]
optimum_thresholds_info = remove_low_thresholds(optimum_thresholds_info, f1s=optimum_info)
metrics_by_threshold['optimum'] = optimum_thresholds
metrics_by_threshold['optimum_info'] = optimum_thresholds_info
# vectorized
predictions = probabilities > optimum_thresholds
# ALWAYS REPORT THE PERFORMANCE WITH "VALID" BACKGROUND
predictions[:, 0] = np.logical_not(np.any(predictions[:, 1:], axis=1))
# log.info('computing metric thresholds again')
# re-use our confusion matrix calculation. returns N x N x K values
# log.info('second metircs call')
metrics_by_class = compute_metrics_by_threshold(predictions, labels, None, num_workers)
# log.info('second metrics call ended')
# summing over classes is the same as flattening the array. ugly syntax
# TODO: make function that computes metrics from a stack of confusion matrices rather than this none None business
# log.info('third metrics call')
overall_metrics = compute_metrics_by_threshold(None,
None,
thresholds=None,
num_workers=num_workers,
cm=metrics_by_class['confusion'].sum(axis=2))
# log.info('third metrics call ended')
epoch_metrics = {
'accuracy_overall': overall_metrics['accuracy'],
'accuracy_by_class': metrics_by_class['accuracy'],
'f1_overall': overall_metrics['f1'],
'f1_class_mean': metrics_by_class['f1'].mean(),
'f1_class_mean_nobg': metrics_by_class['f1'][1:].mean(),
'f1_by_class': metrics_by_class['f1'],
'binary_confusion': metrics_by_class['confusion'].transpose(2, 0, 1),
'auroc_by_class': metrics_by_threshold['auroc'],
'auroc_class_mean': metrics_by_threshold['auroc'].mean(),
'mAP_by_class': metrics_by_threshold['mAP'],
'mAP_class_mean': metrics_by_threshold['mAP'].mean(),
# to compute these, would need to make confusion matrices on flattened array, which is slow
'auroc_overall': np.nan,
'mAP_overall': np.nan
}
# it is too much of a pain to increase the speed on roc_auc_score and mAP
# try:
# epoch_metrics['auroc_overall'] = roc_auc_score(labels, probabilities, average='micro')
# epoch_metrics['auroc_by_class'] = roc_auc_score(labels, probabilities, average=None)
# # small perf improvement is not worth worrying about bugs
# # epoch_metrics['auroc_overall'] = fast_auc(labels.flatten(), probabilities.flatten())
# # epoch_metrics['auroc_by_class'] = fast_auc(labels, probabilities)
# epoch_metrics['auroc_class_mean'] = epoch_metrics['auroc_by_class'].mean()
# except ValueError:
# # only one class in labels...
# epoch_metrics['auroc_overall'] = np.nan
# epoch_metrics['auroc_class_mean'] = np.nan
# epoch_metrics['auroc_by_class'] = np.array([np.nan for _ in range(K)])
#
# epoch_metrics['mAP_overall'] = average_precision_score(labels, probabilities, average='micro')
# epoch_metrics['mAP_by_class'] = average_precision_score(labels, probabilities, average=None)
# # this is a misnomer: mAP by class is just AP
# epoch_metrics['mAP_class_mean'] = epoch_metrics['mAP_by_class'].mean()
# log.info('returning metrics')
return metrics_by_threshold, epoch_metrics
def compute_tpr_fpr(cm: np.ndarray) -> Tuple[float, float]:
""" compute true positives and false positives from a non-normalized confusion matrix """
# normalize so that each are rates
cm_normalized = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis] + 1e-9)
fp = cm_normalized[0, 1]
tp = cm_normalized[1, 1]
return tp, fp
def get_denominator(expression: Union[float, np.ndarray]):
if isinstance(expression, (int, np.integer, float, np.floating)):
return max(EPS, expression)
# it's an array
# convert to floating point type-- if it's integer, it will just ignore the eps and not throw an error
expression = expression.astype(np.float32)
expression[expression < EPS] = EPS
return expression
def compute_f1(precision: float, recall: float, beta: float = 1.0) -> float:
""" compute f1 if you already have precison and recall. Prevents re-computing confusion matrix, etc """
num = (1 + beta**2) * (precision * recall)
denom = get_denominator((beta**2) * precision + recall)
return num / denom
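# F-beta generalizes F1: F_beta = (1 + beta^2) * P * R / (beta^2 * P + R).
# beta = 1 recovers F1; beta = 2 (used for fbeta_2 elsewhere in this module)
# weights recall more heavily than precision.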
def compute_precision_recall(cm: np.ndarray) -> Tuple[float, float]:
""" computes precision and recall from a confusion matrix """
tn = cm[0, 0]
tp = cm[1, 1]
fp = cm[0, 1]
fn = cm[1, 0]
precision = tp / get_denominator(tp + fp)
recall = tp / get_denominator(tp + fn)
return precision, recall
def compute_mean_accuracy(cm: np.ndarray) -> float:
""" compute the mean of true positive rate and true negative rate from a confusion matrix """
cm = cm.astype('float') / get_denominator(cm.sum(axis=1)[:, np.newaxis])
tp = cm[1, 1]
tn = cm[0, 0]
return np.mean([tp, tn])
def compute_informedness(cm: np.ndarray, eps: float = 1e-7) -> float:
""" compute informedness from a confusion matrix. Also known as Youden's J statistic
Parameters
----------
cm: np.ndarray
confusion matrix
eps: float
small value to prevent divide by zero
Returns
-------
informedness: float
Ranges from 0 to 1. Gives equal weight to false positives and false negatives.
References
-------
.. [1]: https://en.wikipedia.org/wiki/Youden%27s_J_statistic
"""
tn = cm[0, 0]
tp = cm[1, 1]
fp = cm[0, 1]
fn = cm[1, 0]
sensitivity = tp / get_denominator(tp + fn)
specificity = tn / get_denominator(fp + tn)
return sensitivity + specificity - 1
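# Example: a perfect classifier has sensitivity = specificity = 1, so J = 1;
# chance-level prediction has sensitivity + specificity = 1, so J = 0.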
def postprocess(predictions: np.ndarray, thresholds: np.ndarray, valid_bg: bool = True) -> np.ndarray:
""" Turn probabilities into predictions, with special handling of background.
    TODO: Should be removed in favor of deepethogram.postprocessing
"""
N, n_classes = predictions.shape
assert (len(thresholds) == n_classes)
estimates = np.zeros((N, n_classes), dtype=np.int64)
for i in range(0, n_classes):
estimates[:, i] = (predictions[:, i] > thresholds[i]).astype(int)
if valid_bg:
estimates[:, 0] = np.logical_not(np.any(estimates[:, 1:], axis=1)).astype(int)
return estimates
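# Illustrative sketch (toy inputs) of the background handling above with
# valid_bg=True: background (column 0) is active only when no behavior is.
#   probs      = np.array([[0.9, 0.2, 0.7],
#                          [0.1, 0.8, 0.3]])
#   postprocess(probs, np.array([0.5, 0.5, 0.5]))
#   -> array([[0, 0, 1],   # behavior 2 active, so background is overridden
#             [0, 1, 0]])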
all_metrics = {
'accuracy': accuracy,
'mean_class_accuracy': mean_class_accuracy,
'f1': f1,
'roc_auc': roc_auc,
'confusion': binary_confusion_matrix
}
def list_to_mean(values):
if type(values[0]) == torch.Tensor:
value = utils.tensor_to_np(torch.stack(values).mean())
elif type(values[0]) == np.ndarray:
if values[0].size == 1:
value = np.stack(np.array(values)).mean()
else:
value = np.concatenate(np.array(values)).mean()
else:
raise TypeError('Input should be numpy array or torch tensor. Type: ', type(values[0]))
return value
def append_to_hdf5(f, name, value, axis=0):
""" resizes an HDF5 dataset and appends value """
f[name].resize(f[name].shape[axis] + 1, axis=axis)
f[name][-1] = value
class Buffer:
def __init__(self):
self.data = {}
self.splits = ['train', 'val', 'test', 'speedtest']
for split in self.splits:
self.initialize(split)
def initialize(self, split):
self.data[split] = defaultdict(list)
def append(self, split: str, data: dict):
for key, value in data.items():
if isinstance(value, torch.Tensor):
# don't convert to numpy for speed
value = value.detach().cpu()
self.data[split][key].append(value)
def stack(self, split):
stacked = {}
keys = list(self.data[split].keys())
# go by key so we can delete each value from memory after stacking
for key in keys:
value = self.data[split][key]
first_element = value[0]
            if isinstance(first_element, (int, float, np.integer, np.floating, np.ndarray, list)):
try:
# default is concatenating along the batch dimension
value = np.concatenate(value)
except ValueError:
# input is likely just a list
value = np.stack(value)
elif isinstance(first_element, torch.Tensor):
value = torch.stack(value)
stacked[key] = value
del self.data[split][key]
self.initialize(split)
return stacked
def clear(self, split=None):
if split is None:
for split in self.data.keys():
self.clear(split)
keys = list(self.data[split].keys())
for key in keys:
del self.data[split][key]
self.data[split] = defaultdict(list)
class EmptyBuffer:
def __init__(self):
self.data = {}
self.splits = ['train', 'val', 'test', 'speedtest']
for split in self.splits:
self.initialize(split)
def initialize(self, split):
self.data[split] = defaultdict(list)
def append(self, split: str, data: dict):
pass
def stack(self, split):
pass
def clear(self, split=None):
pass
class Metrics:
"""Class for saving a list of per-epoch metrics to disk as an HDF5 file"""
def __init__(self,
run_dir: Union[str, bytes, os.PathLike],
key_metric: str,
name: str,
num_parameters: int,
splits: list = ['train', 'val'],
num_workers: int = 4):
""" Metrics constructor
Parameters
----------
run_dir: str, os.PathLike
directory into which to save metrics file
key_metric: str
which metric is considered the "key". This can be used for determining when a model has converged, etc.
name: str
filename will be /run_dir/{name}_metrics.h5
num_parameters: int
number of parameters in your model. useful to save this for later
splits: list
either ['train', 'val'] or ['train', 'val', 'test']
"""
assert (os.path.isdir(run_dir))
self.fname = os.path.join(run_dir, '{}_metrics.h5'.format(name))
log.debug('making metrics file at {}'.format(self.fname))
self.key_metric = key_metric
self.splits = splits
self.num_parameters = num_parameters
self.learning_rate = None
self.initialize_file()
self.num_workers = num_workers
self.buffer = Buffer()
self.latest_key = {}
self.latest_loss = {}
def update_lr(self, lr):
self.learning_rate = lr
def compute(self, data: dict) -> dict:
""" Computes metrics from one epoch's batch of data
Args:
data: dict
dict of Numpy arrays containing any data needed to compute metrics
Returns:
metrics: dict
dict of numpy arrays / floats containing metrics to be written to disk
"""
metrics = {}
keys = list(data.keys())
if 'loss' in keys:
metrics['loss'] = np.mean(data['loss'])
if 'time' in keys:
# assume it's seconds per image
FPS = 1 / get_denominator(np.mean(data['time']))
metrics['fps'] = FPS
elif 'fps' in keys:
FPS = np.mean(data['fps'])
metrics['fps'] = FPS
if 'lr' in keys:
# note: this should always be a scalar, but set to mean just in case there's multiple
metrics['lr'] = np.mean(data['lr'])
return metrics
def initialize_file(self):
mode = 'r+' if os.path.isfile(self.fname) else 'w'
with h5py.File(self.fname, mode) as f:
f.attrs['num_parameters'] = self.num_parameters
f.attrs['key_metric'] = self.key_metric
# make an HDF5 group for each split
for split in self.splits:
group = f.create_group(split)
# all splits and datasets will have loss values-- others will come from self.compute()
group.create_dataset('loss', (0,), maxshape=(None,), dtype=np.float32)
def save_metrics_to_disk(self, metrics: dict, split: str) -> None:
with h5py.File(self.fname, 'r+') as f:
# utils.print_hdf5(f)
if split not in f.keys():
# should've created top-level groups in initialize_file; this is for nesting
f.create_group(split)
group = f[split]
datasets = list(group.keys())
for key, array in metrics.items():
if isinstance(array, (int, float, np.integer, np.floating)):
array = np.array(array)
# ALLOW FOR NESTING
if isinstance(array, dict):
group_name = split + '/' + key
self.save_metrics_to_disk(array, group_name)
elif isinstance(array, np.ndarray):
if key in datasets:
# expand along the epoch dimension
group[key].resize(group[key].shape[0] + 1, axis=0)
else:
# create dataset
shape = (1, *array.shape)
maxshape = (None, *array.shape)
log.debug('creating dataset {}/{}: shape {}'.format(split, key, shape))
group.create_dataset(key, shape, maxshape=maxshape, dtype=array.dtype)
group[key][-1] = array
else:
raise ValueError('Metrics must contain dicts of np.ndarrays, not {} of type {}'.format(
array, type(array)))
def end_epoch(self, split: str):
""" End the current training epoch. Saves any metrics in memory to disk
Parameters
----------
split: str
which epoch just ended. train, validation, test, and speedtest are treated differently
"""
data = self.buffer.stack(split)
metrics = self.compute(data)
# import pdb; pdb.set_trace()
if split != 'speedtest':
assert 'loss' in data.keys()
# store most recent loss and key metric as attributes, for use in scheduling, stopping, etc.
self.latest_loss[split] = metrics['loss']
self.latest_key[split] = metrics[self.key_metric]
self.save_metrics_to_disk(metrics, split)
return metrics, split
def __getitem__(self, inp: tuple) -> np.ndarray:
split, metric_name, epoch_number = inp
with h5py.File(self.fname, 'r') as f:
assert split in f.keys(), 'split {} not found in file: {}'.format(split, list(f.keys()))
group = f[split]
assert metric_name in group.keys(), 'metric {} not found in group: {}'.format(
metric_name, list(group.keys()))
data = group[metric_name][epoch_number, ...]
return data
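# Usage sketch for the tuple indexing implemented in Metrics.__getitem__ above
# (the split/metric names are illustrative, not prescribed by this module):
def _read_latest_val_loss(metrics: Metrics) -> np.ndarray:
    # fetch the most recent 'loss' entry of the 'val' split from disk
    return metrics['val', 'loss', -1]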
class EmptyMetrics(Metrics):
def __init__(self, *args, **kwargs):
super().__init__(os.getcwd(), [], 'loss', 'empty', 0)
self.buffer = EmptyBuffer()
self.key_metric = 'loss'
def end_epoch(self, split, *args, **kwargs):
# calling this clears the buffer
self.buffer.clear(split)
def initialize_file(self):
pass
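# `get_denominator` (used in Metrics.compute above) is defined elsewhere in
# this codebase; judging only from its use guarding the FPS division, a
# minimal stand-in could look like this (hypothetical, not the real helper):
def _get_denominator_sketch(value: float, eps: float = 1e-11) -> float:
    # return a denominator that is safely nonzero
    return value if abs(value) > eps else eps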
class Classification(Metrics):
""" Metrics class for saving multiclass or multilabel classifcation metrics to disk """
def __init__(self,
run_dir: Union[str, bytes, os.PathLike],
key_metric: str,
num_parameters: int,
num_classes: int = None,
splits: list = ['train', 'val'],
ignore_index: int = -1,
evaluate_threshold: bool = False,
num_workers: int = 4):
""" Constructor for classification metrics class
Parameters
----------
run_dir
see Metrics
key_metric
see Metrics
num_parameters
see Metrics
num_classes: int
number of classes (behaviors) in your classification problem
splits: list
see Metrics
ignore_index: int
labels with this index will be masked for the purposes of computing metrics
evaluate_threshold: bool
            Hack for multi-label classification problems. If True, a suite of metrics is computed at
            every candidate threshold at the end of each epoch. See evaluate_thresholds
"""
super().__init__(run_dir, key_metric, 'classification', num_parameters, splits, num_workers)
self.metric_funcs = all_metrics
self.num_classes = num_classes
self.ignore_index = ignore_index
self.evaluate_threshold = evaluate_threshold
# if self.evaluate_threshold:
# self.thresholds = np.linspace(0, 1, 101)
def stack_sequence_data(self, array: np.ndarray) -> np.ndarray:
# if probs or labels are one-hot N x K or indicator N, return
if array.ndim < 3:
return array
assert array.ndim == 3
if array.shape[1] < array.shape[2]:
N, K, T = array.shape
array = array.transpose(0, 2, 1).reshape(N * T, K)
else:
N, T, K = array.shape
array = array.reshape(N * T, K)
return array
def compute(self, data: dict):
# computes mean loss, etc
metrics = super().compute(data)
if 'probs' not in data.keys():
# might happen during speedtest
return metrics
# automatically handle loss components
for key in data.keys():
if 'loss' in key and key != 'loss':
metrics[key] = np.mean(data[key])
# if data are from sequence models, stack into N*T x K not N x K x T
probs = self.stack_sequence_data(data['probs'])
if data['probs'].ndim == 3 and data['labels'].ndim == 2:
# special case for sequence models with final_activation==softmax, aka multiclass classification
labels = data['labels'].transpose(0, 1).flatten()
else:
labels = self.stack_sequence_data(data['labels'])
num_classes = probs.shape[1]
one_hot = probs.shape[-1] == labels.shape[-1]
if one_hot:
rows_with_false_labels = np.any(labels == self.ignore_index, axis=1)
else:
rows_with_false_labels = labels == self.ignore_index
true_rows = np.logical_not(rows_with_false_labels)
probs = probs[true_rows, :]
labels = labels[true_rows, :] if one_hot else labels[true_rows]
if self.evaluate_threshold:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
metrics_by_threshold, epoch_metrics = evaluate_thresholds(probs, labels, None, self.num_workers)
metrics['metrics_by_threshold'] = metrics_by_threshold
for key, value in epoch_metrics.items():
metrics[key] = value
else:
# multiclass classification, not multilabel
if one_hot:
labels = onehot_to_index(labels)
predictions = np.argmax(probs, axis=1)
with warnings.catch_warnings():
for metric in self.metrics:
if metric == 'confusion':
warnings.simplefilter("ignore")
metrics[metric] = confusion(predictions, labels, K=self.num_classes)
# import pdb
# pdb.set_trace()
elif metric == 'binary_confusion':
pass
else:
warnings.simplefilter("ignore")
metrics[metric] = self.metric_funcs[metric](predictions, labels)
return metrics
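# `onehot_to_index` is defined elsewhere in this module; given the argmax
# pattern used for predictions above, a minimal equivalent would be:
def _onehot_to_index_sketch(onehot: np.ndarray) -> np.ndarray:
    # collapse an (N, K) one-hot label matrix to an (N,) vector of class indices
    return np.argmax(onehot, axis=1)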
class OpticalFlow(Metrics):
""" Metrics class for saving optic flow metrics to disk """
def __init__(self, run_dir, key_metric, num_parameters, splits=['train', 'val']):
super().__init__(run_dir, key_metric, 'opticalflow', num_parameters, splits)
def compute(self, data: dict) -> dict:
""" Computes metrics from one epoch's batch of data
Args:
data: dict
dict of Numpy arrays containing any data needed to compute metrics
Returns:
metrics: dict
dict of numpy arrays / floats containing metrics to be written to disk
"""
metrics = super().compute(data)
        for key in ['reg_loss', 'SSIM', 'L1', 'smoothness', 'sparsity']:
if key in data.keys():
metrics[key] = data[key].mean()
return metrics
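# Usage sketch: compute() can be exercised directly with a dict of arrays;
# the keys below mirror those handled above, the values are made up.
def _flow_compute_example(metrics: OpticalFlow) -> dict:
    fake = {'loss': np.full(4, 0.5, dtype=np.float32),
            'SSIM': np.full(4, 0.9, dtype=np.float32)}
    return metrics.compute(fake)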
| [
"logging.getLogger",
"sklearn.metrics.auc",
"numpy.logical_not",
"sklearn.metrics.roc_auc_score",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.asarray",
"deepethogram.postprocessing.remove_low_thresholds",
"numpy.stack",
"numpy.linspace",
"os.path.isdir",
"numpy.con... | [((388, 415), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (405, 415), False, 'import logging\n'), ((1384, 1438), 'numpy.zeros', 'np.zeros', (['(index.shape[0], n_classes)'], {'dtype': 'np.uint16'}), '((index.shape[0], n_classes), dtype=np.uint16)\n', (1392, 1438), True, 'import numpy as np\n'), ((2526, 2572), 'numpy.zeros', 'np.zeros', (['probabilities.shape'], {'dtype': 'np.uint16'}), '(probabilities.shape, dtype=np.uint16)\n', (2534, 2572), True, 'import numpy as np\n'), ((2872, 2897), 'numpy.argmax', 'np.argmax', (['onehot'], {'axis': '(1)'}), '(onehot, axis=1)\n', (2881, 2897), True, 'import numpy as np\n'), ((3460, 3506), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'predictions'], {'average': 'average'}), '(labels, predictions, average=average)\n', (3468, 3506), False, 'from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, average_precision_score, auc\n'), ((4096, 4147), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predictions'], {'average': 'average'}), '(labels, predictions, average=average)\n', (4109, 4147), False, 'from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, average_precision_score, auc\n'), ((4321, 4351), 'numpy.mean', 'np.mean', (['(predictions == labels)'], {}), '(predictions == labels)\n', (4328, 4351), True, 'import numpy as np\n'), ((6172, 6194), 'numpy.logical_not', 'np.logical_not', (['labels'], {}), '(labels)\n', (6186, 6194), True, 'import numpy as np\n'), ((6210, 6237), 'numpy.logical_not', 'np.logical_not', (['predictions'], {}), '(predictions)\n', (6224, 6237), True, 'import numpy as np\n'), ((7793, 7820), 'numpy.arange', 'np.arange', (['(0)', 'N', 'chunk_size'], {}), '(0, N, chunk_size)\n', (7802, 7820), True, 'import numpy as np\n'), ((7832, 7865), 'numpy.concatenate', 'np.concatenate', (['(starts[1:], [N])'], {}), '((starts[1:], [N]))\n', (7846, 7865), True, 'import numpy as np\n'), ((9980, 10017), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 'predictions'], {}), '(labels, predictions)\n', (9996, 10017), False, 'from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, average_precision_score, auc\n'), ((11055, 11087), 'numpy.zeros', 'np.zeros', (['(K,)'], {'dtype': 'np.float32'}), '((K,), dtype=np.float32)\n', (11063, 11087), True, 'import numpy as np\n'), ((12351, 12369), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (12361, 12369), True, 'import numpy as np\n'), ((12445, 12466), 'numpy.cumsum', 'np.cumsum', (['(1 - y_true)'], {}), '(1 - y_true)\n', (12454, 12466), True, 'import numpy as np\n'), ((15167, 15212), 'numpy.argmax', 'np.argmax', (["metrics_by_threshold['f1']"], {'axis': '(0)'}), "(metrics_by_threshold['f1'], axis=0)\n", (15176, 15212), True, 'import numpy as np\n'), ((15460, 15518), 'deepethogram.postprocessing.remove_low_thresholds', 'remove_low_thresholds', (['optimum_thresholds'], {'f1s': 'optimum_f1s'}), '(optimum_thresholds, f1s=optimum_f1s)\n', (15481, 15518), False, 'from deepethogram.postprocessing import remove_low_thresholds\n'), ((15590, 15645), 'numpy.argmax', 'np.argmax', (["metrics_by_threshold['informedness']"], {'axis': '(0)'}), "(metrics_by_threshold['informedness'], axis=0)\n", (15599, 15645), True, 'import numpy as np\n'), ((15851, 15915), 'deepethogram.postprocessing.remove_low_thresholds', 'remove_low_thresholds', (['optimum_thresholds_info'], {'f1s': 'optimum_info'}), '(optimum_thresholds_info, f1s=optimum_info)\n', (15872, 15915), False, 'from 
deepethogram.postprocessing import remove_low_thresholds\n'), ((20964, 20981), 'numpy.mean', 'np.mean', (['[tp, tn]'], {}), '([tp, tn])\n', (20971, 20981), True, 'import numpy as np\n'), ((22085, 22125), 'numpy.zeros', 'np.zeros', (['(N, n_classes)'], {'dtype': 'np.int64'}), '((N, n_classes), dtype=np.int64)\n', (22093, 22125), True, 'import numpy as np\n'), ((3355, 3385), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3364, 3385), True, 'import numpy as np\n'), ((5815, 5886), 'numpy.zeros', 'np.zeros', (['(2, 2, predictions.shape[1], predictions.shape[2])'], {'dtype': 'int'}), '((2, 2, predictions.shape[1], predictions.shape[2]), dtype=int)\n', (5823, 5886), True, 'import numpy as np\n'), ((9658, 9671), 'numpy.stack', 'np.stack', (['cms'], {}), '(cms)\n', (9666, 9671), True, 'import numpy as np\n'), ((9673, 9695), 'numpy.stack', 'np.stack', (['cms_valid_bg'], {}), '(cms_valid_bg)\n', (9681, 9695), True, 'import numpy as np\n'), ((10766, 10792), 'numpy.any', 'np.any', (['is_invalid'], {'axis': '(1)'}), '(is_invalid, axis=1)\n', (10772, 10792), True, 'import numpy as np\n'), ((11141, 11162), 'sklearn.metrics.auc', 'auc', (['x[:, i]', 'y[:, i]'], {}), '(x[:, i], y[:, i])\n', (11144, 11162), False, 'from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, average_precision_score, auc\n'), ((12390, 12408), 'numpy.argsort', 'np.argsort', (['y_prob'], {}), '(y_prob)\n', (12400, 12408), True, 'import numpy as np\n'), ((12477, 12503), 'numpy.cumsum', 'np.cumsum', (['(y_true * nfalse)'], {}), '(y_true * nfalse)\n', (12486, 12503), True, 'import numpy as np\n'), ((14050, 14150), 'warnings.warn', 'warnings.warn', (['"""using multiprocessing on slurm can cause issues. setting num_workers to 1"""'], {}), "(\n 'using multiprocessing on slurm can cause issues. 
setting num_workers to 1'\n )\n", (14063, 14150), False, 'import warnings\n'), ((14305, 14332), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(1)', '(200)'], {}), '(0.0001, 1, 200)\n', (14316, 14332), True, 'import numpy as np\n'), ((16211, 16245), 'numpy.any', 'np.any', (['predictions[:, 1:]'], {'axis': '(1)'}), '(predictions[:, 1:], axis=1)\n', (16217, 16245), True, 'import numpy as np\n'), ((23420, 23437), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23431, 23437), False, 'from collections import defaultdict\n'), ((24854, 24871), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (24865, 24871), False, 'from collections import defaultdict\n'), ((25131, 25148), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (25142, 25148), False, 'from collections import defaultdict\n'), ((26258, 26280), 'os.path.isdir', 'os.path.isdir', (['run_dir'], {}), '(run_dir)\n', (26271, 26280), False, 'import os\n'), ((34854, 34892), 'numpy.logical_not', 'np.logical_not', (['rows_with_false_labels'], {}), '(rows_with_false_labels)\n', (34868, 34892), True, 'import numpy as np\n'), ((1450, 1476), 'numpy.arange', 'np.arange', (['onehot.shape[0]'], {}), '(onehot.shape[0])\n', (1459, 1476), True, 'import numpy as np\n'), ((2660, 2685), 'numpy.arange', 'np.arange', (['array.shape[0]'], {}), '(array.shape[0])\n', (2669, 2685), True, 'import numpy as np\n'), ((2687, 2719), 'numpy.argmax', 'np.argmax', (['probabilities'], {'axis': '(1)'}), '(probabilities, axis=1)\n', (2696, 2719), True, 'import numpy as np\n'), ((5312, 5328), 'numpy.zeros', 'np.zeros', (['(K, K)'], {}), '((K, K))\n', (5320, 5328), True, 'import numpy as np\n'), ((5941, 5990), 'numpy.zeros', 'np.zeros', (['(2, 2, predictions.shape[1])'], {'dtype': 'int'}), '((2, 2, predictions.shape[1]), dtype=int)\n', (5949, 5990), True, 'import numpy as np\n'), ((8362, 8414), 'numpy.zeros', 'np.zeros', (['(2, 2, probs_or_preds.shape[1])'], {'dtype': 'int'}), '((2, 2, probs_or_preds.shape[1]), dtype=int)\n', (8370, 8414), True, 'import numpy as np\n'), ((8709, 8726), 'multiprocessing.Pool', 'Pool', (['num_workers'], {}), '(num_workers)\n', (8713, 8726), False, 'from multiprocessing import Pool\n'), ((10113, 10149), 'numpy.eye', 'np.eye', (['cm.shape[0]'], {'dtype': 'np.uint32'}), '(cm.shape[0], dtype=np.uint32)\n', (10119, 10149), True, 'import numpy as np\n'), ((27268, 27289), 'numpy.mean', 'np.mean', (["data['loss']"], {}), "(data['loss'])\n", (27275, 27289), True, 'import numpy as np\n'), ((27706, 27725), 'numpy.mean', 'np.mean', (["data['lr']"], {}), "(data['lr'])\n", (27713, 27725), True, 'import numpy as np\n'), ((27804, 27830), 'os.path.isfile', 'os.path.isfile', (['self.fname'], {}), '(self.fname)\n', (27818, 27830), False, 'import os\n'), ((27853, 27880), 'h5py.File', 'h5py.File', (['self.fname', 'mode'], {}), '(self.fname, mode)\n', (27862, 27880), False, 'import h5py\n'), ((28406, 28433), 'h5py.File', 'h5py.File', (['self.fname', '"""r+"""'], {}), "(self.fname, 'r+')\n", (28415, 28433), False, 'import h5py\n'), ((30780, 30806), 'h5py.File', 'h5py.File', (['self.fname', '"""r"""'], {}), "(self.fname, 'r')\n", (30789, 30806), False, 'import h5py\n'), ((31258, 31269), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31267, 31269), False, 'import os\n'), ((34710, 34753), 'numpy.any', 'np.any', (['(labels == self.ignore_index)'], {'axis': '(1)'}), '(labels == self.ignore_index, axis=1)\n', (34716, 34753), True, 'import numpy as np\n'), ((35582, 35606), 'numpy.argmax', 'np.argmax', (['probs'], 
{'axis': '(1)'}), '(probs, axis=1)\n', (35591, 35606), True, 'import numpy as np\n'), ((5520, 5565), 'numpy.logical_and', 'np.logical_and', (['(labels == i)', '(predictions == j)'], {}), '(labels == i, predictions == j)\n', (5534, 5565), True, 'import numpy as np\n'), ((6041, 6068), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'int'}), '((2, 2), dtype=int)\n', (6049, 6068), True, 'import numpy as np\n'), ((8471, 8498), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'int'}), '((2, 2), dtype=int)\n', (8479, 8498), True, 'import numpy as np\n'), ((27501, 27521), 'numpy.mean', 'np.mean', (["data['fps']"], {}), "(data['fps'])\n", (27508, 27521), True, 'import numpy as np\n'), ((34096, 34114), 'numpy.mean', 'np.mean', (['data[key]'], {}), '(data[key])\n', (34103, 34114), True, 'import numpy as np\n'), ((35055, 35080), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (35078, 35080), False, 'import warnings\n'), ((35098, 35129), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (35119, 35129), False, 'import warnings\n'), ((35625, 35650), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (35648, 35650), False, 'import warnings\n'), ((22293, 22325), 'numpy.any', 'np.any', (['estimates[:, 1:]'], {'axis': '(1)'}), '(estimates[:, 1:], axis=1)\n', (22299, 22325), True, 'import numpy as np\n'), ((22636, 22655), 'torch.stack', 'torch.stack', (['values'], {}), '(values)\n', (22647, 22655), False, 'import torch\n'), ((24204, 24225), 'numpy.concatenate', 'np.concatenate', (['value'], {}), '(value)\n', (24218, 24225), True, 'import numpy as np\n'), ((24437, 24455), 'torch.stack', 'torch.stack', (['value'], {}), '(value)\n', (24448, 24455), False, 'import torch\n'), ((27399, 27420), 'numpy.mean', 'np.mean', (["data['time']"], {}), "(data['time'])\n", (27406, 27420), True, 'import numpy as np\n'), ((28866, 28881), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (28874, 28881), True, 'import numpy as np\n'), ((24339, 24354), 'numpy.stack', 'np.stack', (['value'], {}), '(value)\n', (24347, 24354), True, 'import numpy as np\n'), ((35766, 35797), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (35787, 35797), False, 'import warnings\n'), ((22765, 22781), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (22773, 22781), True, 'import numpy as np\n'), ((22839, 22855), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (22847, 22855), True, 'import numpy as np\n'), ((36104, 36135), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (36125, 36135), False, 'import warnings\n')] |
# -*- coding: utf-8 -*-
# author: <NAME>
"""pyplt.
绘图函数接口
# ## matplotlib
# matplotlib 提供了较为完整的matlab式绘图API,这种绘图代码简洁;
#
# 一般语法为plt.func
#
# 对于复杂绘图的支持,matplotlib 可以用面向对象的API接口实现
#
# 通过图层一步步搭建图形 figure->axes->axis,对于子图的设置为axes.set_prop
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
import mpl_toolkits.mplot3d as plt3
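# A minimal sketch of the OOP layering described in the module docstring
# (figure -> axes -> axis); everything in this function is illustrative only.
def _oop_pattern_demo():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    ax.set_xlabel('x')  # per-axes properties via ax.set_*
    ax.xaxis.set_major_formatter(ticker.ScalarFormatter())  # per-axis tweaks
    return fig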
# Global layout settings
plt.rcParams.update({
'font.size': 18,
'font.family': 'Serif',
'text.usetex': False
})
# Chinese font display
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Format settings
# Scientific notation
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1, 2))
# padding between the x/y axes and their tick labels
plt.rcParams['xtick.major.pad'] = 5
plt.rcParams['ytick.major.pad'] = 5
x1 = np.arange(0, np.pi, 0.01 * np.pi)
y1 = x1**np.sin(x1)
fig, ax = plt.subplots(figsize=(9, 6), num=1)
ax.plot(x1, y1, 'r')
plt.title("$y = x^{sin(x)}$", fontsize=21)
plt.legend(labels=["$x^{sin(x)}$"], loc="best", fontsize=21)
plt.xlim(0, np.pi + 0.1)
plt.ylim(0.3, 2.1)
xtick = ["0", "1/4π", "1/2π", "3/4π", "π"]
plt.xticks(ticks=np.arange(0, np.pi + 0.1, 0.25 * np.pi),
labels=xtick,
fontsize=17)
plt.yticks(ticks=np.arange(0.5, 2.1, 0.5),
labels=["0.5", "1", "1.5", "2"],
fontsize=17)
plt.box(False)
plt.grid(True, color="c", which="major", axis="both")
x = np.linspace(0, np.pi, 101)
y = np.exp(-1 / (np.power(x, 2) + 1))
# plt.figure(num=1,figsize=(8,6),dpi=80)
fig, ax = plt.subplots(ncols=1, nrows=1, num=2, figsize=(8, 6), dpi=80)
fig.suptitle("figure", fontsize=21)
ax.plot(x, y, color="#000FFF", lw=1.5, ls="-", label=r"$e^{-\frac{1}{x^2+1}}$")
ax.legend(loc="best", fontsize=21)
ax.grid(b=True, which="both", color="c", ls="-.")
ax.grid(b=False)
plt.xlabel(xlabel="X", fontsize=17)
plt.ylabel(ylabel="Y", fontsize=17)
plt.xlim(0, np.pi)
plt.ylim(0.3, 1)
plt.xticks(np.linspace(0, np.pi, 5), ["0", "1/4π", "1/2π", "3/4π", "π"],
fontsize=17)
plt.yticks(np.linspace(0.3, 1, 8), fontsize=17)
# fig.add_axes((0.7,0.7,0.3,0.3))
plt.show(fig)
x = np.linspace(0, 2 * np.pi, 101)
y = np.sin(x)
plt.figure(figsize=(8, 6), num=3)
plt.plot(x, y, color="r", ls="--", lw=1.5, marker="o")
plt.axis([0, 2 * np.pi, -1, 1])
plt.xlabel("x 轴", fontsize=17)
plt.ylabel("y 轴", fontsize=17)
plt.title("正弦函数", fontsize=21)
plt.grid(True, color="c", ls="-.")
plt.annotate(":我是(π,0)",
xy=(np.pi, 0),
xytext=(3 / 2 * np.pi, 0.5),
fontsize=13,
arrowprops=dict(facecolor="b", shrink=0.1))
plt.show()
θ = np.linspace(0, 2 * np.pi, 361)
ρ = np.sin(θ)**2 + np.cos(θ)**2
fig, ax = plt.subplots(num=4)
plt.polar(θ, ρ)
plt.show()
x = np.linspace(0, 10 * np.pi, 370)
y = x**2
fig = plt.figure(num=5, figsize=(8, 6), dpi=80)
fig.suptitle(t="OOP", x=0.5, y=0.9, fontsize=21)
ax1 = fig.add_axes([0.1, 0.1, 0.6, 0.6])  # left, bottom, width, height (each in [0, 1], as a fraction of the figure frame)
ax1.plot(x,
y,
color='r',
ls='-',
marker='.',
markerfacecolor='blue',
markersize=0.5,
markeredgewidth=2,
markeredgecolor='black',
label="$x^2$")
ax1.set_xscale('log')
ax1.yaxis.set_major_formatter(formatter)
ax1.set_xlabel("x")
ax1.set_ylabel("Y")
# distance between the axis label and the tick labels
ax1.xaxis.labelpad = -5
ax1.yaxis.labelpad = -5
ax1.set_title("$x^2$", fontsize=17)
ax1.grid(b=True, color='c', ls='--', alpha=1, lw=1.5)
ax1.legend(loc=0, fontsize=17)
ax2 = fig.add_axes([0.7, 0.7, 0.2, 0.2])
ax2.plot(x, np.sin(x))
ax2.set_xlabel("x")
ax2.set_ylabel("Y")
ax2.set_title("$sin(x)$")
ax2.grid(b=True, color='c')
# fig.tight_layout()
# fig.show()  # usable with a GUI plotting backend
# plt.show(fig)
# fig.savefig("OOP.png")
# plt.savefig("OOP.tiff")
# Independent configuration of individual axes/spines
x = np.linspace(0, 2 * np.pi, 101)
y = np.exp(-x**2)
fig, ax = plt.subplots(num=6)
ax.spines['bottom'].set_color('blue')
ax.spines['left'].set_color('blue')
ax.spines['left'].set_linewidth(2)
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
# Multi-axis plotting
x = np.linspace(0, 2 * np.pi, 101)
y = 1 / (np.log(np.sqrt(x) + 1) + 1) * np.sin(x)
fig, ax1 = plt.subplots(num=7)
ax1.plot(x, y)
ax1.set_xlim([0, 2 * np.pi])
ax1.set_ylim(ymin=-1 / 2, ymax=1)
ax1.set_ylabel(r"$\frac{sin(x)}{ln(\sqrt{x}+1)+1)}$", fontsize=17)
ax2 = ax1.twinx()
ax2.plot(
x,
y * np.sin(x),
color='r',
)
ax2.set_ylabel(r"$\frac{sin^2(x)}{ln(\sqrt{x}+1)+1)}$", fontsize=17)
# ax3 = ax2.twinx()
x = np.linspace(0, 2 * np.pi, 101)
y = np.cos(x)
fig, ax = plt.subplots(num=8)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# ax.xaxis.set_ticks_position('bottom')
# ax.spines['bottom'].set_position(('axes',0.5))
ax.spines['bottom'].set_position(('data', -1))
# ax.spines['left'].set_position(('axes',0.5))
ax.spines['left'].set_position(('data', 0))
ax.plot(x, y)
# fig.show()
# ## **kargs invocation
# By parsing keyword arguments, plot parameters can be gathered into a dict and handled
# uniformly; handy for adjusting optional parameters in GUI programming
x = np.linspace(0, 2 * np.pi, 361)
y = np.sin(x)
fig, ax = plt.subplots(num=9)
kargs = {'color': '#BB00CC', 'linewidth': 1.5}
plt.plot(x, y, **kargs)
# ## 3D plotting
fig = plt.figure(num=10)
ax3 = plt3.Axes3D(fig)
x = np.linspace(-np.pi, np.pi, 361)
y = np.linspace(-np.pi, np.pi, 361)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X**2 + Y**2)) + np.exp((X**2 + Y**2) / (2 * np.pi**2))
ax3.plot_surface(X, Y, Z, cmap='rainbow')
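# To persist or display the surface, mirror the commented-out calls in the
# OOP section above (the filename here is illustrative):
# fig.savefig('surface.png', dpi=150)
# plt.show()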
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.ticker.ScalarFormatter",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.ylim",
"numpy.meshgrid",
"... | [((369, 457), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18, 'font.family': 'Serif', 'text.usetex': False}"], {}), "({'font.size': 18, 'font.family': 'Serif', 'text.usetex':\n False})\n", (388, 457), True, 'import matplotlib.pyplot as plt\n'), ((590, 630), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (612, 630), False, 'from matplotlib import ticker\n'), ((793, 826), 'numpy.arange', 'np.arange', (['(0)', 'np.pi', '(0.01 * np.pi)'], {}), '(0, np.pi, 0.01 * np.pi)\n', (802, 826), True, 'import numpy as np\n'), ((857, 892), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)', 'num': '(1)'}), '(figsize=(9, 6), num=1)\n', (869, 892), True, 'import matplotlib.pyplot as plt\n'), ((914, 956), 'matplotlib.pyplot.title', 'plt.title', (['"""$y = x^{sin(x)}$"""'], {'fontsize': '(21)'}), "('$y = x^{sin(x)}$', fontsize=21)\n", (923, 956), True, 'import matplotlib.pyplot as plt\n'), ((957, 1017), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['$x^{sin(x)}$']", 'loc': '"""best"""', 'fontsize': '(21)'}), "(labels=['$x^{sin(x)}$'], loc='best', fontsize=21)\n", (967, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1042), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(np.pi + 0.1)'], {}), '(0, np.pi + 0.1)\n', (1026, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1061), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.3)', '(2.1)'], {}), '(0.3, 2.1)\n', (1051, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1337), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (1330, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1391), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'color': '"""c"""', 'which': '"""major"""', 'axis': '"""both"""'}), "(True, color='c', which='major', axis='both')\n", (1346, 1391), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1423), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(101)'], {}), '(0, np.pi, 101)\n', (1408, 1423), True, 'import numpy as np\n'), ((1513, 1574), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)', 'num': '(2)', 'figsize': '(8, 6)', 'dpi': '(80)'}), '(ncols=1, nrows=1, num=2, figsize=(8, 6), dpi=80)\n', (1525, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ([], {'xlabel': '"""X"""', 'fontsize': '(17)'}), "(xlabel='X', fontsize=17)\n", (1803, 1828), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1864), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ([], {'ylabel': '"""Y"""', 'fontsize': '(17)'}), "(ylabel='Y', fontsize=17)\n", (1839, 1864), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1883), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'np.pi'], {}), '(0, np.pi)\n', (1873, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1900), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.3)', '(1)'], {}), '(0.3, 1)\n', (1892, 1900), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2093), 'matplotlib.pyplot.show', 'plt.show', (['fig'], {}), '(fig)\n', (2088, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2129), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (2110, 2129), True, 'import numpy as np\n'), ((2134, 2143), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2140, 2143), True, 'import numpy as np\n'), ((2144, 2177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 
'num': '(3)'}), '(figsize=(8, 6), num=3)\n', (2154, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2232), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""r"""', 'ls': '"""--"""', 'lw': '(1.5)', 'marker': '"""o"""'}), "(x, y, color='r', ls='--', lw=1.5, marker='o')\n", (2186, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2264), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 2 * np.pi, -1, 1]'], {}), '([0, 2 * np.pi, -1, 1])\n', (2241, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2265, 2295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x 轴"""'], {'fontsize': '(17)'}), "('x 轴', fontsize=17)\n", (2275, 2295), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2326), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y 轴"""'], {'fontsize': '(17)'}), "('y 轴', fontsize=17)\n", (2306, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2357), 'matplotlib.pyplot.title', 'plt.title', (['"""正弦函数"""'], {'fontsize': '(21)'}), "('正弦函数', fontsize=21)\n", (2336, 2357), True, 'import matplotlib.pyplot as plt\n'), ((2358, 2392), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'color': '"""c"""', 'ls': '"""-."""'}), "(True, color='c', ls='-.')\n", (2366, 2392), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2579, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2588, 2618), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(361)'], {}), '(0, 2 * np.pi, 361)\n', (2599, 2618), True, 'import numpy as np\n'), ((2660, 2679), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(4)'}), '(num=4)\n', (2672, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2695), 'matplotlib.pyplot.polar', 'plt.polar', (['θ', 'ρ'], {}), '(θ, ρ)\n', (2689, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2704, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2712, 2743), 'numpy.linspace', 'np.linspace', (['(0)', '(10 * np.pi)', '(370)'], {}), '(0, 10 * np.pi, 370)\n', (2723, 2743), True, 'import numpy as np\n'), ((2759, 2800), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(5)', 'figsize': '(8, 6)', 'dpi': '(80)'}), '(num=5, figsize=(8, 6), dpi=80)\n', (2769, 2800), True, 'import matplotlib.pyplot as plt\n'), ((3735, 3765), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (3746, 3765), True, 'import numpy as np\n'), ((3770, 3785), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (3776, 3785), True, 'import numpy as np\n'), ((3794, 3813), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(6)'}), '(num=6)\n', (3806, 3813), True, 'import matplotlib.pyplot as plt\n'), ((3993, 4023), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (4004, 4023), True, 'import numpy as np\n'), ((4085, 4104), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(7)'}), '(num=7)\n', (4097, 4104), True, 'import matplotlib.pyplot as plt\n'), ((4415, 4445), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (4426, 4445), True, 'import numpy as np\n'), ((4450, 4459), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (4456, 4459), True, 'import numpy as np\n'), ((4470, 4489), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(8)'}), '(num=8)\n', (4482, 4489), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4921), 'numpy.linspace', 'np.linspace', 
(['(0)', '(2 * np.pi)', '(361)'], {}), '(0, 2 * np.pi, 361)\n', (4902, 4921), True, 'import numpy as np\n'), ((4926, 4935), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (4932, 4935), True, 'import numpy as np\n'), ((4946, 4965), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(9)'}), '(num=9)\n', (4958, 4965), True, 'import matplotlib.pyplot as plt\n'), ((5013, 5036), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y, **kargs)\n', (5021, 5036), True, 'import matplotlib.pyplot as plt\n'), ((5056, 5074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(10)'}), '(num=10)\n', (5066, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5081, 5097), 'mpl_toolkits.mplot3d.Axes3D', 'plt3.Axes3D', (['fig'], {}), '(fig)\n', (5092, 5097), True, 'import mpl_toolkits.mplot3d as plt3\n'), ((5103, 5134), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(361)'], {}), '(-np.pi, np.pi, 361)\n', (5114, 5134), True, 'import numpy as np\n'), ((5139, 5170), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(361)'], {}), '(-np.pi, np.pi, 361)\n', (5150, 5170), True, 'import numpy as np\n'), ((5178, 5195), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5189, 5195), True, 'import numpy as np\n'), ((836, 846), 'numpy.sin', 'np.sin', (['x1'], {}), '(x1)\n', (842, 846), True, 'import numpy as np\n'), ((1912, 1936), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(5)'], {}), '(0, np.pi, 5)\n', (1923, 1936), True, 'import numpy as np\n'), ((2009, 2031), 'numpy.linspace', 'np.linspace', (['(0.3)', '(1)', '(8)'], {}), '(0.3, 1, 8)\n', (2020, 2031), True, 'import numpy as np\n'), ((3497, 3506), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3503, 3506), True, 'import numpy as np\n'), ((4063, 4072), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (4069, 4072), True, 'import numpy as np\n'), ((5200, 5226), 'numpy.exp', 'np.exp', (['(-(X ** 2 + Y ** 2))'], {}), '(-(X ** 2 + Y ** 2))\n', (5206, 5226), True, 'import numpy as np\n'), ((5225, 5269), 'numpy.exp', 'np.exp', (['((X ** 2 + Y ** 2) / (2 * np.pi ** 2))'], {}), '((X ** 2 + Y ** 2) / (2 * np.pi ** 2))\n', (5231, 5269), True, 'import numpy as np\n'), ((1122, 1161), 'numpy.arange', 'np.arange', (['(0)', '(np.pi + 0.1)', '(0.25 * np.pi)'], {}), '(0, np.pi + 0.1, 0.25 * np.pi)\n', (1131, 1161), True, 'import numpy as np\n'), ((1229, 1253), 'numpy.arange', 'np.arange', (['(0.5)', '(2.1)', '(0.5)'], {}), '(0.5, 2.1, 0.5)\n', (1238, 1253), True, 'import numpy as np\n'), ((2623, 2632), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (2629, 2632), True, 'import numpy as np\n'), ((2639, 2648), 'numpy.cos', 'np.cos', (['θ'], {}), '(θ)\n', (2645, 2648), True, 'import numpy as np\n'), ((4293, 4302), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (4299, 4302), True, 'import numpy as np\n'), ((1441, 1455), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (1449, 1455), True, 'import numpy as np\n'), ((4040, 4050), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (4047, 4050), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# encoding: utf-8
import time
import numpy as np
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.callbacks import EarlyStopping
from keras.layers.core import Dense, Activation, Dropout
from utils import read_dataset, split_dataset
from nn_common import plot_result, store_model, load_model
from evaluation import mase
def compile_model(nneurons, loss_fn, dropout=(0.0, 0.0)):
model = Sequential()
model.add(LSTM(nneurons[0], input_dim=1, return_sequences=True))
if dropout[0] > 0:
model.add(Dropout(dropout[0]))
model.add(LSTM(nneurons[1], return_sequences=False))
if dropout[1] > 0:
model.add(Dropout(dropout[1]))
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(optimizer='rmsprop', loss=loss_fn)
return model
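# Example (illustrative; these are the same defaults run_network uses below):
def _demo_model():
    return compile_model(nneurons=(19, 5), loss_fn='mse', dropout=(0.0, 0.0))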
def run_network(window, model=None, show_plot=False, save_model=False):
start_time = time.time()
print('loading and prepare data set...')
data = read_dataset('../datasets/internet-traffic-data-5minutes.csv')
X_train, y_train, X_test, y_test, mean, std = split_dataset(
data, window, ratio=0.90, standardize=True)
# reshape s.t. the data has the form (#examples, #values in sequences,
# dim. of each value in the sequence)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
if model is None:
print('initialize model...')
model = compile_model(
nneurons=(19, 5), dropout=(0.0, 0.0), loss_fn='mse')
print('model ', model.summary())
print('train model...')
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
model.fit(X_train, y_train, nb_epoch=100, validation_split=0.10,
callbacks=[early_stopping])
print('make predictions...')
prediction = model.predict(X_test).flatten()
if show_plot:
plot_result(prediction, y_test, mean, std)
print('mase = ', mase(y_train, y_test, prediction))
if save_model:
store_model(model)
    print('total duration: {:.2f} seconds'.format(time.time() - start_time))
def hyper_parameter_search(max_evals=200):
from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL
data = read_dataset('../datasets/internet-traffic-data-5minutes.csv')
space = {
'nneurons1': hp.randint('nneurons1', 15),
'nneurons2': hp.randint('nneurons2', 15),
'window': hp.randint('window', 15)
}
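    # hp.randint(label, 15) draws integers from [0, 15), so 0 is a possible
    # value; that is why objective() below rejects parameters < 1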
def objective(params):
nneurons1 = params['nneurons1']
nneurons2 = params['nneurons2']
window = params['window']
if nneurons1 < 1 or nneurons2 < 1 or window < 1:
return {'status': STATUS_FAIL}
X_train, y_train, *_ = split_dataset(
data, [1] * window, ratio=0.90, standardize=True)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
model = compile_model(
(nneurons1, nneurons2), loss_fn='mse', dropout=(0.0, 0.0))
hist = model.fit(
X_train, y_train, nb_epoch=100, validation_split=0.10,
callbacks=[EarlyStopping(monitor='val_loss', patience=2)],
verbose=0)
return {'loss': hist.history['val_loss'][-1], 'status': STATUS_OK}
return fmin(objective, space=space, algo=tpe.suggest, max_evals=max_evals)
if __name__ == '__main__':
print('run hyper param search')
print(hyper_parameter_search(100))
| [
"utils.split_dataset",
"hyperopt.fmin",
"nn_common.store_model",
"numpy.reshape",
"keras.layers.core.Activation",
"nn_common.plot_result",
"hyperopt.hp.randint",
"keras.models.Sequential",
"keras.layers.core.Dense",
"utils.read_dataset",
"keras.callbacks.EarlyStopping",
"keras.layers.core.Drop... | [((453, 465), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (463, 465), False, 'from keras.models import Sequential\n'), ((937, 948), 'time.time', 'time.time', ([], {}), '()\n', (946, 948), False, 'import time\n'), ((1006, 1068), 'utils.read_dataset', 'read_dataset', (['"""../datasets/internet-traffic-data-5minutes.csv"""'], {}), "('../datasets/internet-traffic-data-5minutes.csv')\n", (1018, 1068), False, 'from utils import read_dataset, split_dataset\n'), ((1119, 1175), 'utils.split_dataset', 'split_dataset', (['data', 'window'], {'ratio': '(0.9)', 'standardize': '(True)'}), '(data, window, ratio=0.9, standardize=True)\n', (1132, 1175), False, 'from utils import read_dataset, split_dataset\n'), ((1318, 1378), 'numpy.reshape', 'np.reshape', (['X_train', '(X_train.shape[0], X_train.shape[1], 1)'], {}), '(X_train, (X_train.shape[0], X_train.shape[1], 1))\n', (1328, 1378), True, 'import numpy as np\n'), ((1392, 1449), 'numpy.reshape', 'np.reshape', (['X_test', '(X_test.shape[0], X_test.shape[1], 1)'], {}), '(X_test, (X_test.shape[0], X_test.shape[1], 1))\n', (1402, 1449), True, 'import numpy as np\n'), ((2328, 2390), 'utils.read_dataset', 'read_dataset', (['"""../datasets/internet-traffic-data-5minutes.csv"""'], {}), "('../datasets/internet-traffic-data-5minutes.csv')\n", (2340, 2390), False, 'from utils import read_dataset, split_dataset\n'), ((3362, 3429), 'hyperopt.fmin', 'fmin', (['objective'], {'space': 'space', 'algo': 'tpe.suggest', 'max_evals': 'max_evals'}), '(objective, space=space, algo=tpe.suggest, max_evals=max_evals)\n', (3366, 3429), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL\n'), ((480, 533), 'keras.layers.recurrent.LSTM', 'LSTM', (['nneurons[0]'], {'input_dim': '(1)', 'return_sequences': '(True)'}), '(nneurons[0], input_dim=1, return_sequences=True)\n', (484, 533), False, 'from keras.layers.recurrent import LSTM\n'), ((611, 652), 'keras.layers.recurrent.LSTM', 'LSTM', (['nneurons[1]'], {'return_sequences': '(False)'}), '(nneurons[1], return_sequences=False)\n', (615, 652), False, 'from keras.layers.recurrent import LSTM\n'), ((730, 738), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (735, 738), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((754, 774), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (764, 774), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1704, 1749), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(2)'}), "(monitor='val_loss', patience=2)\n", (1717, 1749), False, 'from keras.callbacks import EarlyStopping\n'), ((1979, 2021), 'nn_common.plot_result', 'plot_result', (['prediction', 'y_test', 'mean', 'std'], {}), '(prediction, y_test, mean, std)\n', (1990, 2021), False, 'from nn_common import plot_result, store_model, load_model\n'), ((2110, 2128), 'nn_common.store_model', 'store_model', (['model'], {}), '(model)\n', (2121, 2128), False, 'from nn_common import plot_result, store_model, load_model\n'), ((2426, 2453), 'hyperopt.hp.randint', 'hp.randint', (['"""nneurons1"""', '(15)'], {}), "('nneurons1', 15)\n", (2436, 2453), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL\n'), ((2476, 2503), 'hyperopt.hp.randint', 'hp.randint', (['"""nneurons2"""', '(15)'], {}), "('nneurons2', 15)\n", (2486, 2503), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL\n'), ((2523, 2547), 'hyperopt.hp.randint', 'hp.randint', 
(['"""window"""', '(15)'], {}), "('window', 15)\n", (2533, 2547), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL\n'), ((2829, 2891), 'utils.split_dataset', 'split_dataset', (['data', '([1] * window)'], {'ratio': '(0.9)', 'standardize': '(True)'}), '(data, [1] * window, ratio=0.9, standardize=True)\n', (2842, 2891), False, 'from utils import read_dataset, split_dataset\n'), ((2924, 2984), 'numpy.reshape', 'np.reshape', (['X_train', '(X_train.shape[0], X_train.shape[1], 1)'], {}), '(X_train, (X_train.shape[0], X_train.shape[1], 1))\n', (2934, 2984), True, 'import numpy as np\n'), ((576, 595), 'keras.layers.core.Dropout', 'Dropout', (['dropout[0]'], {}), '(dropout[0])\n', (583, 595), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((695, 714), 'keras.layers.core.Dropout', 'Dropout', (['dropout[1]'], {}), '(dropout[1])\n', (702, 714), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2047, 2080), 'evaluation.mase', 'mase', (['y_train', 'y_test', 'prediction'], {}), '(y_train, y_test, prediction)\n', (2051, 2080), False, 'from evaluation import mase\n'), ((2181, 2192), 'time.time', 'time.time', ([], {}), '()\n', (2190, 2192), False, 'import time\n'), ((3203, 3248), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(2)'}), "(monitor='val_loss', patience=2)\n", (3216, 3248), False, 'from keras.callbacks import EarlyStopping\n')] |
# import the necessary packages
import cv2
import joblib
import numpy as np
import tkinter as tk
import time
from core import extract_signature
from PIL import Image, ImageTk
from tkinter import filedialog
def resize(image, size):
w, h = image.size
if w == 0 or h == 0:
return Image.fromarray(np.ones(size) * 255.0)
_w, _h = size
if w > h:
h = int(h * float(_w) / w)
w = int(_w)
else:
w = int(w * float(_h) / h)
h = int(_h)
image = image.resize((w, h), Image.ANTIALIAS)
max_w, max_h = size
img_w, img_h = image.size
img = np.array(image)
canvas = np.ones(shape=(max_h, max_w, 3), dtype=img.dtype) * 255
x = int((max_w - img_w) / 2)
y = int((max_h - img_h) / 2)
canvas[y:y + img_h, x:x + img_w, :] = img[0:img_h, 0:img_w, :]
return Image.fromarray(canvas)
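# Usage sketch (hypothetical path): letterbox an input scan to the canvas
# size used by the UI below.
def _load_thumbnail(path, size=(960, 640)):
    return resize(Image.open(path).convert('RGB'), size)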
def detect_signature():
global app
if len(app.current_file) > 0:
clf = app.model
app.status("Extracting signature...")
start_time = time.time()
im = cv2.imread(app.current_file, 0)
mask = extract_signature(im, clf, preprocess=True)
im = cv2.imread(app.current_file)
im[np.where(mask==255)] = (0, 0, 255)
# Draw bounding box on image
        points = np.argwhere(mask==255) # find where the signature (mask == 255) pixels are
points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
x, y, w, h = cv2.boundingRect(points) # create a rectangle around those points
cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
app.show(im, app.input_view)
app.status("Done in %.2fs." % (time.time() - start_time))
def open_image():
global app
# open a file chooser dialog and allow the user to select an input image
current_file = filedialog.askopenfilename()
# ensure a file path was selected
if len(current_file) > 0:
app.status("Opening " + current_file.split("/")[-1] + "...")
app.current_file = current_file
# Open and display selected image
src = cv2.imread(app.current_file)
src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
app.show(src, app.input_view)
app.status("Step 2: Detect Signature")
class SignatureExtractor:
def __init__(self):
self.__root = tk.Tk()
self.__root.configure(background="white")
self.__root.title("Signature Extractor")
self.__root.resizable(width=False, height=False)
self.__root.geometry('{}x{}'.format(960, 720))
tk.Grid.rowconfigure(self.__root, 0, weight=1)
tk.Grid.columnconfigure(self.__root, 0, weight=1)
self.__center()
# Add a grid
mainframe = tk.Frame(self.__root)
mainframe.grid(rowspan=12, columnspan=4, sticky=(tk.N, tk.W, tk.E, tk.S))
tk.Grid.rowconfigure(mainframe, 0, weight=1)
tk.Grid.columnconfigure(mainframe, 0, weight=1)
# Create a Tkinter variable
self.model = joblib.load("models/decision-tree.pkl")
tk.Button(mainframe, text="Open an Image", command=open_image).grid(row=0, column=0, sticky=tk.E)
tk.Button(mainframe, text="Detect Signature", command=detect_signature).grid(row=0, column=1, sticky=tk.E)
# Create canvas where source image will be displayed
self.input_view = tk.Label(mainframe)
self.input_view.grid(row=1, column=0, columnspan=2)
self.show(np.ones((100, 100))*255, self.input_view)
self.__status = tk.Label(mainframe, text="Step 1: Open an Image")
self.__status.grid(row=2, column=0, sticky=tk.W)
self.current_file = ""
def __center(self):
self.__root.update_idletasks()
w = self.__root.winfo_screenwidth()
h = self.__root.winfo_screenheight()
size = tuple(int(_) for _ in self.__root.geometry().split('+')[0].split('x'))
x = w / 2 - size[0] / 2
y = h / 2 - size[1] / 2
self.__root.geometry("%dx%d+%d+%d" % (size + (x, y)))
def show(self, im, target):
try:
im = Image.fromarray(im).convert("RGB")
im = resize(im, (960, 640))
except Exception as ex:
im = Image.fromarray(np.ones((960, 640)) * 255.0)
im = ImageTk.PhotoImage(im)
target.configure(image=im)
target.image = im
def status(self, text):
self.__status['text'] = text
def start(self):
self.__root.mainloop()
if __name__ == '__main__':
app = SignatureExtractor()
app.start()
| [
"cv2.rectangle",
"core.extract_signature",
"tkinter.Button",
"numpy.array",
"tkinter.Label",
"tkinter.Frame",
"tkinter.Grid.rowconfigure",
"numpy.where",
"joblib.load",
"PIL.ImageTk.PhotoImage",
"tkinter.filedialog.askopenfilename",
"numpy.ones",
"numpy.fliplr",
"cv2.cvtColor",
"time.tim... | [((604, 619), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (612, 619), True, 'import numpy as np\n'), ((834, 857), 'PIL.Image.fromarray', 'Image.fromarray', (['canvas'], {}), '(canvas)\n', (849, 857), False, 'from PIL import Image, ImageTk\n'), ((1872, 1900), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (1898, 1900), False, 'from tkinter import filedialog\n'), ((633, 682), 'numpy.ones', 'np.ones', ([], {'shape': '(max_h, max_w, 3)', 'dtype': 'img.dtype'}), '(shape=(max_h, max_w, 3), dtype=img.dtype)\n', (640, 682), True, 'import numpy as np\n'), ((1026, 1037), 'time.time', 'time.time', ([], {}), '()\n', (1035, 1037), False, 'import time\n'), ((1052, 1083), 'cv2.imread', 'cv2.imread', (['app.current_file', '(0)'], {}), '(app.current_file, 0)\n', (1062, 1083), False, 'import cv2\n'), ((1099, 1142), 'core.extract_signature', 'extract_signature', (['im', 'clf'], {'preprocess': '(True)'}), '(im, clf, preprocess=True)\n', (1116, 1142), False, 'from core import extract_signature\n'), ((1157, 1185), 'cv2.imread', 'cv2.imread', (['app.current_file'], {}), '(app.current_file)\n', (1167, 1185), False, 'import cv2\n'), ((1287, 1311), 'numpy.argwhere', 'np.argwhere', (['(mask == 255)'], {}), '(mask == 255)\n', (1298, 1311), True, 'import numpy as np\n'), ((1362, 1379), 'numpy.fliplr', 'np.fliplr', (['points'], {}), '(points)\n', (1371, 1379), True, 'import numpy as np\n'), ((1466, 1490), 'cv2.boundingRect', 'cv2.boundingRect', (['points'], {}), '(points)\n', (1482, 1490), False, 'import cv2\n'), ((1541, 1598), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(im, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (1554, 1598), False, 'import cv2\n'), ((1601, 1636), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1613, 1636), False, 'import cv2\n'), ((2136, 2164), 'cv2.imread', 'cv2.imread', (['app.current_file'], {}), '(app.current_file)\n', (2146, 2164), False, 'import cv2\n'), ((2179, 2215), 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'cv2.COLOR_BGR2RGB'], {}), '(src, cv2.COLOR_BGR2RGB)\n', (2191, 2215), False, 'import cv2\n'), ((2375, 2382), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2380, 2382), True, 'import tkinter as tk\n'), ((2602, 2648), 'tkinter.Grid.rowconfigure', 'tk.Grid.rowconfigure', (['self.__root', '(0)'], {'weight': '(1)'}), '(self.__root, 0, weight=1)\n', (2622, 2648), True, 'import tkinter as tk\n'), ((2657, 2706), 'tkinter.Grid.columnconfigure', 'tk.Grid.columnconfigure', (['self.__root', '(0)'], {'weight': '(1)'}), '(self.__root, 0, weight=1)\n', (2680, 2706), True, 'import tkinter as tk\n'), ((2773, 2794), 'tkinter.Frame', 'tk.Frame', (['self.__root'], {}), '(self.__root)\n', (2781, 2794), True, 'import tkinter as tk\n'), ((2885, 2929), 'tkinter.Grid.rowconfigure', 'tk.Grid.rowconfigure', (['mainframe', '(0)'], {'weight': '(1)'}), '(mainframe, 0, weight=1)\n', (2905, 2929), True, 'import tkinter as tk\n'), ((2938, 2985), 'tkinter.Grid.columnconfigure', 'tk.Grid.columnconfigure', (['mainframe', '(0)'], {'weight': '(1)'}), '(mainframe, 0, weight=1)\n', (2961, 2985), True, 'import tkinter as tk\n'), ((3044, 3083), 'joblib.load', 'joblib.load', (['"""models/decision-tree.pkl"""'], {}), "('models/decision-tree.pkl')\n", (3055, 3083), False, 'import joblib\n'), ((3394, 3413), 'tkinter.Label', 'tk.Label', (['mainframe'], {}), '(mainframe)\n', (3402, 3413), True, 'import tkinter as tk\n'), ((3559, 3608), 'tkinter.Label', 'tk.Label', 
(['mainframe'], {'text': '"""Step 1: Open an Image"""'}), "(mainframe, text='Step 1: Open an Image')\n", (3567, 3608), True, 'import tkinter as tk\n'), ((4309, 4331), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['im'], {}), '(im)\n', (4327, 4331), False, 'from PIL import Image, ImageTk\n'), ((1197, 1218), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (1205, 1218), True, 'import numpy as np\n'), ((312, 325), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (319, 325), True, 'import numpy as np\n'), ((3093, 3155), 'tkinter.Button', 'tk.Button', (['mainframe'], {'text': '"""Open an Image"""', 'command': 'open_image'}), "(mainframe, text='Open an Image', command=open_image)\n", (3102, 3155), True, 'import tkinter as tk\n'), ((3199, 3270), 'tkinter.Button', 'tk.Button', (['mainframe'], {'text': '"""Detect Signature"""', 'command': 'detect_signature'}), "(mainframe, text='Detect Signature', command=detect_signature)\n", (3208, 3270), True, 'import tkinter as tk\n'), ((3492, 3511), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (3499, 3511), True, 'import numpy as np\n'), ((1713, 1724), 'time.time', 'time.time', ([], {}), '()\n', (1722, 1724), False, 'import time\n'), ((4126, 4145), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (4141, 4145), False, 'from PIL import Image, ImageTk\n'), ((4266, 4285), 'numpy.ones', 'np.ones', (['(960, 640)'], {}), '((960, 640))\n', (4273, 4285), True, 'import numpy as np\n')] |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
)
class TestLSTM(unittest.TestCase):
def setUp(self):
self.link = links.LSTM(self.in_size, self.out_size)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
self.link.zerograds()
self.upward = upward.copy() # fixed on CPU
self.lateral = lateral.copy() # fixed on CPU
x_shape = (4, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
def check_forward(self, x_data):
xp = self.link.xp
x = chainer.Variable(x_data)
h1 = self.link(x)
c0 = chainer.Variable(xp.zeros((len(self.x), self.out_size),
dtype=self.x.dtype))
c1_expect, h1_expect = functions.lstm(c0, self.link.upward(x))
gradient_check.assert_allclose(h1.data, h1_expect.data)
gradient_check.assert_allclose(self.link.h.data, h1_expect.data)
gradient_check.assert_allclose(self.link.c.data, c1_expect.data)
h2 = self.link(x)
c2_expect, h2_expect = \
functions.lstm(c1_expect,
self.link.upward(x) + self.link.lateral(h1))
gradient_check.assert_allclose(h2.data, h2_expect.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
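# Not part of the original file: a minimal way to run these tests directly
# (Chainer's own test runner may differ).
if __name__ == '__main__':
    unittest.main()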
| [
"chainer.testing.parameterize",
"chainer.Variable",
"chainer.gradient_check.assert_allclose",
"chainer.links.LSTM",
"numpy.random.uniform",
"chainer.cuda.to_gpu"
] | [((226, 316), 'chainer.testing.parameterize', 'testing.parameterize', (["{'in_size': 10, 'out_size': 10}", "{'in_size': 10, 'out_size': 40}"], {}), "({'in_size': 10, 'out_size': 10}, {'in_size': 10,\n 'out_size': 40})\n", (246, 316), False, 'from chainer import testing\n'), ((401, 440), 'chainer.links.LSTM', 'links.LSTM', (['self.in_size', 'self.out_size'], {}), '(self.in_size, self.out_size)\n', (411, 440), False, 'from chainer import links\n'), ((504, 545), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'upward.shape'], {}), '(-1, 1, upward.shape)\n', (524, 545), False, 'import numpy\n'), ((612, 654), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'lateral.shape'], {}), '(-1, 1, lateral.shape)\n', (632, 654), False, 'import numpy\n'), ((981, 1005), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (997, 1005), False, 'import chainer\n'), ((1240, 1295), 'chainer.gradient_check.assert_allclose', 'gradient_check.assert_allclose', (['h1.data', 'h1_expect.data'], {}), '(h1.data, h1_expect.data)\n', (1270, 1295), False, 'from chainer import gradient_check\n'), ((1304, 1368), 'chainer.gradient_check.assert_allclose', 'gradient_check.assert_allclose', (['self.link.h.data', 'h1_expect.data'], {}), '(self.link.h.data, h1_expect.data)\n', (1334, 1368), False, 'from chainer import gradient_check\n'), ((1377, 1441), 'chainer.gradient_check.assert_allclose', 'gradient_check.assert_allclose', (['self.link.c.data', 'c1_expect.data'], {}), '(self.link.c.data, c1_expect.data)\n', (1407, 1441), False, 'from chainer import gradient_check\n'), ((1620, 1675), 'chainer.gradient_check.assert_allclose', 'gradient_check.assert_allclose', (['h2.data', 'h2_expect.data'], {}), '(h2.data, h2_expect.data)\n', (1650, 1675), False, 'from chainer import gradient_check\n'), ((1845, 1864), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1856, 1864), False, 'from chainer import cuda\n'), ((846, 882), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'x_shape'], {}), '(-1, 1, x_shape)\n', (866, 882), False, 'import numpy\n')] |
import cv2
import numpy as np
from generic_dataset.dataset_folder_manager import DatasetFolderManager
from generic_dataset.utilities.color import Color
from generic_dataset.dataset_manager import DatasetManager
from gibson_env_utilities.doors_dataset.door_sample import DoorSample
dataset_path = '/home/michele/myfiles/doors_dataset'
# Create the DatasetFolderManager instance and read sample
folder_manager = DatasetFolderManager(dataset_path=dataset_path, folder_name='house1', sample_class=DoorSample)
# Load a sample (positive, label = 1)
sample: DoorSample = folder_manager.load_sample_using_relative_count(label=1, relative_count=0, use_thread=False)
sample.set_pretty_semantic_image(sample.get_semantic_image().copy())
sample.pipeline_depth_data_to_image().run(use_gpu=False).get_data()
sample.create_pretty_semantic_image(color=Color(red=0, green=255, blue=0))
display_image_0 = np.concatenate((sample.get_bgr_image(), cv2.cvtColor(sample.get_depth_image(), cv2.COLOR_GRAY2BGR)), axis=1)
display_image_1 = np.concatenate((sample.get_semantic_image(), sample.get_pretty_semantic_image()), axis=1)
cv2.imshow('sample', np.concatenate((display_image_0, display_image_1), axis=0))
cv2.waitKey()
# Create DatasetManager instance and display dataset information
dataset = DatasetManager(dataset_path=dataset_path, sample_class=DoorSample)
# Save the folders' metadata to disk
dataset.save_metadata()
print('The total number of examples per label:')
for label, count in dataset.get_sample_count().items():
print(' - {0} -> {1} samples'.format(label, count)) | [
"generic_dataset.dataset_folder_manager.DatasetFolderManager",
"generic_dataset.utilities.color.Color",
"generic_dataset.dataset_manager.DatasetManager",
"numpy.concatenate",
"cv2.waitKey"
] | [((413, 511), 'generic_dataset.dataset_folder_manager.DatasetFolderManager', 'DatasetFolderManager', ([], {'dataset_path': 'dataset_path', 'folder_name': '"""house1"""', 'sample_class': 'DoorSample'}), "(dataset_path=dataset_path, folder_name='house1',\n sample_class=DoorSample)\n", (433, 511), False, 'from generic_dataset.dataset_folder_manager import DatasetFolderManager\n'), ((1191, 1204), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1202, 1204), False, 'import cv2\n'), ((1281, 1347), 'generic_dataset.dataset_manager.DatasetManager', 'DatasetManager', ([], {'dataset_path': 'dataset_path', 'sample_class': 'DoorSample'}), '(dataset_path=dataset_path, sample_class=DoorSample)\n', (1295, 1347), False, 'from generic_dataset.dataset_manager import DatasetManager\n'), ((1131, 1189), 'numpy.concatenate', 'np.concatenate', (['(display_image_0, display_image_1)'], {'axis': '(0)'}), '((display_image_0, display_image_1), axis=0)\n', (1145, 1189), True, 'import numpy as np\n'), ((840, 871), 'generic_dataset.utilities.color.Color', 'Color', ([], {'red': '(0)', 'green': '(255)', 'blue': '(0)'}), '(red=0, green=255, blue=0)\n', (845, 871), False, 'from generic_dataset.utilities.color import Color\n')] |
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
# Read in the image
image = mpimg.imread('./images/waymo_car.jpg')
# Print out the image dimensions
print('Image dimensions:', image.shape)
# Change from color to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray_image, cmap='gray')
# Print specific grayscale pixel values
# What is the pixel value at x = 400 and y = 300 (on the body of the car)?
x = 400
y = 300
print(gray_image[y,x])
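# Illustrative extension (not in the original tutorial): mean intensity of a
# 20x20 patch centered on the same point.
patch = gray_image[y - 10:y + 10, x - 10:x + 10]
print('Patch mean:', patch.mean())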
# Find the maximum and minimum grayscale values in this image
max_val = np.amax(gray_image)
min_val = np.amin(gray_image)
print('Max: ', max_val)
print('Min: ', min_val)
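# Illustrative aside (not part of the original exercise): locate *where* the
# extremes occur. np.argmax returns a flat index, so unravel it into (y, x).
max_loc = np.unravel_index(np.argmax(gray_image), gray_image.shape)
print('Brightest pixel at (y, x):', max_loc)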
# Create a 5x5 image using just grayscale, numerical values
tiny_image = np.array([[0, 20, 30, 150, 120],
[200, 200, 250, 70, 3],
[50, 180, 85, 40, 90],
[240, 100, 50, 255, 10],
[30, 0, 75, 190, 220]])
# To show the pixel grid, use matshow
plt.matshow(tiny_image, cmap='gray')
plt.show() | [
"matplotlib.pyplot.imshow",
"numpy.amin",
"matplotlib.image.imread",
"numpy.array",
"cv2.cvtColor",
"matplotlib.pyplot.matshow",
"numpy.amax",
"matplotlib.pyplot.show"
] | [((124, 162), 'matplotlib.image.imread', 'mpimg.imread', (['"""./images/waymo_car.jpg"""'], {}), "('./images/waymo_car.jpg')\n", (136, 162), True, 'import matplotlib.image as mpimg\n'), ((283, 322), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (295, 322), False, 'import cv2\n'), ((323, 358), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gray_image'], {'cmap': '"""gray"""'}), "(gray_image, cmap='gray')\n", (333, 358), True, 'import matplotlib.pyplot as plt\n'), ((587, 606), 'numpy.amax', 'np.amax', (['gray_image'], {}), '(gray_image)\n', (594, 606), True, 'import numpy as np\n'), ((617, 636), 'numpy.amin', 'np.amin', (['gray_image'], {}), '(gray_image)\n', (624, 636), True, 'import numpy as np\n'), ((760, 893), 'numpy.array', 'np.array', (['[[0, 20, 30, 150, 120], [200, 200, 250, 70, 3], [50, 180, 85, 40, 90], [240,\n 100, 50, 255, 10], [30, 0, 75, 190, 220]]'], {}), '([[0, 20, 30, 150, 120], [200, 200, 250, 70, 3], [50, 180, 85, 40, \n 90], [240, 100, 50, 255, 10], [30, 0, 75, 190, 220]])\n', (768, 893), True, 'import numpy as np\n'), ((1016, 1052), 'matplotlib.pyplot.matshow', 'plt.matshow', (['tiny_image'], {'cmap': '"""gray"""'}), "(tiny_image, cmap='gray')\n", (1027, 1052), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1061, 1063), True, 'import matplotlib.pyplot as plt\n')] |
# pylint: disable=C,R,E1101
import torch
import numpy as np
class NormActivation(torch.nn.Module):
def __init__(self, dimensionalities, tensor_act=None, scalar_act=None, eps=1e-6, bias_min=.5, bias_max=2):
'''
        :param dimensionalities: list of dimensionalities of the capsules
        :param tensor_act: activation applied to the norms of higher-order capsules (defaults to Softplus)
        :param scalar_act: activation function applied to scalar capsules - in the last layer often set to None
        :param eps: regularizer added to the norm to prevent division by zero
        :param bias_min: lower cutoff of the uniform bias initialization
        :param bias_max: upper cutoff of the uniform bias initialization
        Scalar capsules are acted on by the scalar activation; higher-order capsules by a nonlinearity acting on their norm.
'''
super().__init__()
self.dimensionalities = dimensionalities
self.tensor_act = torch.nn.Softplus(beta=1, threshold=20) if not tensor_act else tensor_act
self.scalar_act = scalar_act
self.is_scalar = [dim == 1 for dim in dimensionalities]
nbias = int(np.sum(np.array(dimensionalities) != 1))
self.bias = torch.nn.Parameter(torch.Tensor(nbias)) if nbias > 0 else None
self.eps = eps
self.bias_min = bias_min
self.bias_max = bias_max
self.reset_parameters()
def reset_parameters(self):
if self.bias is not None:
self.bias.data.uniform_(self.bias_min, self.bias_max)
def forward(self, input): # pylint: disable=W
'''
:param input: [batch, feature, x, y, z]
'''
capsule_activations = []
idx_capsule_begin = 0
idx_bias = 0
for dim, scalar_bool in zip(self.dimensionalities, self.is_scalar):
# take capsule out of input
capsule = input[:, idx_capsule_begin:idx_capsule_begin + dim]
# act on scalar capsules with scalar activation
if scalar_bool:
if self.scalar_act is None:
capsule_activ = capsule
else:
capsule_activ = self.scalar_act(capsule)
# act on norms of higher order capsules
else:
norm = torch.norm(capsule, p=2, dim=1, keepdim=True) + self.eps # [batch, 1, x, y, z]
b = self.bias[idx_bias].expand_as(norm) # [batch, 1, x, y, z]
activ_factor = self.tensor_act(norm - b) # [batch, 1, x, y, z]
# activ_factor = 1 + torch.nn.ELU(norm - b.expand_as(norm)) # add 1 to make scaling factor positive
capsule_activ = activ_factor * (capsule / norm)
idx_bias += 1
# append to list of nonlinearly transformed capsules
capsule_activations.append(capsule_activ)
idx_capsule_begin += dim
assert idx_capsule_begin == input.size(1)
if self.bias is not None:
assert idx_bias == self.bias.size(0)
return torch.cat(capsule_activations, dim=1)
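# A minimal usage sketch (illustrative, not part of the original module):
# one scalar capsule and one 3-dimensional capsule per location gives
# 1 + 3 = 4 input channels on a [batch, feature, x, y, z] tensor.
#
#   act = NormActivation([1, 3], scalar_act=torch.nn.ReLU())
#   out = act(torch.randn(8, 4, 2, 2, 2))  # output shape equals input shape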
class NormSoftplus(torch.nn.Module):
def __init__(self, dimensionalities, scalar_act, eps=1e-6, bias_min=.5, bias_max=2):
'''
        :param dimensionalities: list of dimensionalities of the capsules
        :param scalar_act: activation function applied to scalar capsules - in the last layer often set to None
        :param eps: regularizer added to the norm to prevent division by zero
        :param bias_min: lower cutoff of the uniform bias initialization
        :param bias_max: upper cutoff of the uniform bias initialization
        Scalar capsules are acted on by the scalar activation; higher-order capsules by a Softplus acting on their norm.
'''
super().__init__()
self.dimensionalities = dimensionalities
self.scalar_act = scalar_act
self.is_scalar = [dim == 1 for dim in dimensionalities]
nbias = int(np.sum(np.array(dimensionalities) != 1))
self.bias = torch.nn.Parameter(torch.Tensor(nbias)) if nbias > 0 else None
self.eps = eps
self.bias_min = bias_min
self.bias_max = bias_max
self.reset_parameters()
def reset_parameters(self):
if self.bias is not None:
self.bias.data.uniform_(self.bias_min, self.bias_max)
def forward(self, input): # pylint: disable=W
'''
:param input: [batch, feature, x, y, z]
'''
capsule_activations = []
idx_capsule_begin = 0
idx_bias = 0
for dim, scalar_bool in zip(self.dimensionalities, self.is_scalar):
# take capsule out of input
capsule = input[:, idx_capsule_begin:idx_capsule_begin + dim]
# act on scalar capsules with scalar activation
if scalar_bool:
if self.scalar_act is None:
capsule_activ = capsule
else:
capsule_activ = self.scalar_act(capsule)
# act on norms of higher order capsules
else:
norm = torch.norm(capsule, p=2, dim=1, keepdim=True) + self.eps # [batch, 1, x, y, z]
b = self.bias[idx_bias].expand_as(norm) # [batch, 1, x, y, z]
activ_factor = torch.nn.Softplus(beta=1, threshold=20)(norm - b) # [batch, 1, x, y, z]
# activ_factor = 1 + torch.nn.ELU(norm - b.expand_as(norm)) # add 1 to make scaling factor positive
capsule_activ = activ_factor * (capsule / norm)
idx_bias += 1
# append to list of nonlinearly transformed capsules
capsule_activations.append(capsule_activ)
idx_capsule_begin += dim
assert idx_capsule_begin == input.size(1)
if self.bias is not None:
assert idx_bias == self.bias.size(0)
return torch.cat(capsule_activations, dim=1)
class NormRelu(torch.nn.Module):
def __init__(self, enable):
'''
:param enable: list of tuple (dimension, boolean)
If boolean is True a bias and relu will be applied
'''
super().__init__()
self.enable = enable
nbias = sum([1 for d, on in self.enable if on])
self.bias = torch.nn.Parameter(torch.FloatTensor(nbias)) if nbias > 0 else None
self.reset_parameters()
def reset_parameters(self):
if self.bias is not None:
self.bias.data[:] = 0.1
def forward(self, input): # pylint: disable=W
'''
:param input: [batch, feature, x, y, z]
'''
if self.bias is None:
return input
xs = []
begin1 = 0
begin2 = 0
for d, on in self.enable:
x = input[:, begin1:begin1 + d]
if on:
x = NormReluFunction()(x, self.bias[begin2:begin2 + 1])
begin2 += 1
xs.append(x)
begin1 += d
assert begin1 == input.size(1)
assert begin2 == self.bias.size(0)
return torch.cat(xs, dim=1)
class NormReluFunction(torch.autograd.Function):
def forward(self, x, b): # pylint: disable=W
norm = torch.sqrt(torch.sum(x * x, dim=1)) + 1e-8 # [batch, x, y, z]
newnorm = norm - b.expand_as(norm) # [batch, x, y, z]
newnorm[newnorm < 0] = 0
ratio = newnorm / norm
ratio = ratio.reshape(x.size(0), 1, x.size(2), x.size(3), x.size(4)).expand_as(x)
self.save_for_backward(x, b)
r = x * ratio
return r
def backward(self, grad_out): # pylint: disable=W
x, b = self.saved_tensors
norm = torch.sqrt(torch.sum(x * x, dim=1)) + 1e-8 # [batch, x, y, z]
grad_x = grad_b = None
if self.needs_input_grad[0]:
newnorm = norm - b.expand_as(norm) # [batch, x, y, z]
newnorm[newnorm < 0] = 0
ratio = newnorm / norm
ratio = ratio.reshape(x.size(0), 1, x.size(2), x.size(3), x.size(4)).expand_as(x)
grad_x = grad_out * ratio
grad_x += torch.sum(grad_out * x, dim=1, keepdim=True).expand_as(x) * x / \
(norm ** 2).reshape(x.size(0), 1, x.size(2), x.size(3), x.size(4)).expand_as(x) * (1 - ratio)
grad_x[ratio <= 0] = 0
if self.needs_input_grad[1]:
grad_b = -torch.sum(grad_out * x, dim=1) / norm
grad_b[norm < b] = 0
grad_b = torch.sum(grad_b.reshape(-1), dim=0)
return grad_x, grad_b
def test_norm_relu_gradient():
x = torch.autograd.Variable(torch.rand(1, 5, 3, 3, 3), requires_grad=True)
b = torch.autograd.Variable(torch.rand(1), requires_grad=True)
torch.autograd.gradcheck(NormReluFunction(), (x, b), eps=1e-3, rtol=1e-2)
| [
"torch.nn.Softplus",
"torch.Tensor",
"torch.cat",
"numpy.array",
"torch.norm",
"torch.sum",
"torch.FloatTensor",
"torch.rand"
] | [((2936, 2973), 'torch.cat', 'torch.cat', (['capsule_activations'], {'dim': '(1)'}), '(capsule_activations, dim=1)\n', (2945, 2973), False, 'import torch\n'), ((5750, 5787), 'torch.cat', 'torch.cat', (['capsule_activations'], {'dim': '(1)'}), '(capsule_activations, dim=1)\n', (5759, 5787), False, 'import torch\n'), ((6917, 6937), 'torch.cat', 'torch.cat', (['xs'], {'dim': '(1)'}), '(xs, dim=1)\n', (6926, 6937), False, 'import torch\n'), ((8440, 8465), 'torch.rand', 'torch.rand', (['(1)', '(5)', '(3)', '(3)', '(3)'], {}), '(1, 5, 3, 3, 3)\n', (8450, 8465), False, 'import torch\n'), ((8519, 8532), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (8529, 8532), False, 'import torch\n'), ((859, 898), 'torch.nn.Softplus', 'torch.nn.Softplus', ([], {'beta': '(1)', 'threshold': '(20)'}), '(beta=1, threshold=20)\n', (876, 898), False, 'import torch\n'), ((1134, 1153), 'torch.Tensor', 'torch.Tensor', (['nbias'], {}), '(nbias)\n', (1146, 1153), False, 'import torch\n'), ((3924, 3943), 'torch.Tensor', 'torch.Tensor', (['nbias'], {}), '(nbias)\n', (3936, 3943), False, 'import torch\n'), ((6149, 6173), 'torch.FloatTensor', 'torch.FloatTensor', (['nbias'], {}), '(nbias)\n', (6166, 6173), False, 'import torch\n'), ((7065, 7088), 'torch.sum', 'torch.sum', (['(x * x)'], {'dim': '(1)'}), '(x * x, dim=1)\n', (7074, 7088), False, 'import torch\n'), ((7528, 7551), 'torch.sum', 'torch.sum', (['(x * x)'], {'dim': '(1)'}), '(x * x, dim=1)\n', (7537, 7551), False, 'import torch\n'), ((1061, 1087), 'numpy.array', 'np.array', (['dimensionalities'], {}), '(dimensionalities)\n', (1069, 1087), True, 'import numpy as np\n'), ((2183, 2228), 'torch.norm', 'torch.norm', (['capsule'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(capsule, p=2, dim=1, keepdim=True)\n', (2193, 2228), False, 'import torch\n'), ((3851, 3877), 'numpy.array', 'np.array', (['dimensionalities'], {}), '(dimensionalities)\n', (3859, 3877), True, 'import numpy as np\n'), ((4973, 5018), 'torch.norm', 'torch.norm', (['capsule'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(capsule, p=2, dim=1, keepdim=True)\n', (4983, 5018), False, 'import torch\n'), ((5163, 5202), 'torch.nn.Softplus', 'torch.nn.Softplus', ([], {'beta': '(1)', 'threshold': '(20)'}), '(beta=1, threshold=20)\n', (5180, 5202), False, 'import torch\n'), ((8215, 8245), 'torch.sum', 'torch.sum', (['(grad_out * x)'], {'dim': '(1)'}), '(grad_out * x, dim=1)\n', (8224, 8245), False, 'import torch\n'), ((7944, 7988), 'torch.sum', 'torch.sum', (['(grad_out * x)'], {'dim': '(1)', 'keepdim': '(True)'}), '(grad_out * x, dim=1, keepdim=True)\n', (7953, 7988), False, 'import torch\n')] |
# suppress TensorFlow log/warning messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# required libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from local_utils import detect_lp
from os.path import splitext,basename
from keras.models import model_from_json
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.mobilenet_v2 import preprocess_input
from sklearn.preprocessing import LabelEncoder
import glob
import pytesseract
import argparse
def load_model(path):
try:
path = splitext(path)[0]
with open('%s.json' % path, 'r') as json_file:
model_json = json_file.read()
model = model_from_json(model_json, custom_objects={})
model.load_weights('%s.h5' % path)
print("Loading model successfully...")
return model
except Exception as e:
print(e)
def preprocess_image(image_path,resize=False):
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
if resize:
img = cv2.resize(img, (224,224))
return img
def get_plate(image_path,wpod_net, Dmax=608, Dmin = 256):
vehicle = preprocess_image(image_path)
ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
side = int(ratio * Dmin)
bound_dim = min(side, Dmax)
_ , LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)
return vehicle, LpImg, cor
def sort_contours(cnts,reverse = False):
i = 0
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b: b[1][i], reverse=reverse))
return cnts
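# Example: with i = 0 (the default), contours whose bounding boxes start at
# x = 40, 5 and 20 come back ordered 5, 20, 40, i.e. left-to-right reading order.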
def predict_from_model(image,model,labels):
image = cv2.resize(image,(80,80))
image = np.stack((image,)*3, axis=-1)
prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis,:]))])
return prediction
def predict_by_tesseract(plate_image):
pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'
text = pytesseract.image_to_string(plate_image, config='--psm 13')
#processed_text = ''.join([ch for ch in text if ch.isalnum()])
#return processed_text
return text
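# Note: '--psm 13' selects Tesseract's "raw line" page-segmentation mode,
# which treats the image as a single text line - a reasonable fit for a
# cropped license plate.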
def predict_using_character_segmentation(plate_image,gray,blur,binary,thre_mor):
cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # create a copy "test_roi" of plate_image to draw bounding boxes on
test_roi = plate_image.copy()
    # Initialize a list which will be used to collect character images
crop_characters = []
# define standard width and height of character
digit_w, digit_h = 30, 60
for c in sort_contours(cont):
(x, y, w, h) = cv2.boundingRect(c)
ratio = h/w
print(ratio)
        if 1<=ratio<=3.5: # Only select contours with the defined aspect ratio
if h/plate_image.shape[0]>=0.5: # Select contour which has the height larger than 50% of the plate
                # Draw a bounding box around the digit
cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255,0), 2)
                # Separate the character and run prediction
curr_num = thre_mor[y:y+h,x:x+w]
#curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
_, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
crop_characters.append(curr_num)
print("Detect {} letters...".format(len(crop_characters)))
if len(crop_characters) == 0:
return None
json_file = open('MobileNets_character_recognition.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights("License_character_recognition_weight.h5")
#print("[INFO] Model loaded successfully...")
labels = LabelEncoder()
labels.classes_ = np.load('license_character_classes.npy')
#print("[INFO] Labels loaded successfully...")
cols = len(crop_characters)
final_string = ''
for i,character in enumerate(crop_characters):
title = np.array2string(predict_from_model(character,model,labels))
final_string+=title.strip("'[]")
return final_string
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True,
help="path to input image")
args = vars(ap.parse_args())
wpod_net_path = "wpod-net.json"
wpod_net = load_model(wpod_net_path)
test_image_path = args['path']
vehicle, LpImg,cor = get_plate(test_image_path,wpod_net)
if (len(LpImg)): #check if there is at least one license image
# Scales, calculates absolute values, and converts the result to 8-bit.
plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))
# convert to grayscale and blur the image
gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(7,7),0)
    # Apply inverse binary thresholding
binary = cv2.threshold(blur, 180, 255,
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
segment_predict = predict_using_character_segmentation(plate_image,gray,blur,binary,thre_mor)
tess_predict = predict_by_tesseract(plate_image)
print('Character Segmentation prediction: ',segment_predict)
print('Tesseract Predict: ',tess_predict)
cv2.imshow("Vehicle Image", vehicle)
cv2.imshow("Number plate", plate_image)
cv2.waitKey(0) #Wait for user input before closing the images displayed | [
"cv2.rectangle",
"sklearn.preprocessing.LabelEncoder",
"cv2.convertScaleAbs",
"cv2.imshow",
"argparse.ArgumentParser",
"cv2.threshold",
"numpy.stack",
"cv2.waitKey",
"os.path.splitext",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.imread",
"keras.models.mode... | [((4434, 4459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4457, 4459), False, 'import argparse\n'), ((5640, 5676), 'cv2.imshow', 'cv2.imshow', (['"""Vehicle Image"""', 'vehicle'], {}), "('Vehicle Image', vehicle)\n", (5650, 5676), False, 'import cv2\n'), ((5678, 5717), 'cv2.imshow', 'cv2.imshow', (['"""Number plate"""', 'plate_image'], {}), "('Number plate', plate_image)\n", (5688, 5717), False, 'import cv2\n'), ((5721, 5735), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5732, 5735), False, 'import cv2\n'), ((996, 1018), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1006, 1018), False, 'import cv2\n'), ((1030, 1066), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1042, 1066), False, 'import cv2\n'), ((1423, 1480), 'local_utils.detect_lp', 'detect_lp', (['wpod_net', 'vehicle', 'bound_dim'], {'lp_threshold': '(0.5)'}), '(wpod_net, vehicle, bound_dim, lp_threshold=0.5)\n', (1432, 1480), False, 'from local_utils import detect_lp\n'), ((1851, 1878), 'cv2.resize', 'cv2.resize', (['image', '(80, 80)'], {}), '(image, (80, 80))\n', (1861, 1878), False, 'import cv2\n'), ((1890, 1921), 'numpy.stack', 'np.stack', (['((image,) * 3)'], {'axis': '(-1)'}), '((image,) * 3, axis=-1)\n', (1898, 1921), True, 'import numpy as np\n'), ((2181, 2240), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['plate_image'], {'config': '"""--psm 13"""'}), "(plate_image, config='--psm 13')\n", (2208, 2240), False, 'import pytesseract\n'), ((3868, 3902), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (3883, 3902), False, 'from keras.models import model_from_json\n'), ((4037, 4051), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4049, 4051), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4075, 4115), 'numpy.load', 'np.load', (['"""license_character_classes.npy"""'], {}), "('license_character_classes.npy')\n", (4082, 4115), True, 'import numpy as np\n'), ((4898, 4940), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['LpImg[0]'], {'alpha': '(255.0)'}), '(LpImg[0], alpha=255.0)\n', (4917, 4940), False, 'import cv2\n'), ((5004, 5049), 'cv2.cvtColor', 'cv2.cvtColor', (['plate_image', 'cv2.COLOR_BGR2GRAY'], {}), '(plate_image, cv2.COLOR_BGR2GRAY)\n', (5016, 5049), False, 'import cv2\n'), ((5062, 5095), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (5078, 5095), False, 'import cv2\n'), ((5265, 5314), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (5290, 5314), False, 'import cv2\n'), ((5331, 5382), 'cv2.morphologyEx', 'cv2.morphologyEx', (['binary', 'cv2.MORPH_DILATE', 'kernel3'], {}), '(binary, cv2.MORPH_DILATE, kernel3)\n', (5347, 5382), False, 'import cv2\n'), ((728, 774), 'keras.models.model_from_json', 'model_from_json', (['model_json'], {'custom_objects': '{}'}), '(model_json, custom_objects={})\n', (743, 774), False, 'from keras.models import model_from_json\n'), ((1119, 1146), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1129, 1146), False, 'import cv2\n'), ((1590, 1609), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1606, 1609), False, 'import cv2\n'), ((2454, 2522), 'cv2.findContours', 'cv2.findContours', (['binary', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(binary, 
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2470, 2522), False, 'import cv2\n'), ((2883, 2902), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (2899, 2902), False, 'import cv2\n'), ((5148, 5218), 'cv2.threshold', 'cv2.threshold', (['blur', '(180)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(blur, 180, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (5161, 5218), False, 'import cv2\n'), ((594, 608), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (602, 608), False, 'from os.path import splitext, basename\n'), ((3201, 3264), 'cv2.rectangle', 'cv2.rectangle', (['test_roi', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (3214, 3264), False, 'import cv2\n'), ((3477, 3547), 'cv2.threshold', 'cv2.threshold', (['curr_num', '(220)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3490, 3547), False, 'import cv2\n')] |
#!/usr/bin/python
# Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import unittest
import test_util as tu
import numpy as np
import tritonclient.http as httpclient
from tritonclient.utils import np_to_triton_dtype
from tritonclient.utils import InferenceServerException
class IONamingConvention(tu.TestResultCollector):
def _infer_helper(self, model_name, io_names, reversed_order=False):
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=False)
# Create the data for the two inputs. Initialize the first to unique
        # integers and the second to all negative ones.
input0_data = np.arange(start=0, stop=16, dtype=np.float32)
input0_data = np.expand_dims(input0_data, axis=0)
input1_data = np.full(shape=(1, 16), fill_value=-1, dtype=np.float32)
inputs = []
output_req = []
inputs.append(
httpclient.InferInput(
io_names[0] if not reversed_order else io_names[1], [1, 16],
"FP32"))
inputs[-1].set_data_from_numpy(input0_data)
inputs.append(
httpclient.InferInput(
io_names[1] if not reversed_order else io_names[0], [1, 16],
"FP32"))
inputs[-1].set_data_from_numpy(input1_data)
output_req.append(
httpclient.InferRequestedOutput(io_names[2], binary_data=True))
output_req.append(
httpclient.InferRequestedOutput(io_names[3], binary_data=True))
results = triton_client.infer(model_name, inputs, outputs=output_req)
output0_data = results.as_numpy(
io_names[2] if not reversed_order else io_names[3])
output1_data = results.as_numpy(
io_names[3] if not reversed_order else io_names[2])
for i in range(16):
self.assertEqual(input0_data[0][i] - input1_data[0][i],
output0_data[0][i])
self.assertEqual(input0_data[0][i] + input1_data[0][i],
output1_data[0][i])
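        # Example: for io_names = ["INPUT0", "INPUT1", "OUTPUT0", "OUTPUT1"],
        # the assertions above verify OUTPUT0 = INPUT0 - INPUT1 and
        # OUTPUT1 = INPUT0 + INPUT1 element-wise across all 16 values.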
def test_io_index(self):
io_names = ["INPUT__0", "INPUT__1", "OUTPUT__0", "OUTPUT__1"]
self._infer_helper("libtorch_io_index", io_names)
def test_output_index(self):
io_names = ["INPUT0", "INPUT1", "OUTPUT__0", "OUTPUT__1"]
self._infer_helper("libtorch_output_index", io_names)
def test_no_output_index(self):
io_names = ["INPUT0", "INPUT1", "OUTPUT0", "OUTPUT1"]
self._infer_helper("libtorch_no_output_index", io_names)
def test_no_arguments_no_output_index(self):
io_names = ["INPUTA", "INPUTB", "OUTPUTA", "OUTPUTB"]
self._infer_helper("libtorch_no_arguments_output_index", io_names)
def test_mix_index(self):
io_names = ["INPUTA", "INPUT__1", "OUTPUTA", "OUTPUT__1"]
self._infer_helper("libtorch_mix_index", io_names)
def test_mix_arguments(self):
io_names = ["INPUT0", "INPUTB", "OUTPUTA", "OUTPUT__1"]
self._infer_helper("libtorch_mix_arguments", io_names)
def test_mix_arguments_index(self):
io_names = ["INPUT0", "INPUT__1", "OUTPUT0", "OUTPUT__1"]
self._infer_helper("libtorch_mix_arguments_index", io_names)
def test_unordered_index(self):
io_names = ["INPUT1", "INPUT0", "OUT__1", "OUT__0"]
self._infer_helper("libtorch_unordered_index",
io_names,
reversed_order=True)
if __name__ == '__main__':
unittest.main()
| [
"tritonclient.http.InferInput",
"tritonclient.http.InferenceServerClient",
"builtins.range",
"tritonclient.http.InferRequestedOutput",
"numpy.expand_dims",
"unittest.main",
"numpy.full",
"sys.path.append",
"numpy.arange"
] | [((1572, 1600), 'sys.path.append', 'sys.path.append', (['"""../common"""'], {}), "('../common')\n", (1587, 1600), False, 'import sys\n'), ((5132, 5147), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5145, 5147), False, 'import unittest\n'), ((2018, 2083), 'tritonclient.http.InferenceServerClient', 'httpclient.InferenceServerClient', (['"""localhost:8000"""'], {'verbose': '(False)'}), "('localhost:8000', verbose=False)\n", (2050, 2083), True, 'import tritonclient.http as httpclient\n'), ((2288, 2333), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(16)', 'dtype': 'np.float32'}), '(start=0, stop=16, dtype=np.float32)\n', (2297, 2333), True, 'import numpy as np\n'), ((2356, 2391), 'numpy.expand_dims', 'np.expand_dims', (['input0_data'], {'axis': '(0)'}), '(input0_data, axis=0)\n', (2370, 2391), True, 'import numpy as np\n'), ((2414, 2469), 'numpy.full', 'np.full', ([], {'shape': '(1, 16)', 'fill_value': '(-1)', 'dtype': 'np.float32'}), '(shape=(1, 16), fill_value=-1, dtype=np.float32)\n', (2421, 2469), True, 'import numpy as np\n'), ((3452, 3461), 'builtins.range', 'range', (['(16)'], {}), '(16)\n', (3457, 3461), False, 'from builtins import range\n'), ((2550, 2645), 'tritonclient.http.InferInput', 'httpclient.InferInput', (['(io_names[0] if not reversed_order else io_names[1])', '[1, 16]', '"""FP32"""'], {}), "(io_names[0] if not reversed_order else io_names[1], [\n 1, 16], 'FP32')\n", (2571, 2645), True, 'import tritonclient.http as httpclient\n'), ((2762, 2857), 'tritonclient.http.InferInput', 'httpclient.InferInput', (['(io_names[1] if not reversed_order else io_names[0])', '[1, 16]', '"""FP32"""'], {}), "(io_names[1] if not reversed_order else io_names[0], [\n 1, 16], 'FP32')\n", (2783, 2857), True, 'import tritonclient.http as httpclient\n'), ((2978, 3040), 'tritonclient.http.InferRequestedOutput', 'httpclient.InferRequestedOutput', (['io_names[2]'], {'binary_data': '(True)'}), '(io_names[2], binary_data=True)\n', (3009, 3040), True, 'import tritonclient.http as httpclient\n'), ((3081, 3143), 'tritonclient.http.InferRequestedOutput', 'httpclient.InferRequestedOutput', (['io_names[3]'], {'binary_data': '(True)'}), '(io_names[3], binary_data=True)\n', (3112, 3143), True, 'import tritonclient.http as httpclient\n')] |
# coding: utf8
"""
A set of functions for manipulating an instance of the "Le jardinier et
les taupes" ("The gardener and the moles") problem, in the case where the objective is:
- that no mole can get into the garden;
- that the number of traps is minimal.
"""
import os
import itertools
import numpy as np
import pulp
def _dimcheck(grid, threshold):
"""
    Indicates whether the grid of which `grid` is the complement is admissible
    in its last dimension.
    Parameters:
    -----------
    - grid: numpy array
        Array containing 0s (occupied space) and 1s (free space).
    - threshold: positive integer
        Number of adjacent free spaces from which the grid is
        considered non-admissible.
    Examples:
    ---------
    # Create a grid with two adjacent free spaces:
>>> grid = np.array([0, 1, 1, 0, 0])
>>> _dimcheck(grid, 2)
False
>>> _dimcheck(grid, 3)
True
"""
dsize = grid.shape[-1]
if threshold > dsize:
return True
elif threshold < 0:
raise ValueError("threshold must be positive.")
check = 0
for start in range(threshold):
check += grid[..., start:(dsize - threshold + 1 + start)]
if np.any(check >= threshold):
return False
else:
return True
def admissible(grid, threshold):
"""
    Indicates whether the grid `grid` is admissible. A grid is admissible if
    it never contains more than `threshold` - 1 adjacent free spaces.
    Parameters:
    -----------
    - grid: numpy array
        Array containing 0s (free space) and 1s (occupied space).
    - threshold: positive integer
        Number of adjacent free spaces from which the grid is
        considered non-admissible.
    Examples:
    ---------
>>> grid = np.array([0, 1, 1, 0, 0, 1]).reshape((2, 3))
>>> admissible(grid, 2)
False
>>> admissible(grid, 3)
True
"""
    # The computation method is brute force.
    comp = np.where(grid, 0, 1) # Work on the complement of grid.
res = True
for _ in range(comp.ndim):
res = (res and _dimcheck(comp, threshold))
if res is False:
break
        # Circular permutation of the axes:
comp = comp.transpose(comp.ndim - 1, *range(comp.ndim - 1))
return res
def score(grid, threshold):
"""
    Computes the score associated with the grid `grid`. The lower the score,
    the better; if the grid is not admissible, returns a score
    equal to infinity.
    Parameters:
    -----------
    - grid: numpy array
        Array containing 0s (free space) and 1s (occupied space).
    - threshold: positive integer
        Number of adjacent free spaces from which the grid is
        considered non-admissible.
"""
if admissible(grid, threshold):
return grid.sum()
else:
return np.inf
def generate(shape, npoints):
"""
    Generates a grid with shape `shape` containing `npoints` traps.
    Parameters:
    -----------
    - shape: positive integer, tuple of positive integers
        Dimensions of the grid.
    - npoints: positive integer
        Number of imposed traps to place at random in the grid.
"""
size = np.product(shape)
if size <= 0:
raise ValueError("the shape %s should contain positive values only."\
% str(shape))
points = np.random.choice(np.arange(size), npoints, replace=False)
grid = np.zeros(size, dtype=np.int)
grid[points] = 1
return grid.reshape(shape)
def _key(indexes, fromstr=False):
"""
    Converts `indexes`:
    - from a tuple of indices to a string (`fromstr == False`);
    - from a string to a tuple of indices (`fromstr == True`).
"""
sep = '_'
if fromstr:
return tuple([int(idx) for idx in indexes.split(sep)])
else:
return sep.join((str(idx) for idx in indexes))
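# Example: _key((1, 2, 3)) returns '1_2_3', and _key('1_2_3', fromstr=True)
# returns the tuple (1, 2, 3).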
def _slice(index, i, nneighbors, lim):
"""
    Starting from the index `index`, returns the next `nneighbors` indices
    in dimension `i` (including `index`, which is the first element of
    the list).
    If some of the computed indices exceed `lim` in dimension
    `i`, returns an empty list.
    Parameters:
    -----------
    - index: tuple of indices
    - i: integer
        Dimension to consider.
    - nneighbors: positive integer
        Number of indices to consider in dimension `i` starting from
        the index `index`.
    - lim: positive integer
        Maximal index in dimension `i`.
    Examples:
    ---------
>>> index = (1, 2, 3)
>>> _slice(index, i=0, nneighbors=4, lim=4)
[(1, 2, 3), (2, 2, 3), (3, 2, 3), (4, 2, 3)]
>>> _slice(index, i=0, nneighbors=4, lim=3)
[]
>>> _slice(index, i=1, nneighbors=2, lim=10)
[(1, 2, 3), (1, 3, 3)]
"""
rng = range(nneighbors)
if index[i] + rng[-1] > lim:
return []
return [index[:i] + (index[i] + j,) + index[(i+1):] for j in rng]
def solve(grid, threshold, name, compdir=None):
"""
    Solves the "Le jardinier et les taupes" problem for the grid `grid`,
    with moles of size `threshold`.
    Parameters:
    -----------
    - grid: numpy array
        Array containing 0s (free space) and 1s (trap already placed).
    - threshold: positive integer
        Number of adjacent free spaces from which the grid is
        considered non-admissible (= size of the moles).
    - name: string
        Name of the instance to solve.
    - compdir: string, None by default
        Directory in which to run the computations (the current
        directory is used if `compdir` is `None`).
    Note: the instance name is used as the name for the solver's
    instruction file. If the function must be run several times
    in parallel, `name` has to be unique.
"""
    # Initialize the problem:
prob = pulp.LpProblem(name, pulp.LpMinimize)
    # Declare the variables:
varnames = []
for index in itertools.product(*(range(size) for size in grid.shape)):
if grid[index] == 0:
varnames.append(_key(index))
varprefix = "Cells"
cells = pulp.LpVariable.dicts(varprefix, varnames, 0, 1, 'Integer')
    # Declare the objective function:
prob += pulp.lpSum([cells[idx] for idx in varnames]), "Non empty points"
    # Declare the variables and the constraints:
    # TODO: without getting complicated, there is room for improvement:
    # - in the loop, it is not necessary to iterate all the way to `size`;
    # - some variables can be eliminated in advance (unless PuLP
    #   already does this).
it = itertools.product(*(range(size) for size in grid.shape))
it = filter(lambda x: grid[x] == 0, it)
for index in it:
        for i in range(len(index)): # Iterate over all the dimensions.
neighbors = _slice(index, i, threshold, grid.shape[i] - 1)
            # If we are close to the edge of the grid (`neighbors` is
            # empty) or if there is a nearby trap in every direction,
            # there is no need to go further:
if neighbors and all([_key(j) in varnames for j in neighbors]):
prob += pulp.lpSum([cells[_key(j)] for j in neighbors]) >= 1,\
"Cell_%s_dim_%d" % (_key(index), i)
    # Solve the problem:
fname = "%s.lp" % name
if compdir is not None:
fname = os.path.join(compdir, fname)
prob.writeLP(fname)
try:
prob.solve()
finally:
os.remove(fname)
    # Check the optimality of the solution:
status = pulp.constants.LpStatus[prob.status].lower()
if status != 'optimal':
raise ValueError("optimization %s did not converge." % name)
    # Format the result:
res = grid.copy()
for cell in prob.variables():
        # PuLP renames variables in a rather inconvenient way:
name = cell.name.replace(varprefix + '_', '')
if name in varnames:
res[_key(name, fromstr=True)] = cell.varValue
return res
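# Minimal usage sketch (illustrative; assumes PuLP's default bundled solver
# is installed and callable):
#   grid = generate((6, 6), npoints=0)          # empty 6x6 garden
#   traps = solve(grid, threshold=3, name='demo')
#   assert admissible(traps, 3) and score(traps, 3) == traps.sum()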
| [
"pulp.LpProblem",
"numpy.product",
"pulp.LpVariable.dicts",
"pulp.lpSum",
"numpy.where",
"os.path.join",
"numpy.any",
"numpy.zeros",
"numpy.arange",
"os.remove"
] | [((1222, 1248), 'numpy.any', 'np.any', (['(check >= threshold)'], {}), '(check >= threshold)\n', (1228, 1248), True, 'import numpy as np\n'), ((2005, 2025), 'numpy.where', 'np.where', (['grid', '(0)', '(1)'], {}), '(grid, 0, 1)\n', (2013, 2025), True, 'import numpy as np\n'), ((3289, 3306), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (3299, 3306), True, 'import numpy as np\n'), ((3524, 3552), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int'}), '(size, dtype=np.int)\n', (3532, 3552), True, 'import numpy as np\n'), ((6081, 6118), 'pulp.LpProblem', 'pulp.LpProblem', (['name', 'pulp.LpMinimize'], {}), '(name, pulp.LpMinimize)\n', (6095, 6118), False, 'import pulp\n'), ((6352, 6411), 'pulp.LpVariable.dicts', 'pulp.LpVariable.dicts', (['varprefix', 'varnames', '(0)', '(1)', '"""Integer"""'], {}), "(varprefix, varnames, 0, 1, 'Integer')\n", (6373, 6411), False, 'import pulp\n'), ((3472, 3487), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (3481, 3487), True, 'import numpy as np\n'), ((6468, 6512), 'pulp.lpSum', 'pulp.lpSum', (['[cells[idx] for idx in varnames]'], {}), '([cells[idx] for idx in varnames])\n', (6478, 6512), False, 'import pulp\n'), ((7619, 7647), 'os.path.join', 'os.path.join', (['compdir', 'fname'], {}), '(compdir, fname)\n', (7631, 7647), False, 'import os\n'), ((7723, 7739), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (7732, 7739), False, 'import os\n')] |
#!/usr/bin/env python
# coding=utf-8
"""
Evaluation (t-tree comparison functions).
"""
from __future__ import unicode_literals
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
from collections import defaultdict
from enum import Enum
from tgen.logf import log_debug, log_warn, log_info
from tgen.tree import TreeData, TreeNode
from tgen.futil import add_bundle_text
import numpy as np
try:
from pytreex.core.node import T
except ImportError:
log_warn('Pytreex modules not available, will not be able to evaluate trees.')
EvalTypes = Enum('EvalTypes', 'TOKEN NODE DEP')
EvalTypes.__doc__ = """Evaluation flavors (tokens, tree node-only, tree dependency)"""
def collect_counts(sent, eval_type=EvalTypes.NODE):
"""Collects counts of different node/dependency types in the given t-tree.
@param sent: the tree/sentence to collect counts from
@param eval_type: if set to EvalTypes.NODE (default), count nodes (formemes, lemmas, dependency \
direction), if set to EvalTypes.DEP, count dependencies (including parent's formeme, lemma, \
dependency direction), if set to EvalTypes.TOKEN, count just word forms (in list of tokens).
@rtype: defaultdict
"""
counts = defaultdict(int)
nodes = sent if isinstance(sent, list) else sent.get_descendants()
for node in nodes:
if eval_type == EvalTypes.TOKEN:
node_id = node[0] # for tokens, use form only (ignore tag)
elif eval_type == EvalTypes.NODE:
node_id = (node.formeme, node.t_lemma, node > node.parent)
else:
parent = node.parent
node_id = (node.formeme, node.t_lemma, node > node.parent,
parent.formeme, parent.t_lemma, (parent.parent is not None and parent > parent.parent))
counts[node_id] += 1
return counts
def corr_pred_gold(gold, pred, eval_type=EvalTypes.NODE):
"""Given a golden tree/sentence and a predicted tree/sentence, this counts correctly
predicted nodes/tokens (true positives), all predicted nodes/tokens (true + false
positives), and all golden nodes/tokens (true positives + false negatives).
@param gold: a golden t-tree/sentence
@param pred: a predicted t-tree/sentence
@param eval_type: type of matching (see EvalTypes)
@rtype: tuple
@return: numbers of correctly predicted, total predicted, and total golden nodes/tokens
"""
gold_counts = collect_counts(gold, eval_type)
pred_counts = collect_counts(pred, eval_type)
ccount, pcount = 0, 0
for node_id, node_count in pred_counts.items():
pcount += node_count
ccount += min(node_count, gold_counts[node_id])
gcount = sum(node_count for node_count in gold_counts.values())
return ccount, pcount, gcount
def precision(gold, pred, eval_type=EvalTypes.NODE):
ccount, pcount, _ = corr_pred_gold(gold, pred, eval_type)
return ccount / float(pcount)
def recall(gold, pred, eval_type=EvalTypes.NODE):
# # correct / # gold
ccount, _, gcount = corr_pred_gold(gold, pred, eval_type)
return ccount / float(gcount)
def f1(gold, pred, eval_type=EvalTypes.NODE):
return f1_from_counts(corr_pred_gold(gold, pred, eval_type))
def f1_from_counts(correct, predicted, gold):
return p_r_f1_from_counts(correct, predicted, gold)[2]
def p_r_f1_from_counts(correct, predicted, gold):
"""Return precision, recall, and F1 given counts of true positives (correct),
total predicted nodes, and total gold nodes.
@param correct: true positives (correctly predicted nodes/tokens)
@param predicted: true + false positives (all predicted nodes/tokens)
@param gold: true positives + false negatives (all golden nodes/tokens)
@rtype: tuple
@return: precision, recall, F1
"""
if correct == 0.0: # escape division by zero
return 0.0, 0.0, 0.0
precision = correct / float(predicted)
recall = correct / float(gold)
return precision, recall, old_div((2 * precision * recall), (precision + recall))
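# Worked example: correct=3, predicted=4, gold=6 gives precision 3/4 = 0.75,
# recall 3/6 = 0.5, and F1 = 2 * 0.75 * 0.5 / (0.75 + 0.5) = 0.6.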
def to_treedata(t):
if isinstance(t, TreeNode):
return t.tree
elif isinstance(t, T):
return TreeData.from_ttree(t)
def common_subtree_size(a, b):
a = to_treedata(a)
b = to_treedata(b)
return a.common_subtree_size(b)
def max_common_subphrase_length(a, b):
"""Return the length of the longest common subphrase of a and b; where a and b are
lists of tokens (form+tag)."""
longest = 0
for sp_a in range(len(a)):
for sp_b in range(len(b)):
pos_a = sp_a
pos_b = sp_b
# disregard tags for comparison
while pos_a < len(a) and pos_b < len(b) and a[pos_a][0] == b[pos_b][0]:
pos_a += 1
pos_b += 1
if pos_a - sp_a > longest:
longest = pos_a - sp_a
return longest
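# Example: for a = [('the', 'DT'), ('red', 'JJ'), ('cat', 'NN')] and
# b = [('a', 'DT'), ('red', 'JJ'), ('cat', 'NN')], the longest common
# subphrase is 'red cat' (tags are ignored), so the result is 2.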
class Stats(object):
"""A set of important statistic values, with simple access and printing."""
def __init__(self, data):
self.mean = np.mean(data)
self.median = np.median(data)
self.min = min(data)
self.max = max(data)
self.perc25 = np.percentile(data, 25)
self.perc75 = np.percentile(data, 75)
def __str__(self):
return "\t".join("%s: %9.3f" % (key.capitalize(), getattr(self, key))
for key in ['mean', 'median', 'min', 'max', 'perc25', 'perc75'])
class Evaluator(object):
"""A fancy object-oriented interface to computing node F-scores.
Accumulates scores over trees/sentences using append(), then can return
a total score using f1(), precision(), recall(), and p_r_f1()."""
def __init__(self):
self.reset()
def reset(self):
"""Zero out all current statistics, start from scratch."""
self.correct = {eval_type: 0 for eval_type in EvalTypes}
self.predicted = {eval_type: 0 for eval_type in EvalTypes}
self.gold = {eval_type: 0 for eval_type in EvalTypes}
self.sizes = []
self.scores = []
def process_eval_doc(self, eval_doc, gen_trees, language, ref_selector, target_selector):
"""Evaluate generated trees against a reference document; save per-tree statistics
in the reference document and print out global statistics.
Does not reset statistics at the beginning (must be reset manually if needed).
@param eval_doc: reference t-tree document
@param gen_trees: a list of generated TreeData objects
@param language: language for the reference document
@param ref_selector: selector for reference trees in the reference document
@param target_selector: selector for generated trees (used to save statistics)
"""
log_info('Evaluating...')
        for eval_bundle, gen_tree in zip(eval_doc.bundles, gen_trees):
# add some stats about the tree directly into the output file
eval_ttree = eval_bundle.get_zone(language, ref_selector).ttree
gen_ttree = TreeNode(gen_tree)
add_bundle_text(eval_bundle, language, target_selector + 'Xscore',
"P: %.4f R: %.4f F1: %.4f" %
p_r_f1_from_counts(*corr_pred_gold(eval_ttree, gen_ttree)))
# collect overall stats
# TODO maybe add cost somehow?
self.append(eval_ttree, gen_ttree)
# print out the overall stats
log_info("NODE precision: %.4f, Recall: %.4f, F1: %.4f" % self.p_r_f1())
log_info("DEP precision: %.4f, Recall: %.4f, F1: %.4f" % self.p_r_f1(EvalTypes.DEP))
log_info("Tree size stats:\n * GOLD %s\n * PRED %s\n * DIFF %s" % self.size_stats())
log_info("Score stats:\n * GOLD %s\n * PRED %s\n * DIFF %s" % self.score_stats())
log_info("Common subtree stats:\n -- SIZE: %s\n -- ΔGLD: %s\n -- ΔPRD: %s" %
self.common_substruct_stats())
def append(self, gold, pred, gold_score=0.0, pred_score=0.0):
"""Add a pair of golden and predicted tree/sentence to the current statistics.
@param gold: a T or TreeNode object representing the golden tree, or list of golden tokens
@param pred: a T or TreeNode object representing the predicted tree, or list of predicted \
tokens
"""
if isinstance(gold, list): # tokens
eval_types = [EvalTypes.TOKEN]
gold_len = len(gold)
pred_len = len(pred)
css = max_common_subphrase_length(gold, pred)
else: # trees
eval_types = [EvalTypes.NODE, EvalTypes.DEP]
gold_len = len(gold.get_descendants())
pred_len = len(pred.get_descendants())
css = common_subtree_size(gold, pred)
self.sizes.append((gold_len, pred_len, css))
for eval_type in eval_types:
ccount, pcount, gcount = corr_pred_gold(gold, pred, eval_type)
self.correct[eval_type] += ccount
self.predicted[eval_type] += pcount
self.gold[eval_type] += gcount
self.scores.append((gold_score, pred_score))
def merge(self, other):
"""Merge in statistics from another Evaluator object."""
for eval_type in EvalTypes:
self.correct[eval_type] += other.correct[eval_type]
self.predicted[eval_type] += other.predicted[eval_type]
self.gold[eval_type] += other.gold[eval_type]
self.sizes.extend(other.sizes)
self.scores.extend(other.scores)
def f1(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[2]
def precision(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[0]
def recall(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[1]
def p_r_f1(self, eval_type=EvalTypes.NODE):
return p_r_f1_from_counts(self.correct[eval_type],
self.predicted[eval_type],
self.gold[eval_type])
def size_stats(self):
"""Return current tree/sentence size statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for golden trees/sentences, predicted trees/sentences, and differences
"""
return (Stats([inst[0] for inst in self.sizes]),
Stats([inst[1] for inst in self.sizes]),
Stats([inst[0] - inst[1] for inst in self.sizes]))
def common_substruct_stats(self):
"""Return common subtree/subphrase size statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for common subtree/subphrase size + sizes of what's missing to full \
gold/predicted tree/sentence
"""
return (Stats([inst[2] for inst in self.sizes]),
Stats([inst[0] - inst[2] for inst in self.sizes]),
Stats([inst[1] - inst[2] for inst in self.sizes]))
def score_stats(self):
"""Return tree/sentence score statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for golden trees/sentences, predicted trees/sentences, and differences
"""
return (Stats([inst[0] for inst in self.scores]),
Stats([inst[1] for inst in self.scores]),
Stats([inst[0] - inst[1] for inst in self.scores]))
def tree_accuracy(self):
"""Return tree-level accuracy (percentage of gold trees scored higher or equal to
the best predicted tree."""
return (sum(1 for gold_score, pred_score in self.scores if gold_score >= pred_score) /
float(len(self.scores)))
class ASearchListsAnalyzer(object):
"""Analysis of the final open and close lists of the A*search generator."""
def __init__(self):
self.reset()
def reset(self):
"""Zero all statistics."""
self.total = 0
self.gold_best = 0
self.gold_on_close = 0
self.gold_on_open = 0
def append(self, gold_tree, open_list, close_list):
"""Analyze the open and close lists of a generator for the presence of the gold-standard
tree and add the results to statistics."""
self.total += 1
best_tree = close_list.peek()[0]
if gold_tree == best_tree:
self.gold_best += 1
log_debug('GOLD TREE IS BEST')
if gold_tree in close_list:
self.gold_on_close += 1
log_debug('GOLD TREE IS ON CLOSE LIST')
if gold_tree in open_list:
self.gold_on_open += 1
log_debug('GOLD TREE IS ON OPEN LIST')
def merge(self, other):
"""Merge in another ASearchListsAnalyzer object."""
self.total += other.total
self.gold_best += other.gold_best
self.gold_on_close += other.gold_on_close
self.gold_on_open += other.gold_on_open
def stats(self):
"""Return statistics (as percentages): gold tree was best, gold tree was on
close list, gold tree was on open list.
@rtype: tuple
"""
if self.total == 0:
return (0.0, 0.0, 0.0)
tot = float(self.total)
return (old_div(self.gold_best, tot),
old_div(self.gold_on_close, tot),
old_div((self.gold_on_close + self.gold_on_open), tot))
class SlotErrAnalyzer(object):
"""Analyze slot error (as in Wen 2015 EMNLP paper), accumulator object."""
def __init__(self, delex_slots=set()):
self.reset()
self.delex_slots = delex_slots
def reset(self):
"""Zero all statistics."""
self.missing = 0
self.superfluous = 0
self.total = 0
def append(self, da, sent):
"""Include statistics from the given sentence (assuming tokens, not trees)."""
if sent and isinstance(sent[0], tuple):
sent = [form for form, pos in sent] # ignore POS
if isinstance(da, tuple):
da = da[1] # ignore contexts
if self.delex_slots:
da = da.get_delexicalized(self.delex_slots)
slots_in_da = set([dai.value for dai in da if dai.value and dai.value.startswith('X-')])
slots_in_sent = set([tok for tok in sent if tok.startswith('X-')])
self.total += len(slots_in_da)
self.missing += len(slots_in_da - slots_in_sent)
self.superfluous += len(slots_in_sent - slots_in_da)
def slot_error(self):
"""Return the currently accumulated slot error."""
if self.total == 0: # avoid zero division error
return 0
return (self.missing + self.superfluous) / float(self.total)
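    # Worked example: a DA with delexicalized slots {X-name, X-area} realized
    # as a sentence containing X-name plus a spurious X-food gives
    # (1 missing + 1 superfluous) / 2 slots = slot error 1.0.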
| [
"tgen.logf.log_debug",
"numpy.mean",
"numpy.median",
"tgen.logf.log_warn",
"tgen.tree.TreeData.from_ttree",
"tgen.logf.log_info",
"past.utils.old_div",
"builtins.zip",
"collections.defaultdict",
"enum.Enum",
"numpy.percentile",
"tgen.tree.TreeNode"
] | [((640, 675), 'enum.Enum', 'Enum', (['"""EvalTypes"""', '"""TOKEN NODE DEP"""'], {}), "('EvalTypes', 'TOKEN NODE DEP')\n", (644, 675), False, 'from enum import Enum\n'), ((1305, 1321), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1316, 1321), False, 'from collections import defaultdict\n'), ((547, 625), 'tgen.logf.log_warn', 'log_warn', (['"""Pytreex modules not available, will not be able to evaluate trees."""'], {}), "('Pytreex modules not available, will not be able to evaluate trees.')\n", (555, 625), False, 'from tgen.logf import log_debug, log_warn, log_info\n'), ((4055, 4106), 'past.utils.old_div', 'old_div', (['(2 * precision * recall)', '(precision + recall)'], {}), '(2 * precision * recall, precision + recall)\n', (4062, 4106), False, 'from past.utils import old_div\n'), ((5095, 5108), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (5102, 5108), True, 'import numpy as np\n'), ((5131, 5146), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (5140, 5146), True, 'import numpy as np\n'), ((5227, 5250), 'numpy.percentile', 'np.percentile', (['data', '(25)'], {}), '(data, 25)\n', (5240, 5250), True, 'import numpy as np\n'), ((5273, 5296), 'numpy.percentile', 'np.percentile', (['data', '(75)'], {}), '(data, 75)\n', (5286, 5296), True, 'import numpy as np\n'), ((6818, 6843), 'tgen.logf.log_info', 'log_info', (['"""Evaluating..."""'], {}), "('Evaluating...')\n", (6826, 6843), False, 'from tgen.logf import log_debug, log_warn, log_info\n'), ((6882, 6914), 'builtins.zip', 'zip', (['eval_doc.bundles', 'gen_trees'], {}), '(eval_doc.bundles, gen_trees)\n', (6885, 6914), False, 'from builtins import zip\n'), ((4229, 4251), 'tgen.tree.TreeData.from_ttree', 'TreeData.from_ttree', (['t'], {}), '(t)\n', (4248, 4251), False, 'from tgen.tree import TreeData, TreeNode\n'), ((7090, 7108), 'tgen.tree.TreeNode', 'TreeNode', (['gen_tree'], {}), '(gen_tree)\n', (7098, 7108), False, 'from tgen.tree import TreeData, TreeNode\n'), ((12360, 12390), 'tgen.logf.log_debug', 'log_debug', (['"""GOLD TREE IS BEST"""'], {}), "('GOLD TREE IS BEST')\n", (12369, 12390), False, 'from tgen.logf import log_debug, log_warn, log_info\n'), ((12475, 12514), 'tgen.logf.log_debug', 'log_debug', (['"""GOLD TREE IS ON CLOSE LIST"""'], {}), "('GOLD TREE IS ON CLOSE LIST')\n", (12484, 12514), False, 'from tgen.logf import log_debug, log_warn, log_info\n'), ((12597, 12635), 'tgen.logf.log_debug', 'log_debug', (['"""GOLD TREE IS ON OPEN LIST"""'], {}), "('GOLD TREE IS ON OPEN LIST')\n", (12606, 12635), False, 'from tgen.logf import log_debug, log_warn, log_info\n'), ((13198, 13226), 'past.utils.old_div', 'old_div', (['self.gold_best', 'tot'], {}), '(self.gold_best, tot)\n', (13205, 13226), False, 'from past.utils import old_div\n'), ((13244, 13276), 'past.utils.old_div', 'old_div', (['self.gold_on_close', 'tot'], {}), '(self.gold_on_close, tot)\n', (13251, 13276), False, 'from past.utils import old_div\n'), ((13294, 13346), 'past.utils.old_div', 'old_div', (['(self.gold_on_close + self.gold_on_open)', 'tot'], {}), '(self.gold_on_close + self.gold_on_open, tot)\n', (13301, 13346), False, 'from past.utils import old_div\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train normalising flow.
Created: June 2021
Author: <NAME>
"""
import numpy as np
import sys
sys.path.append("../../src")
from constants import kpc
from ml import train_flow
from utils import get_rescaled_tensor
if __name__ == '__main__':
# load data
data = get_rescaled_tensor(
dfile="../../data/perturbed_t5/dset.npz",
u_pos=kpc, u_vel=100000,
cen=np.array([8 * kpc, 0, 0, 220000, 0]),
R_cut=1 * kpc, z_cut=2.5 * kpc
)
print(f"Found {data.shape[0]} stars", flush=True)
# parse arguments
assert len(sys.argv) == 2
seed = int(sys.argv[1])
# train flow
train_flow(data, seed, n_layers=8, n_hidden=64)
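    # Invocation sketch (file name is illustrative):
    #   python train_flow.py 42
    # where 42 is the integer RNG seed passed as the single CLI argument.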
| [
"numpy.array",
"ml.train_flow",
"sys.path.append"
] | [((145, 173), 'sys.path.append', 'sys.path.append', (['"""../../src"""'], {}), "('../../src')\n", (160, 173), False, 'import sys\n'), ((677, 724), 'ml.train_flow', 'train_flow', (['data', 'seed'], {'n_layers': '(8)', 'n_hidden': '(64)'}), '(data, seed, n_layers=8, n_hidden=64)\n', (687, 724), False, 'from ml import train_flow\n'), ((437, 473), 'numpy.array', 'np.array', (['[8 * kpc, 0, 0, 220000, 0]'], {}), '([8 * kpc, 0, 0, 220000, 0])\n', (445, 473), True, 'import numpy as np\n')] |
'''
Created on Oct 23, 2017
@author: ronaldmaceachern
Methods to clean a dirty .csv using a set of rules.
'''
import re
import os
import numpy as np
import pandas as pd
from difflib import SequenceMatcher
def makeRowDf( x, desc_name = '', cols = None):
    '''A wrapper function to make it neater to build a one-row table for a column summary.
'''
ncol = len(cols)
return pd.DataFrame( np.array([desc_name] + x).reshape(1,ncol), columns = cols)
def checkIfColLikeRow(df, n = 5):
cols = list(df.columns)
#increment down a few rows
out = []
for i in np.arange(n):
#extract a row and convert to str
rw = np.array([ str(x) for x in df.iloc[i,:].values])
#see how similar to columns
sim = np.array([ SequenceMatcher(None, rw[i], cols[i]).ratio() for i in np.arange(df.shape[1])])
out += [sim.reshape(1, len(sim))]
#stack the results
out = np.concatenate(out, axis = 0)
#average over the rows
out = np.mean(out, axis = 0)
return out
def checkIfColInFirstRow(df):
'''
    Check whether column names appear in the first row.
'''
res = None
## column fix - could be a class
##rule1: check if first row belongs as column name
r1 = df.iloc[0,:]
#drop the NaN's
r1 = r1.dropna()
#check if non NaNs are strings
#are they all?
allStr = np.array([isinstance(x,str) for x in r1]).all()
    # if they're all str, combine them with the column names
if allStr:
print('column names found in first row, putting into columns')
cols = list(r1.index)
oldCol = np.array(df.columns)
newcol = df.columns[df.columns.isin(cols)] +'_' +r1.values
oldCol[df.columns.isin(cols)] = newcol
res = oldCol
#drop row with bad values
#df = df.drop(0)
return res
def checkForExpression(df, expression = '', return_bool = False):
    '''Check each element to see if it matches a regular expression.
    If return_bool is True, return an array of bools (same size as df);
    otherwise return, per column, the fraction of elements that matched.
if an element is null (according to pd.isnull()) False is given
Example:
--------
expression for floats: "^\d+?\.\d+?$"
expression for less than "<"
'''
#store results in an array
out = []
#increment over the columns
for i in np.arange(df.shape[1]):
#extract column values
x = df.iloc[:,i].values
# if it's not null and expression is matched
y = np.array([ False if pd.isnull(element) else not re.match(expression,element) is None for element in x ])
#if return bool, return an array of bool
if return_bool:
out += [y.reshape(len(y), 1)]
else:
out += [y.mean()]
if return_bool:
out = np.concatenate(out, axis = 1)
else:
out = np.array(out)
return out
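# Example (toy frame): for a single column with values ['1.5', '<2', None],
# checkForExpression(df, expression="^\d+?\.\d+?$") returns array([0.333...])
# since only '1.5' matches the float pattern and nulls count as non-matches.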
def checkNanPerRow(df):
out = []
    # increment over each row, count the number of nulls
for i in np.arange(df.shape[0]):
out += [np.array([ pd.isnull(x) for x in df.iloc[i,:].values]).sum()]
return np.array(out)
def checkNanPerCol(df):
out = []
#increment over each row, count the number not null
for i in np.arange(df.shape[1]):
out += [np.array([ pd.isnull(x) for x in df.iloc[:,i].values]).sum()]
return np.array(out)
if __name__ == "__main__":
#TODO: need to number rules, provide a short description
#read in system config
from config import *
#directory to raw tables
table_dir = base_dir + '/tables'
#get the tables
tabs = os.listdir(table_dir)
    #TODO: want to group tables by their name
    #get the names that come before the (last) underscore
table_groups = np.unique(np.array([ re.sub('_.*', '', f) for f in tabs]))
#for each table_group get the number (assumption of relation between subsequent tables)
tab_nums = {}
for tg in table_groups:
tab_nums[tg] = np.sort(np.array([ int(re.sub('^.*_|.csv', '', t)) for t in tabs if bool(re.search(tg, t))]))
result = []
#start by reading in a table
for t in tab_nums.keys():
#increment over the pages
for p in tab_nums[t]:
#TODO: may want to keep track of the rules that were implemented
print('working on: %s_%d.csv'%(t,p))
#read in data
df = pd.read_csv(table_dir + '/' + t + '_' + str(p) + '.csv')
##########
### Description on original table - by column
##########
#Goals:
#1) have attributes per column (which exactly?)
#2) have attributes of entire table (which exactly?)
#get the original columns
orgCol = df.columns
#get the dtypes (data types) of the data.frame
dt = df.dtypes
#check the number of NaNs per column
numNan = [ 'NA' if dt[i] == 'object' else np.isnan(df.iloc[:,i].values).mean() for i in np.arange(len(dt))]
#create an empty data.frame (not really used - drop?)
descDf = pd.DataFrame(columns = ['DESC'] + list(orgCol))
#how similar are the first n rows to the column names
simRowtoCol = checkIfColLikeRow(df, n = 5)
#put column descriptions into a single row table and combine
nn = makeRowDf( numNan, desc_name = 'percent_nans', cols = list(descDf.columns))
dtyp = makeRowDf( list(dt), desc_name = 'data_type', cols = list(descDf.columns))
simRow = makeRowDf( list(simRowtoCol), desc_name = 'data_similar_to_colname', cols = list(descDf.columns))
#check if values can be turned into floats (returns a percent)
            perToFloat = checkForExpression(df, expression = r"^\d+?\.\d+?$", return_bool = False)
perToFloat= makeRowDf( list(perToFloat), desc_name = 'turn_to_float', cols = list(descDf.columns))
#has '<'
hasLessThan = checkForExpression(df, expression = "<", return_bool = False)
hasLessThan = makeRowDf( list(hasLessThan), desc_name = 'has_less_than', cols = list(descDf.columns))
#combine descriptions
desc = pd.concat([nn, dtyp, simRow, hasLessThan, perToFloat])
#check column names - are any Unnamed? - then those should be reported to deal with later
#######
## fixing column names
#######
## Known issues:
#1) there may not be any column names
#2) columns may also be partly in first row
#3) columns may be entirely in first and second row
# if column is most float and the top one or two rows are str then combine them (to make column name)
            #4) check if numbers are in column names and there are repeating elements
# i.e F30.1, F30.2, F30.3 - or unnamed:0, unnamed:1, etc
#5) combining two columns: if one column is mostly NaNs and the other one to either side
# has values mostly for where the other has NaNs, then merge the two
#6) if row is mostly NaN (ie. only has one value - drop?)
## first row goes with column
newCol = checkIfColInFirstRow(df)
#check the number NaNs per row
nanPerRow = checkNanPerRow(df)
#TODO: define a threshold here - how many is too many (all but one or two?)
tooManyNans = (nanPerRow >= (df.shape[1] - 1))
if tooManyNans.any():
#TODO: added to findings to a rule implemented table
df = df.iloc[~tooManyNans, :]
#check Nans per column
nanPerCol = checkNanPerCol(df)
tooManyNansC = (nanPerCol >= (df.shape[0] - 3))
#TODO: instead of dropping column may want to merge them
if tooManyNansC.any():
#TODO: add findings to a rule implemented table
df = df.iloc[:, ~tooManyNansC]
##########
### check contents of table
##########
## check how many elements can be converted to floats are integers in each column
##less than symbols
#check columns, are there <1 values? (or similar)
#-if so replace
print('trying to get numbers')
lth = checkForExpression(df, expression = "<", return_bool = True)
if lth.any():
ltNum = df.values[lth]
#remove the less than, turn to float and divide by 2
df.values[lth] = np.array([ float(re.sub('<', '', x)) / 2 for x in ltNum])
#check if given table is similar to the previous
#-want to have criteria, how many columns with the same name
#-can missing names be implied?
##########
### Summary of what's been changed in the column
##########
########
            #store results in a dict for this table page
res = {'table_name': t + '_' + str(p),
'column_desc': desc,
'cleaned_table': df}
result += [res]
| [
"numpy.mean",
"pandas.isnull",
"os.listdir",
"difflib.SequenceMatcher",
"re.match",
"numpy.array",
"numpy.isnan",
"numpy.concatenate",
"re.sub",
"pandas.concat",
"numpy.arange",
"re.search"
] | [((596, 608), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (605, 608), True, 'import numpy as np\n'), ((945, 972), 'numpy.concatenate', 'np.concatenate', (['out'], {'axis': '(0)'}), '(out, axis=0)\n', (959, 972), True, 'import numpy as np\n'), ((1013, 1033), 'numpy.mean', 'np.mean', (['out'], {'axis': '(0)'}), '(out, axis=0)\n', (1020, 1033), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.arange', 'np.arange', (['df.shape[1]'], {}), '(df.shape[1])\n', (2438, 2451), True, 'import numpy as np\n'), ((3090, 3112), 'numpy.arange', 'np.arange', (['df.shape[0]'], {}), '(df.shape[0])\n', (3099, 3112), True, 'import numpy as np\n'), ((3203, 3216), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3211, 3216), True, 'import numpy as np\n'), ((3324, 3346), 'numpy.arange', 'np.arange', (['df.shape[1]'], {}), '(df.shape[1])\n', (3333, 3346), True, 'import numpy as np\n'), ((3437, 3450), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3445, 3450), True, 'import numpy as np\n'), ((3709, 3730), 'os.listdir', 'os.listdir', (['table_dir'], {}), '(table_dir)\n', (3719, 3730), False, 'import os\n'), ((1637, 1657), 'numpy.array', 'np.array', (['df.columns'], {}), '(df.columns)\n', (1645, 1657), True, 'import numpy as np\n'), ((2891, 2918), 'numpy.concatenate', 'np.concatenate', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (2905, 2918), True, 'import numpy as np\n'), ((2945, 2958), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2953, 2958), True, 'import numpy as np\n'), ((6495, 6549), 'pandas.concat', 'pd.concat', (['[nn, dtyp, simRow, hasLessThan, perToFloat]'], {}), '([nn, dtyp, simRow, hasLessThan, perToFloat])\n', (6504, 6549), True, 'import pandas as pd\n'), ((407, 432), 'numpy.array', 'np.array', (['([desc_name] + x)'], {}), '([desc_name] + x)\n', (415, 432), True, 'import numpy as np\n'), ((3884, 3904), 're.sub', 're.sub', (['"""_.*"""', '""""""', 'f'], {}), "('_.*', '', f)\n", (3890, 3904), False, 'import re\n'), ((839, 861), 'numpy.arange', 'np.arange', (['df.shape[1]'], {}), '(df.shape[1])\n', (848, 861), True, 'import numpy as np\n'), ((2604, 2622), 'pandas.isnull', 'pd.isnull', (['element'], {}), '(element)\n', (2613, 2622), True, 'import pandas as pd\n'), ((784, 821), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'rw[i]', 'cols[i]'], {}), '(None, rw[i], cols[i])\n', (799, 821), False, 'from difflib import SequenceMatcher\n'), ((4111, 4137), 're.sub', 're.sub', (['"""^.*_|.csv"""', '""""""', 't'], {}), "('^.*_|.csv', '', t)\n", (4117, 4137), False, 'import re\n'), ((2632, 2661), 're.match', 're.match', (['expression', 'element'], {}), '(expression, element)\n', (2640, 2661), False, 'import re\n'), ((3141, 3153), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (3150, 3153), True, 'import pandas as pd\n'), ((3375, 3387), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (3384, 3387), True, 'import pandas as pd\n'), ((4161, 4177), 're.search', 're.search', (['tg', 't'], {}), '(tg, t)\n', (4170, 4177), False, 'import re\n'), ((5141, 5171), 'numpy.isnan', 'np.isnan', (['df.iloc[:, i].values'], {}), '(df.iloc[:, i].values)\n', (5149, 5171), True, 'import numpy as np\n'), ((9132, 9150), 're.sub', 're.sub', (['"""<"""', '""""""', 'x'], {}), "('<', '', x)\n", (9138, 9150), False, 'import re\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate timeseries in 2 clusters: NSR and SVT from mit-bih data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import app
from absl import flags
import numpy as np
import wfdb
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input_dir', None,
'Local input directory containing the mit-bih file that can be copied from '
'/namespace/health-research/unencrypted/reference/user/milah/mit_bih/.')
flags.DEFINE_string('outfile_dir', None,
'Output filepath.')
def main(argv):
del argv
all_ = [100, 101, 102, 103, 104, 105, 106, 107, 108, 111, 112, 113, 114, 115,
116, 117, 118, 119, 121, 122, 123, 124, 200, 201, 202, 203, 205, 207,
208, 209, 210, 212, 213, 214, 215, 217, 219, 220, 221, 222, 223, 228,
230, 231, 232, 233, 234]
target_rhythms = ['AB', 'AFIB', 'AFL', 'B', 'BII', 'IVR', 'N', 'NOD', 'P',
'PREX', 'SBR', 'SVTA', 'T', 'VFL', 'VT']
rhythms = dict()
for a in all_:
ann_ref = wfdb.rdann(FLAGS.input_dir + str(a), 'atr')
for k, label in enumerate(ann_ref.aux_note):
label = str(label).strip('\x00').strip('(')
if label in target_rhythms:
sampfrom = max(0, ann_ref.sample[k] - 140)
sampto = ann_ref.sample[k] + 361
sig, _ = wfdb.rdsamp(FLAGS.input_dir + str(a), channels=[0, 1],
sampfrom=sampfrom, sampto=sampto)
for channel in [0, 1]:
key = str(a) + ':' + str(k) + ':' + str(channel) + ':' + str(
ann_ref.sample[k])
x = np.array(sig)
x = x[:, channel]
record = ','.join([key, str(channel), str(label)] + [
str(i) for i in x])
if label not in rhythms:
rhythms[label] = []
rhythms[label].append(record)
all_rhythms = sorted(rhythms.keys())
print(all_rhythms)
random.seed(1984)
  with open(FLAGS.outfile_dir + 'all.csv', 'w') as f_all:
    for label in all_rhythms:
      records = rhythms[label]
      idxs = list(range(len(records) // 2))
      random.shuffle(idxs)
      outfile = FLAGS.outfile_dir + label + '.csv'
      with open(outfile, 'w') as f:
        for i in idxs:
          f.write(records[2*i] + '\n')
          f.write(records[2*i+1] + '\n')
          f_all.write(records[2*i] + '\n')
          f_all.write(records[2*i+1] + '\n')
if __name__ == '__main__':
app.run(main)
| [
"random.shuffle",
"absl.app.run",
"random.seed",
"numpy.array",
"absl.flags.DEFINE_string"
] | [((904, 1098), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input_dir"""', 'None', '"""Local input directory containing the mit-bih file that can be copied from /namespace/health-research/unencrypted/reference/user/milah/mit_bih/."""'], {}), "('input_dir', None,\n 'Local input directory containing the mit-bih file that can be copied from /namespace/health-research/unencrypted/reference/user/milah/mit_bih/.'\n )\n", (923, 1098), False, 'from absl import flags\n'), ((1106, 1166), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""outfile_dir"""', 'None', '"""Output filepath."""'], {}), "('outfile_dir', None, 'Output filepath.')\n", (1125, 1166), False, 'from absl import flags\n'), ((2543, 2560), 'random.seed', 'random.seed', (['(1984)'], {}), '(1984)\n', (2554, 2560), False, 'import random\n'), ((3043, 3056), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3050, 3056), False, 'from absl import app\n'), ((2717, 2737), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (2731, 2737), False, 'import random\n'), ((2233, 2246), 'numpy.array', 'np.array', (['sig'], {}), '(sig)\n', (2241, 2246), True, 'import numpy as np\n')] |
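A hedged sketch of reading one of the generated per-rhythm CSVs back; the path is an example, and the column layout follows the `record` join above: key, channel, label, then the signal samples.

# Sketch: parse one generated rhythm file (path is an example).
import csv

with open('/tmp/mit_bih_out/N.csv') as f:
    for key, channel, label, *samples in csv.reader(f):
        sig = [float(s) for s in samples]
        print(key, channel, label, len(sig))  # key is 'record:beat:channel:sample'
        break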
#! /usr/bin/env python
"""
Module with pixel and frame subsampling functions.
"""
__author__ = '<NAME>, <NAME>'
__all__ = ['cube_collapse',
'cube_subsample',
'cube_subsample_trimmean']
import numpy as np
def cube_collapse(cube, mode='median', n=50, w=None):
""" Collapses a cube into a frame (3D array -> 2D array) depending on the
parameter ``mode``. It's possible to perform a trimmed mean combination of
the frames based on description in Brandt+ 2012.
Parameters
----------
cube : numpy ndarray
Cube.
mode : {'median', 'mean', 'sum', 'trimmean', 'max', 'wmean'}, str optional
Sets the way of collapsing the images in the cube.
'wmean' stands for weighted mean and requires weights w to be provided.
n : int, optional
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
w: 1d numpy array or list, optional
Weights to be applied for a weighted mean. Need to be provided if
collapse mode is 'wmean'.
Returns
-------
frame : numpy ndarray
Output array, cube combined.
"""
arr = cube
if arr.ndim != 3:
raise TypeError('The input array is not a cube or 3d array.')
if mode == 'wmean':
if w is None:
raise ValueError("Weights have to be provided for weighted mean mode")
if len(w) != cube.shape[0]:
raise TypeError("Weights need same length as cube")
if isinstance(w,list):
w = np.array(w)
if mode == 'mean':
frame = np.nanmean(arr, axis=0)
elif mode == 'median':
frame = np.nanmedian(arr, axis=0)
elif mode == 'sum':
frame = np.nansum(arr, axis=0)
elif mode == 'max':
frame = np.nanmax(arr, axis=0)
elif mode == 'trimmean':
N = arr.shape[0]
k = (N - n)//2
if N%2 != n%2:
n+=1
frame = np.empty_like(arr[0])
for index, _ in np.ndenumerate(arr[0]):
sort = np.sort(arr[:, index[0], index[1]])
frame[index] = np.nanmean(sort[k:k+n])
elif mode == 'wmean':
arr[np.where(np.isnan(arr))]=0 # to avoid product with nan
frame = np.inner(w, np.moveaxis(arr,0,-1))
return frame
def cube_subsample(array, n, mode="mean", parallactic=None, verbose=True):
"""Mean/Median combines frames in 3d or 4d cube with window ``n``.
Parameters
----------
array : numpy ndarray
Input 3d array, cube.
n : int
Window for mean/median.
mode : {'mean','median'}, optional
Switch for choosing mean or median.
parallactic : numpy ndarray, optional
List of corresponding parallactic angles.
verbose : bool optional
Returns
-------
arr_view : numpy ndarray
Resulting array.
    If ``parallactic`` is provided, the new cube and angles are returned.
"""
if array.ndim not in [3, 4]:
raise TypeError('The input array is not a cube or 3d or 4d array')
if mode == 'median':
func = np.median
elif mode == 'mean':
func = np.mean
else:
        raise ValueError("'mode' should be either 'mean' or 'median'")
if array.ndim == 3:
m = int(array.shape[0] / n)
resid = array.shape[0] % n
y = array.shape[1]
x = array.shape[2]
arr = np.empty([m, y, x])
if parallactic is not None:
angles = np.zeros(m)
for i in range(m):
arr[i, :, :] = func(array[n * i:n * i + n, :, :], axis=0)
if parallactic is not None:
angles[i] = func(parallactic[n * i:n * i + n])
elif array.ndim == 4:
m = int(array.shape[1] / n)
resid = array.shape[1] % n
w = array.shape[0]
y = array.shape[2]
x = array.shape[3]
arr = np.empty([w, m, y, x])
if parallactic is not None:
angles = np.zeros(m)
for j in range(w):
for i in range(m):
arr[j, i, :, :] = func(array[j, n * i:n * i + n, :, :], axis=0)
if parallactic is not None:
angles[i] = func(parallactic[n * i:n * i + n])
if verbose:
msg = "Cube temporally subsampled by taking the {} of every {} frames"
print(msg.format(mode, n))
if resid > 0:
print("Initial # of frames and window are not multiples ({} "
"frames were dropped)".format(resid))
print("New shape: {}".format(arr.shape))
if parallactic is not None:
return arr, angles
else:
return arr
def cube_subsample_trimmean(arr, n, m):
"""Performs a trimmed mean combination every m frames in a cube. Based on
description in Brandt+ 2012.
Parameters
----------
arr : numpy ndarray
Cube.
n : int
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
m : int
Window from the trimmed mean.
Returns
-------
arr_view : numpy ndarray
Output array, cube combined.
"""
if arr.ndim != 3:
raise TypeError('The input array is not a cube or 3d array')
num = int(arr.shape[0]/m)
res = int(arr.shape[0]%m)
y = arr.shape[1]
x = arr.shape[2]
    arr2 = np.empty([num+1, y, x])
    for i in range(num):
        arr2[i] = cube_collapse(arr[m*i:m*i+m, :, :], 'trimmean', n)
    if res > 0:
        # collapse the leftover frames that do not fill a whole window
        arr2[num] = cube_collapse(arr[-res:, :, :], 'trimmean', n)
        arr_view = arr2[:num+1]
    else:
        arr_view = arr2[:num]  # no residual frames: drop the unused slot
msg = "Cube temporally subsampled by taking the trimmed mean of every {} "
msg += "frames"
print(msg.format(m))
return arr_view | [
"numpy.nanmedian",
"numpy.sort",
"numpy.ndenumerate",
"numpy.nanmean",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.nanmax",
"numpy.empty_like",
"numpy.isnan",
"numpy.moveaxis",
"numpy.nansum"
] | [((5651, 5676), 'numpy.empty', 'np.empty', (['[num + 2, y, x]'], {}), '([num + 2, y, x])\n', (5659, 5676), True, 'import numpy as np\n'), ((1652, 1675), 'numpy.nanmean', 'np.nanmean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1662, 1675), True, 'import numpy as np\n'), ((3560, 3579), 'numpy.empty', 'np.empty', (['[m, y, x]'], {}), '([m, y, x])\n', (3568, 3579), True, 'import numpy as np\n'), ((1596, 1607), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (1604, 1607), True, 'import numpy as np\n'), ((1719, 1744), 'numpy.nanmedian', 'np.nanmedian', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1731, 1744), True, 'import numpy as np\n'), ((3637, 3648), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (3645, 3648), True, 'import numpy as np\n'), ((4043, 4065), 'numpy.empty', 'np.empty', (['[w, m, y, x]'], {}), '([w, m, y, x])\n', (4051, 4065), True, 'import numpy as np\n'), ((1785, 1807), 'numpy.nansum', 'np.nansum', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1794, 1807), True, 'import numpy as np\n'), ((4123, 4134), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (4131, 4134), True, 'import numpy as np\n'), ((1848, 1870), 'numpy.nanmax', 'np.nanmax', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1857, 1870), True, 'import numpy as np\n'), ((2062, 2083), 'numpy.empty_like', 'np.empty_like', (['arr[0]'], {}), '(arr[0])\n', (2075, 2083), True, 'import numpy as np\n'), ((2144, 2166), 'numpy.ndenumerate', 'np.ndenumerate', (['arr[0]'], {}), '(arr[0])\n', (2158, 2166), True, 'import numpy as np\n'), ((2187, 2222), 'numpy.sort', 'np.sort', (['arr[:, index[0], index[1]]'], {}), '(arr[:, index[0], index[1]])\n', (2194, 2222), True, 'import numpy as np\n'), ((2250, 2275), 'numpy.nanmean', 'np.nanmean', (['sort[k:k + n]'], {}), '(sort[k:k + n])\n', (2260, 2275), True, 'import numpy as np\n'), ((2395, 2418), 'numpy.moveaxis', 'np.moveaxis', (['arr', '(0)', '(-1)'], {}), '(arr, 0, -1)\n', (2406, 2418), True, 'import numpy as np\n'), ((2321, 2334), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (2329, 2334), True, 'import numpy as np\n')] |
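A small sketch exercising `cube_collapse` and `cube_subsample` on a synthetic cube (module context above assumed); only the output shapes are checked, and 'wmean' needs one weight per frame.

# Sketch: collapse and temporally subsample a random 10-frame cube.
import numpy as np

cube = np.random.rand(10, 4, 4)
print(cube_collapse(cube, mode='median').shape)               # (4, 4)
print(cube_collapse(cube, mode='trimmean', n=4).shape)        # (4, 4)
print(cube_collapse(cube, mode='wmean', w=np.ones(10)).shape)   # (4, 4)
sub, angles = cube_subsample(cube, n=5, parallactic=np.arange(10.))
print(sub.shape, angles.shape)                                # (2, 4, 4) (2,)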
import operator
import numpy as np
import bitpacking.packing as pk
from boolnet.utils import PackedMatrix
FUNCTIONS = {
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul,
'div': operator.floordiv,
'mod': operator.mod,
}
def to_binary(value, num_bits):
# little-endian
return np.flipud(np.array(
[int(i) for i in np.binary_repr(value, num_bits)]))
def two_input_mapping(num_bits_per_operand, functor):
# Upper limit
upper = 2**num_bits_per_operand
# generate dict for function
function = {i1*upper + i2: functor(i1, i2) % upper
for i1 in range(upper)
for i2 in range(upper)}
return function
def binmap_from_function(func, Ni, No):
M = np.zeros((len(func), Ni+No), dtype=np.uint8)
# views into M
I, T = np.split(M, [Ni], axis=1)
for idx, (inp, out) in enumerate(func.items()):
I[idx] = to_binary(inp, Ni)[:Ni]
T[idx] = to_binary(out, No)[:No]
return PackedMatrix(pk.packmat(M), M.shape[0], Ni)
def mapping_to_file(function, numbits, numout_limit, outfile):
if not outfile:
outfile = '{}{}.npz'.format(function, numbits)
n = numbits
func = two_input_mapping(n, FUNCTIONS[function])
Ni = 2*n
No = n
if numout_limit:
No = numout_limit
Mp = binmap_from_function(func, Ni, No)
np.savez(outfile, matrix=Mp, Ni=Ni, Ne=Mp.Ne)
def mapping_from_file(filename):
with np.load(filename) as f:
return PackedMatrix(f['matrix'], f['Ne'], f['Ni'])
| [
"numpy.savez",
"numpy.binary_repr",
"numpy.split",
"bitpacking.packing.packmat",
"numpy.load",
"boolnet.utils.PackedMatrix"
] | [((826, 851), 'numpy.split', 'np.split', (['M', '[Ni]'], {'axis': '(1)'}), '(M, [Ni], axis=1)\n', (834, 851), True, 'import numpy as np\n'), ((1374, 1419), 'numpy.savez', 'np.savez', (['outfile'], {'matrix': 'Mp', 'Ni': 'Ni', 'Ne': 'Mp.Ne'}), '(outfile, matrix=Mp, Ni=Ni, Ne=Mp.Ne)\n', (1382, 1419), True, 'import numpy as np\n'), ((1012, 1025), 'bitpacking.packing.packmat', 'pk.packmat', (['M'], {}), '(M)\n', (1022, 1025), True, 'import bitpacking.packing as pk\n'), ((1464, 1481), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1471, 1481), True, 'import numpy as np\n'), ((1503, 1546), 'boolnet.utils.PackedMatrix', 'PackedMatrix', (["f['matrix']", "f['Ne']", "f['Ni']"], {}), "(f['matrix'], f['Ne'], f['Ni'])\n", (1515, 1546), False, 'from boolnet.utils import PackedMatrix\n'), ((369, 400), 'numpy.binary_repr', 'np.binary_repr', (['value', 'num_bits'], {}), '(value, num_bits)\n', (383, 400), True, 'import numpy as np\n')] |
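A short sketch of the mapping pipeline for a 2-bit modular adder: `two_input_mapping` enumerates all 16 operand pairs and `binmap_from_function` packs the resulting truth table (the `Ne`/`Ni` attributes follow their use in `mapping_to_file` above).

# Sketch: truth table for (i1 + i2) mod 4 over 2-bit operands.
func = two_input_mapping(2, FUNCTIONS['add'])
print(len(func), func[2 * 4 + 3])   # 16 entries; (2 + 3) % 4 == 1
packed = binmap_from_function(func, Ni=4, No=2)
print(packed.Ne, packed.Ni)         # 16 examples over 4 input bits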
import torch
import load_data
import random
import numpy as np
from HSCNN_model import HSCNN_network
from HSCNN_train import train
from HSCNN_classifier import predict,get_compare
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)  # also seed the CPU RNG for full reproducibility
torch.cuda.manual_seed(1234)
torch.backends.cudnn.deterministic = True
print('load data............')
train_pairs_iter, valid_pairs_iter, train_data_iter, test_data_iter = load_data.load_pairs()
embedding_size = 300
keep_probab = 0.5
kernel_size = [3, 4, 5]
vocab_size = 41993
patience = 5
d = 103
sia_model = HSCNN_network(output_size=1, in_channels=1, out_channels=128, kernel_size=kernel_size, stride=1, padding=0,
keep_probab=keep_probab, vocab_size=vocab_size, embedding_size=embedding_size,d = d)
if torch.cuda.is_available():
sia_model.cuda()
siamese_model_dict = sia_model.state_dict()
cnn_model_dict = torch.load('./data/CNN_checkpoint.pt')
state_dict = {k: v for k, v in cnn_model_dict.items() if k in siamese_model_dict.keys()}
siamese_model_dict.update(state_dict)
sia_model.load_state_dict(siamese_model_dict)
print('Start training siamese networks............')
model, train_loss_record, val_loss_record = train(train_pairs_iter, valid_pairs_iter, sia_model, patience)
print('Start get_compare')
train_result = get_compare(train_data_iter, model)
print(len(train_result))
print('Start predict')
predict(test_data_iter, train_data_iter, train_result, model)
| [
"HSCNN_model.HSCNN_network",
"HSCNN_classifier.get_compare",
"torch.load",
"random.seed",
"HSCNN_classifier.predict",
"load_data.load_pairs",
"torch.cuda.is_available",
"numpy.random.seed",
"HSCNN_train.train",
"torch.cuda.manual_seed"
] | [((181, 198), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (192, 198), False, 'import random\n'), ((199, 219), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (213, 219), True, 'import numpy as np\n'), ((220, 248), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1234)'], {}), '(1234)\n', (242, 248), False, 'import torch\n'), ((394, 416), 'load_data.load_pairs', 'load_data.load_pairs', ([], {}), '()\n', (414, 416), False, 'import load_data\n'), ((534, 735), 'HSCNN_model.HSCNN_network', 'HSCNN_network', ([], {'output_size': '(1)', 'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(0)', 'keep_probab': 'keep_probab', 'vocab_size': 'vocab_size', 'embedding_size': 'embedding_size', 'd': 'd'}), '(output_size=1, in_channels=1, out_channels=128, kernel_size=\n kernel_size, stride=1, padding=0, keep_probab=keep_probab, vocab_size=\n vocab_size, embedding_size=embedding_size, d=d)\n', (547, 735), False, 'from HSCNN_model import HSCNN_network\n'), ((751, 776), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (774, 776), False, 'import torch\n'), ((862, 900), 'torch.load', 'torch.load', (['"""./data/CNN_checkpoint.pt"""'], {}), "('./data/CNN_checkpoint.pt')\n", (872, 900), False, 'import torch\n'), ((1172, 1234), 'HSCNN_train.train', 'train', (['train_pairs_iter', 'valid_pairs_iter', 'sia_model', 'patience'], {}), '(train_pairs_iter, valid_pairs_iter, sia_model, patience)\n', (1177, 1234), False, 'from HSCNN_train import train\n'), ((1278, 1313), 'HSCNN_classifier.get_compare', 'get_compare', (['train_data_iter', 'model'], {}), '(train_data_iter, model)\n', (1289, 1313), False, 'from HSCNN_classifier import predict, get_compare\n'), ((1363, 1424), 'HSCNN_classifier.predict', 'predict', (['test_data_iter', 'train_data_iter', 'train_result', 'model'], {}), '(test_data_iter, train_data_iter, train_result, model)\n', (1370, 1424), False, 'from HSCNN_classifier import predict, get_compare\n')] |
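The state-dict surgery above is a standard warm-start pattern; here is a minimal self-contained sketch with toy modules (not the HSCNN classes): only parameters whose names exist in the target model are copied, new layers keep their fresh initialization.

# Sketch: copy only the parameters whose names exist in the target model.
import torch.nn as nn

class Small(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

class Big(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)    # shared name -> weights transfer
        self.head = nn.Linear(2, 1)  # new layer -> keeps its fresh init

pretrained = Small().state_dict()
model = Big()
state = model.state_dict()
state.update({k: v for k, v in pretrained.items() if k in state})
model.load_state_dict(state)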
import numpy as np
import scipy.sparse as sparse
from scipy.sparse import linalg
import pandas as pd
# global variables
# This will change the initial condition used. Currently it starts from the first value.
shift_k = 0
approx_res_size = 5000
model_params = {'tau': 0.25,
'nstep': 1000,
'N': 12,
'd': 22}
res_params = {'radius':0.1,
'degree': 3,
'sigma': 0.5,
'train_length': 400,
'spin_off': 50,
'N': int(np.floor(approx_res_size/model_params['N']) * model_params['N']),
'num_inputs': model_params['N'],
'predict_length': 300,
'beta': 0.0001
}
# The ESN functions for training
def generate_reservoir(size,radius,degree):
    sparsity = degree/float(size)
A = sparse.rand(size,size,density=sparsity).todense()
vals = np.linalg.eigvals(A)
e = np.max(np.abs(vals))
A = (A/e) * radius
return A
def reservoir_layer(A, Win, input, res_params):
states = np.zeros((res_params['N'],res_params['train_length']))
for i in range(res_params['train_length']-1):
states[:,i+1] = np.tanh(np.dot(A,states[:,i]) + np.dot(Win,input[:,i]))
return states
def reservoir_layer_reset(A, Win, input, res_params):
states = np.zeros((res_params['N'],res_params['spin_off']))
for i in range(res_params['spin_off']-1):
states[:,i+1] = np.tanh(np.dot(A,states[:,i]) + np.dot(Win,input[:,i]))
x_reset = states[:,-1]
return x_reset
def train_reservoir(res_params, data):
A = generate_reservoir(res_params['N'], res_params['radius'], res_params['degree'])
q = int(res_params['N']/res_params['num_inputs'])
Win = np.zeros((res_params['N'],res_params['num_inputs']))
for i in range(res_params['num_inputs']):
np.random.seed(seed=i)
Win[i*q: (i+1)*q,i] = res_params['sigma'] * (-1 + 2 * np.random.rand(1,q)[0])
states = reservoir_layer(A, Win, data, res_params)
Wout = train(res_params, states, data)
x = states[:,-1]
return x, Wout, A, Win
def train(res_params,states,data):
beta = res_params['beta']
idenmat = beta * sparse.identity(res_params['N'])
states2 = states.copy()
for j in range(2,np.shape(states2)[0]-2):
if (np.mod(j,2)==0):
states2[j,:] = (states[j-1,:]*states[j-2,:]).copy()
U = np.dot(states2,states2.transpose()) + idenmat
Uinv = np.linalg.inv(U)
Wout = np.dot(Uinv,np.dot(states2,data.transpose()))
return Wout.transpose()
def predict(A, Win, res_params, x, Wout):
output = np.zeros((res_params['num_inputs'],res_params['predict_length']))
for i in range(res_params['predict_length']):
x_aug = x.copy()
for j in range(2,np.shape(x_aug)[0]-2):
if (np.mod(j,2)==0):
x_aug[j] = (x[j-1]*x[j-2]).copy()
out = np.squeeze(np.asarray(np.dot(Wout,x_aug)))
output[:,i] = out
x1 = np.tanh(np.dot(A,x) + np.dot(Win,out))
x = np.squeeze(np.asarray(x1))
return output, x
if __name__ == '__main__':
print('Echo state network file')
'''
Example of use
# data=np.load('data.npy') # rows are state, columns are samples in time
# Train reservoir
x,Wout,A,Win = train_reservoir(res_params,data[:,shift_k:shift_k+res_params['train_length']])
x_reset=reservoir_layer_reset(A, Win, data[:,shift_k:shift_k+res_params['spin_off']], res_params)
# Prediction
output, _ = predict(A, Win,res_params,x_reset,Wout)
np.save('prediction_output',output)
'''
| [
"numpy.abs",
"scipy.sparse.rand",
"numpy.random.rand",
"numpy.floor",
"numpy.asarray",
"numpy.linalg.eigvals",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.dot",
"numpy.random.seed",
"numpy.shape",
"scipy.sparse.identity",
"numpy.mod"
] | [((898, 918), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['A'], {}), '(A)\n', (915, 918), True, 'import numpy as np\n'), ((1046, 1101), 'numpy.zeros', 'np.zeros', (["(res_params['N'], res_params['train_length'])"], {}), "((res_params['N'], res_params['train_length']))\n", (1054, 1101), True, 'import numpy as np\n'), ((1317, 1368), 'numpy.zeros', 'np.zeros', (["(res_params['N'], res_params['spin_off'])"], {}), "((res_params['N'], res_params['spin_off']))\n", (1325, 1368), True, 'import numpy as np\n'), ((1732, 1785), 'numpy.zeros', 'np.zeros', (["(res_params['N'], res_params['num_inputs'])"], {}), "((res_params['N'], res_params['num_inputs']))\n", (1740, 1785), True, 'import numpy as np\n'), ((2455, 2471), 'numpy.linalg.inv', 'np.linalg.inv', (['U'], {}), '(U)\n', (2468, 2471), True, 'import numpy as np\n'), ((2613, 2679), 'numpy.zeros', 'np.zeros', (["(res_params['num_inputs'], res_params['predict_length'])"], {}), "((res_params['num_inputs'], res_params['predict_length']))\n", (2621, 2679), True, 'import numpy as np\n'), ((934, 946), 'numpy.abs', 'np.abs', (['vals'], {}), '(vals)\n', (940, 946), True, 'import numpy as np\n'), ((1839, 1861), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'i'}), '(seed=i)\n', (1853, 1861), True, 'import numpy as np\n'), ((2190, 2222), 'scipy.sparse.identity', 'sparse.identity', (["res_params['N']"], {}), "(res_params['N'])\n", (2205, 2222), True, 'import scipy.sparse as sparse\n'), ((524, 569), 'numpy.floor', 'np.floor', (["(approx_res_size / model_params['N'])"], {}), "(approx_res_size / model_params['N'])\n", (532, 569), True, 'import numpy as np\n'), ((837, 878), 'scipy.sparse.rand', 'sparse.rand', (['size', 'size'], {'density': 'sparsity'}), '(size, size, density=sparsity)\n', (848, 878), True, 'import scipy.sparse as sparse\n'), ((2309, 2321), 'numpy.mod', 'np.mod', (['j', '(2)'], {}), '(j, 2)\n', (2315, 2321), True, 'import numpy as np\n'), ((3043, 3057), 'numpy.asarray', 'np.asarray', (['x1'], {}), '(x1)\n', (3053, 3057), True, 'import numpy as np\n'), ((1183, 1206), 'numpy.dot', 'np.dot', (['A', 'states[:, i]'], {}), '(A, states[:, i])\n', (1189, 1206), True, 'import numpy as np\n'), ((1207, 1231), 'numpy.dot', 'np.dot', (['Win', 'input[:, i]'], {}), '(Win, input[:, i])\n', (1213, 1231), True, 'import numpy as np\n'), ((1446, 1469), 'numpy.dot', 'np.dot', (['A', 'states[:, i]'], {}), '(A, states[:, i])\n', (1452, 1469), True, 'import numpy as np\n'), ((1470, 1494), 'numpy.dot', 'np.dot', (['Win', 'input[:, i]'], {}), '(Win, input[:, i])\n', (1476, 1494), True, 'import numpy as np\n'), ((2272, 2289), 'numpy.shape', 'np.shape', (['states2'], {}), '(states2)\n', (2280, 2289), True, 'import numpy as np\n'), ((2818, 2830), 'numpy.mod', 'np.mod', (['j', '(2)'], {}), '(j, 2)\n', (2824, 2830), True, 'import numpy as np\n'), ((2921, 2940), 'numpy.dot', 'np.dot', (['Wout', 'x_aug'], {}), '(Wout, x_aug)\n', (2927, 2940), True, 'import numpy as np\n'), ((2989, 3001), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, x)\n', (2995, 3001), True, 'import numpy as np\n'), ((3003, 3019), 'numpy.dot', 'np.dot', (['Win', 'out'], {}), '(Win, out)\n', (3009, 3019), True, 'import numpy as np\n'), ((2779, 2794), 'numpy.shape', 'np.shape', (['x_aug'], {}), '(x_aug)\n', (2787, 2794), True, 'import numpy as np\n'), ((1924, 1944), 'numpy.random.rand', 'np.random.rand', (['(1)', 'q'], {}), '(1, q)\n', (1938, 1944), True, 'import numpy as np\n')] |
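A quick synthetic check (not from the original script, module context assumed) that `generate_reservoir` really rescales the spectral radius:

# Sketch: the largest eigenvalue magnitude should come out near `radius`.
A = generate_reservoir(200, radius=0.1, degree=3)
print(np.max(np.abs(np.linalg.eigvals(A))))   # ~0.1 by construction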
import gym
import unittest
import numpy as np
from connect_four.envs.connect_four_env import ConnectFourEnv
from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager
from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager
from connect_four.problem import ConnectFourGroupManager
@unittest.skip("deprecated")
class TestGraphManager4x4(unittest.TestCase):
def setUp(self) -> None:
self.env = gym.make('connect_four-v0')
ConnectFourEnv.M = 4
ConnectFourEnv.N = 4
self.env.reset()
def test_evaluate_4x4(self):
self.env.state = np.array([
[
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
],
[
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
[0, 0, 0, 0, ],
],
])
problem_manager = ConnectFourGroupManager(env_variables=self.env.env_variables)
solution_manager = VictorSolutionManager(env_variables=self.env.env_variables)
gm = GraphManager(player=0, problem_manager=problem_manager, solution_manager=solution_manager)
got_evaluation = gm.evaluate()
self.assertIsNotNone(got_evaluation)
if __name__ == '__main__':
unittest.main()
| [
"connect_four.evaluation.incremental_victor.graph.graph_manager.GraphManager",
"connect_four.evaluation.incremental_victor.solution.victor_solution_manager.VictorSolutionManager",
"numpy.array",
"connect_four.problem.ConnectFourGroupManager",
"unittest.main",
"unittest.skip",
"gym.make"
] | [((368, 395), 'unittest.skip', 'unittest.skip', (['"""deprecated"""'], {}), "('deprecated')\n", (381, 395), False, 'import unittest\n'), ((1393, 1408), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1406, 1408), False, 'import unittest\n'), ((490, 517), 'gym.make', 'gym.make', (['"""connect_four-v0"""'], {}), "('connect_four-v0')\n", (498, 517), False, 'import gym\n'), ((660, 790), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]])\n', (668, 790), True, 'import numpy as np\n'), ((1022, 1083), 'connect_four.problem.ConnectFourGroupManager', 'ConnectFourGroupManager', ([], {'env_variables': 'self.env.env_variables'}), '(env_variables=self.env.env_variables)\n', (1045, 1083), False, 'from connect_four.problem import ConnectFourGroupManager\n'), ((1111, 1170), 'connect_four.evaluation.incremental_victor.solution.victor_solution_manager.VictorSolutionManager', 'VictorSolutionManager', ([], {'env_variables': 'self.env.env_variables'}), '(env_variables=self.env.env_variables)\n', (1132, 1170), False, 'from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager\n'), ((1185, 1280), 'connect_four.evaluation.incremental_victor.graph.graph_manager.GraphManager', 'GraphManager', ([], {'player': '(0)', 'problem_manager': 'problem_manager', 'solution_manager': 'solution_manager'}), '(player=0, problem_manager=problem_manager, solution_manager=\n solution_manager)\n', (1197, 1280), False, 'from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager\n')] |
from cv2 import cv2
import tello
import time
import numpy as np
drone = tello.Tello('', 8889)
time.sleep(10)
chase_count = 0
chase_image_list = []
chase_corner_list = []
objp = np.zeros((9*6, 3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
while(True):
frame = drone.read()
    imageSize = (frame.shape[1], frame.shape[0])  # cv2.calibrateCamera expects (width, height)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, corner = cv2.findChessboardCorners(frame, (9,6), None)
if ret == True:
chase_count += 1
cv2.cornerSubPix(frame, corner, (11,11), (-1,-1), criteria)
chase_image_list.append(objp)
chase_corner_list.append(corner)
cv2.waitKey(500)
if chase_count > 10:
retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(chase_image_list, chase_corner_list, imageSize, None, None)
break
cv2.imshow('frame', frame)
key = cv2.waitKey(33)
drone.keyboard(key)
f = cv2.FileStorage('calibration.xml', cv2.FILE_STORAGE_WRITE)
f.write("intrinsic", cameraMatrix)
f.write("distortion", distCoeffs)
f.release() | [
"tello.Tello",
"cv2.cv2.findChessboardCorners",
"cv2.cv2.waitKey",
"time.sleep",
"cv2.cv2.calibrateCamera",
"numpy.zeros",
"cv2.cv2.FileStorage",
"cv2.cv2.cvtColor",
"cv2.cv2.cornerSubPix",
"cv2.cv2.imshow"
] | [((73, 94), 'tello.Tello', 'tello.Tello', (['""""""', '(8889)'], {}), "('', 8889)\n", (84, 94), False, 'import tello\n'), ((95, 109), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (105, 109), False, 'import time\n'), ((178, 210), 'numpy.zeros', 'np.zeros', (['(9 * 6, 3)', 'np.float32'], {}), '((9 * 6, 3), np.float32)\n', (186, 210), True, 'import numpy as np\n'), ((1077, 1135), 'cv2.cv2.FileStorage', 'cv2.FileStorage', (['"""calibration.xml"""', 'cv2.FILE_STORAGE_WRITE'], {}), "('calibration.xml', cv2.FILE_STORAGE_WRITE)\n", (1092, 1135), False, 'from cv2 import cv2\n'), ((431, 469), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (443, 469), False, 'from cv2 import cv2\n'), ((482, 521), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (494, 521), False, 'from cv2 import cv2\n'), ((541, 587), 'cv2.cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['frame', '(9, 6)', 'None'], {}), '(frame, (9, 6), None)\n', (566, 587), False, 'from cv2 import cv2\n'), ((991, 1017), 'cv2.cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (1001, 1017), False, 'from cv2 import cv2\n'), ((1028, 1043), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (1039, 1043), False, 'from cv2 import cv2\n'), ((640, 701), 'cv2.cv2.cornerSubPix', 'cv2.cornerSubPix', (['frame', 'corner', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(frame, corner, (11, 11), (-1, -1), criteria)\n', (656, 701), False, 'from cv2 import cv2\n'), ((787, 803), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(500)'], {}), '(500)\n', (798, 803), False, 'from cv2 import cv2\n'), ((891, 970), 'cv2.cv2.calibrateCamera', 'cv2.calibrateCamera', (['chase_image_list', 'chase_corner_list', 'imageSize', 'None', 'None'], {}), '(chase_image_list, chase_corner_list, imageSize, None, None)\n', (910, 970), False, 'from cv2 import cv2\n')] |
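A hedged sketch of consuming the calibration saved above: read the matrices back with FileStorage and undistort an image (the image path is an example).

# Sketch: load calibration.xml and undistort a frame.
import cv2

fs = cv2.FileStorage('calibration.xml', cv2.FILE_STORAGE_READ)
K = fs.getNode('intrinsic').mat()
D = fs.getNode('distortion').mat()
fs.release()
img = cv2.imread('frame.jpg')          # example input
undistorted = cv2.undistort(img, K, D)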
import os, sys
import argparse
import numpy as np
import cv2
from skimage import filters
from linefiller.thinning import thinning
from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \
show_fill_map, my_merge_fill
def dline_of(x, low_thr=1, high_thr=20, bf_args=[30,40,30]):
xm = cv2.medianBlur(x, 5)
# xga = cv2.GaussianBlur(x,(5, 5),cv2.BORDER_DEFAULT)
xb = cv2.bilateralFilter(x, bf_args[0], bf_args[1], bf_args[2])
# xb = cv2.bilateralFilter(xb, 20, 60, 10 )
xg = cv2.cvtColor(xb, cv2.COLOR_RGB2GRAY)
xl = cv2.Laplacian(xb, ddepth = cv2.CV_32F, ksize=5)
xgg = xl
xgg = xgg.astype(np.float32) * (255. / (xgg.astype(np.float32).max() * 1.0))
xh = filters.apply_hysteresis_threshold(xgg, low_thr, high_thr)
xgg[xh == False] = 0
# xgg[xh == True] = 255
xgg1 = xgg.copy() * 20
xgg1 = np.max(xgg1, axis=2)
return np.clip(255 - xgg1, 0, 255)
def squeeze_label_map(label_map):
ret_label_map = label_map.copy()
labels, counts = np.unique(ret_label_map, return_counts=True)
label_orders = np.argsort(counts)
for ord_id, ord_val in enumerate(label_orders):
mask = (label_map == labels[ord_val])
ret_label_map[mask] = ord_id
return ret_label_map
def trapped_ball_processed(binary, in_image=None, do_merge=True):
fills = []
result = binary
fill = trapped_ball_fill_multi(result, 3, method='max')
fills += fill
result = mark_fill(result, fill)
print('result num 3: ', len(fills))
fill = trapped_ball_fill_multi(result, 2, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 2: ', len(fills))
fill = trapped_ball_fill_multi(result, 1, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 1: ', len(fills))
fill = flood_fill_multi(result)
fills += fill
print('flood_fill_multi num 1: ', len(fills))
fillmap = build_fill_map(result, fills)
# print('fillmap num: ', len(np.unique(fillmap)))
if do_merge:
if in_image is None:
fillmap = merge_fill(fillmap, max_iter=10)
else:
fillmap = my_merge_fill(in_image, fillmap)
fillmap = thinning(fillmap)
return fillmap
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_root')
parser.add_argument('output_root')
parser.add_argument('--start_idx', default=0,
help='')
parser.add_argument('--end_idx', default=None,
help='')
    parser.add_argument('--width', default=960,
                    help='width of the generated flow, default: 960')
    parser.add_argument('--height', default=540,
                    help='height of the generated flow, default: 540')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
######
folder_root = args.input_root
save_root = args.output_root
use_gpu = args.use_gpu
start_idx = int(args.start_idx)
end_idx = None if args.end_idx is None else int(args.end_idx)
# tar_size = (1280, 720)
    tar_size = (args.width, args.height)  # cv2.resize takes (width, height)
# tar_size = (640, 360)
######
print('use gpu: ', use_gpu)
sys.stdout.flush()
if not os.path.exists(save_root):
os.makedirs(save_root)
folderList = sorted(os.listdir(folder_root))
print('folderList length: ', len(folderList))
for f_idx, folder in enumerate(folderList[start_idx:end_idx]):
f_idx += start_idx
input_subfolder = os.path.join(folder_root, folder)
imgFileNames = sorted(os.listdir(input_subfolder))
print('-- [%d] %s'%(f_idx, folder))
print(imgFileNames)
saveFolder = os.path.join(save_root, folder)
labelMap1_savePath = os.path.join(saveFolder, 'labelmap_1.npy')
labelMap2_savePath = os.path.join(saveFolder, 'labelmap_3.npy')
# if os.path.exists(labelMap1_savePath) and os.path.exists(labelMap2_savePath):
# try:
# binMap1 = np.load(labelMap1_savePath)
# binMap3 = np.load(labelMap2_savePath)
# except IOError:
# print("labelmap file corrupted")
# else:
# print("already generated")
# continue
sys.stdout.flush()
img1 = cv2.imread(os.path.join(input_subfolder, imgFileNames[0]))
img3 = cv2.imread(os.path.join(input_subfolder, imgFileNames[-1]))
# segmentation
img1_rs = cv2.resize(img1, tar_size)
img3_rs = cv2.resize(img3, tar_size)
if 'Disney' in folder:
boundImg1 = dline_of(img1_rs, 1, 20, [30,40,30]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 1, 20, [30,40,30]).astype(np.uint8)
else:
boundImg1 = dline_of(img1_rs, 2, 20, [10,10,10]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 2, 20, [10,10,10]).astype(np.uint8)
ret, binMap1 = cv2.threshold(boundImg1, 220, 255, cv2.THRESH_BINARY)
ret, binMap3 = cv2.threshold(boundImg3, 220, 255, cv2.THRESH_BINARY)
print('- trapped_ball_processed()')
sys.stdout.flush()
fillMap1 = trapped_ball_processed(binMap1, img1_rs)
fillMap3 = trapped_ball_processed(binMap3, img3_rs)
labelMap1 = squeeze_label_map(fillMap1)
labelMap3 = squeeze_label_map(fillMap3)
# save flows
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
np.save(labelMap1_savePath, labelMap1)
np.save(labelMap2_savePath, labelMap3)
print('save to %s, %s'%(labelMap1_savePath, labelMap2_savePath))
sys.stdout.flush()
labelMap1_img = show_fill_map(labelMap1)
labelMap3_img = show_fill_map(labelMap3)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_1.jpg'), labelMap1_img)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_3.jpg'), labelMap3_img) | [
"numpy.clip",
"linefiller.trappedball_fill.trapped_ball_fill_multi",
"numpy.argsort",
"numpy.save",
"linefiller.trappedball_fill.flood_fill_multi",
"os.path.exists",
"cv2.Laplacian",
"os.listdir",
"linefiller.thinning.thinning",
"argparse.ArgumentParser",
"linefiller.trappedball_fill.mark_fill",... | [((362, 382), 'cv2.medianBlur', 'cv2.medianBlur', (['x', '(5)'], {}), '(x, 5)\n', (376, 382), False, 'import cv2\n'), ((450, 508), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['x', 'bf_args[0]', 'bf_args[1]', 'bf_args[2]'], {}), '(x, bf_args[0], bf_args[1], bf_args[2])\n', (469, 508), False, 'import cv2\n'), ((566, 602), 'cv2.cvtColor', 'cv2.cvtColor', (['xb', 'cv2.COLOR_RGB2GRAY'], {}), '(xb, cv2.COLOR_RGB2GRAY)\n', (578, 602), False, 'import cv2\n'), ((612, 657), 'cv2.Laplacian', 'cv2.Laplacian', (['xb'], {'ddepth': 'cv2.CV_32F', 'ksize': '(5)'}), '(xb, ddepth=cv2.CV_32F, ksize=5)\n', (625, 657), False, 'import cv2\n'), ((765, 823), 'skimage.filters.apply_hysteresis_threshold', 'filters.apply_hysteresis_threshold', (['xgg', 'low_thr', 'high_thr'], {}), '(xgg, low_thr, high_thr)\n', (799, 823), False, 'from skimage import filters\n'), ((921, 941), 'numpy.max', 'np.max', (['xgg1'], {'axis': '(2)'}), '(xgg1, axis=2)\n', (927, 941), True, 'import numpy as np\n'), ((958, 985), 'numpy.clip', 'np.clip', (['(255 - xgg1)', '(0)', '(255)'], {}), '(255 - xgg1, 0, 255)\n', (965, 985), True, 'import numpy as np\n'), ((1084, 1128), 'numpy.unique', 'np.unique', (['ret_label_map'], {'return_counts': '(True)'}), '(ret_label_map, return_counts=True)\n', (1093, 1128), True, 'import numpy as np\n'), ((1148, 1166), 'numpy.argsort', 'np.argsort', (['counts'], {}), '(counts)\n', (1158, 1166), True, 'import numpy as np\n'), ((1450, 1498), 'linefiller.trappedball_fill.trapped_ball_fill_multi', 'trapped_ball_fill_multi', (['result', '(3)'], {'method': '"""max"""'}), "(result, 3, method='max')\n", (1473, 1498), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1530, 1553), 'linefiller.trappedball_fill.mark_fill', 'mark_fill', (['result', 'fill'], {}), '(result, fill)\n', (1539, 1553), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1610, 1657), 'linefiller.trappedball_fill.trapped_ball_fill_multi', 'trapped_ball_fill_multi', (['result', '(2)'], {'method': 'None'}), '(result, 2, method=None)\n', (1633, 1657), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1689, 1712), 'linefiller.trappedball_fill.mark_fill', 'mark_fill', (['result', 'fill'], {}), '(result, fill)\n', (1698, 1712), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1769, 1816), 'linefiller.trappedball_fill.trapped_ball_fill_multi', 'trapped_ball_fill_multi', (['result', '(1)'], {'method': 'None'}), '(result, 1, method=None)\n', (1792, 1816), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1848, 1871), 'linefiller.trappedball_fill.mark_fill', 'mark_fill', (['result', 'fill'], {}), '(result, fill)\n', (1857, 1871), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((1924, 1948), 'linefiller.trappedball_fill.flood_fill_multi', 'flood_fill_multi', (['result'], {}), '(result)\n', (1940, 1948), False, 'from 
linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((2032, 2061), 'linefiller.trappedball_fill.build_fill_map', 'build_fill_map', (['result', 'fills'], {}), '(result, fills)\n', (2046, 2061), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((2301, 2318), 'linefiller.thinning.thinning', 'thinning', (['fillmap'], {}), '(fillmap)\n', (2309, 2318), False, 'from linefiller.thinning import thinning\n'), ((2389, 2414), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2412, 2414), False, 'import argparse\n'), ((3344, 3362), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3360, 3362), False, 'import os, sys\n'), ((3375, 3400), 'os.path.exists', 'os.path.exists', (['save_root'], {}), '(save_root)\n', (3389, 3400), False, 'import os, sys\n'), ((3410, 3432), 'os.makedirs', 'os.makedirs', (['save_root'], {}), '(save_root)\n', (3421, 3432), False, 'import os, sys\n'), ((3458, 3481), 'os.listdir', 'os.listdir', (['folder_root'], {}), '(folder_root)\n', (3468, 3481), False, 'import os, sys\n'), ((3654, 3687), 'os.path.join', 'os.path.join', (['folder_root', 'folder'], {}), '(folder_root, folder)\n', (3666, 3687), False, 'import os, sys\n'), ((3841, 3872), 'os.path.join', 'os.path.join', (['save_root', 'folder'], {}), '(save_root, folder)\n', (3853, 3872), False, 'import os, sys\n'), ((3902, 3944), 'os.path.join', 'os.path.join', (['saveFolder', '"""labelmap_1.npy"""'], {}), "(saveFolder, 'labelmap_1.npy')\n", (3914, 3944), False, 'import os, sys\n'), ((3974, 4016), 'os.path.join', 'os.path.join', (['saveFolder', '"""labelmap_3.npy"""'], {}), "(saveFolder, 'labelmap_3.npy')\n", (3986, 4016), False, 'import os, sys\n'), ((4418, 4436), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4434, 4436), False, 'import os, sys\n'), ((4629, 4655), 'cv2.resize', 'cv2.resize', (['img1', 'tar_size'], {}), '(img1, tar_size)\n', (4639, 4655), False, 'import cv2\n'), ((4674, 4700), 'cv2.resize', 'cv2.resize', (['img3', 'tar_size'], {}), '(img3, tar_size)\n', (4684, 4700), False, 'import cv2\n'), ((5083, 5136), 'cv2.threshold', 'cv2.threshold', (['boundImg1', '(220)', '(255)', 'cv2.THRESH_BINARY'], {}), '(boundImg1, 220, 255, cv2.THRESH_BINARY)\n', (5096, 5136), False, 'import cv2\n'), ((5160, 5213), 'cv2.threshold', 'cv2.threshold', (['boundImg3', '(220)', '(255)', 'cv2.THRESH_BINARY'], {}), '(boundImg3, 220, 255, cv2.THRESH_BINARY)\n', (5173, 5213), False, 'import cv2\n'), ((5267, 5285), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5283, 5285), False, 'import os, sys\n'), ((5618, 5656), 'numpy.save', 'np.save', (['labelMap1_savePath', 'labelMap1'], {}), '(labelMap1_savePath, labelMap1)\n', (5625, 5656), True, 'import numpy as np\n'), ((5665, 5703), 'numpy.save', 'np.save', (['labelMap2_savePath', 'labelMap3'], {}), '(labelMap2_savePath, labelMap3)\n', (5672, 5703), True, 'import numpy as np\n'), ((5785, 5803), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5801, 5803), False, 'import os, sys\n'), ((5829, 5853), 'linefiller.trappedball_fill.show_fill_map', 'show_fill_map', (['labelMap1'], {}), '(labelMap1)\n', (5842, 5853), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((5878, 5902), 
'linefiller.trappedball_fill.show_fill_map', 'show_fill_map', (['labelMap3'], {}), '(labelMap3)\n', (5891, 5902), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((2185, 2217), 'linefiller.trappedball_fill.merge_fill', 'merge_fill', (['fillmap'], {'max_iter': '(10)'}), '(fillmap, max_iter=10)\n', (2195, 2217), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((2254, 2286), 'linefiller.trappedball_fill.my_merge_fill', 'my_merge_fill', (['in_image', 'fillmap'], {}), '(in_image, fillmap)\n', (2267, 2286), False, 'from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, show_fill_map, my_merge_fill\n'), ((3718, 3745), 'os.listdir', 'os.listdir', (['input_subfolder'], {}), '(input_subfolder)\n', (3728, 3745), False, 'import os, sys\n'), ((4464, 4510), 'os.path.join', 'os.path.join', (['input_subfolder', 'imgFileNames[0]'], {}), '(input_subfolder, imgFileNames[0])\n', (4476, 4510), False, 'import os, sys\n'), ((4538, 4585), 'os.path.join', 'os.path.join', (['input_subfolder', 'imgFileNames[-1]'], {}), '(input_subfolder, imgFileNames[-1])\n', (4550, 4585), False, 'import os, sys\n'), ((5540, 5566), 'os.path.exists', 'os.path.exists', (['saveFolder'], {}), '(saveFolder)\n', (5554, 5566), False, 'import os, sys\n'), ((5580, 5600), 'os.mkdir', 'os.mkdir', (['saveFolder'], {}), '(saveFolder)\n', (5588, 5600), False, 'import os, sys\n'), ((5923, 5965), 'os.path.join', 'os.path.join', (['saveFolder', '"""labelmap_1.jpg"""'], {}), "(saveFolder, 'labelmap_1.jpg')\n", (5935, 5965), False, 'import os, sys\n'), ((6002, 6044), 'os.path.join', 'os.path.join', (['saveFolder', '"""labelmap_3.jpg"""'], {}), "(saveFolder, 'labelmap_3.jpg')\n", (6014, 6044), False, 'import os, sys\n')] |
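A condensed sketch of the per-frame pipeline in the __main__ block above (input path is an example, default `dline_of` thresholds assumed): boundary extraction, binarization, trapped-ball fill, then label squeezing.

# Sketch: one frame through the segmentation pipeline.
import cv2

img = cv2.resize(cv2.imread('frame.png'), (960, 540))
bound = dline_of(img).astype('uint8')
_, binmap = cv2.threshold(bound, 220, 255, cv2.THRESH_BINARY)
labels = squeeze_label_map(trapped_ball_processed(binmap, img))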
""" Defines the Plot class.
"""
# Major library imports
import itertools
import warnings
import six
import six.moves as sm
from numpy import arange, array, ndarray, linspace
from types import FunctionType
# Enthought library imports
from traits.api import Delegate, Dict, Instance, Int, List, Property, Str
# Local, relative imports
from .abstract_colormap import AbstractColormap
from .abstract_data_source import AbstractDataSource
from .abstract_plot_data import AbstractPlotData
from .array_data_source import ArrayDataSource
from .array_plot_data import ArrayPlotData
from .base_xy_plot import BaseXYPlot
from .barplot import BarPlot
from .candle_plot import CandlePlot
from .colormapped_scatterplot import ColormappedScatterPlot
from .contour_line_plot import ContourLinePlot
from .contour_poly_plot import ContourPolyPlot
from .cmap_image_plot import CMapImagePlot
from .data_range_1d import DataRange1D
from .data_view import DataView
from .default_colormaps import Spectral
from .grid_data_source import GridDataSource
from .grid_mapper import GridMapper
from .image_data import ImageData
from .image_plot import ImagePlot
from .legend import Legend
from .lineplot import LinePlot
from .line_scatterplot_1d import LineScatterPlot1D
from .linear_mapper import LinearMapper
from .log_mapper import LogMapper
from .plot_label import PlotLabel
from .polygon_plot import PolygonPlot
from .scatterplot import ScatterPlot
from .scatterplot_1d import ScatterPlot1D
from .segment_plot import ColormappedSegmentPlot, SegmentPlot
from .text_plot import TextPlot
from .text_plot_1d import TextPlot1D
from .filled_line_plot import FilledLinePlot
from .quiverplot import QuiverPlot
from .jitterplot import JitterPlot
#-----------------------------------------------------------------------------
# The Plot class
#-----------------------------------------------------------------------------
class Plot(DataView):
""" Represents a correlated set of data, renderers, and axes in a single
screen region.
A Plot can reference an arbitrary amount of data and can have an
unlimited number of renderers on it, but it has a single X-axis and a
single Y-axis for all of its associated data. Therefore, there is a single
range in X and Y, although there can be many different data series. A Plot
also has a single set of grids and a single background layer for all of its
renderers. It cannot be split horizontally or vertically; to do so,
create a VPlotContainer or HPlotContainer and put the Plots inside those.
Plots can be overlaid as well; be sure to set the **bgcolor** of the
overlaying plots to "none" or "transparent".
A Plot consists of composable sub-plots. Each of these is created
or destroyed using the plot() or delplot() methods. Every time that
new data is used to drive these sub-plots, it is added to the Plot's
list of data and data sources. Data sources are reused whenever
possible; in order to have the same actual array drive two de-coupled
data sources, create those data sources before handing them to the Plot.
"""
#------------------------------------------------------------------------
# Data-related traits
#------------------------------------------------------------------------
#: The PlotData instance that drives this plot.
data = Instance(AbstractPlotData)
#: Mapping of data names from self.data to their respective datasources.
datasources = Dict(Str, Instance(AbstractDataSource))
#------------------------------------------------------------------------
# General plotting traits
#------------------------------------------------------------------------
#: Mapping of plot names to *lists* of plot renderers.
plots = Dict(Str, List)
#: The default index to use when adding new subplots.
default_index = Instance(AbstractDataSource)
#: Optional mapper for the color axis. Not instantiated until first use;
#: destroyed if no color plots are on the plot.
color_mapper = Instance(AbstractColormap)
    #: List of colors to cycle through when auto-coloring is requested. Picked
    #: and ordered to be distinguishable by viewers with red-green color
    #: blindness; blue-yellow color blindness should not be an issue here.
auto_colors = List(["green", "lightgreen", "blue", "lightblue", "red",
"pink", "darkgray", "silver"])
# index into auto_colors list
_auto_color_idx = Int(-1)
_auto_edge_color_idx = Int(-1)
_auto_face_color_idx = Int(-1)
#: Mapping of renderer type string to renderer class
    #: This can be overridden to customize which renderer class the Plot
#: will instantiate for its various plotting methods.
renderer_map = Dict(dict(line = LinePlot,
bar = BarPlot,
scatter = ScatterPlot,
polygon = PolygonPlot,
filled_line = FilledLinePlot,
cmap_scatter = ColormappedScatterPlot,
cmap_segment = ColormappedSegmentPlot,
img_plot = ImagePlot,
cmap_img_plot = CMapImagePlot,
contour_line_plot = ContourLinePlot,
contour_poly_plot = ContourPolyPlot,
candle = CandlePlot,
quiver = QuiverPlot,
scatter_1d = ScatterPlot1D,
segment = SegmentPlot,
text = TextPlot,
textplot_1d = TextPlot1D,
line_scatter_1d = LineScatterPlot1D,
jitterplot = JitterPlot))
#------------------------------------------------------------------------
# Annotations and decorations
#------------------------------------------------------------------------
#: The title of the plot.
title = Property()
#: The font to use for the title.
title_font = Property()
#: Convenience attribute for title.overlay_position; can be "top",
#: "bottom", "left", or "right".
title_position = Property()
#: Use delegates to expose the other PlotLabel attributes of the plot title
title_text = Delegate("_title", prefix="text", modify=True)
title_color = Delegate("_title", prefix="color", modify=True)
title_angle = Delegate("_title", prefix="angle", modify=True)
# The PlotLabel object that contains the title.
_title = Instance(PlotLabel)
#: The legend on the plot.
legend = Instance(Legend)
#: Convenience attribute for legend.align; can be "ur", "ul", "ll", "lr".
legend_alignment = Property
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, data=None, **kwtraits):
if 'origin' in kwtraits:
self.default_origin = kwtraits.pop('origin')
if "title" in kwtraits:
title = kwtraits.pop("title")
else:
title = None
super(Plot, self).__init__(**kwtraits)
if data is not None:
if isinstance(data, AbstractPlotData):
self.data = data
elif type(data) in (ndarray, tuple, list):
self.data = ArrayPlotData(data)
else:
raise ValueError("Don't know how to create PlotData for data "
"of type {0}".format(type(data)))
if not self._title:
self._title = PlotLabel(font="swiss 16", visible=False,
overlay_position="top", component=self)
if title is not None:
self.title = title
if not self.legend:
self.legend = Legend(visible=False, align="ur", error_icon="blank",
padding=10, component=self)
# ensure that we only get displayed once by new_window()
self._plot_ui_info = None
return
def add_xy_plot(self, index_name, value_name, renderer_factory, name=None,
origin=None, **kwds):
""" Add a BaseXYPlot renderer subclass to this Plot.
Parameters
----------
index_name : str
The name of the index datasource.
value_name : str
The name of the value datasource.
renderer_factory : callable
The callable that creates the renderer.
name : string (optional)
The name of the plot. If None, then a default one is created
(usually "plotNNN").
origin : string (optional)
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
**kwds :
Additional keywords to pass to the factory.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
index = self._get_or_create_datasource(index_name)
self.index_range.add(index)
value = self._get_or_create_datasource(value_name)
self.value_range.add(value)
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range)
else:
imap = LogMapper(range=self.index_range)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range)
else:
vmap = LogMapper(range=self.value_range)
renderer = renderer_factory(
index = index,
value = value,
index_mapper = imap,
value_mapper = vmap,
orientation = self.orientation,
origin = origin,
**kwds
)
self.add(renderer)
self.plots[name] = [renderer]
self.invalidate_and_redraw()
return self.plots[name]
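    # A minimal usage sketch (hypothetical names): any callable accepting the
    # keyword arguments passed above (index, value, mappers, orientation,
    # origin) can serve as the factory, e.g. a BaseXYPlot subclass:
    #
    #     p = Plot(ArrayPlotData(x=x_array, y=y_array))
    #     p.add_xy_plot("x", "y", MyXYRenderer, name="custom")
    #
    # MyXYRenderer, x_array, and y_array are assumed for illustration.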
def plot(self, data, type="line", name=None, index_scale="linear",
value_scale="linear", origin=None, **styles):
""" Adds a new sub-plot using the given data and plot style.
Parameters
----------
data : string, tuple(string), list(string)
The data to be plotted. The type of plot and the number of
arguments determines how the arguments are interpreted:
one item: (line, scatter, segment)
The data is treated as the value and self.default_index is
used as the index. If **default_index** does not exist, one is
created from arange(len(*data*))
two or more items: (line, scatter, segment)
Interpreted as (index, value1, value2, ...). Each index,value
pair forms a new plot of the type specified.
three items: (cmap_scatter, cmap_segment)
Interpreted as (index, value, color)
three items: (text)
Interpreted as (index, value, text).
four items: (cmap_segment)
Interpreted as (index, val1, color_val1, width)
For segment plots index and value arrays alternate between
coordinates for the start and end points of segments.
type : comma-delimited string of plot type
The types of plots to add. One of "line", "scatter",
"cmap_scatter", "polygon", "bar", "filled_line", "segment", "text"
name : string
The name of the plot. If None, then a default one is created
(usually "plotNNN").
index_scale : string
The type of scale to use for the index axis. If not "linear", then
a log scale is used.
value_scale : string
The type of scale to use for the value axis. If not "linear", then
a log scale is used.
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
styles : series of keyword arguments
attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
Examples
--------
::
plot("my_data", type="line", name="myplot", color=lightblue)
plot(("x-data", "y-data"), type="scatter")
plot(("x", "y1", "y2", "y3"))
Returns
-------
[renderers] -> list of renderers created in response to this call to plot()
"""
if len(data) == 0:
return
if isinstance(data, six.string_types):
data = (data,)
self.index_scale = index_scale
self.value_scale = value_scale
# TODO: support lists of plot types
plot_type = type
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
if plot_type in ("line", "scatter", "polygon", "bar", "filled_line",
"segment"):
# Tie data to the index range
if len(data) == 1:
if self.default_index is None:
# Create the default index based on the length of the first
# data series
value = self._get_or_create_datasource(data[0])
self.default_index = ArrayDataSource(arange(len(value.get_data())),
sort_order="none")
self.index_range.add(self.default_index)
index = self.default_index
else:
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
data = data[1:]
# Tie data to the value_range and create the renderer for each data
new_plots = []
simple_plot_types = ("line", "scatter", "segment")
for value_name in data:
value = self._get_or_create_datasource(value_name)
self.value_range.add(value)
if plot_type in simple_plot_types:
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("color") == "auto":
self._auto_color_idx = \
(self._auto_color_idx + 1) % len(self.auto_colors)
styles["color"] = self.auto_colors[self._auto_color_idx]
elif plot_type in ("polygon", "filled_line"):
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("edge_color") == "auto":
self._auto_edge_color_idx = \
(self._auto_edge_color_idx + 1) % len(self.auto_colors)
styles["edge_color"] = self.auto_colors[self._auto_edge_color_idx]
if styles.get("face_color") == "auto":
self._auto_face_color_idx = \
(self._auto_face_color_idx + 1) % len(self.auto_colors)
styles["face_color"] = self.auto_colors[self._auto_face_color_idx]
elif plot_type == 'bar':
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("color") == "auto":
self._auto_color_idx = \
(self._auto_color_idx + 1) % len(self.auto_colors)
styles["fill_color"] = self.auto_colors[self._auto_color_idx]
else:
raise ValueError("Unhandled plot type: " + plot_type)
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
plot = cls(index=index,
value=value,
index_mapper=imap,
value_mapper=vmap,
orientation=self.orientation,
origin = origin,
**styles)
self.add(plot)
new_plots.append(plot)
if plot_type == 'bar':
# For bar plots, compute the ranges from the data to make the
# plot look clean.
def custom_index_func(data_low, data_high, margin, tight_bounds):
""" Compute custom bounds of the plot along index (in
data space).
"""
bar_width = styles.get('bar_width', cls().bar_width)
plot_low = data_low - bar_width
plot_high = data_high + bar_width
return plot_low, plot_high
if self.index_range.bounds_func is None:
self.index_range.bounds_func = custom_index_func
def custom_value_func(data_low, data_high, margin, tight_bounds):
""" Compute custom bounds of the plot along value (in
data space).
"""
plot_low = data_low - (data_high-data_low)*0.1
plot_high = data_high + (data_high-data_low)*0.1
return plot_low, plot_high
if self.value_range.bounds_func is None:
self.value_range.bounds_func = custom_value_func
self.index_range.tight_bounds = False
self.value_range.tight_bounds = False
self.index_range.refresh()
self.value_range.refresh()
self.plots[name] = new_plots
elif plot_type in ("text"):
if len(data) != 3:
raise ValueError("Text plots require (index, value, text) data")
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
value = self._get_or_create_datasource(data[1])
self.value_range.add(value)
text = self._get_or_create_datasource(data[2])
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
value=value,
value_mapper=vmap,
text=text,
orientation=self.orientation,
origin=origin,
**styles)
self.add(plot)
self.plots[name] = [plot]
elif plot_type in ("cmap_scatter", "cmap_segment"):
if plot_type == "cmap_scatter" and len(data) != 3:
raise ValueError("Colormapped scatter plots require (index, value, color) data")
elif len(data) > 4 or len(data) < 3:
raise ValueError("Colormapped segment plots require (index, value, color) or (index, value, color, width) data")
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
value = self._get_or_create_datasource(data[1])
self.value_range.add(value)
color = self._get_or_create_datasource(data[2])
if "color_mapper" not in styles:
raise ValueError("Scalar 2D data requires a color_mapper.")
colormap = styles.pop("color_mapper")
if self.color_mapper is not None and self.color_mapper.range is not None:
color_range = self.color_mapper.range
else:
color_range = DataRange1D()
if isinstance(colormap, AbstractColormap):
self.color_mapper = colormap
if colormap.range is None:
color_range.add(color)
colormap.range = color_range
elif callable(colormap):
color_range.add(color)
self.color_mapper = colormap(color_range)
else:
raise ValueError("Unexpected colormap %r in plot()." % colormap)
if self.color_mapper is not None and self.color_mapper.range is not None:
color_range = self.color_mapper.range
else:
color_range = DataRange1D()
if len(data) == 4:
size = self._get_or_create_datasource(data[3])
size_range = DataRange1D()
size_range.add(size)
size_min = styles.pop("size_min", 1)
size_max = styles.pop("size_max", 10)
sizemap = LinearMapper(low_pos=size_min, high_pos=size_max,
range=size_range)
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
value=value,
value_mapper=vmap,
color_data=color,
color_mapper=self.color_mapper,
orientation=self.orientation,
origin=origin,
**styles)
if len(data) == 4:
plot.width_data = size
plot.width_mapper = sizemap
plot.width_by_data = True
self.add(plot)
self.plots[name] = [plot]
else:
raise ValueError("Unknown plot type: " + plot_type)
return self.plots[name]
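    # Auto-coloring sketch: on a fresh Plot, repeated calls such as
    #
    #     p.plot(("x", "y1"), type="line", color="auto")
    #     p.plot(("x", "y2"), type="line", color="auto")
    #
    # draw from self.auto_colors in order ("green", then "lightgreen"),
    # since _auto_color_idx starts at -1 and advances modulo the list
    # length. The names "x", "y1", and "y2" are illustrative only.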
def img_plot(self, data, name=None, colormap=None,
xbounds=None, ybounds=None, origin=None, hide_grids=True,
**styles):
""" Adds image plots to this Plot object.
If *data* has shape (N, M, 3) or (N, M, 4), then it is treated as RGB or
RGBA (respectively) and *colormap* is ignored.
If *data* is an array of floating-point data, then a colormap can
be provided via the *colormap* argument, or the default of 'Spectral'
will be used.
*Data* should be in row-major order, so that xbounds corresponds to
*data*'s second axis, and ybounds corresponds to the first axis.
Parameters
----------
data : string
The name of the data array in self.plot_data
name : string
The name of the plot; if omitted, then a name is generated.
        xbounds, ybounds : string, tuple, or ndarray
            Bounds where this image resides. Each bound may be: a) the name
            of data in the plot data; b) a tuple of (low, high) in data
            space; c) a 1D array of values representing the pixel
            boundaries (which must be 1 element larger than the underlying
            data); or d) a 2D array as obtained from a meshgrid operation.
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
hide_grids : bool, default True
Whether or not to automatically hide the grid lines on the plot
styles : series of keyword arguments
Attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
value = self._get_or_create_datasource(data)
array_data = value.get_data()
if len(array_data.shape) == 3:
if array_data.shape[2] not in (3,4):
raise ValueError("Image plots require color depth of 3 or 4.")
cls = self.renderer_map["img_plot"]
kwargs = dict(**styles)
else:
if colormap is None:
if self.color_mapper is None:
colormap = Spectral(DataRange1D(value))
else:
colormap = self.color_mapper
elif isinstance(colormap, AbstractColormap):
if colormap.range is None:
colormap.range = DataRange1D(value)
else:
colormap = colormap(DataRange1D(value))
self.color_mapper = colormap
cls = self.renderer_map["cmap_img_plot"]
kwargs = dict(value_mapper=colormap, **styles)
return self._create_2d_plot(cls, name, origin, xbounds, ybounds, value,
hide_grids, cell_plot=True, **kwargs)
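    # A minimal usage sketch (assumed data name "img"): plotting a 2D float
    # array with explicit data-space bounds:
    #
    #     p.img_plot("img", xbounds=(0.0, 10.0), ybounds=(0.0, 5.0))
    #
    # With no colormap given and no existing color_mapper, the branch above
    # builds the default Spectral colormap over a DataRange1D of the values.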
def contour_plot(self, data, type="line", name=None, poly_cmap=None,
xbounds=None, ybounds=None, origin=None, hide_grids=True, **styles):
""" Adds contour plots to this Plot object.
Parameters
----------
data : string
The name of the data array in self.plot_data, which must be
floating point data.
type : comma-delimited string of "line", "poly"
The type of contour plot to add. If the value is "poly"
and no colormap is provided via the *poly_cmap* argument, then
a default colormap of 'Spectral' is used.
name : string
The name of the plot; if omitted, then a name is generated.
poly_cmap : string
The name of the color-map function to call (in
chaco.default_colormaps) or an AbstractColormap instance
to use for contour poly plots (ignored for contour line plots)
        xbounds, ybounds : string, tuple, or ndarray
            Bounds where this image resides. Each bound may be: a) the name
            of data in the plot data; b) a tuple of (low, high) in data
            space; c) a 1D array of values representing the pixel
            boundaries (which must be 1 element larger than the underlying
            data); or d) a 2D array as obtained from a meshgrid operation.
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
hide_grids : bool, default True
Whether or not to automatically hide the grid lines on the plot
styles : series of keyword arguments
Attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
value = self._get_or_create_datasource(data)
if value.value_depth != 1:
raise ValueError("Contour plots require 2D scalar field")
if type == "line":
cls = self.renderer_map["contour_line_plot"]
kwargs = dict(**styles)
# if colors is given as a factory func, use it to make a
# concrete colormapper. Better way to do this?
if "colors" in kwargs:
cmap = kwargs["colors"]
if isinstance(cmap, FunctionType):
kwargs["colors"] = cmap(DataRange1D(value))
elif getattr(cmap, 'range', 'dummy') is None:
cmap.range = DataRange1D(value)
elif type == "poly":
if poly_cmap is None:
poly_cmap = Spectral(DataRange1D(value))
elif isinstance(poly_cmap, FunctionType):
poly_cmap = poly_cmap(DataRange1D(value))
elif getattr(poly_cmap, 'range', 'dummy') is None:
poly_cmap.range = DataRange1D(value)
cls = self.renderer_map["contour_poly_plot"]
kwargs = dict(color_mapper=poly_cmap, **styles)
else:
raise ValueError("Unhandled contour plot type: " + type)
return self._create_2d_plot(cls, name, origin, xbounds, ybounds, value,
hide_grids, cell_plot=False, **kwargs)
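    # A minimal usage sketch (assumed data name "field"): a filled contour
    # plot using the default colormap, per the "poly" branch above:
    #
    #     p.contour_plot("field", type="poly")
    #
    # Passing type="line" uses ContourLinePlot instead, and a "colors" style
    # may supply either a colormap factory or a concrete colormapper.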
def _process_2d_bounds(self, bounds, array_data, axis, cell_plot):
"""Transform an arbitrary bounds definition into a linspace.
Process all the ways the user could have defined the x- or y-bounds
of a 2d plot and return a linspace between the lower and upper
range of the bounds.
Parameters
----------
bounds : any
User bounds definition
array_data : 2D array
The 2D plot data
axis : int
The axis along which the bounds are to be set
cell_plot : bool
Is the data plotted at the vertices or in the cells bounded by
the grid (eg. contour plot vs. image plot)
"""
if cell_plot:
num_ticks = array_data.shape[axis] + 1
else:
num_ticks = array_data.shape[axis]
if bounds is None:
return arange(num_ticks)
if isinstance(bounds, tuple):
# create a linspace with the bounds limits
return linspace(bounds[0], bounds[1], num_ticks)
elif isinstance(bounds, ndarray) and bounds.ndim == 1:
if len(bounds) != num_ticks:
# bounds is 1D, but of the wrong size
msg = ("1D bounds of an image plot needs to have 1 more "
"element than its corresponding data shape, because "
"they represent the locations of pixel boundaries.")
raise ValueError(msg)
else:
return bounds
elif isinstance(bounds, ndarray) and bounds.ndim == 2:
# bounds is 2D, assumed to be a meshgrid
# This is triggered when doing something like
# >>> xbounds, ybounds = meshgrid(...)
if bounds.shape[axis] != num_ticks:
msg = ("2D bounds of an image plot needs to have the same "
"shape as the underlying data, because "
"they are assumed to be generated from meshgrids.")
raise ValueError(msg)
else:
if axis == 0:
bounds = bounds[:,0]
else:
bounds = bounds[0,:]
return bounds
raise ValueError("bounds must be None, a tuple, an array, "
"or a PlotData name")
def _create_2d_plot(self, cls, name, origin, xbounds, ybounds, value_ds,
hide_grids, cell_plot=False, **kwargs):
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
array_data = value_ds.get_data()
# process bounds to get linspaces
if isinstance(xbounds, six.string_types):
xbounds = self._get_or_create_datasource(xbounds).get_data()
xs = self._process_2d_bounds(xbounds, array_data, 1, cell_plot)
if isinstance(ybounds, six.string_types):
ybounds = self._get_or_create_datasource(ybounds).get_data()
ys = self._process_2d_bounds(ybounds, array_data, 0, cell_plot)
# Create the index and add its datasources to the appropriate ranges
index = GridDataSource(xs, ys, sort_order=('ascending', 'ascending'))
self.range2d.add(index)
mapper = GridMapper(range=self.range2d,
stretch_data_x=self.x_mapper.stretch_data,
stretch_data_y=self.y_mapper.stretch_data)
plot = cls(index=index,
value=value_ds,
index_mapper=mapper,
orientation=self.orientation,
origin=origin,
**kwargs)
if hide_grids:
self.x_grid.visible = False
self.y_grid.visible = False
self.add(plot)
self.plots[name] = [plot]
return self.plots[name]
def candle_plot(self, data, name=None, value_scale="linear", origin=None,
**styles):
""" Adds a new sub-plot using the given data and plot style.
Parameters
----------
data : list(string), tuple(string)
The names of the data to be plotted in the ArrayDataSource. The
number of arguments determines how they are interpreted:
(index, bar_min, bar_max)
filled or outline-only bar extending from **bar_min** to
**bar_max**
(index, bar_min, center, bar_max)
above, plus a center line of a different color at **center**
(index, min, bar_min, bar_max, max)
bar extending from **bar_min** to **bar_max**, with thin
bars at **min** and **max** connected to the bar by a long
stem
(index, min, bar_min, center, bar_max, max)
like above, plus a center line of a different color and
configurable thickness at **center**
name : string
The name of the plot. If None, then a default one is created.
value_scale : string
The type of scale to use for the value axis. If not "linear",
then a log scale is used.
Styles
------
These are all optional keyword arguments.
bar_color : string, 3- or 4-tuple
The fill color of the bar; defaults to "auto".
bar_line_color : string, 3- or 4-tuple
The color of the rectangular box forming the bar.
stem_color : string, 3- or 4-tuple (default = bar_line_color)
The color of the stems reaching from the bar to the min and
max values.
center_color : string, 3- or 4-tuple (default = bar_line_color)
The color of the line drawn across the bar at the center values.
line_width : int (default = 1)
The thickness, in pixels, of the outline around the bar.
stem_width : int (default = line_width)
The thickness, in pixels, of the stem lines
center_width : int (default = line_width)
The width, in pixels, of the line drawn across the bar at the
center values.
end_cap : bool (default = True)
Whether or not to draw bars at the min and max extents of the
error bar.
Returns
-------
[renderers] -> list of renderers created in response to this call.
"""
if len(data) == 0:
return
self.value_scale = value_scale
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
# Create the datasources
if len(data) == 3:
index, bar_min, bar_max = sm.map(self._get_or_create_datasource, data)
self.value_range.add(bar_min, bar_max)
center = None
min = None
max = None
elif len(data) == 4:
index, bar_min, center, bar_max = sm.map(self._get_or_create_datasource, data)
self.value_range.add(bar_min, center, bar_max)
min = None
max = None
elif len(data) == 5:
index, min, bar_min, bar_max, max = \
sm.map(self._get_or_create_datasource, data)
self.value_range.add(min, bar_min, bar_max, max)
center = None
elif len(data) == 6:
index, min, bar_min, center, bar_max, max = \
sm.map(self._get_or_create_datasource, data)
self.value_range.add(min, bar_min, center, bar_max, max)
self.index_range.add(index)
if styles.get("bar_color") == "auto" or styles.get("color") == "auto":
self._auto_color_idx = \
(self._auto_color_idx + 1) % len(self.auto_colors)
styles["color"] = self.auto_colors[self._auto_color_idx]
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map["candle"]
plot = cls(index = index,
min_values = min,
bar_min = bar_min,
center_values = center,
bar_max = bar_max,
max_values = max,
index_mapper = imap,
value_mapper = vmap,
orientation = self.orientation,
origin = self.origin,
**styles)
self.add(plot)
self.plots[name] = [plot]
return [plot]
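    # A minimal usage sketch (assumed data names): the five-element form
    # (index, min, bar_min, bar_max, max) draws bars with whiskers:
    #
    #     p.candle_plot(("t", "low", "q1", "q3", "high"), bar_color="auto")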
def quiverplot(self, data, name=None, origin=None,
**styles):
""" Adds a new sub-plot using the given data and plot style.
Parameters
----------
data : list(string), tuple(string)
The names of the data to be plotted in the ArrayDataSource. There
is only one combination accepted by this function:
(index, value, vectors)
                index and value together determine the start coordinates of
                each vector; the vectors themselves are an Nx2 array of
                vector components.
name : string
The name of the plot. If None, then a default one is created.
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
Styles
------
These are all optional keyword arguments.
line_color : string (default = "black")
The color of the arrows
line_width : float (default = 1.0)
The thickness, in pixels, of the arrows.
arrow_size : int (default = 5)
The length, in pixels, of the arrowhead
Returns
-------
[renderers] -> list of renderers created in response to this call.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
index, value, vectors = list(sm.map(self._get_or_create_datasource, data))
self.index_range.add(index)
self.value_range.add(value)
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map["quiver"]
plot = cls(index = index,
value = value,
vectors = vectors,
index_mapper = imap,
value_mapper = vmap,
name = name,
origin = origin,
**styles
)
self.add(plot)
self.plots[name] = [plot]
return [plot]
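    # A minimal usage sketch (assumed data names): "vectors" must resolve to
    # an (N, 2) array of vector components, while "x" and "y" give the start
    # coordinates of each arrow:
    #
    #     p.quiverplot(("x", "y", "vectors"), line_color="black", arrow_size=5)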
def plot_1d(self, data, type='scatter_1d', name=None, orientation=None,
direction=None, scale="linear", **styles):
""" Adds a new sub-plot using the given data and plot style.
Parameters
----------
data : string, tuple(string), list(string)
            The data to be plotted. Each item generates a separate renderer
            using the named data source.
        type : string
            The type of plot to add. One of "scatter_1d",
            "line_scatter_1d", "textplot_1d", "jitterplot"
name : string
The name of the plot. If None, then a default one is created
(usually "plotNNN").
scale : string
The type of scale to use for the index axis. If not "linear", then
a log scale is used.
orientation : string
Whether the single dimension is horizontal ('h') or vertical ('v').
direction : string
Whether data is mapped in the usual direction (left to right or
bottom to top) or reversed.
styles : series of keyword arguments
attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
Returns
-------
[renderers] -> list of renderers created in response to this call to plot()
"""
if len(data) == 0:
return
if isinstance(data, six.string_types):
data = (data,)
# TODO: support lists of plot types
plot_type = type
if name is None:
name = self._make_new_plot_name()
if orientation is None:
orientation = self.orientation
if direction is None:
if orientation == 'v':
if "bottom" in self.origin:
direction = 'normal'
else:
direction = 'flipped'
else:
if "left" in self.origin:
direction = 'normal'
else:
direction = 'flipped'
plots = []
if plot_type in ("scatter_1d", "textplot_1d", "line_scatter_1d",
"jitterplot"):
# Tie data to the index range
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
if orientation != self.orientation:
index_range = self.value_range
index_mapper = self.value_mapper
self.value_scale = scale
else:
index_range = self.index_range
index_mapper = self.index_mapper
self.index_scale = scale
else:
raise ValueError("Unknown plot type: " + plot_type)
if plot_type in ("scatter_1d", "line_scatter_1d", "jitterplot"):
# simple 1d positional plots with no associated value
for source in data:
index = self._get_or_create_datasource(source)
index_range.add(index)
if scale == "linear":
imap = LinearMapper(range=index_range,
stretch_data=index_mapper.stretch_data)
else:
imap = LogMapper(range=index_range,
stretch_data=index_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
orientation=orientation,
direction=direction,
**styles)
plots.append(plot)
self.add(plot)
elif plot_type in ("textplot_1d",):
# simple positional plots with a single associated value
for source in data[1:]:
value = self._get_or_create_datasource(source)
if scale == "linear":
imap = LinearMapper(range=index_range,
stretch_data=index_mapper.stretch_data)
else:
imap = LogMapper(range=index_range,
stretch_data=index_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
value=value,
orientation=orientation,
direction=direction,
**styles)
plots.append(plot)
self.add(plot)
self.plots[name] = plots
return plots
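    # A minimal usage sketch (assumed data name "x"): a 1D scatter along the
    # index axis, with direction inferred from self.origin:
    #
    #     p.plot_1d("x", type="scatter_1d", orientation="h")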
def delplot(self, *names):
""" Removes the named sub-plots. """
# This process involves removing the plots, then checking the index range
# and value range for leftover datasources, and removing those if necessary.
# Remove all the renderers from us (container) and create a set of the
# datasources that we might have to remove from the ranges
deleted_sources = set()
for renderer in itertools.chain(*[self.plots.pop(name) for name in names]):
self.remove(renderer)
deleted_sources.add(renderer.index)
deleted_sources.add(renderer.value)
# Cull the candidate list of sources to remove by checking the other plots
sources_in_use = set()
for p in itertools.chain(*list(self.plots.values())):
sources_in_use.add(p.index)
sources_in_use.add(p.value)
unused_sources = deleted_sources - sources_in_use - set([None])
# Remove the unused sources from all ranges
for source in unused_sources:
if source.index_dimension == "scalar":
# Try both index and range, it doesn't hurt
self.index_range.remove(source)
self.value_range.remove(source)
elif source.index_dimension == "image":
self.range2d.remove(source)
else:
warnings.warn("Couldn't remove datasource from datarange.")
return
def hideplot(self, *names):
""" Convenience function to sets the named plots to be invisible. Their
renderers are not removed, and they are still in the list of plots.
"""
for renderer in itertools.chain(*[self.plots[name] for name in names]):
renderer.visible = False
return
def showplot(self, *names):
""" Convenience function to sets the named plots to be visible.
"""
for renderer in itertools.chain(*[self.plots[name] for name in names]):
renderer.visible = True
return
def new_window(self, configure=False):
"""Convenience function that creates a window containing the Plot
Don't call this if the plot is already displayed in a window.
"""
from chaco.ui.plot_window import PlotWindow
if self._plot_ui_info is None:
if configure:
self._plot_ui_info = PlotWindow(plot=self).configure_traits()
else:
self._plot_ui_info = PlotWindow(plot=self).edit_traits()
return self._plot_ui_info
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _make_new_plot_name(self):
""" Returns a string that is not already used as a plot title.
"""
n = len(self.plots)
plot_template = "plot%d"
        while True:
name = plot_template % n
if name not in self.plots:
break
else:
n += 1
return name
def _get_or_create_datasource(self, name):
""" Returns the data source associated with the given name, or creates
it if it doesn't exist.
"""
if name not in self.datasources:
data = self.data.get_data(name)
if type(data) in (list, tuple):
data = array(data)
if isinstance(data, ndarray):
if len(data.shape) == 1:
ds = ArrayDataSource(data, sort_order="none")
elif len(data.shape) == 2:
ds = ImageData(data=data, value_depth=1)
elif len(data.shape) == 3 and data.shape[2] in (3,4):
ds = ImageData(data=data, value_depth=int(data.shape[2]))
else:
raise ValueError("Unhandled array shape in creating new "
"plot: %s" % str(data.shape))
elif isinstance(data, AbstractDataSource):
ds = data
else:
raise ValueError("Couldn't create datasource for data of "
"type %s" % type(data))
self.datasources[name] = ds
return self.datasources[name]
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
def _color_mapper_changed(self):
for plist in self.plots.values():
for plot in plist:
plot.color_mapper = self.color_mapper
self.invalidate_draw()
def _data_changed(self, old, new):
if old:
old.on_trait_change(self._data_update_handler, "data_changed",
remove=True)
if new:
new.on_trait_change(self._data_update_handler, "data_changed")
def _data_update_handler(self, name, event):
# event should be a dict with keys "added", "removed", and "changed",
# per the comments in AbstractPlotData.
if "removed" in event:
for name in event["removed"]:
del self.datasources[name]
if "added" in event:
for name in event["added"]:
self._get_or_create_datasource(name)
if "changed" in event:
for name in event["changed"]:
if name in self.datasources:
source = self.datasources[name]
source.set_data(self.data.get_data(name))
def _plots_items_changed(self, event):
if self.legend:
self.legend.plots = self.plots
def _index_scale_changed(self, old, new):
if old is None: return
if new == old: return
if not self.range2d: return
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
screen_bounds=self.index_mapper.screen_bounds,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
screen_bounds=self.index_mapper.screen_bounds,
stretch_data=self.index_mapper.stretch_data)
self.index_mapper = imap
for key in self.plots:
for plot in self.plots[key]:
if not isinstance(plot, BaseXYPlot):
raise ValueError("log scale only supported on XY plots")
if self.index_scale == "linear":
imap = LinearMapper(range=plot.index_range,
screen_bounds=plot.index_mapper.screen_bounds,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=plot.index_range,
screen_bounds=plot.index_mapper.screen_bounds,
stretch_data=self.index_mapper.stretch_data)
plot.index_mapper = imap
def _value_scale_changed(self, old, new):
if old is None: return
if new == old: return
if not self.range2d: return
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
screen_bounds=self.value_mapper.screen_bounds,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
screen_bounds=self.value_mapper.screen_bounds,
stretch_data=self.value_mapper.stretch_data)
self.value_mapper = vmap
for key in self.plots:
for plot in self.plots[key]:
if not isinstance(plot, BaseXYPlot):
raise ValueError("log scale only supported on XY plots")
if self.value_scale == "linear":
vmap = LinearMapper(range=plot.value_range,
screen_bounds=plot.value_mapper.screen_bounds,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=plot.value_range,
screen_bounds=plot.value_mapper.screen_bounds,
stretch_data=self.value_mapper.stretch_data)
plot.value_mapper = vmap
def __title_changed(self, old, new):
self._overlay_change_helper(old, new)
def _legend_changed(self, old, new):
self._overlay_change_helper(old, new)
if new:
new.plots = self.plots
def _handle_range_changed(self, name, old, new):
""" Overrides the DataView default behavior.
Primarily changes how the list of renderers is looked up.
"""
mapper = getattr(self, name+"_mapper")
if mapper.range == old:
mapper.range = new
if old is not None:
for datasource in old.sources[:]:
old.remove(datasource)
if new is not None:
new.add(datasource)
range_name = name + "_range"
for renderer in itertools.chain(*six.itervalues(self.plots)):
if hasattr(renderer, range_name):
setattr(renderer, range_name, new)
#------------------------------------------------------------------------
# Property getters and setters
#------------------------------------------------------------------------
def _set_legend_alignment(self, align):
if self.legend:
self.legend.align = align
def _get_legend_alignment(self):
if self.legend:
return self.legend.align
else:
return None
def _set_title(self, text):
self._title.text = text
if text.strip() != "":
self._title.visible = True
else:
self._title.visible = False
def _get_title(self):
return self._title.text
def _set_title_position(self, pos):
if self._title is not None:
self._title.overlay_position = pos
def _get_title_position(self):
if self._title is not None:
return self._title.overlay_position
else:
return None
def _set_title_font(self, font):
old_font = self._title.font
self._title.font = font
self.trait_property_changed("title_font", old_font, font)
def _get_title_font(self):
return self._title.font
| [
"itertools.chain",
"traits.api.Instance",
"traits.api.Property",
"traits.api.Delegate",
"six.itervalues",
"traits.api.Dict",
"numpy.array",
"numpy.linspace",
"chaco.ui.plot_window.PlotWindow",
"traits.api.Int",
"warnings.warn",
"six.moves.map",
"numpy.arange",
"traits.api.List"
] | [((3361, 3387), 'traits.api.Instance', 'Instance', (['AbstractPlotData'], {}), '(AbstractPlotData)\n', (3369, 3387), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((3783, 3798), 'traits.api.Dict', 'Dict', (['Str', 'List'], {}), '(Str, List)\n', (3787, 3798), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((3878, 3906), 'traits.api.Instance', 'Instance', (['AbstractDataSource'], {}), '(AbstractDataSource)\n', (3886, 3906), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((4057, 4083), 'traits.api.Instance', 'Instance', (['AbstractColormap'], {}), '(AbstractColormap)\n', (4065, 4083), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((4293, 4384), 'traits.api.List', 'List', (["['green', 'lightgreen', 'blue', 'lightblue', 'red', 'pink', 'darkgray',\n 'silver']"], {}), "(['green', 'lightgreen', 'blue', 'lightblue', 'red', 'pink', 'darkgray',\n 'silver'])\n", (4297, 4384), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((4462, 4469), 'traits.api.Int', 'Int', (['(-1)'], {}), '(-1)\n', (4465, 4469), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((4497, 4504), 'traits.api.Int', 'Int', (['(-1)'], {}), '(-1)\n', (4500, 4504), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((4532, 4539), 'traits.api.Int', 'Int', (['(-1)'], {}), '(-1)\n', (4535, 4539), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6023, 6033), 'traits.api.Property', 'Property', ([], {}), '()\n', (6031, 6033), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6090, 6100), 'traits.api.Property', 'Property', ([], {}), '()\n', (6098, 6100), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6231, 6241), 'traits.api.Property', 'Property', ([], {}), '()\n', (6239, 6241), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6340, 6386), 'traits.api.Delegate', 'Delegate', (['"""_title"""'], {'prefix': '"""text"""', 'modify': '(True)'}), "('_title', prefix='text', modify=True)\n", (6348, 6386), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6405, 6452), 'traits.api.Delegate', 'Delegate', (['"""_title"""'], {'prefix': '"""color"""', 'modify': '(True)'}), "('_title', prefix='color', modify=True)\n", (6413, 6452), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6471, 6518), 'traits.api.Delegate', 'Delegate', (['"""_title"""'], {'prefix': '"""angle"""', 'modify': '(True)'}), "('_title', prefix='angle', modify=True)\n", (6479, 6518), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6585, 6604), 'traits.api.Instance', 'Instance', (['PlotLabel'], {}), '(PlotLabel)\n', (6593, 6604), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((6650, 6666), 'traits.api.Instance', 'Instance', (['Legend'], {}), '(Legend)\n', (6658, 6666), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((3494, 3522), 'traits.api.Instance', 'Instance', (['AbstractDataSource'], {}), '(AbstractDataSource)\n', (3502, 3522), False, 'from traits.api import Delegate, Dict, Instance, Int, List, Property, Str\n'), ((47950, 48004), 'itertools.chain', 
'itertools.chain', (['*[self.plots[name] for name in names]'], {}), '(*[self.plots[name] for name in names])\n', (47965, 48004), False, 'import itertools\n'), ((48199, 48253), 'itertools.chain', 'itertools.chain', (['*[self.plots[name] for name in names]'], {}), '(*[self.plots[name] for name in names])\n', (48214, 48253), False, 'import itertools\n'), ((30983, 31000), 'numpy.arange', 'arange', (['num_ticks'], {}), '(num_ticks)\n', (30989, 31000), False, 'from numpy import arange, array, ndarray, linspace\n'), ((31114, 31155), 'numpy.linspace', 'linspace', (['bounds[0]', 'bounds[1]', 'num_ticks'], {}), '(bounds[0], bounds[1], num_ticks)\n', (31122, 31155), False, 'from numpy import arange, array, ndarray, linspace\n'), ((36869, 36913), 'six.moves.map', 'sm.map', (['self._get_or_create_datasource', 'data'], {}), '(self._get_or_create_datasource, data)\n', (36875, 36913), True, 'import six.moves as sm\n'), ((40674, 40718), 'six.moves.map', 'sm.map', (['self._get_or_create_datasource', 'data'], {}), '(self._get_or_create_datasource, data)\n', (40680, 40718), True, 'import six.moves as sm\n'), ((37112, 37156), 'six.moves.map', 'sm.map', (['self._get_or_create_datasource', 'data'], {}), '(self._get_or_create_datasource, data)\n', (37118, 37156), True, 'import six.moves as sm\n'), ((49689, 49700), 'numpy.array', 'array', (['data'], {}), '(data)\n', (49694, 49700), False, 'from numpy import arange, array, ndarray, linspace\n'), ((55570, 55596), 'six.itervalues', 'six.itervalues', (['self.plots'], {}), '(self.plots)\n', (55584, 55596), False, 'import six\n'), ((37357, 37401), 'six.moves.map', 'sm.map', (['self._get_or_create_datasource', 'data'], {}), '(self._get_or_create_datasource, data)\n', (37363, 37401), True, 'import six.moves as sm\n'), ((47648, 47707), 'warnings.warn', 'warnings.warn', (['"""Couldn\'t remove datasource from datarange."""'], {}), '("Couldn\'t remove datasource from datarange.")\n', (47661, 47707), False, 'import warnings\n'), ((37592, 37636), 'six.moves.map', 'sm.map', (['self._get_or_create_datasource', 'data'], {}), '(self._get_or_create_datasource, data)\n', (37598, 37636), True, 'import six.moves as sm\n'), ((48661, 48682), 'chaco.ui.plot_window.PlotWindow', 'PlotWindow', ([], {'plot': 'self'}), '(plot=self)\n', (48671, 48682), False, 'from chaco.ui.plot_window import PlotWindow\n'), ((48757, 48778), 'chaco.ui.plot_window.PlotWindow', 'PlotWindow', ([], {'plot': 'self'}), '(plot=self)\n', (48767, 48778), False, 'from chaco.ui.plot_window import PlotWindow\n')] |
# Created by <NAME> (<EMAIL>)
import numpy as np
from collections import namedtuple
def zeros(system, size):
"""
Create an all zeros trajectory.
Parameters
----------
system : System
System for trajectory
size : int
Size of trajectory
"""
obs = np.zeros((size, system.obs_dim))
ctrls = np.zeros((size, system.ctrl_dim))
return Trajectory(system, size, obs, ctrls)
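# A minimal sketch, assuming a System with obs_dim=2 and ctrl_dim=1:
#
#     traj = zeros(system, 10)
#     traj.obs.shape    # (10, 2)
#     traj.ctrls.shape  # (10, 1)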
def empty(system, size):
"""
Create a trajectory with uninitialized states
and controls. If not initialized, states/controls
will be non-deterministic.
Parameters
----------
system : System
System for trajectory
size : int
Size of trajectory
"""
obs = np.empty((size, system.obs_dim))
ctrls = np.empty((size, system.ctrl_dim))
return Trajectory(system, size, obs, ctrls)
def extend(traj, obs, ctrls):
"""
Create a new trajectory which extends an existing trajectory
    by one or more timesteps.
Parameters
----------
traj : Trajectory
Trajectory to extend
obs : numpy array of shape (N, system.obs_dim)
New observations
ctrls : numpy array of shape (N, system.ctrl_dim)
New controls
"""
newobs = np.concatenate([traj.obs, obs])
newctrls = np.concatenate([traj.ctrls, ctrls])
newtraj = Trajectory(traj.system, newobs.shape[0],
newobs, newctrls)
return newtraj
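# A minimal sketch: appending a single timestep. The new obs and ctrls must
# be 2D with the trailing system dimensions, hence the added leading axis:
#
#     traj2 = extend(traj, new_obs[None, :], new_ctrl[None, :])
#     assert len(traj2) == len(traj) + 1
#
# new_obs and new_ctrl are assumed 1D arrays of size obs_dim and ctrl_dim.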
TimeStep = namedtuple("TimeStep", "obs ctrl")
"""
TimeStep represents a particular time step of a trajectory
and is returned by indexing traj[i].
.. py:attribute:: obs
Observation. Numpy array of size system.obs_dim
.. py:attribute:: ctrl
Control. Numpy array of size system.ctrl_dim
"""
class Trajectory:
"""
The Trajectory object represents a discrete-time state and control
trajectory.
"""
def __init__(self, system, size, obs, ctrls):
"""
Parameters
----------
system : System
The corresponding robot system
size : int
            Number of time steps in the trajectory
obs : numpy array of shape (size, system.obs_dim)
Observations at all timesteps
ctrls : numpy array of shape (size, system.ctrl_dim)
Controls at all timesteps.
"""
self._system = system
self._size = size
# Check inputs
if obs.shape != (size, system.obs_dim):
raise ValueError("obs is wrong shape")
if ctrls.shape != (size, system.ctrl_dim):
raise ValueError("ctrls is wrong shape")
self._obs = obs
self._ctrls = ctrls
def __eq__(self, other):
return (self._system == other.system
and self._size == other._size
and np.array_equal(self._obs, other._obs)
and np.array_equal(self._ctrls, other._ctrls))
def __getitem__(self, idx):
if isinstance(idx, tuple):
if (not isinstance(idx[0], slice) and (idx[0] < -self.size
or idx[0] >= self.size)):
raise IndexError("Time index out of range.")
if idx[1] in self._system.observations:
obs_idx = self._system.observations.index(idx[1])
return self._obs[idx[0], obs_idx]
elif idx[1] in self._system.controls:
ctrl_idx = self._system.controls.index(idx[1])
return self._ctrls[idx[0], ctrl_idx]
else:
raise IndexError("Unknown label")
elif isinstance(idx, slice):
#if idx.start < -self.size or idx.stop >= self.size:
# raise IndexError("Time index out of range.")
obs = self._obs[idx, :]
ctrls = self._ctrls[idx, :]
return Trajectory(self._system, obs.shape[0], obs, ctrls)
else:
if idx < -self.size or idx >= self.size:
raise IndexError("Time index out of range.")
return TimeStep(self._obs[idx,:], self._ctrls[idx,:])
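    # Indexing sketch, assuming the system defines an observation "x" and a
    # control "u":
    #
    #     traj[5, "x"]   # scalar: observation "x" at timestep 5
    #     traj[:, "u"]   # 1D array: control "u" at every timestep
    #     traj[2:6]      # a new 4-step Trajectory
    #     traj[-1]       # TimeStep for the final step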
def __setitem__(self, idx, val):
if isinstance(idx, tuple):
if isinstance(idx[0], int):
if idx[0] < -self.size or idx[0] >= self.size:
raise IndexError("Time index out of range.")
if idx[1] in self._system.observations:
obs_idx = self._system.observations.index(idx[1])
self._obs[idx[0], obs_idx] = val
elif idx[1] in self._system.controls:
ctrl_idx = self._system.controls.index(idx[1])
self._ctrls[idx[0], ctrl_idx] = val
else:
raise IndexError("Unknown label")
elif isinstance(idx, int):
raise IndexError("Cannot assign to time steps.")
else:
raise IndexError("Unknown index type")
def __len__(self):
return self._size
def __str__(self):
return "Trajectory, length={}, system={}".format(self._size,self._system)
@property
def system(self):
"""
Get trajectory System object.
"""
return self._system
@property
def size(self):
"""
Number of time steps in trajectory
"""
return self._size
@property
def obs(self):
"""
Get trajectory observations as a numpy array of
shape (size, self.system.obs_dim)
"""
return self._obs
@obs.setter
def obs(self, obs):
if obs.shape != (self._size, self._system.obs_dim):
raise ValueError("obs is wrong shape")
self._obs = obs[:]
@property
def ctrls(self):
"""
Get trajectory controls as a numpy array of
shape (size, self.system.ctrl_dim)
"""
return self._ctrls
@ctrls.setter
def ctrls(self, ctrls):
if ctrls.shape != (self._size, self._system.ctrl_dim):
raise ValueError("ctrls is wrong shape")
self._ctrls = ctrls[:]
| [
"collections.namedtuple",
"numpy.zeros",
"numpy.array_equal",
"numpy.empty",
"numpy.concatenate"
] | [((1450, 1484), 'collections.namedtuple', 'namedtuple', (['"""TimeStep"""', '"""obs ctrl"""'], {}), "('TimeStep', 'obs ctrl')\n", (1460, 1484), False, 'from collections import namedtuple\n'), ((297, 329), 'numpy.zeros', 'np.zeros', (['(size, system.obs_dim)'], {}), '((size, system.obs_dim))\n', (305, 329), True, 'import numpy as np\n'), ((342, 375), 'numpy.zeros', 'np.zeros', (['(size, system.ctrl_dim)'], {}), '((size, system.ctrl_dim))\n', (350, 375), True, 'import numpy as np\n'), ((735, 767), 'numpy.empty', 'np.empty', (['(size, system.obs_dim)'], {}), '((size, system.obs_dim))\n', (743, 767), True, 'import numpy as np\n'), ((780, 813), 'numpy.empty', 'np.empty', (['(size, system.ctrl_dim)'], {}), '((size, system.ctrl_dim))\n', (788, 813), True, 'import numpy as np\n'), ((1251, 1282), 'numpy.concatenate', 'np.concatenate', (['[traj.obs, obs]'], {}), '([traj.obs, obs])\n', (1265, 1282), True, 'import numpy as np\n'), ((1298, 1333), 'numpy.concatenate', 'np.concatenate', (['[traj.ctrls, ctrls]'], {}), '([traj.ctrls, ctrls])\n', (1312, 1333), True, 'import numpy as np\n'), ((2789, 2826), 'numpy.array_equal', 'np.array_equal', (['self._obs', 'other._obs'], {}), '(self._obs, other._obs)\n', (2803, 2826), True, 'import numpy as np\n'), ((2847, 2888), 'numpy.array_equal', 'np.array_equal', (['self._ctrls', 'other._ctrls'], {}), '(self._ctrls, other._ctrls)\n', (2861, 2888), True, 'import numpy as np\n')] |
import os
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
import numpy as np
"""
Here we evaluate the fooling images against two models, the original CNN and
a differently structured one, and check whether both are fooled equally.
"""
model_folder = os.path.join(os.getcwd(), 'saved_models')
data_folder = os.path.join(os.getcwd(), 'saved_datasets')
model_1_path = "mnist_cnn.h5"
model_1_path = os.path.join(model_folder, model_1_path)
model_2_path = "mnist_cnn_diff.h5"
model_2_path = os.path.join(model_folder, model_2_path)
data_path = "mnist.npz"
data_path = os.path.join(data_folder, data_path)
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
if not os.path.isdir(data_folder):
os.mkdir(data_folder)
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data(path=data_path)
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
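# Binarize the pixel values: intensities below the mid-gray threshold of 127
# map to 0 and everything else maps to 1.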
x_train[x_train < 127] = 0
x_train[x_train >= 127] = 1
x_test[x_test < 127] = 0
x_test[x_test >= 127] = 1
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_orig_train = np.copy(y_train)
y_orig_test = np.copy(y_test)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if not os.path.isdir(model_folder):
os.mkdir(model_folder)
try:
model_1 = load_model(model_1_path)
except OSError:
model_1 = Sequential()
model_1.add(
Conv2D(
32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape
)
)
model_1.add(Conv2D(64, (3, 3), activation='relu'))
model_1.add(MaxPooling2D(pool_size=(2, 2)))
model_1.add(Dropout(0.25))
model_1.add(Flatten())
model_1.add(Dense(128, activation='relu'))
model_1.add(Dropout(0.5))
model_1.add(Dense(num_classes, activation='softmax'))
model_1.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy']
)
model_1.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test)
)
score = model_1.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model_1.save(model_1_path)
try:
model_2 = load_model(model_2_path)
except OSError:
model_2 = Sequential()
model_2.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model_2.add(Conv2D(64, (3, 3), activation='relu'))
model_2.add(Conv2D(128, (3, 3), activation='relu'))
model_2.add(Conv2D(64, (3, 3), activation='relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Dropout(0.5))
model_2.add(Flatten())
model_2.add(Dense(128, activation='relu'))
model_2.add(Dropout(0.5))
model_2.add(Dense(64, activation='relu'))
model_2.add(Dropout(0.5))
model_2.add(Dense(num_classes, activation='softmax'))
model_2.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy']
)
model_2.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test)
)
score = model_2.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model_2.save(model_2_path)
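# Gather every fooling image from the MNIST_Fooling directory. Each .npy file
# is assumed to hold ten 28x28 images, one per target digit class, so the
# (10, 28, 28, 1) reshape yields one image per class in digit order.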
fooling_images = []
fooling_path = os.path.join(os.path.dirname(os.getcwd()), 'MNIST_Fooling')
for root, dirs, filenames in os.walk(fooling_path):
for f in filenames:
temp = np.load(os.path.join(fooling_path, f))
temp = temp.reshape((10, 28, 28, 1))
# Add it to the new array
for x in temp:
fooling_images.append(x)
fooling_images = np.array(fooling_images)
pred1 = model_1.predict(fooling_images)
pred2 = model_2.predict(fooling_images)
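# An image counts as fooling a model only if the model assigns at least 0.7
# probability to its intended digit class (index i % 10); the error counters
# below tally the images that fall short of that threshold.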
errors_1 = 0
for i, x in enumerate(pred1):
if x[i % 10] < 0.7:
print(i)
errors_1 += 1
errors_2 = 0
for i, x in enumerate(pred2):
if x[i % 10] < 0.7:
errors_2 += 1
print("Error by the original model:", errors_1)
print("Error by the different model:", errors_2)
| [
"numpy.copy",
"keras.layers.Conv2D",
"keras.backend.image_data_format",
"keras.models.load_model",
"keras.datasets.mnist.load_data",
"keras.layers.MaxPooling2D",
"keras.layers.Flatten",
"os.path.join",
"keras.utils.to_categorical",
"os.getcwd",
"numpy.array",
"keras.models.Sequential",
"os.p... | [((545, 585), 'os.path.join', 'os.path.join', (['model_folder', 'model_1_path'], {}), '(model_folder, model_1_path)\n', (557, 585), False, 'import os\n'), ((636, 676), 'os.path.join', 'os.path.join', (['model_folder', 'model_2_path'], {}), '(model_folder, model_2_path)\n', (648, 676), False, 'import os\n'), ((713, 749), 'os.path.join', 'os.path.join', (['data_folder', 'data_path'], {}), '(data_folder, data_path)\n', (725, 749), False, 'import os\n'), ((1012, 1043), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {'path': 'data_path'}), '(path=data_path)\n', (1027, 1043), False, 'from keras.datasets import mnist\n'), ((1752, 1768), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (1759, 1768), True, 'import numpy as np\n'), ((1783, 1798), 'numpy.copy', 'np.copy', (['y_test'], {}), '(y_test)\n', (1790, 1798), True, 'import numpy as np\n'), ((1809, 1857), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1835, 1857), False, 'import keras\n'), ((1867, 1914), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (1893, 1914), False, 'import keras\n'), ((4254, 4275), 'os.walk', 'os.walk', (['fooling_path'], {}), '(fooling_path)\n', (4261, 4275), False, 'import os\n'), ((4513, 4537), 'numpy.array', 'np.array', (['fooling_images'], {}), '(fooling_images)\n', (4521, 4537), True, 'import numpy as np\n'), ((412, 423), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (421, 423), False, 'import os\n'), ((468, 479), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (477, 479), False, 'import os\n'), ((859, 885), 'os.path.isdir', 'os.path.isdir', (['data_folder'], {}), '(data_folder)\n', (872, 885), False, 'import os\n'), ((891, 912), 'os.mkdir', 'os.mkdir', (['data_folder'], {}), '(data_folder)\n', (899, 912), False, 'import os\n'), ((1048, 1069), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1067, 1069), True, 'from keras import backend as K\n'), ((1923, 1950), 'os.path.isdir', 'os.path.isdir', (['model_folder'], {}), '(model_folder)\n', (1936, 1950), False, 'import os\n'), ((1956, 1978), 'os.mkdir', 'os.mkdir', (['model_folder'], {}), '(model_folder)\n', (1964, 1978), False, 'import os\n'), ((1999, 2023), 'keras.models.load_model', 'load_model', (['model_1_path'], {}), '(model_1_path)\n', (2009, 2023), False, 'from keras.models import load_model\n'), ((3016, 3040), 'keras.models.load_model', 'load_model', (['model_2_path'], {}), '(model_2_path)\n', (3026, 3040), False, 'from keras.models import load_model\n'), ((2054, 2066), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2064, 2066), False, 'from keras.models import Sequential\n'), ((3071, 3083), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3081, 3083), False, 'from keras.models import Sequential\n'), ((4194, 4205), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4203, 4205), False, 'import os\n'), ((2092, 2166), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (2098, 2166), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((2235, 2272), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2241, 2272), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((2290, 2320), 'keras.layers.MaxPooling2D', 
'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2302, 2320), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((2338, 2351), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2345, 2351), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2369, 2378), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2376, 2378), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2396, 2425), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2401, 2425), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2443, 2455), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2450, 2455), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2473, 2513), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (2478, 2513), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3100, 3162), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, (3, 3), activation='relu', input_shape=input_shape)\n", (3106, 3162), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3180, 3217), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3186, 3217), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3235, 3273), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (3241, 3273), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3291, 3328), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3297, 3328), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3346, 3376), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3358, 3376), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3394, 3406), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3401, 3406), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3424, 3433), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3431, 3433), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3451, 3480), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3456, 3480), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3498, 3510), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3505, 3510), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3528, 3556), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (3533, 3556), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3574, 3586), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3581, 3586), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3604, 3644), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (3609, 3644), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4324, 4353), 'os.path.join', 'os.path.join', (['fooling_path', 'f'], {}), '(fooling_path, f)\n', (4336, 4353), False, 'import os\n'), ((2607, 2634), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (2632, 2634), False, 
'import keras\n'), ((3738, 3765), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (3763, 3765), False, 'import keras\n')] |
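The row above is truncated, but its extracted calls describe a standard Keras MNIST CNN. A minimal reconstruction of the model those offsets point to follows; num_classes, input_shape, and the compile arguments are assumptions, not visible in the row.

import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten

num_classes = 10            # assumption: MNIST digit classes
input_shape = (28, 28, 1)   # assumption: channels-last MNIST images

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',   # assumption: loss not visible in the row
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])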
#!/usr/bin/env python2
import logging
import argparse
import numpy as np
import pandas as pd
import cv2
def read_motion_vector(filename):
""" read the motion vector file under csv format
:param filename: input path to input csv file
:return: data under DataFrame format (see pandas lib)
"""
logging.info('reading vectors from {}'.format(filename))
return pd.read_csv(filename)
def write_motion_vector(filename, data):
"""
    Write a DataFrame to a CSV file.
    :param filename: path to the output CSV file
    :param data: data to be written, as a DataFrame
"""
logging.info('writing vectors to {}'.format(filename))
data.to_csv(filename, index=False)
def filter_out_static(input_data, reprojection_threshold=2.0):
"""
    :param input_data: input DataFrame of motion vectors for all frames
    :param reprojection_threshold: RANSAC reprojection threshold in pixels
    :return: list of booleans, True when the corresponding vector has moved
"""
frame_count = input_data['framenum'].max()
motion_all_mask = []
for frame_number in range(1, frame_count+1):
logging.info('loading frame {}'.format(frame_number))
        # select motion vectors for the current frame only
info_frame = input_data[input_data['framenum'] == frame_number]
if info_frame.empty: # happens for keyframes, just skip it
logging.info('no vector info for frame {}'.format(frame_number))
continue
# reformat source and destination coordinates
src_pts = np.float32(info_frame[['srcx', 'srcy']]).reshape(-1, 1, 2)
dst_pts = np.float32(info_frame[['dstx', 'dsty']]).reshape(-1, 1, 2)
# compute 2D transformation using RANSAC method
M, homography_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, reprojection_threshold)
        # objects in motion are the ones that do not fit the homography,
        # i.e. the RANSAC outliers, so a logical not of the inlier mask is enough
motion_mask = np.logical_not(homography_mask).ravel().tolist()
# then accumulate the results
motion_all_mask += motion_mask
return motion_all_mask
def main():
parser = argparse.ArgumentParser(description='create a video of the vector field.')
parser.add_argument('input_path', help='input path to motion vector file (.csv)')
    parser.add_argument('output_path', help='output path to the filtered motion vector file (.csv)')
parser.add_argument('-t', '--threshold', type=float, default=2.0, help='threshold value in pixel for motion detection')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose')
args = parser.parse_args()
# logging handling
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
input_motion_vectors = read_motion_vector(args.input_path)
mask = filter_out_static(input_motion_vectors, args.threshold)
output_motion_vectors = input_motion_vectors[mask]
write_motion_vector(args.output_path, output_motion_vectors)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"argparse.ArgumentParser",
"pandas.read_csv",
"cv2.findHomography",
"numpy.logical_not",
"numpy.float32"
] | [((381, 402), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (392, 402), True, 'import pandas as pd\n'), ((2100, 2174), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""create a video of the vector field."""'}), "(description='create a video of the vector field.')\n", (2123, 2174), False, 'import argparse\n'), ((1693, 1765), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', 'reprojection_threshold'], {}), '(src_pts, dst_pts, cv2.RANSAC, reprojection_threshold)\n', (1711, 1765), False, 'import cv2\n'), ((1471, 1511), 'numpy.float32', 'np.float32', (["info_frame[['srcx', 'srcy']]"], {}), "(info_frame[['srcx', 'srcy']])\n", (1481, 1511), True, 'import numpy as np\n'), ((1548, 1588), 'numpy.float32', 'np.float32', (["info_frame[['dstx', 'dsty']]"], {}), "(info_frame[['dstx', 'dsty']])\n", (1558, 1588), True, 'import numpy as np\n'), ((2617, 2636), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2634, 2636), False, 'import logging\n'), ((1919, 1950), 'numpy.logical_not', 'np.logical_not', (['homography_mask'], {}), '(homography_mask)\n', (1933, 1950), True, 'import numpy as np\n')] |
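A minimal, self-contained sketch of the RANSAC-based motion filter used in the row above; the sample vectors are made up for illustration (five vectors translated by (1, 1), one genuine mover).

import numpy as np
import pandas as pd
import cv2

vectors = pd.DataFrame({
    'framenum': [1] * 6,
    'srcx': [0., 0., 10., 10., 20., 5.],
    'srcy': [0., 10., 0., 10., 5., 5.],
    'dstx': [1., 1., 11., 11., 21., 50.],   # last vector is a genuine mover
    'dsty': [1., 11., 1., 11., 6., 60.],
})
src = np.float32(vectors[['srcx', 'srcy']]).reshape(-1, 1, 2)
dst = np.float32(vectors[['dstx', 'dsty']]).reshape(-1, 1, 2)
# fit a single 2D homography with RANSAC; inliers are the static background
M, inlier_mask = cv2.findHomography(src, dst, cv2.RANSAC, 2.0)
moving = np.logical_not(inlier_mask).ravel().astype(bool)
print(vectors[moving])  # only the outlier vector survives the filter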
import numpy as np
import pandas.compat as compat
import pandas as pd
class TablePlotter(object):
"""
    Lay out some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
        Calculate table shape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""
        Calculate the number of horizontal and vertical cells required by left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
class _WritableDoc(type):
# Remove this when Python2 support is dropped
# __doc__ is not mutable for new-style classes in Python2, which means
# we can't use @Appender to share class docstrings. This can be used
# with `add_metaclass` to make cls.__doc__ mutable.
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
| [
"pandas.compat.iteritems",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"pandas.plotting.table",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.concat",
"matplotlib.pyplot.show"
] | [((6185, 6256), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [10, 11, 12], 'B': [20, 21, 22], 'C': [30, 31, 32]}"], {}), "({'A': [10, 11, 12], 'B': [20, 21, 22], 'C': [30, 31, 32]})\n", (6197, 6256), True, 'import pandas as pd\n'), ((6315, 6359), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [10, 12], 'C': [30, 32]}"], {}), "({'A': [10, 12], 'C': [30, 32]})\n", (6327, 6359), True, 'import pandas as pd\n'), ((6484, 6494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6492, 6494), True, 'import matplotlib.pyplot as plt\n'), ((6506, 6550), 'pandas.DataFrame', 'pd.DataFrame', (["{'X': [10, 12], 'Z': [30, 32]}"], {}), "({'X': [10, 12], 'Z': [30, 32]})\n", (6518, 6550), True, 'import pandas as pd\n'), ((6684, 6694), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6692, 6694), True, 'import matplotlib.pyplot as plt\n'), ((6706, 6797), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'), (2, 'C')]"], {}), "([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'),\n (2, 'C')])\n", (6731, 6797), True, 'import pandas as pd\n'), ((6841, 6888), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[(1, 'A'), (1, 'B')]"], {}), "([(1, 'A'), (1, 'B')])\n", (6866, 6888), True, 'import pandas as pd\n'), ((6899, 6977), 'pandas.DataFrame', 'pd.DataFrame', (["{'v1': [1, 2, 3, 4, 5, 6], 'v2': [5, 6, 7, 8, 9, 10]}"], {'index': 'idx'}), "({'v1': [1, 2, 3, 4, 5, 6], 'v2': [5, 6, 7, 8, 9, 10]}, index=idx)\n", (6911, 6977), True, 'import pandas as pd\n'), ((7088, 7098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7096, 7098), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2307, 2324), True, 'import matplotlib.pyplot as plt\n'), ((5194, 5223), 'pandas.plotting.table', 'plotting.table', (['ax', 'df'], {'loc': '(9)'}), '(ax, df, loc=9)\n', (5208, 5223), True, 'import pandas.plotting as plotting\n'), ((5394, 5426), 'pandas.compat.iteritems', 'compat.iteritems', (["props['celld']"], {}), "(props['celld'])\n", (5410, 5426), True, 'import pandas.compat as compat\n'), ((6408, 6429), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (6417, 6429), True, 'import pandas as pd\n'), ((6599, 6628), 'pandas.concat', 'pd.concat', (['[df1, df3]'], {'axis': '(1)'}), '([df1, df3], axis=1)\n', (6608, 6628), True, 'import pandas as pd\n'), ((2825, 2859), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:, max_left_cols:]'], {}), '(gs[:, max_left_cols:])\n', (2836, 2859), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3190), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', 'hcells'], {}), '(1, hcells)\n', (3179, 3190), True, 'import matplotlib.gridspec as gridspec\n'), ((3500, 3522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, i:]'], {}), '(gs[0, i:])\n', (3511, 3522), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4681), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {}), '(values)\n', (4673, 4681), True, 'import pandas as pd\n'), ((4743, 4768), 'pandas.concat', 'pd.concat', (['[col_df, data]'], {}), '([col_df, data])\n', (4752, 4768), True, 'import pandas as pd\n'), ((3128, 3144), 'numpy.max', 'np.max', (['max_rows'], {}), '(max_rows)\n', (3134, 3144), True, 'import numpy as np\n')] |
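TablePlotter above ultimately delegates to pandas.plotting.table; a minimal standalone sketch of that primitive (the frame and figure size are made up):

import matplotlib.pyplot as plt
import pandas as pd
import pandas.plotting as plotting

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
fig, ax = plt.subplots(figsize=(2, 1))
ax.axis('off')                       # hide the axes, keep only the table
tb = plotting.table(ax, df, loc=9)   # loc=9 is 'upper center', as in _make_table above
tb.set_fontsize(8)
plt.show()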
import numpy as np
def score(input):
    if input[2] <= 2.6:
        var0 = np.asarray([1.0, 0.0, 0.0])
    else:
        if input[2] <= 4.8500004:
            if input[3] <= 1.6500001:
                var0 = np.asarray([0.0, 1.0, 0.0])
            else:
                var0 = np.asarray([0.0, 0.3333333333333333, 0.6666666666666666])
        else:
            if input[3] <= 1.75:
                var0 = np.asarray([0.0, 0.42857142857142855, 0.5714285714285714])
            else:
                var0 = np.asarray([0.0, 0.0, 1.0])
return var0
| [
"numpy.asarray"
] | [((80, 107), 'numpy.asarray', 'np.asarray', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (90, 107), True, 'import numpy as np\n'), ((221, 248), 'numpy.asarray', 'np.asarray', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (231, 248), True, 'import numpy as np\n'), ((290, 347), 'numpy.asarray', 'np.asarray', (['[0.0, 0.3333333333333333, 0.6666666666666666]'], {}), '([0.0, 0.3333333333333333, 0.6666666666666666])\n', (300, 347), True, 'import numpy as np\n'), ((422, 480), 'numpy.asarray', 'np.asarray', (['[0.0, 0.42857142857142855, 0.5714285714285714]'], {}), '([0.0, 0.42857142857142855, 0.5714285714285714])\n', (432, 480), True, 'import numpy as np\n'), ((522, 549), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (532, 549), True, 'import numpy as np\n')] |
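A quick sanity check of the exported tree above; the feature order (sepal length, sepal width, petal length, petal width) and the iris class labels are assumptions inferred from the split thresholds.

# assumes score() from the row above and numpy are in scope
print(score([5.1, 3.5, 1.4, 0.2]))  # petal length <= 2.6 -> [1. 0. 0.] (setosa)
print(score([6.3, 3.3, 6.0, 2.5]))  # petal length > 4.85, petal width > 1.75 -> [0. 0. 1.] (virginica)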
import os
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import paths
import counterfactuals.infrastructurefunctions as infr
import counterfactuals.infrastructureequilibrium as ie
# %%
avg_price_elasts = np.array([-4., -2.5, -1.8])
sigmas = np.array([0., 0.2, 0.4, 0.6, 0.8, 0.9])
# %%
# Define functions to load results
p_stars = lambda x,y: np.load(f"{paths.arrays_path}p_stars_e{x}_n{y}.npy")
R_stars = lambda x,y: np.load(f"{paths.arrays_path}R_stars_e{x}_n{y}.npy")
q_stars = lambda x,y: np.load(f"{paths.arrays_path}q_stars_e{x}_n{y}.npy")
cs_by_type = lambda x,y: np.load(f"{paths.arrays_path}cs_by_type_e{x}_n{y}.npy")
cs = lambda x,y: np.load(f"{paths.arrays_path}cs_e{x}_n{y}.npy")
ps = lambda x,y: np.load(f"{paths.arrays_path}ps_e{x}_n{y}.npy")
ts = lambda x,y: np.load(f"{paths.arrays_path}ts_e{x}_n{y}.npy")
partial_elasts = lambda x,y: np.load(f"{paths.arrays_path}partial_elasts_e{x}_n{y}.npy")
full_elasts = lambda x,y: np.load(f"{paths.arrays_path}full_elasts_e{x}_n{y}.npy")
partial_Pif_partial_bf = lambda x,y: np.load(f"{paths.arrays_path}partial_Pif_partial_bf_e{x}_n{y}.npy")
partial_Pif_partial_b = lambda x,y: np.load(f"{paths.arrays_path}partial_Pif_partial_b_e{x}_n{y}.npy")
partial_CS_partial_b = lambda x,y: np.load(f"{paths.arrays_path}partial_CS_partial_b_e{x}_n{y}.npy")
p_stars_se = lambda x,y: np.load(f"{paths.arrays_path}p_stars_se_e{x}_n{y}.npy")
R_stars_se = lambda x,y: np.load(f"{paths.arrays_path}R_stars_se_e{x}_n{y}.npy")
q_stars_se = lambda x,y: np.load(f"{paths.arrays_path}q_stars_se_e{x}_n{y}.npy")
cs_by_type_se = lambda x,y: np.load(f"{paths.arrays_path}cs_by_type_se_e{x}_n{y}.npy")
cs_se = lambda x,y: np.load(f"{paths.arrays_path}cs_se_e{x}_n{y}.npy")
ps_se = lambda x,y: np.load(f"{paths.arrays_path}ps_se_e{x}_n{y}.npy")
ts_se = lambda x,y: np.load(f"{paths.arrays_path}ts_se_e{x}_n{y}.npy")
partial_elasts_se = lambda x,y: np.load(f"{paths.arrays_path}partial_elasts_se_e{x}_n{y}.npy")
full_elasts_se = lambda x,y: np.load(f"{paths.arrays_path}full_elasts_se_e{x}_n{y}.npy")
partial_Pif_partial_bf_se = lambda x,y: np.load(f"{paths.arrays_path}partial_Pif_partial_bf_se_e{x}_n{y}.npy")
partial_Pif_partial_b_se = lambda x,y: np.load(f"{paths.arrays_path}partial_Pif_partial_b_se_e{x}_n{y}.npy")
partial_CS_partial_b_se = lambda x,y: np.load(f"{paths.arrays_path}partial_CS_partial_b_se_e{x}_n{y}.npy")
# %%
# Define common graph features
num_firms_to_simulate = 6
num_firms_array = np.arange(num_firms_to_simulate, dtype=int) + 1
elast_ids = np.array([1, 2])[::-1]
alpha = 0.6
lw = 3.
# %%
# Plot effect of number of firms
fig, axs = plt.subplots(elast_ids.shape[0], 4, figsize=(15,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
# dlim = 2,000 prices
axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0], color="black", lw=lw, alpha=alpha)
axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0] + 1.96 * p_stars_se(elast_id,3)[:,0], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0] - 1.96 * p_stars_se(elast_id,3)[:,0], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].set_xlabel("number of firms")
axs[i,0].set_ylabel("$p_{j}^{*}$ (in \u20ac)")
# dlim = 10,000 prices
axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1], color="black", lw=lw, alpha=alpha)
axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1] + 1.96 * p_stars_se(elast_id,3)[:,1], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1] - 1.96 * p_stars_se(elast_id,3)[:,1], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].set_xlabel("number of firms")
axs[i,1].set_ylabel("$p_{j}^{*}$ (in \u20ac)")
# investment
axs[i,2].plot(num_firms_array, R_stars(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=lw, alpha=alpha)
axs[i,2].plot(num_firms_array, R_stars(elast_id,3) + 1.96 * R_stars_se(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].plot(num_firms_array, R_stars(elast_id,3) - 1.96 * R_stars_se(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].set_xlabel("number of firms")
axs[i,2].set_ylabel("$R_{f}^{*}$ (in km)")
# download speeds
axs[i,3].plot(num_firms_array, q_stars(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,3].plot(num_firms_array, q_stars(elast_id,3) + 1.96 * q_stars_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,3].plot(num_firms_array, q_stars(elast_id,3) - 1.96 * q_stars_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,3].set_xlabel("number of firms")
axs[i,3].set_ylabel("$q_{f}^{*}$ (in Mbps)")
# Set titles
fontsize = 13.5
pad = 14
cols = ["2$\,$000 MB plan prices", "10$\,$000 MB plan prices", "investment", "download speeds"]
for ax, col in zip(axs[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
min_y_p = np.min(np.concatenate(tuple([p_stars(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_p = np.max(np.concatenate(tuple([p_stars(elast_id,3) for elast_id in elast_ids]))) + 3.
min_y_R = np.min(np.concatenate(tuple([R_stars(elast_id,3) for elast_id in elast_ids]))) - 0.1
max_y_R = np.max(np.concatenate(tuple([R_stars(elast_id,3) for elast_id in elast_ids]))) + 0.1
min_y_q = np.min(np.concatenate(tuple([q_stars(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_q = np.max(np.concatenate(tuple([q_stars(elast_id,3) for elast_id in elast_ids]))) + 5.
for i, elast_id in enumerate(elast_ids):
for j in range(2): # first two columns
axs[i,j].set_ylim((min_y_p, max_y_p))
axs[i,2].set_ylim((min_y_R, max_y_R))
axs[i,3].set_ylim((min_y_q, max_y_q))
for j in range(4): # all columns
axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_variables.pdf", bbox_inches = "tight")
# %%
# Plot elasticities
fig, axs = plt.subplots(elast_ids.shape[0], 2, figsize=(8,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
# dlim = 2,000 elasticities
axs[i,0].plot(num_firms_array, partial_elasts(elast_id,3)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0], lw=lw, alpha=alpha, label="partial")
axs[i,0].plot(num_firms_array, full_elasts(elast_id,3)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1], lw=lw, alpha=alpha, label="full")
axs[i,0].set_xlabel("number of firms")
axs[i,0].legend(loc="lower left")
# dlim = 10,000 elasticities
axs[i,1].plot(num_firms_array, partial_elasts(elast_id,3)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0], lw=lw, alpha=alpha, label="partial")
axs[i,1].plot(num_firms_array, full_elasts(elast_id,3)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1], lw=lw, alpha=alpha, label="full")
axs[i,1].set_xlabel("number of firms")
axs[i,1].legend(loc="lower left")
# Set titles
fontsize = 13.5
pad = 14
cols = ["2$\,$000 MB plan", "10$\,$000 MB plan"]
for ax, col in zip(axs[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
min_y = np.min(np.concatenate(tuple([full_elasts(elast_id,3) for elast_id in elast_ids] + [partial_elasts(elast_id,3) for elast_id in elast_ids]))) - 0.3
max_y = np.max(np.concatenate(tuple([full_elasts(elast_id,3) for elast_id in elast_ids] + [partial_elasts(elast_id,3) for elast_id in elast_ids]))) + 0.3
for i, elast_id in enumerate(elast_ids):
for j in range(2): # all columns
axs[i,j].set_ylim((min_y, max_y))
axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_elasticities.pdf", bbox_inches = "tight")
# %%
# Plot bw derivatives
fig, axs = plt.subplots(elast_ids.shape[0], 3, figsize=(11,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
# partial_Pif_partial_bf
axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3) + 1.96 * partial_Pif_partial_bf_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3) - 1.96 * partial_Pif_partial_bf_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].set_xlabel("number of firms")
axs[i,0].set_ylabel("\u20ac per person in market / MHz")
# partial_Pif_partial_b
axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3) + 1.96 * partial_Pif_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3) - 1.96 * partial_Pif_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].set_xlabel("number of firms")
axs[i,1].set_ylabel("\u20ac per person in market / MHz")
# partial_CS_partial_b
axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3) + 1.96 * partial_CS_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3) - 1.96 * partial_CS_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].set_xlabel("number of firms")
axs[i,2].set_ylabel("\u20ac per person in market / MHz")
# Set titles
fontsize = 13.5
pad = 14
cols = ["$\\frac{\\partial \\Pi_{f}}{\\partial b_{f}}$", "$\\frac{\\partial \\Pi_{f}}{\\partial b}$", "$\\frac{\\partial CS}{\\partial b}$"]
for ax, col in zip(axs[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size=fontsize + 3., ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
min_y_Pif_bf = np.min(np.concatenate(tuple([partial_Pif_partial_bf(elast_id,3) for elast_id in elast_ids]))) - 0.005
max_y_Pif_bf = np.max(np.concatenate(tuple([partial_Pif_partial_bf(elast_id,3) for elast_id in elast_ids]))) + 0.008
min_y_Pif_b = np.min(np.concatenate(tuple([partial_Pif_partial_b(elast_id,3) for elast_id in elast_ids]))) - 0.002
max_y_Pif_b = np.max(np.concatenate(tuple([partial_Pif_partial_b(elast_id,3) for elast_id in elast_ids]))) + 0.002
min_y_CS_b = np.min(np.concatenate(tuple([partial_CS_partial_b(elast_id,3) for elast_id in elast_ids]))) - 0.02
max_y_CS_b = np.max(np.concatenate(tuple([partial_CS_partial_b(elast_id,3) for elast_id in elast_ids]))) + 0.03
for i, elast_id in enumerate(elast_ids):
axs[i,0].set_ylim((min_y_Pif_bf, max_y_Pif_bf))
axs[i,1].set_ylim((min_y_Pif_b, max_y_Pif_b))
axs[i,2].set_ylim((min_y_CS_b, max_y_CS_b))
for j in range(3):
axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_bw_deriv.pdf", bbox_inches = "tight")
# %%
# Plot welfare for number of firms
fig, axs = plt.subplots(elast_ids.shape[0], 3, figsize=(11,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
# consumer surplus
axs[i,0].plot(num_firms_array, cs(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,0].plot(num_firms_array, cs(elast_id,3) + 1.96 * cs_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].plot(num_firms_array, cs(elast_id,3) - 1.96 * cs_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,0].axvline(x=num_firms_array[np.argmax(cs(elast_id,3))], color="black", linestyle="--", alpha=0.25)
axs[i,0].set_xlabel("number of firms")
axs[i,0].set_ylabel("\u20ac")
# producer surplus
axs[i,1].plot(num_firms_array, ps(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,1].plot(num_firms_array, ps(elast_id,3) + 1.96 * ps_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].plot(num_firms_array, ps(elast_id,3) - 1.96 * ps_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,1].axvline(x=num_firms_array[np.argmax(ps(elast_id,3))], color="black", linestyle="--", alpha=0.25)
axs[i,1].set_xlabel("number of firms")
axs[i,1].set_ylabel("\u20ac")
# total surplus
axs[i,2].plot(num_firms_array, ts(elast_id,3), color="black", lw=lw, alpha=alpha)
axs[i,2].plot(num_firms_array, ts(elast_id,3) + 1.96 * ts_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].plot(num_firms_array, ts(elast_id,3) - 1.96 * ts_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,2].axvline(x=num_firms_array[np.argmax(ts(elast_id,3))], color="black", linestyle="--", alpha=0.25)
axs[i,2].set_xlabel("number of firms")
axs[i,2].set_ylabel("\u20ac")
# Set titles
fontsize = 13.5
pad = 14
cols = ["consumer surplus", "producer surplus", "total surplus"]
for ax, col in zip(axs[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
min_y_cs = np.min(np.concatenate(tuple([cs(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_cs = np.max(np.concatenate(tuple([cs(elast_id,3) for elast_id in elast_ids]))) + 20.
min_y_ps = np.min(np.concatenate(tuple([ps(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_ps = np.max(np.concatenate(tuple([ps(elast_id,3) for elast_id in elast_ids]))) + 5.
min_y_ts = np.min(np.concatenate(tuple([ts(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_ts = np.max(np.concatenate(tuple([ts(elast_id,3) for elast_id in elast_ids]))) + 15.
for i, elast_id in enumerate(elast_ids):
axs[i,0].set_ylim((min_y_cs, max_y_cs))
axs[i,1].set_ylim((min_y_ps, max_y_ps))
axs[i,2].set_ylim((min_y_ts, max_y_ts))
for j in range(3):
axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_welfare.pdf", bbox_inches = "tight")
# %%
# Plot consumer surplus by type for number of firms
fig, axs = plt.subplots(elast_ids.shape[0], 5, figsize=(15,2.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
for j in range(5):
axs[i,j].plot(num_firms_array, cs_by_type(elast_id,3)[:,2*j], color="black", lw=lw, alpha=alpha)
axs[i,j].plot(num_firms_array, cs_by_type(elast_id,3)[:,2*j] + 1.96 * cs_by_type_se(elast_id,3)[:,2*j], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,j].plot(num_firms_array, cs_by_type(elast_id,3)[:,2*j] - 1.96 * cs_by_type_se(elast_id,3)[:,2*j], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
axs[i,j].axvline(x=num_firms_array[np.argmax(cs_by_type(elast_id,3)[:,2*j])], color="black", linestyle="--", alpha=0.25)
axs[i,j].set_xlabel("number of firms")
axs[i,j].set_ylabel("\u20ac")
# Set titles
fontsize = 13.5
pad = 14
cols = [f"{((2*i)+1)*10}th percentile" for i in range(5)]
for ax, col in zip(axs[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size=fontsize, ha='right', va='center', weight="bold")
for i, elast_id in enumerate(elast_ids):
for j in range(5):
axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_cs_by_income.pdf", bbox_inches = "tight")
# %%
# Plot effect of number of firms
num_firms_to_simulate = 6
num_firms_array = np.arange(num_firms_to_simulate, dtype=int) + 1
fig, axs = plt.subplots(1, 4, figsize=(14,4), sharex=True)
alpha = 0.6
lw = 3.
min_y = np.min(np.concatenate((p_stars(1,0), p_stars(1,1), p_stars(1,2), p_stars(1,3)))) - 2.5
max_y = np.max(np.concatenate((p_stars(1,0), p_stars(1,1), p_stars(1,2), p_stars(1,3)))) + 5.
for i in range(5):
axs[0].plot(num_firms_array, p_stars(1,i)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
# custom_lines = [Line2D([0], [0], color="black", lw=1.5),
# Line2D([0], [0], color="black", lw=1.5, ls="--")]
# axs[0].legend(custom_lines, ["$\\bar{d} = 2\\,000$ MB", "$\\bar{d} = 10\\,000$ MB"], loc="upper right")
axs[0].set_xlabel("number of firms")
axs[0].set_ylabel("$p^{*}$ (in \u20ac)")
axs[0].set_ylim((min_y, max_y))
axs[0].set_title("$\\bar{d} = 2\\,000$ MB plan prices", fontsize=12)
for i in range(5):
axs[1].plot(num_firms_array, p_stars(1,i)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[1].set_xlabel("number of firms")
axs[1].set_ylabel("$p^{*}$ (in \u20ac)")
axs[1].set_ylim((min_y, max_y))
axs[1].set_title("$\\bar{d} = 10\\,000$ MB plan prices", fontsize=12)
for i in range(5):
axs[2].plot(num_firms_array, num_firms_array * infr.num_stations(R_stars(1,i), 16.299135), color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], label=f"{sigmas[i]}", lw=lw, alpha=alpha)
axs[2].set_xlabel("number of firms")
axs[2].set_ylabel("total number of stations")
axs[2].set_title("investment", fontsize=12)
for i in range(5):
axs[3].plot(num_firms_array, q_stars(1,i), color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[3].set_xlabel("number of firms")
axs[3].set_ylabel("$q^{*}$ (in Mbps)")
axs[3].set_title("download speeds", fontsize=12)
fig.legend(loc="center right", ncol=1, title="Nesting Parameters", fontsize=12, bbox_to_anchor=(3., 0.5), bbox_transform=axs[2].transAxes)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_variables_sigmas.pdf", bbox_inches = "tight")
# %%
# Plot elasticities
fig, axs = plt.subplots(1, 2, figsize=(8,4), sharex=True)
alpha = 0.6
lw = 3.
min_y = np.min(np.concatenate((partial_elasts(1,0), partial_elasts(1,1), partial_elasts(1,2), partial_elasts(1,3), partial_elasts(1,4), full_elasts(1,0), full_elasts(1,1), full_elasts(1,2), full_elasts(1,3), full_elasts(1,4)))) - 0.2
max_y = np.max(np.concatenate((partial_elasts(1,0), partial_elasts(1,1), partial_elasts(1,2), partial_elasts(1,3), partial_elasts(1,4), full_elasts(1,0), full_elasts(1,1), full_elasts(1,2), full_elasts(1,3), full_elasts(1,4)))) + 0.2
for i in range(5):
axs[0].plot(num_firms_array, partial_elasts(1,i)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha, label=f"{sigmas[i]} partial")
axs[0].plot(num_firms_array, full_elasts(1,i)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha, linestyle="--", label=f" full")
axs[0].set_xlabel("number of firms")
axs[0].set_ylim((min_y, max_y))
axs[0].set_title("$\\bar{d} = 2\\,000$ MB plan", fontsize=12)
for i in range(5):
axs[1].plot(num_firms_array, partial_elasts(1,i)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[1].plot(num_firms_array, full_elasts(1,i)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha, linestyle="--")
axs[1].set_xlabel("number of firms")
axs[1].set_ylim((min_y, max_y))
axs[1].set_title("$\\bar{d} = 10\\,000$ MB plan", fontsize=12)
fig.legend(loc="center right", ncol=1, title="Nesting Parameters", fontsize=12, bbox_to_anchor=(1.565, 0.5), bbox_transform=axs[1].transAxes)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_elasticities_sigmas.pdf", bbox_inches = "tight")
# %%
# Plot bw derivatives
fig, axs = plt.subplots(1, 3, figsize=(11,4), sharex=True)
for i in range(5):
axs[0].plot(num_firms_array, partial_Pif_partial_bf(1,i), color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha, label=f"{sigmas[i]}")
axs[0].set_xlabel("number of firms")
axs[0].set_ylabel("\u20ac per person in market / MHz")
axs[0].set_title("$\\frac{\\partial \\Pi_{f}}{\\partial b_{f}}$", fontsize=17, y=1.05)
for i in range(5):
axs[1].plot(num_firms_array, partial_Pif_partial_b(1,i), color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[1].set_xlabel("number of firms")
axs[1].set_ylabel("\u20ac per person in market / MHz")
axs[1].set_title("$\\frac{\\partial \\Pi_{f}}{\\partial b}$", fontsize=17, y=1.05)
for i in range(5):
axs[2].plot(num_firms_array, partial_CS_partial_b(1,i), color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[2].set_xlabel("number of firms")
axs[2].set_ylabel("\u20ac per person in market / MHz")
axs[2].set_title("$\\frac{\\partial CS}{\\partial b}$", fontsize=17, y=1.05)
fig.legend(loc="center right", ncol=1, title="Nesting Parameters", fontsize=12, bbox_to_anchor=(1.75, 0.5), bbox_transform=axs[2].transAxes)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_bw_deriv_sigmas.pdf", bbox_inches = "tight")
# %%
# Plot welfare for number of firms
fig, axs = plt.subplots(1, 3, figsize=(10,4), sharex=True)
alpha = 0.6
lw = 3.
for i in range(5):
axs[0].plot(num_firms_array, cs(1,i) / 10000., color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha, label=f"{sigmas[i]}")
axs[0].axvline(x=num_firms_array[np.argmax(cs(1,i))] + (-1. * (i/5.) + 1. * ((5.-i)/5.)) * 0.15, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], linestyle="--", alpha=0.75 * alpha)
axs[0].set_xlabel("number of firms")
axs[0].set_ylabel("$10\\,000$ \u20ac")
axs[0].set_title("consumer surplus", fontsize=12)
for i in range(5):
axs[1].plot(num_firms_array, ps(1,i) / 10000., color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[1].axvline(x=num_firms_array[np.argmax(ps(1,i))] + (-1. * (i/5.) + 1. * ((5.-i)/5.)) * 0.15, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], linestyle="--", alpha=0.75 * alpha)
axs[1].set_xlabel("number of firms")
axs[1].set_ylabel("$10\\,000$ \u20ac")
axs[1].set_title("producer surplus", fontsize=12)
for i in range(5):
axs[2].plot(num_firms_array, ts(1,i) / 10000., color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], lw=lw, alpha=alpha)
axs[2].axvline(x=num_firms_array[np.argmax(ts(1,i))] + (-1. * (i/5.) + 1. * ((5.-i)/5.)) * 0.15, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][i], linestyle="--", alpha=0.75 * alpha)
axs[2].set_xlabel("number of firms")
axs[2].set_ylabel("$10\\,000$ \u20ac")
axs[2].set_title("total surplus", fontsize=12)
fig.legend(loc="center right", ncol=1, title="Nesting Parameters", fontsize=12, bbox_to_anchor=(1.8, 0.5), bbox_transform=axs[2].transAxes)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_welfare_sigmas.pdf", bbox_inches = "tight")
# %%
# Plot consumer surplus by type for number of firms
fig, axs = plt.subplots(1, 5, figsize=(15,4.5), sharex=True)
for i in range(5):
for j in range(5):
axs[i].plot(num_firms_array, cs_by_type(1,j)[:,2*i], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][j], lw=lw, alpha=alpha, label=f"{sigmas[j]}" if i == 0 else None)
axs[i].axvline(x=num_firms_array[np.argmax(cs_by_type(1,j)[:,2*i])] + (-1. * (j/5.) + 1. * ((5.-j)/5.)) * 0.15, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][j], linestyle="--", alpha=0.75 * alpha)
axs[i].set_xlabel("number of firms")
axs[i].set_ylabel("\u20ac")
axs[i].set_title(f"{((2*i)+1)*10}th percentile", fontsize=12)
fig.legend(loc="center right", ncol=1, title="Nesting Parameters", fontsize=12, bbox_to_anchor=(4.5, 0.5), bbox_transform=axs[2].transAxes)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_cs_by_income_sigmas.pdf", bbox_inches = "tight")
| [
"matplotlib.pyplot.savefig",
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.load",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((386, 414), 'numpy.array', 'np.array', (['[-4.0, -2.5, -1.8]'], {}), '([-4.0, -2.5, -1.8])\n', (394, 414), True, 'import numpy as np\n'), ((423, 463), 'numpy.array', 'np.array', (['[0.0, 0.2, 0.4, 0.6, 0.8, 0.9]'], {}), '([0.0, 0.2, 0.4, 0.6, 0.8, 0.9])\n', (431, 463), True, 'import numpy as np\n'), ((2776, 2868), 'matplotlib.pyplot.subplots', 'plt.subplots', (['elast_ids.shape[0]', '(4)'], {'figsize': '(15, 3.5 * elast_ids.shape[0])', 'sharex': '(True)'}), '(elast_ids.shape[0], 4, figsize=(15, 3.5 * elast_ids.shape[0]),\n sharex=True)\n', (2788, 2868), True, 'import matplotlib.pyplot as plt\n'), ((6640, 6658), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6656, 6658), True, 'import matplotlib.pyplot as plt\n'), ((6660, 6749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_variables.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_variables.pdf', bbox_inches\n ='tight')\n", (6671, 6749), True, 'import matplotlib.pyplot as plt\n'), ((6785, 6876), 'matplotlib.pyplot.subplots', 'plt.subplots', (['elast_ids.shape[0]', '(2)'], {'figsize': '(8, 3.5 * elast_ids.shape[0])', 'sharex': '(True)'}), '(elast_ids.shape[0], 2, figsize=(8, 3.5 * elast_ids.shape[0]),\n sharex=True)\n', (6797, 6876), True, 'import matplotlib.pyplot as plt\n'), ((8967, 8985), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8983, 8985), True, 'import matplotlib.pyplot as plt\n'), ((8987, 9078), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_elasticities.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_elasticities.pdf',\n bbox_inches='tight')\n", (8998, 9078), True, 'import matplotlib.pyplot as plt\n'), ((9117, 9209), 'matplotlib.pyplot.subplots', 'plt.subplots', (['elast_ids.shape[0]', '(3)'], {'figsize': '(11, 3.5 * elast_ids.shape[0])', 'sharex': '(True)'}), '(elast_ids.shape[0], 3, figsize=(11, 3.5 * elast_ids.shape[0]),\n sharex=True)\n', (9129, 9209), True, 'import matplotlib.pyplot as plt\n'), ((12743, 12761), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12759, 12761), True, 'import matplotlib.pyplot as plt\n'), ((12763, 12851), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_bw_deriv.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_bw_deriv.pdf', bbox_inches=\n 'tight')\n", (12774, 12851), True, 'import matplotlib.pyplot as plt\n'), ((12902, 12994), 'matplotlib.pyplot.subplots', 'plt.subplots', (['elast_ids.shape[0]', '(3)'], {'figsize': '(11, 3.5 * elast_ids.shape[0])', 'sharex': '(True)'}), '(elast_ids.shape[0], 3, figsize=(11, 3.5 * elast_ids.shape[0]),\n sharex=True)\n', (12914, 12994), True, 'import matplotlib.pyplot as plt\n'), ((16229, 16247), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16245, 16247), True, 'import matplotlib.pyplot as plt\n'), ((16249, 16336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_welfare.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_welfare.pdf', bbox_inches=\n 'tight')\n", (16260, 16336), True, 'import matplotlib.pyplot as plt\n'), ((16404, 16496), 'matplotlib.pyplot.subplots', 'plt.subplots', (['elast_ids.shape[0]', '(5)'], {'figsize': '(15, 2.5 * elast_ids.shape[0])', 'sharex': '(True)'}), '(elast_ids.shape[0], 5, figsize=(15, 2.5 * elast_ids.shape[0]),\n sharex=True)\n', (16416, 16496), 
True, 'import matplotlib.pyplot as plt\n'), ((18015, 18033), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18031, 18033), True, 'import matplotlib.pyplot as plt\n'), ((18035, 18126), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_cs_by_income.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_cs_by_income.pdf',\n bbox_inches='tight')\n", (18046, 18126), True, 'import matplotlib.pyplot as plt\n'), ((18269, 18317), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(14, 4)', 'sharex': '(True)'}), '(1, 4, figsize=(14, 4), sharex=True)\n', (18281, 18317), True, 'import matplotlib.pyplot as plt\n'), ((20180, 20198), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20196, 20198), True, 'import matplotlib.pyplot as plt\n'), ((20200, 20295), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_variables_sigmas.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_variables_sigmas.pdf',\n bbox_inches='tight')\n", (20211, 20295), True, 'import matplotlib.pyplot as plt\n'), ((20332, 20379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)', 'sharex': '(True)'}), '(1, 2, figsize=(8, 4), sharex=True)\n', (20344, 20379), True, 'import matplotlib.pyplot as plt\n'), ((21948, 21966), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21964, 21966), True, 'import matplotlib.pyplot as plt\n'), ((21968, 22066), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_elasticities_sigmas.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_elasticities_sigmas.pdf',\n bbox_inches='tight')\n", (21979, 22066), True, 'import matplotlib.pyplot as plt\n'), ((22105, 22153), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(11, 4)', 'sharex': '(True)'}), '(1, 3, figsize=(11, 4), sharex=True)\n', (22117, 22153), True, 'import matplotlib.pyplot as plt\n'), ((23324, 23342), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23340, 23342), True, 'import matplotlib.pyplot as plt\n'), ((23344, 23438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_bw_deriv_sigmas.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_bw_deriv_sigmas.pdf',\n bbox_inches='tight')\n", (23355, 23438), True, 'import matplotlib.pyplot as plt\n'), ((23490, 23538), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 4)', 'sharex': '(True)'}), '(1, 3, figsize=(10, 4), sharex=True)\n', (23502, 23538), True, 'import matplotlib.pyplot as plt\n'), ((25141, 25159), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25157, 25159), True, 'import matplotlib.pyplot as plt\n'), ((25161, 25254), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_welfare_sigmas.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_welfare_sigmas.pdf',\n bbox_inches='tight')\n", (25172, 25254), True, 'import matplotlib.pyplot as plt\n'), ((25323, 25373), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(15, 4.5)', 'sharex': '(True)'}), '(1, 5, figsize=(15, 4.5), sharex=True)\n', (25335, 25373), True, 'import matplotlib.pyplot as plt\n'), ((26104, 26122), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), 
'()\n', (26120, 26122), True, 'import matplotlib.pyplot as plt\n'), ((26124, 26222), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{paths.graphs_path}counterfactual_cs_by_income_sigmas.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{paths.graphs_path}counterfactual_cs_by_income_sigmas.pdf',\n bbox_inches='tight')\n", (26135, 26222), True, 'import matplotlib.pyplot as plt\n'), ((153, 167), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (160, 167), True, 'import matplotlib as mpl\n'), ((526, 578), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}p_stars_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}p_stars_e{x}_n{y}.npy')\n", (533, 578), True, 'import numpy as np\n'), ((601, 653), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}R_stars_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}R_stars_e{x}_n{y}.npy')\n", (608, 653), True, 'import numpy as np\n'), ((676, 728), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}q_stars_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}q_stars_e{x}_n{y}.npy')\n", (683, 728), True, 'import numpy as np\n'), ((754, 809), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}cs_by_type_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}cs_by_type_e{x}_n{y}.npy')\n", (761, 809), True, 'import numpy as np\n'), ((827, 874), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}cs_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}cs_e{x}_n{y}.npy')\n", (834, 874), True, 'import numpy as np\n'), ((892, 939), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}ps_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}ps_e{x}_n{y}.npy')\n", (899, 939), True, 'import numpy as np\n'), ((957, 1004), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}ts_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}ts_e{x}_n{y}.npy')\n", (964, 1004), True, 'import numpy as np\n'), ((1034, 1093), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_elasts_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_elasts_e{x}_n{y}.npy')\n", (1041, 1093), True, 'import numpy as np\n'), ((1120, 1176), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}full_elasts_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}full_elasts_e{x}_n{y}.npy')\n", (1127, 1176), True, 'import numpy as np\n'), ((1214, 1281), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_Pif_partial_bf_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_Pif_partial_bf_e{x}_n{y}.npy')\n", (1221, 1281), True, 'import numpy as np\n'), ((1318, 1384), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_Pif_partial_b_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_Pif_partial_b_e{x}_n{y}.npy')\n", (1325, 1384), True, 'import numpy as np\n'), ((1420, 1485), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_CS_partial_b_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_CS_partial_b_e{x}_n{y}.npy')\n", (1427, 1485), True, 'import numpy as np\n'), ((1512, 1567), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}p_stars_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}p_stars_se_e{x}_n{y}.npy')\n", (1519, 1567), True, 'import numpy as np\n'), ((1593, 1648), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}R_stars_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}R_stars_se_e{x}_n{y}.npy')\n", (1600, 1648), True, 'import numpy as np\n'), ((1674, 1729), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}q_stars_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}q_stars_se_e{x}_n{y}.npy')\n", (1681, 1729), True, 'import numpy as np\n'), ((1758, 1816), 'numpy.load', 'np.load', 
(['f"""{paths.arrays_path}cs_by_type_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}cs_by_type_se_e{x}_n{y}.npy')\n", (1765, 1816), True, 'import numpy as np\n'), ((1837, 1887), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}cs_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}cs_se_e{x}_n{y}.npy')\n", (1844, 1887), True, 'import numpy as np\n'), ((1908, 1958), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}ps_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}ps_se_e{x}_n{y}.npy')\n", (1915, 1958), True, 'import numpy as np\n'), ((1979, 2029), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}ts_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}ts_se_e{x}_n{y}.npy')\n", (1986, 2029), True, 'import numpy as np\n'), ((2062, 2124), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_elasts_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_elasts_se_e{x}_n{y}.npy')\n", (2069, 2124), True, 'import numpy as np\n'), ((2154, 2213), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}full_elasts_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}full_elasts_se_e{x}_n{y}.npy')\n", (2161, 2213), True, 'import numpy as np\n'), ((2254, 2324), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_Pif_partial_bf_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_Pif_partial_bf_se_e{x}_n{y}.npy')\n", (2261, 2324), True, 'import numpy as np\n'), ((2364, 2433), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_Pif_partial_b_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_Pif_partial_b_se_e{x}_n{y}.npy')\n", (2371, 2433), True, 'import numpy as np\n'), ((2472, 2540), 'numpy.load', 'np.load', (['f"""{paths.arrays_path}partial_CS_partial_b_se_e{x}_n{y}.npy"""'], {}), "(f'{paths.arrays_path}partial_CS_partial_b_se_e{x}_n{y}.npy')\n", (2479, 2540), True, 'import numpy as np\n'), ((2622, 2665), 'numpy.arange', 'np.arange', (['num_firms_to_simulate'], {'dtype': 'int'}), '(num_firms_to_simulate, dtype=int)\n', (2631, 2665), True, 'import numpy as np\n'), ((2682, 2698), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2690, 2698), True, 'import numpy as np\n'), ((18209, 18252), 'numpy.arange', 'np.arange', (['num_firms_to_simulate'], {'dtype': 'int'}), '(num_firms_to_simulate, dtype=int)\n', (18218, 18252), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
from openprompt.utils.logging import logger
from typing import *
def set_seed(seed: Optional[int] = None):
"""set seed for reproducibility
Args:
        seed (:obj:`int`): the seed used to seed everything for reproducibility; if None, do nothing.
"""
    if seed is not None:  # explicit check, so the valid seed 0 is not skipped
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
logger.info(f"Global seed set to {seed}")
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"random.seed",
"openprompt.utils.logging.logger.info",
"numpy.random.seed"
] | [((328, 345), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (339, 345), False, 'import random\n'), ((354, 374), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (368, 374), True, 'import numpy as np\n'), ((383, 406), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (400, 406), False, 'import torch\n'), ((415, 447), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (441, 447), False, 'import torch\n'), ((456, 497), 'openprompt.utils.logging.logger.info', 'logger.info', (['f"""Global seed set to {seed}"""'], {}), "(f'Global seed set to {seed}')\n", (467, 497), False, 'from openprompt.utils.logging import logger\n')] |
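A usage sketch for set_seed above: re-seeding reproduces the same random draws (assumes the function above is in scope).

import torch

set_seed(42)
a = torch.rand(3)
set_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)  # re-seeding makes the draws repeatable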